mirror of
https://github.com/ROCm/composable_kernel.git
synced 2026-05-14 02:02:46 +00:00
* [fix] align v3 gufusion pipeline
* fix device kernel selection.
* Add .co direct asm support by CK_USE_ASM_MOE_STAGE2_BLOCKSCALE
* experimental optimization for scale load in blkscale gemm
* Add asm for no-loop v3_128x128x128
* fix bugs
* tune fp8 example
* Update v1_128x128x128 to 2x2 instead of 4x1
* wip
* add warmup to asm launch
* wip2
* 16x16 function merged to moe
* temp save, a performant version.
* wip3
* Update .co binary to 16x16
* 16x16x128 correct; 64x64x128 failed
* update
* use mem_op::set when topk=1
* add mx fp8 b_preshuffle support, function not yet tested.
* Split the fp4 target. Fix the known bugs. 128x128x128 sanity checked; remove prints
* some fixes
* fix update
* remove some unnecessary hacky; enable 256x256x256 tilesize
* update for function debug
* Add pipeline v3. Have some runtime issue and register spill
* Fix pipe v3 correctness issue
* remove unnecessary hacky
* clang format
* fix a bug
* fix the bug, functional test passed
* tempsave; buggy at passed 4 e8m0 to scaled mfma
* added fp4_bpreshuffle example, build failures
* fixed some bugs
* implement shuffled scale mxfp4gemm, blocker: opsel not effect
* hotfix
* fix bugs, build passed
* (M, N, K)=(128, 128, 128) function failed.
* temp save for gemm1. Function not ready
* fix compile error. Gemm2 pass. Gemm1 WIP
* fix bug for a lds read
* update moe
* Compile pass. Gemm1 function WIP
* update moe
* fix fp8; fix even/odd
* tempsave
* update moe
* Revert "update"
This reverts commit c7d79dcb672616d9bc0fd9958f714fc80e7c84fd.
* Revert "use mem_op::set when topk=1"
This reverts commit 8c7772860735001a51421e7b6d0a28f6676d6c40.
* Add v3 128x128x128_4x4_16x16.co for gfx950
* temp cmake flag suppression for aiter test
* add code for mxfp4 gemm, blockscale not supported yet
* gemm1 up-only pass. GU WIP
* function pass with inline asm hacky
* revert unexpected file change
* updated and build passed
* update CE elementOP
* added code for debug
* Gemm1 GUFusion function pass. Perf WIP
* Fix fp8/bf8; remove duplicated code
* disable the scheduler in v3; bring it back when compiler feature ready.
* update moe v1 pipeline
* Add gemm1 v1 32x128x128
* remove schedule barrier
* updated
* Fix fp8/bf8 B-row
* mfma using asm, device result correct, host result need to check
* gemm1 v3 64x128x128 debug
* fix cpu ref
* a/b thread_desc stride fix
* Use random scale for init1
* 16x16x128 input size blockscale function passed
* fix blockscale gemm bug
* tempsave. Almost all instances passed.
* v1 fix for mi350.
* temp save
* debug save
* update debug
* fix the bug, 128x128x256 tile function passed
* v3
* rename moe block selector and pipeline
* Add gemm1 v1
* Add gemm1 v1 to selector
* added mx moe block v3 support, function passed
* compile error fix
* Improve the pipeline
* Pack e8m0 as int32_t
* v1 compile pass. Function not ready
* debug synchronize issue over different GPU/ROCm
* minor fix
* Add profiler filter
* Add f4 ckProfiler
* Fix example compile error
* Add f4 profiler examples
* tempsave
* v1 function pass.
* v3 function pass
* align file and function name
* mx_moe_fp4 ready for aiter with clang-format.
* modify the way we represent fp4
* generalize the pipeline scheduling.
* init moe mx f4 scale shuffle
* Cmakelist disable compiler-bound flags
* mx_fp4 default parameter change
* Moe blockscale gemm1&gemm2 asm support for aiter. Suppress cmake flag until new compiler.
* update code
* tempsave; modify the way we represent fp4
* generalize the pipeline scheduling.
* Add gemm1 gfx942 .co support
* updated code, build passed.
* Update gemm2 asm with latest compiler flag
* Fix mx f4 ckProfiler
* Fix blockwise gemm mx v1
* lds conflict free + buffer load lds
* Add gemm2 v3 64x128x128
* fix a, b scale loading bugs, a, b scale loading now correctly
* Add gemm2 v3 64x128x128
* commit with debug info
* fix fp4 profiler
* Add mx fp4 pipeline v1 instances
* Fix v2 topk_weight cal. Add silu asm.
* v2 tok_weight WIP
* init mx fp4 B no preshuffle version
* tempsave. compile pass, function wrong
* enable fp4 moe no weight preshuffle, function pass
* update the TFlops calculation in the example
* Add gemm2 64x128x128 asm. Fix BF16 ref.
* fix 2 typos in fp4_preshuffle
* Better kernel selection in device classes
* correct preShuffleBuffer
we should used packed k to do shuffle.
* lds conflict free + buffer load lds
* optimize offset math in dma
* Fix fp4 ckProfiler
* Fix MX MFMA tests
* fix f4 pipeline issues
* gemm1 func pass
* update mx moe gemm1_bns tile size to 64x128x256
* update mx moe gemm1 gemm2 TF and BW calculation
* fix typo
* temp save
* Fix example_gemm_mx build
* rename the block pipeline
* correct a typo in tail
* Add rotating to mx examples
* fix the correctness issue
* Fix v1; use M padding
* Add NT flag to B/BScale buffer
* Merge gemm_mx_common.hpp
* temp save, 4.4~4.5
* Fix 'Merge gemm_mx_common.hpp'
* refactor the pipeline
* Pad the M for scale buffer unconditionally
* update MX moe GEMM1 hotloopscheduling
* change the gemm1 tile from 64x128x128 to 128x64x128
* Unconditional Ascale padding
* Pad shuffled a scale only
* pad ascale
* add vmcnt guard for async copy
* Profiler add f4 wp
* Merge preshuffle device
* Add more fp4 wp instances
* Fix do_weight in gemm1. Fix cshuffle_datatype. Clang-format
* Clang-format after 2 merges
* Remove rocm6.3 workaround flags and macro
* Fix fp8 config
* Fix bf8 config
* flag and barrier fix for compiler branch MainOpSelV3
* Add fp8 profiler instances
* Remove debug infos; Enable flags for blockscale f8
* No asm ver. for merging moe blockscale fp8 into mainline
* update the flag name for f8blockscale
* recover example
* fix performance bug of bpreshuffle f8 gemm
* clang format, remove single rate mfma restriction for f8
* remove single rate mfma restriction for f8 blockscale gemm
* Fix moe blockscale gemm1 barrier 0x800 for new compiler
* add pipeline v1 for MOE Gemm2
* Use v1 pipeline for example_moe_gemm2_xdl_mx_fp4_bns
* Fix OOB; add MB96 instances
* remove unnecessary files
* fix the cmake issue
* Enable splitk for mxfp4; clang format;
* Generate random tensor values with multiple threads
* Use packed_size_v for A/BPackedSize
* Fix warning
* Fix target_compile_options for disabled target on gfx942
* fix moe pki4 on gfx950
* doc the kGroup definition
* Fix ThreadwiseTensorSliceTransfer_v4::Run (Fuse scale)
* Refactor thread_copy_lds_direct_load; fix gfx942 direct lds load example; fix f16_pki4 example
* Fix unknown compiler flag
* fix two failed examples.
* fix some failure tile size in gfx950 universal gemm. fix test_gemm_fp16
* workaround fix for test_gemm_f32; * We have very limited support for lds direct load if input matrix is not K major
* fix test_gemm_splitk;
* Fix compile for mx_mfma_op
* add mfma selection logic for multipled_v3
* Clean up
* Fix device gemm mx link error
* improve the global atomic pattern
* Revert unnecessary copyright updates
* restore minimum_occupancy logic
* Avoid data race in moe gemm2 ref
* Build fp8 gemm_multiply_multiply and moe only on gfx94/95
* update the instance in device_mx_gemm
* Resolve comments
* Copyright 2025
* Remove unused code
* fix library linking issue
---------
Co-authored-by: OscarXu <huaiguxu@amd.com>
Co-authored-by: lalala-sh <Jiaxing.Wen@amd.com>
Co-authored-by: mtgu0705 <mtgu@amd.com>
Co-authored-by: aska-0096 <haocwang@amd.com>
Co-authored-by: Your Name <you@example.com>
Co-authored-by: valarLip <340077269@qq.com>
Co-authored-by: feifei14119 <feiw@amd.com>
Co-authored-by: Lin, Qun <qlin@amd.com>
Co-authored-by: Andriy Roshchenko <andriy.roshchenko@amd.com>
Co-authored-by: joye <joye@amd.com>
Co-authored-by: asleepzzz <hanwen.chang@amd.com>
[ROCm/composable_kernel commit: 37554c31e8]
107 lines
3.1 KiB
C++
107 lines
3.1 KiB
C++
// SPDX-License-Identifier: MIT
|
|
// Copyright (c) 2018-2025, Advanced Micro Devices, Inc. All rights reserved.
|
|
|
|
#ifndef UTILITY_DEBUG_HPP
|
|
#define UTILITY_DEBUG_HPP
|
|
#include "type.hpp"
|
|
|
|
namespace ck {
|
|
namespace debug {
|
|
|
|
namespace detail {
|
|
// Maps an element type T onto a printf-friendly representation and provides a
// static Print() helper. Only the specializations below are defined, so
// instantiating this primary template with an unsupported T is a compile error.
template <typename T, typename Enable = void>
struct PrintAsType;
|
|
|
|
template <typename T>
|
|
struct PrintAsType<T, typename std::enable_if<std::is_floating_point<T>::value>::type>
|
|
{
|
|
using type = float;
|
|
__host__ __device__ static void Print(const T& p) { printf("%.3f ", static_cast<type>(p)); }
|
|
};
|
|
|
|
template <>
|
|
struct PrintAsType<ck::half_t, void>
|
|
{
|
|
using type = float;
|
|
__host__ __device__ static void Print(const ck::half_t& p)
|
|
{
|
|
printf("%.3f ", static_cast<type>(p));
|
|
}
|
|
};
|
|
|
|
template <typename T>
|
|
struct PrintAsType<T, typename std::enable_if<std::is_integral<T>::value>::type>
|
|
{
|
|
using type = int;
|
|
__host__ __device__ static void Print(const T& p) { printf("%d ", static_cast<type>(p)); }
|
|
};
|
|
} // namespace detail
|
|
|
|
// Print at runtime the data in shared memory in 128 bytes per row format given shared mem pointer
|
|
// and the number of elements. Can optionally specify strides between elements and how many bytes'
|
|
// worth of data per row.
|
|
//
|
|
// Usage example:
|
|
//
|
|
// debug::print_shared(a_block_buf.p_data_, index_t(a_block_desc_k0_m_k1.GetElementSpaceSize()));
|
|
//
|
|
template <typename T, index_t element_stride = 1, index_t row_bytes = 128>
|
|
__device__ void print_shared(T const* p_shared, index_t num_elements)
|
|
{
|
|
constexpr index_t row_elements = row_bytes / sizeof(T);
|
|
static_assert((element_stride >= 1 && element_stride <= row_elements),
|
|
"element_stride should between [1, row_elements]");
|
|
|
|
index_t wgid = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
|
|
index_t tid =
|
|
(threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x;
|
|
|
|
__syncthreads();
|
|
|
|
if(tid == 0)
|
|
{
|
|
printf("\nWorkgroup id %d, bytes per row %d, element stride %d\n\n",
|
|
wgid,
|
|
row_bytes,
|
|
element_stride);
|
|
for(index_t i = 0; i < num_elements; i += row_elements)
|
|
{
|
|
printf("elem %5d: ", i);
|
|
for(index_t j = 0; j < row_elements; j += element_stride)
|
|
{
|
|
detail::PrintAsType<T>::Print(p_shared[i + j]);
|
|
}
|
|
|
|
printf("\n");
|
|
}
|
|
printf("\n");
|
|
}
|
|
|
|
__syncthreads();
|
|
}
|
|
|
|
template <index_t... Ids>
|
|
__device__ static bool is_thread_local_1d_id_idx()
|
|
{
|
|
const auto tid = get_thread_local_1d_id();
|
|
return ((tid == Ids) || ...);
|
|
}
|
|
|
|
// Compile-time inspection helpers.
// Use `CK_PRINT<T1, T2, ...>()` to inspect values of type T1, T2, ...
// Use `CK_PRINT<v1, v2, ...>()` to inspect constexpr values of val1, val2, ... of the same type
// In a non-evaluated context, you can use `using _dummy = decltype(CK_PRINT<...>());`
// Set BUILD_DEV to OFF to avoid enabling Werror
// The [[deprecated]] attribute causes the compiler to emit a warning at each
// instantiation; the diagnostic text includes the deduced template arguments,
// which is how the types/values get "printed" at compile time.
template <auto... val>
[[deprecated("Help function to print value")]] inline constexpr void CK_PRINT()
{
}

template <typename... type>
[[deprecated("Help function to print value")]] inline constexpr void CK_PRINT()
{
}
|
|
|
|
} // namespace debug
|
|
} // namespace ck
|
|
|
|
#endif // UTILITY_DEBUG_HPP
|