mirror of
https://github.com/ROCm/composable_kernel.git
synced 2026-05-14 18:17:44 +00:00
* [fix] align v3 gufusion pipeline
* fix device kernel selection.
* Add .co direct asm support by CK_USE_ASM_MOE_STAGE2_BLOCKSCALE
* experimental optimization for scale load in blkscale gemm
* Add asm for no-loop v3_128x128x128
* fix bugs
* tune fp8 example
* Update v1_128x128x128 to 2x2 instead of 4x1
* wip
* add warmup to asm launch
* wip2
* 16x16 function merged to moe
* temp save, a performant version.
* wip3
* Update .co binary to 16x16
* 16x16x128 correct; 64x64x128 failed
* update
* use mem_op::set when topk=1
* add mx fp8 b_preshuffle support, function not yet tested.
* Split the fp4 target. Fix the known bugs. 128x128x128 sanity checked; remove prints
* some fixes
* fix update
* remove some unnecessary hacky; enable 256x256x256 tilesize
* update for function debug
* Add pipeline v3. Have some runtime issue and register spill
* Fix pipe v3 correctness issue
* remove unnecessary hacky
* clang format
* fix a bug
* fix the bug, functional test passed
* tempsave; buggy at passed 4 e8m0 to scaled mfma
* added fp4_bpreshuffle example, build failures
* fixed some bugs
* implement shuffled scale mxfp4gemm, blocker: opsel not effect
* hotfix
* fix bugs, build passed
* (M, N, K)=(128, 128, 128) function failed.
* temp save for gemm1. Function not ready
* fix compile error. Gemm2 pass. Gemm1 WIP
* fix bug for a lds read
* update moe
* Compile pass. Gemm1 function WIP
* update moe
* fix fp8; fix even/odd
* tempsave
* update moe
* Revert "update"
This reverts commit c7d79dcb672616d9bc0fd9958f714fc80e7c84fd.
* Revert "use mem_op::set when topk=1"
This reverts commit 8c7772860735001a51421e7b6d0a28f6676d6c40.
* Add v3 128x128x128_4x4_16x16.co for gfx950
* temp cmake flag suppression for aiter test
* add code for mxfp4 gemm, blockscale not supported yet
* gemm1 up-only pass. GU WIP
* function pass with inline asm hacky
* revert unexpected file change
* updated and build passed
* update CE elementOP
* added code for debug
* Gemm1 GUFusion function pass. Perf WIP
* Fix fp8/bf8; remove duplicated code
* disable the scheduler in v3; bring it back when compiler feature ready.
* update moe v1 pipeline
* Add gemm1 v1 32x128x128
* remove schedule barrier
* updated
* Fix fp8/bf8 B-row
* mfma using asm, device result correct, host result need to check
* gemm1 v3 64x128x128 debug
* fix cpu ref
* a/b thread_desc stride fix
* Use random scale for init1
* 16x16x128 input size blockscale function passed
* fix blockscale gemm bug
* tempsave. Almost all instances passed.
* v1 fix for mi350.
* temp save
* debug save
* update debug
* fix the bug, 128x128x256 tile function passed
* v3
* rename moe block selector and pipeline
* Add gemm1 v1
* Add gemm1 v1 to selector
* added mx moe block v3 support, function passed
* compile error fix
* Improve the pipeline
* Pack e8m0 as int32_t
* v1 compile pass. Function not ready
* debug synchronize issue over different GPU/ROCm
* minor fix
* Add profiler filter
* Add f4 ckProfiler
* Fix example compile error
* Add f4 profiler examples
* tempsave
* v1 function pass.
* v3 function pass
* align file and function name
* mx_moe_fp4 ready for aiter with clang-format.
* modify the way we represent fp4
* generalize the pipeline scheduling.
* init moe mx f4 scale shuffle
* Cmakelist disable compiler-bound flags
* mx_fp4 default parameter change
* Moe blockscale gemm1&gemm2 asm support for aiter. Suppress cmake flag until new compiler.
* update code
* tempsave; modify the way we represent fp4
* generalize the pipeline scheduling.
* Add gemm1 gfx942 .co support
* updated code, build passed.
* Update gemm2 asm with latest compiler flag
* Fix mx f4 ckProfiler
* Fix blockwise gemm mx v1
* lds conflict free + buffer load lds
* Add gemm2 v3 64x128x128
* fix a, b scale loading bugs, a, b scale loading now correctly
* Add gemm2 v3 64x128x128
* commit with debug info
* fix fp4 profiler
* Add mx fp4 pileline v1 instances
* Fix v2 topk_weight cal. Add silu asm.
* v2 tok_weight WIP
* init mx fp4 B no preshuffle version
* tempsave. compile pass, function wrong
* enable fp4 moe no weight preshuffle, function pass
* update the TFlops calculation in the example
* Add gemm2 64x128x128 asm. Fix BF16 ref.
* fix 2 typos in fp4_preshuffle
* Better kernel selection in device classes
* correct preShuffleBuffer
we should used packed k to do shuffle.
* lds conflict free + buffer load lds
* optimize offset math in dma
* Fix fp4 ckProfiler
* Fix MX MFMA tests
* fix f4 pipeline issues
* gemm1 func pass
* update mx moe gemm1_bns tile size to 64x128x256
* update mx moe gemm1 gemm2 TF and BW calculation
* fix typo
* temp save
* Fix example_gemm_mx build
* rename the block pipeline
* correct a typo in tail
* Add rotating to mx examples
* fix the correctness issue
* Fix v1; use M padding
* Add NT flag to B/BScale buffer
* Merge gemm_mx_common.hpp
* temp save, 4.4~4.5
* Fix 'Merge gemm_mx_common.hpp'
* refactor the pipeline
* Pad the M for scale buffer unconditionally
* update MX moe GEMM1 hotloopscheduling
* change the gemm1 tile from 64x128x128 to 128x64x128
* Unconditional Ascale padding
* Pad shuffled a scale only
* pad ascale
* add vmcnt guard for async copy
* Profiler add f4 wp
* Merge preshuffle device
* Add more fp4 wp instances
* Fix do_weight in gemm1. Fix cshuffle_datatype. Clang-format
* Clang-format after 2 merges
* Remove rocm6.3 workaround flags and macro
* Fix fp8 config
* Fix bf8 config
* flag and barrier fix for compiler branch MainOpSelV3
* Add fp8 profiler instances
* Remove debug infos; Enable flags for blockscale f8
* No asm ver. for merging moe blockscale fp8 into mainline
* update the flag name for f8blockscale
* recover example
* fix performance bug of bpreshuffle f8 gemm
* clang format, remove single rate mfma restriction for f8
* remove single rate mfma restriction for f8 blockscale gemm
* Fix moe blockscale gemm1 barrier 0x800 for new compiler
* add pipeline v1 for MOE Gemm2
* Use v1 pipeline for example_moe_gemm2_xdl_mx_fp4_bns
* Fix OOB; add MB96 instances
* remove unnecessary files
* fix the cmake issue
* Enable splitk for mxfp4; clang format;
* Generate random tensor values with multiple threads
* Use packed_size_v for A/BPackedSize
* Fix warning
* Fix target_compile_options for disabled target on gfx942
* fix moe pki4 on gfx950
* doc the kGroup definition
* Fix ThreadwiseTensorSliceTransfer_v4::Run (Fuse scale)
* Refactor thread_copy_lds_direct_load; fix gfx942 direct lds load example; fix f16_pki4 example
* Fix unknown compiler flag
* fix two failed examples.
* fix some failure tile size in gfx950 universal gemm. fix test_gemm_fp16
* workaround fix for test_gemm_f32; * We have very limited support for lds direct load if input matrix is not K major
* fix test_gemm_splitk;
* Fix compile for mx_mfma_op
* add mfma selection logic for multipled_v3
* Clean up
* Fix device gemm mx link error
* improve the global atomic pattern
* Revert unnecessary copyright updates
* restore minimum_occupancy logic
* Avoid data race in moe gemm2 ref
* Build fp8 gemm_multiply_multiply and moe only on gfx94/95
* update the instance in device_mx_gemm
* Resolve comments
* Copyright 2025
* Remove unused code
* fix library linking issue
---------
Co-authored-by: OscarXu <huaiguxu@amd.com>
Co-authored-by: lalala-sh <Jiaxing.Wen@amd.com>
Co-authored-by: mtgu0705 <mtgu@amd.com>
Co-authored-by: aska-0096 <haocwang@amd.com>
Co-authored-by: Your Name <you@example.com>
Co-authored-by: valarLip <340077269@qq.com>
Co-authored-by: feifei14119 <feiw@amd.com>
Co-authored-by: Lin, Qun <qlin@amd.com>
Co-authored-by: Andriy Roshchenko <andriy.roshchenko@amd.com>
Co-authored-by: joye <joye@amd.com>
Co-authored-by: asleepzzz <hanwen.chang@amd.com>
[ROCm/composable_kernel commit: 37554c31e8]
185 lines
6.8 KiB
C++
185 lines
6.8 KiB
C++
// SPDX-License-Identifier: MIT
|
|
// Copyright (c) 2025, Advanced Micro Devices, Inc. All rights reserved.
|
|
|
|
#include <iostream>
|
|
#include <numeric>
|
|
#include <initializer_list>
|
|
#include <cstdlib>
|
|
|
|
#include "profiler/profile_gemm_blockscale_wp_impl.hpp"
|
|
#include "profiler_operation_registry.hpp"
|
|
|
|
// Matrix layout combinations selectable from the CLI ("arg3" in the usage
// text). Values mirror the numeric selector passed on the command line.
enum struct GemmMatrixLayout
{
    MK_KN_MN = 0, // A[m, k] * B[k, n] = C[m, n]
    MK_NK_MN = 1, // A[m, k] * B[n, k] = C[m, n]
    KM_KN_MN = 2, // A[k, m] * B[k, n] = C[m, n]
    KM_NK_MN = 3, // A[k, m] * B[n, k] = C[m, n]
};
|
// Data-type combinations selectable from the CLI ("arg2" in the usage text).
// Values mirror the numeric selector passed on the command line.
enum struct GemmDataType
{
    F32_F32_F32    = 0, // fp32 in, fp32 out
    F16_F16_F16    = 1, // fp16 in, fp16 out
    BF16_BF16_BF16 = 2, // bf16 in, bf16 out
    INT8_INT8_INT8 = 3, // int8 in, int8 out
    F8_F16_F16     = 4, // f8 A with f16 B
    F16_F8_F16     = 5, // f16 A with f8 B
    F16_F16_F16_F8 = 6, // f16 in, f8 compute
    F8_F8_BF16     = 7, // f8 in, bf16 out, f8 compute
};
|
// Scale-block tile granularity selectable from the CLI ("arg4" in the usage
// text). Values mirror the numeric selector passed on the command line.
enum struct ScaleBlockTile
{
    Tile_128_128_128 = 0, // ScaleBlockM/N/K = [128, 128, 128]
    Tile_1_128_128   = 1, // ScaleBlockM/N/K = [1, 128, 128]
};
|
// Operation name/description for profiler registration and the usage banner.
// Kept as macros (not constexpr) so they concatenate with adjacent string
// literals inside printf calls below.
#define OP_NAME "gemm_blockscale_wp"
#define OP_DESC "GEMM_BlockScale_WeightPreshuffle"
|
int profile_gemm_blockscale_weighpreshuffle(int argc, char* argv[])
|
|
{
|
|
if(argc != 15 && argc != 18)
|
|
{
|
|
printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
|
|
printf("arg2: data type (0: fp32; 1: fp16; 2: bf16; 3: int8; 4: f8@f16; 5: f16@f8; 6: "
|
|
"f16->f8; 7: f8->bf16, "
|
|
"comp f8)\n");
|
|
printf("arg3: matrix layout (0: A[m, k] * B[k, n] = C[m, n];\n");
|
|
printf(" 1: A[m, k] * B[n, k] = C[m, n];\n");
|
|
printf(" 2: A[k, m] * B[k, n] = C[m, n];\n");
|
|
printf(" 3: A[k, m] * B[n, k] = C[m, n])\n");
|
|
printf("arg4: scale block tile (0: ScaleBlockM/N/K = [128, 128, 128]; 1: ScaleBlockM/N/K = "
|
|
"[1, 128, 128];\n");
|
|
printf("arg5: verification (0: no; 1: yes)\n");
|
|
printf("arg6: initialization (0: no init; 1: integer value; 2: decimal value)\n");
|
|
printf("arg7: print tensor value (0: no; 1: yes)\n");
|
|
printf("arg8: time kernel (0=no, 1=yes)\n");
|
|
printf("arg9 to 14: M, N, K, StrideA, StrideB, StrideE\n");
|
|
printf("optional:\n");
|
|
printf("arg15: number of warm-up cycles (default 1)\n");
|
|
printf("arg16: number of iterations (default 10)\n");
|
|
printf("arg17: memory for rotating buffer (default 0, size in MB)\n");
|
|
exit(1);
|
|
}
|
|
|
|
const auto data_type = static_cast<GemmDataType>(std::stoi(argv[2]));
|
|
const auto layout = static_cast<GemmMatrixLayout>(std::stoi(argv[3]));
|
|
const auto scale_block_tile = static_cast<ScaleBlockTile>(std::stoi(argv[4]));
|
|
const bool do_verification = std::stoi(argv[5]);
|
|
const int init_method = std::stoi(argv[6]);
|
|
const bool do_log = std::stoi(argv[7]);
|
|
const bool time_kernel = std::stoi(argv[8]);
|
|
|
|
const int M = std::stoi(argv[9]);
|
|
const int N = std::stoi(argv[10]);
|
|
const int K = std::stoi(argv[11]);
|
|
|
|
const int StrideA = std::stoi(argv[12]);
|
|
const int StrideB = std::stoi(argv[13]);
|
|
const int StrideE = std::stoi(argv[14]);
|
|
|
|
int n_warmup = 1;
|
|
int n_iter = 10;
|
|
uint64_t rotating = 0;
|
|
if(argc == 18)
|
|
{
|
|
n_warmup = std::stoi(argv[15]);
|
|
n_iter = std::stoi(argv[16]);
|
|
rotating = std::stoull(argv[17]) * 1024 * 1024;
|
|
}
|
|
|
|
using F32 = float;
|
|
using BF16 = ck::bhalf_t;
|
|
using F8 = ck::f8_t;
|
|
|
|
using Row = ck::tensor_layout::gemm::RowMajor;
|
|
using Col = ck::tensor_layout::gemm::ColumnMajor;
|
|
|
|
auto profile = [&](auto a0_type,
|
|
auto a1_type,
|
|
auto b0_type,
|
|
auto b1_type,
|
|
auto comp_type,
|
|
auto acc_type,
|
|
auto c_type,
|
|
auto scale_block_m,
|
|
auto scale_block_n,
|
|
auto scale_block_k,
|
|
auto a_layout,
|
|
auto b_layout,
|
|
auto e_layout) {
|
|
using A0DataType = decltype(a0_type);
|
|
using A1DataType = decltype(a1_type);
|
|
using B0DataType = decltype(b0_type);
|
|
using B1DataType = decltype(b1_type);
|
|
using ComputeDataType = decltype(comp_type);
|
|
using AccDataType = decltype(acc_type);
|
|
using EDataType = decltype(c_type);
|
|
|
|
using ALayout = decltype(a_layout);
|
|
using BLayout = decltype(b_layout);
|
|
using ELayout = decltype(e_layout);
|
|
|
|
const int DefaultStrideA = ck::is_same_v<ALayout, Row> ? K : M;
|
|
const int DefaultStrideB = ck::is_same_v<BLayout, Row> ? N : K;
|
|
const int DefaultStrideE = ck::is_same_v<ELayout, Row> ? N : M;
|
|
|
|
bool pass = ck::profiler::profile_gemm_blockscale_weighpreshuffle_impl<A0DataType,
|
|
A1DataType,
|
|
B0DataType,
|
|
B1DataType,
|
|
ComputeDataType,
|
|
AccDataType,
|
|
EDataType,
|
|
scale_block_m,
|
|
scale_block_n,
|
|
scale_block_k,
|
|
ALayout,
|
|
BLayout,
|
|
ELayout>(
|
|
do_verification,
|
|
init_method,
|
|
do_log,
|
|
time_kernel,
|
|
M,
|
|
N,
|
|
K,
|
|
(StrideA < 0) ? DefaultStrideA : StrideA,
|
|
(StrideB < 0) ? DefaultStrideB : StrideB,
|
|
(StrideE < 0) ? DefaultStrideE : StrideE,
|
|
n_warmup,
|
|
n_iter,
|
|
rotating);
|
|
|
|
return pass ? 0 : 1;
|
|
};
|
|
|
|
if(data_type == GemmDataType::F8_F8_BF16 && layout == GemmMatrixLayout::MK_NK_MN &&
|
|
scale_block_tile == ScaleBlockTile::Tile_1_128_128)
|
|
{
|
|
return profile(F8{},
|
|
F32{},
|
|
F8{},
|
|
F32{},
|
|
F8{},
|
|
F32{},
|
|
BF16{},
|
|
ck::Number<1>{},
|
|
ck::Number<128>{},
|
|
ck::Number<128>{},
|
|
Row{},
|
|
Col{},
|
|
Row{});
|
|
}
|
|
else
|
|
{
|
|
std::cout << "this data_type & layout is not implemented" << std::endl;
|
|
|
|
return 1;
|
|
}
|
|
}
|
|
|
|
// Register this operation with the ckProfiler dispatcher under OP_NAME so it
// is selectable as "arg1" on the profiler command line.
REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_gemm_blockscale_weighpreshuffle);