// SPDX-License-Identifier: MIT
// Copyright (c) 2024-2025, Advanced Micro Devices, Inc. All rights reserved.

#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <stdexcept>
#include <string>
#include <tuple>
#include <type_traits>

#include "ck_tile/host.hpp"
#include "mx_flatmm.hpp"

template <typename Layout>
static constexpr inline auto is_row_major(Layout layout_)
{
    return ck_tile::bool_constant<
        std::is_same_v<ck_tile::remove_cvref_t<decltype(layout_)>,
                       ck_tile::tensor_layout::gemm::RowMajor>>{};
}

template <typename FlatmmConfig,
          typename ADataType,
          typename BDataType,
          typename AccDataType,
          typename CDataType,
          typename ALayout,
          typename BLayout,
          typename CLayout>
float mx_flatmm_calc(const ck_tile::ScaleFlatmmHostArgs& args, const ck_tile::stream_config& s)
{
    using CodegenFlatmmShape = ck_tile::TileGemmShape<
        ck_tile::sequence<FlatmmConfig::M_Tile, FlatmmConfig::N_Tile, FlatmmConfig::K_Tile>,
        ck_tile::sequence<FlatmmConfig::M_Warp, FlatmmConfig::N_Warp, FlatmmConfig::K_Warp>,
        ck_tile::sequence<FlatmmConfig::M_Warp_Tile,
                          FlatmmConfig::N_Warp_Tile,
                          FlatmmConfig::K_Warp_Tile>>;

    using TilePartitioner =
        ck_tile::GemmSpatiallyLocalTilePartitioner<CodegenFlatmmShape,
                                                   FlatmmConfig::TileParitionerGroupNum,
                                                   FlatmmConfig::TileParitionerM01>;

    using Traits = ck_tile::TileGemmTraits<FlatmmConfig::kPadM,
                                           FlatmmConfig::kPadN,
                                           FlatmmConfig::kPadK,
                                           ALayout,
                                           BLayout,
                                           CLayout>;

    using CodegenGemmTraits = ck_tile::TileGemmUniversalTraits<FlatmmConfig::kPadM,
                                                               FlatmmConfig::kPadN,
                                                               FlatmmConfig::kPadK,
                                                               FlatmmConfig::DoubleSmemBuffer,
                                                               ALayout,
                                                               BLayout,
                                                               CLayout,
                                                               FlatmmConfig::TransposeC>;

    using ComputeDataType = ADataType;
    static_assert(sizeof(ComputeDataType) >= sizeof(BDataType),
                  "mixed_prec_flatmm requires ADataType to be a wider type than BDataType");

    using GemmPipelineProblem = ck_tile::
        GemmPipelineProblem<ADataType, BDataType, AccDataType, CodegenFlatmmShape, Traits>;

    using BaseGemmPipeline = ck_tile::BaseFlatmmPipelineAGmemBGmemCRegV1<GemmPipelineProblem>;

    const ck_tile::index_t k_grain     = args.k_batch * FlatmmConfig::K_Tile;
    const ck_tile::index_t K_split     = (args.K + k_grain - 1) / k_grain * FlatmmConfig::K_Tile;
    const ck_tile::index_t num_loop    = TilePartitioner::GetLoopNum(K_split);
    const bool has_hot_loop            = BaseGemmPipeline::BlockHasHotloop(num_loop);
    const ck_tile::TailNumber tail_num = BaseGemmPipeline::GetBlockLoopTailNum(num_loop);

    float ave_time{0};

    const auto Run =
        [&](const auto has_hot_loop_, const auto tail_number_, const auto memory_operation_) {
            constexpr bool has_hot_loop_v   = has_hot_loop_.value;
            constexpr auto tail_number_v    = tail_number_.value;
            constexpr auto scheduler        = FlatmmConfig::Scheduler;
            constexpr auto memory_operation = memory_operation_.value;

            constexpr int BlockedXDLN_PerWarp = 2; // determined by scale shuffle pattern

            using CodegenPipelineProblem = ck_tile::MXFlatmmPipelineProblem<ADataType,
                                                                            BDataType,
                                                                            AccDataType,
                                                                            CodegenFlatmmShape,
                                                                            CodegenGemmTraits,
                                                                            scheduler,
                                                                            has_hot_loop_v,
                                                                            tail_number_v,
                                                                            ComputeDataType,
                                                                            BlockedXDLN_PerWarp>;

            using CodegenMXFlatmmPipeline =
                ck_tile::MXF4FlatmmPipelineAGmemBGmemCRegV1<CodegenPipelineProblem>;

            using GemmEpilogue = ck_tile::CShuffleEpilogue<
                ck_tile::CShuffleEpilogueProblem<ADataType,
                                                 BDataType,
                                                 AccDataType,
                                                 CDataType,
                                                 CLayout,
                                                 CodegenPipelineProblem::kBlockSize,
                                                 TilePartitioner::MPerBlock,
                                                 TilePartitioner::NPerBlock,
                                                 FlatmmConfig::M_Warp,
                                                 FlatmmConfig::N_Warp,
                                                 FlatmmConfig::M_Warp_Tile,
                                                 FlatmmConfig::N_Warp_Tile,
                                                 FlatmmConfig::K_Warp_Tile,
                                                 CodegenGemmTraits::TransposeC,
                                                 memory_operation>>;

            using Kernel =
                ck_tile::MXFlatmmKernel<TilePartitioner, CodegenMXFlatmmPipeline, GemmEpilogue>;

            auto kargs = Kernel::MakeKernelArgs(args);

            const dim3 grids      = Kernel::GridSize(kargs);
            constexpr dim3 blocks = Kernel::BlockSize();

            if(!Kernel::IsSupportedArgument(kargs))
            {
                throw std::runtime_error("Wrong! Arguments not supported! Skipping gemm!\n");
            }

            if(s.log_level_ > 0)
            {
                std::cout << "Launching kernel with args:"
                          << "\n"
                          << "Shape: " << CodegenFlatmmShape::GetName() << "\n"
                          << "problem: " << CodegenPipelineProblem::GetName() << "\n"
                          << "pipeline: " << CodegenMXFlatmmPipeline::GetName() << "\n"
                          << "grid: {" << grids.x << ", " << grids.y << ", " << grids.z << "}"
                          << ", blocks: {" << blocks.x << ", " << blocks.y << ", " << blocks.z
                          << "}" << std::endl;
            }
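            // Benchmark hygiene (see the branch below): when s.flush_cache_ is set,
            // every timed iteration first flushes the instruction cache and advances
            // a rotating set of A/B buffer copies, so the kernel is measured against
            // cold cache lines instead of data a previous iteration left resident.
            // For split-K runs (k_batch > 1) the output buffer is also zeroed between
            // iterations, since split-K accumulates partial results with atomic adds.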
            if(s.flush_cache_)
            {
                std::cout << "Flushing cache..." << std::endl;

                constexpr ck_tile::index_t APackedSize =
                    ck_tile::numeric_traits<ADataType>::PackedSize;
                constexpr ck_tile::index_t BPackedSize =
                    ck_tile::numeric_traits<BDataType>::PackedSize;

                ck_tile::HostTensor<ADataType> a_m(ck_tile::host_tensor_descriptor(
                    args.M, args.K, args.stride_A, is_row_major(ALayout{})));
                ck_tile::HostTensor<BDataType> b_n(ck_tile::host_tensor_descriptor(
                    args.K, args.N, args.stride_B, is_row_major(BLayout{})));

                auto size_a_buffer = a_m.get_element_space_size_in_bytes() / APackedSize;
                auto size_b_buffer = b_n.get_element_space_size_in_bytes() / BPackedSize;

                ck_tile::RotatingMemWrapper<ADataType, BDataType> rotating_mem(
                    kargs.a_ptr, kargs.b_ptr, s.rotating_count_, size_a_buffer, size_b_buffer);
                rotating_mem.Print();

                auto run_flush_cache = [&]() {
                    // flush icache
                    ck_tile::flush_icache();
                    // rotating mem
                    rotating_mem.Next();
                    // clear c mem
                    if(args.k_batch > 1)
                        hipGetErrorString(hipMemsetAsync(
                            args.e_ptr, 0, args.M * args.N * sizeof(CDataType), s.stream_id_));
                };

                ave_time = ck_tile::launch_kernel_preprocess(
                    s,
                    run_flush_cache,
                    ck_tile::make_kernel(Kernel{}, grids, blocks, 0, kargs));
            }
            else
            {
                ave_time = ck_tile::launch_kernel(
                    s, ck_tile::make_kernel(Kernel{}, grids, blocks, 0, kargs));
            }

            return ave_time;
        };

    const auto RunSplitk = [&](const auto has_hot_loop_, const auto tail_number_) {
        if(args.k_batch == 1)
        {
            Run(has_hot_loop_,
                tail_number_,
                ck_tile::integral_constant<ck_tile::memory_operation_enum,
                                           ck_tile::memory_operation_enum::set>{});
        }
        else
        {
            Run(has_hot_loop_,
                tail_number_,
                ck_tile::integral_constant<ck_tile::memory_operation_enum,
                                           ck_tile::memory_operation_enum::atomic_add>{});
        }
    };

    BaseGemmPipeline::TailHandler(RunSplitk, has_hot_loop, tail_num);
    return ave_time;
}

template <typename FlatmmConfig,
          typename ADataType,
          typename BDataType,
          typename AccDataType,
          typename CDataType,
          typename ALayout,
          typename BLayout,
          typename CLayout,
          typename ScaleA,
          typename ScaleB>
float invoke_mx_flatmm(ck_tile::DeviceMem& a_dev_buf,
                       ck_tile::DeviceMem& b_shuffle_dev_buf,
                       ck_tile::DeviceMem& c_dev_buf,
                       ck_tile::index_t M,
                       ck_tile::index_t N,
                       ck_tile::index_t K,
                       ck_tile::index_t stride_A,
                       ck_tile::index_t stride_B,
                       ck_tile::index_t stride_C,
                       ck_tile::index_t kbatch,
                       ScaleA scale_a,
                       ScaleB scale_b,
                       int n_warmup,
                       int n_repeat)
{
    ck_tile::ScaleFlatmmHostArgs args = {a_dev_buf.GetDeviceBuffer(),
                                         b_shuffle_dev_buf.GetDeviceBuffer(),
                                         {}, // unused field, value-initialized
                                         c_dev_buf.GetDeviceBuffer(),
                                         kbatch,
                                         M,
                                         N,
                                         K,
                                         stride_A,
                                         stride_B,
                                         {}, // unused field, value-initialized
                                         stride_C,
                                         {}, // unused field, value-initialized
                                         scale_a,
                                         scale_b};

    float ave_time = mx_flatmm_calc<FlatmmConfig,
                                    ADataType,
                                    BDataType,
                                    AccDataType,
                                    CDataType,
                                    ALayout,
                                    BLayout,
                                    CLayout>(
        args, ck_tile::stream_config{nullptr, true, 1, n_warmup, n_repeat, true, true, 50});

    constexpr int APackedSize = ck_tile::numeric_traits<ADataType>::PackedSize;
    constexpr int BPackedSize = ck_tile::numeric_traits<BDataType>::PackedSize;

    std::size_t flop     = std::size_t(2) * M * N * K + std::size_t(2) * M * N * K / 32;
    std::size_t num_byte = sizeof(ADataType) * M * K / APackedSize +
                           sizeof(BDataType) * N * K / BPackedSize + sizeof(CDataType) * M * N;
    float tflops     = static_cast<float>(flop) / 1.E9 / ave_time;
    float gb_per_sec = static_cast<float>(num_byte) / 1.E6 / ave_time;

    std::cout << "Run MX_Flatmm kernel"
              << " M=" << M << " N=" << N << " K=" << K << " StrideA=" << stride_A
              << " StrideB=" << stride_B << " StrideC=" << stride_C << " : " << ave_time
              << " ms, " << tflops << " TFlops, " << gb_per_sec << " GB/s" << std::endl;

    return ave_time;
}
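// Performance accounting used above: the GEMM itself costs 2*M*N*K FLOPs (one
// multiply and one add per K-element), and applying the MX block scales adds
// roughly 2*M*N*K/32 FLOPs, since there is one scale per 32 elements along K
// (GranularityK below). With the default problem size (M=32, N=128, K=512)
// that is 2*32*128*512 = 4,194,304 GEMM FLOPs plus 131,072 scale FLOPs. The
// byte count divides the A and B footprints by PackedSize because two fp4
// values share one byte.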
Validation on GPU") .insert( "mx_prec", "fp4xfp4", "data type for activation and weight, support: fp6xfp6, fp8xfp8") .insert("warmup", "50", "number of iterations before benchmark the kernel") .insert("repeat", "100", "number of iterations to benchmark the kernel") .insert("timer", "gpu", "gpu:gpu timer, cpu:cpu timer") .insert("split_k", "1", "splitK value") .insert("init", "0", "0:random, 1:constant(1)") .insert("persistent", "0", "0: no persistent, 1: persistent kernel") .insert("warp_tile", "0", "0: 16x16, 1: 32x32, 2: 16x16x128 (950 only), 3: 32x32x64 (950 only)"); bool result = arg_parser.parse(argc, argv); return std::make_tuple(result, arg_parser); } template void preShuffleWeight(const IterSrc src, IterDst dst, int N, int K) { int KPack = 16; int NLane = FlatmmConfig::N_Warp_Tile; int KLane = 64 / NLane; int K_pk = K / 2; int K0 = K_pk / (KLane * KPack); // K -> K0 KLane KPack // N -> N0 NLane // N, K -> N0 K0 KLane NLane KPack int tempk; for(int n = 0; n < N; ++n) { for(int k = 0; k < K_pk; ++k) { int n0 = n / NLane; int n1 = n % NLane; int k0 = k / (KLane * KPack); tempk = k % (KLane * KPack); int k1 = tempk / KPack; int k2 = tempk % KPack; int outputIndex = n0 * KPack * NLane * KLane * K0 + k0 * KPack * NLane * KLane + k1 * KPack * NLane + n1 * KPack + k2; dst[outputIndex] = src[n * K_pk + k]; } } } template auto preShuffleScale(const ck_tile::HostTensor& scale) { assert(scale.get_lengths().size() == 2); int n_ = scale.get_lengths()[1]; int k_ = scale.get_lengths()[0]; constexpr int K_Pack = 2; // fixed for mxfp4 constexpr int N_Pack = 2; // fixed for mxfp4 constexpr int GranularityK = 32; // fixed for mxfp4 constexpr int K_Lane = 64 / FlatmmConfig::N_Warp_Tile; // 4 static_assert(FlatmmConfig::N_Warp_Tile == 16, "only support XDL_N == 16"); static_assert(FlatmmConfig::N_Repeat % N_Pack == 0); static_assert(FlatmmConfig::K_Tile % (K_Pack * K_Lane * GranularityK) == 0); ck_tile::HostTensor shfl_scale({ k_ / K_Pack / K_Lane, K_Pack, K_Lane, n_ / FlatmmConfig::N_Warp_Tile / N_Pack, N_Pack, FlatmmConfig::N_Warp_Tile, }); std::copy(scale.begin(), scale.end(), shfl_scale.begin()); return ck_tile::reference_permute(shfl_scale, {3, 0, 2, 5, 1, 4}); } #include "run_mx_prec_flatmm.inc" template int run_mx_flatmm_example(int argc, char* argv[]) { auto [result, arg_parser] = create_args(argc, argv); if(!result) return -1; using Row = ck_tile::tensor_layout::gemm::RowMajor; using Col = ck_tile::tensor_layout::gemm::ColumnMajor; std::string mx_prec = arg_parser.get_str("mx_prec"); std::string a_layout = arg_parser.get_str("a_layout"); std::string b_layout = arg_parser.get_str("b_layout"); int persistent_opt = arg_parser.get_int("persistent"); if(a_layout == "R" && b_layout == "C") { if(mx_prec == "fp4xfp4") { if(persistent_opt == 0) { run_mx_flatmm_with_layouts(argc, argv, Row{}, Col{}, Row{}); } else { run_mx_flatmm_with_layouts(argc, argv, Row{}, Col{}, Row{}); } } else if(mx_prec == "fp6xfp6") { throw std::runtime_error("Only support fp4xfp4 now!"); } else if(mx_prec == "fp8xfp8") { throw std::runtime_error("Only support fp4xfp4 now!"); } else { throw std::runtime_error("Unsupported data_type!"); } } else { throw std::runtime_error("Unsupported data layout configuration for A,B and C tensors!"); } return -1; } int main(int argc, char* argv[]) { auto [result, arg_parser] = create_args(argc, argv); if(!result) return EXIT_FAILURE; try { int warp_tile = arg_parser.get_int("warp_tile"); if(warp_tile == 0) { return !run_mx_flatmm_example(argc, argv); } else if(warp_tile == 1) { thow 
std::runtime_error("Only support MFMA_16x16x128 now!"); } else { throw std::runtime_error("Unsupported warp_tile!"); } } catch(const std::runtime_error& e) { std::cerr << "Runtime error: " << e.what() << '\n'; return EXIT_FAILURE; } }