mirror of
https://github.com/ROCm/composable_kernel.git
synced 2026-05-03 13:11:25 +00:00
* Add trait to use a persistent kernel and split the entrypoints in grouped gemm * Some helper functions for persistent kernel case * Get max occupancy grid using device properties * Implement tile loop in main entry point to grouped gemm * Enable GridSize() on device * Handle offset tile index using real current block index * Add persistent kernel choice to grouped gemm example * Use a for-loop for iterating over the group * Reduce VGPR spills by early-exit * Enable persistent kernel choice in grouped_gemm example * Add persistent kernel option to grouped_gemm test * Fix formatting with remod.py * Remove GridUpdateBlocks as blocks are now iteratively computed * Add comment about VGPR spilling * Fix formatting * Use CK_TILE_HOST instead of __host__ * Enable all Row/Col combinations in grouped gemm unit test * Add some KBatch=2 cases to grouped gemm tests * Fix SplitK for grouped gemm * Enable pipeline hotloop/tailnumber selection in-kernel for grouped gemm * Add type traits * Split examples to regular and tileloop * Formatting * Use hipExtStreamGetCUMask to get current active CUs for the given stream * Align test and example kernel config, and disable validation for splitk repeats * Remove debug options from CMakeLists.txt * Separate the code paths for persistent/non-persistent in test * Fix formatting * Address review comments --------- Co-authored-by: Adam Osewski <19374865+aosewski@users.noreply.github.com>
95 lines
3.7 KiB
C++
95 lines
3.7 KiB
C++
// SPDX-License-Identifier: MIT
|
|
// Copyright (c) 2024-2025, Advanced Micro Devices, Inc. All rights reserved.
|
|
|
|
#pragma once
|
|
|
|
#include <string>
|
|
|
|
#include "ck_tile/core.hpp"
|
|
#include "ck_tile/host/kernel_launch.hpp"
|
|
#include "ck_tile/ops/gemm/kernel/grouped_gemm_kernel.hpp"
|
|
|
|
// Numeric tags identifying the available GEMM pipeline variants.
#define CK_TILE_PIPELINE_COMPUTE_V3 1
#define CK_TILE_PIPELINE_MEMORY 2
#define CK_TILE_PIPELINE_COMPUTE_V4 3

// Pipeline selection is overridable from the build command line;
// fall back to the Compute V3 pipeline when nothing is specified.
#ifndef CK_TILE_PIPELINE_DEFAULT
#define CK_TILE_PIPELINE_DEFAULT CK_TILE_PIPELINE_COMPUTE_V3
#endif

// Map the selected pipeline tag to the concrete ck_tile pipeline types and
// the scheduler policy the example/test code should instantiate.
#if(CK_TILE_PIPELINE_DEFAULT == CK_TILE_PIPELINE_MEMORY)
// Memory pipeline uses the Interwave scheduler.
#define GEMM_PIPELINE ck_tile::GemmPipelineAgBgCrMem
#define UNIVERSAL_GEMM_PIPELINE ck_tile::BaseGemmPipelineAgBgCrMem
#define GEMM_PIPELINE_SCHEDULER ck_tile::GemmPipelineScheduler::Interwave
#elif(CK_TILE_PIPELINE_DEFAULT == CK_TILE_PIPELINE_COMPUTE_V3)
// Compute pipelines (V3/V4) use the Intrawave scheduler.
#define GEMM_PIPELINE ck_tile::GemmPipelineAgBgCrCompV3
#define UNIVERSAL_GEMM_PIPELINE ck_tile::BaseGemmPipelineAgBgCrCompV3
#define GEMM_PIPELINE_SCHEDULER ck_tile::GemmPipelineScheduler::Intrawave
#elif(CK_TILE_PIPELINE_DEFAULT == CK_TILE_PIPELINE_COMPUTE_V4)
#define GEMM_PIPELINE ck_tile::GemmPipelineAgBgCrCompV4
#define UNIVERSAL_GEMM_PIPELINE ck_tile::BaseGemmPipelineAgBgCrCompV4
#define GEMM_PIPELINE_SCHEDULER ck_tile::GemmPipelineScheduler::Intrawave
#else
// Any other tag value is a configuration error - fail the build early.
#error "unsupported CK_TILE_PIPELINE_DEFAULT value"
#endif
|
|
|
|
// Maps an element data type to the concrete A/B/C/accumulator types used by
// the GEMM. The primary template is intentionally left undefined so that
// unsupported data types fail at compile time.
template <typename DataType>
struct GemmTypeConfig;

// fp16 configuration: half-precision operands and output.
template <>
struct GemmTypeConfig<ck_tile::half_t>
{
    using ADataType = ck_tile::half_t;
    using BDataType = ck_tile::half_t;
    using CDataType = ck_tile::half_t;
    // Accumulate in fp32 rather than fp16.
    using AccDataType = float;
};
|
|
|
|
// Concrete type bundle used throughout this example (fp16 operands, fp32 accumulator).
using Types = GemmTypeConfig<ck_tile::half_t>;

// Specific type aliases for easy access
using ADataType = Types::ADataType;
using BDataType = Types::BDataType;
using AccDataType = Types::AccDataType;
using CDataType = Types::CDataType;

// Host-side argument struct describing a single GEMM problem within the group.
using grouped_gemm_kargs = ck_tile::GemmHostArgs;
|
|
|
|
auto create_args(int argc, char* argv[])
|
|
{
|
|
ck_tile::ArgParser arg_parser;
|
|
arg_parser.insert("Ms", "", "M dimensions - empty by default.")
|
|
.insert("Ns", "", "N dimensions - empty by default.")
|
|
.insert("Ks", "", "K dimensions - empty by default.")
|
|
.insert("stride_As", "", "Tensor A strides - it is empty by default.")
|
|
.insert("stride_Bs", "", "Tensor B strides - it is empty by default.")
|
|
.insert("stride_Cs", "", "Tensor C strides - it is empty by default.")
|
|
.insert("a_layout", "R", "A tensor data layout - Row by default.")
|
|
.insert("b_layout", "C", "B tensor data layout - Row by default.")
|
|
.insert("c_layout", "R", "C tensor data layout - Row by default.")
|
|
.insert("validate", "1", "0. No validation, 1. Validation on CPU.")
|
|
.insert("warmup", "10", "number of iterations before benchmark the kernel.")
|
|
.insert("repeat", "100", "number of iterations to benchmark the kernel.")
|
|
.insert("group_count", "8", "group count.")
|
|
.insert("kbatch", "1", "kbatch for SplitK");
|
|
|
|
bool result = arg_parser.parse(argc, argv);
|
|
return std::make_tuple(result, arg_parser);
|
|
}
|
|
|
|
inline std::size_t get_workspace_size(const std::vector<grouped_gemm_kargs>& gemm_descs)
|
|
{
|
|
return gemm_descs.size() * sizeof(ck_tile::GemmTransKernelArg);
|
|
}
|
|
|
|
// Runs the grouped GEMM for the given problem descriptors (non-persistent,
// one workgroup tile per block variant). Defined in the example translation
// unit for each layout combination.
//   gemm_descs - per-group GEMM host arguments.
//   s          - stream/launch configuration.
//   kargs_ptr  - device buffer for kernel arguments; must be at least
//                get_workspace_size(gemm_descs) bytes.
// Returns a float timing value (presumably the measured kernel time in ms -
// confirm against the implementation).
template <typename ALayout, typename BLayout, typename CLayout>
float grouped_gemm(const std::vector<grouped_gemm_kargs>& gemm_descs,
                   const ck_tile::stream_config& s,
                   void* kargs_ptr);

// Persistent (tile-loop) variant of the grouped GEMM: the kernel arguments
// are expected to already reside in the buffer pointed to by kargs_ptr.
//   num_groups - number of GEMM groups described in kargs_ptr.
//   splitk     - enables the SplitK code path when true (default false).
// Returns a float timing value like grouped_gemm above.
template <typename ALayout, typename BLayout, typename CLayout>
float grouped_gemm_tileloop(const ck_tile::stream_config& s,
                            const ck_tile::index_t num_groups,
                            void* kargs_ptr,
                            bool splitk = false);
|