// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

template <typename ProblemType>
bool run_gemm(const ProblemType& problem_size, const ExecutionConfig& config)
{
#if defined(BUILD_INT4_EXAMPLE) && defined(CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4)
    static_assert(sizeof(ck::int4_t) == sizeof(int8_t));
#endif
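
    // the assert above pins host-side ck::int4_t storage to one byte, so the
    // device buffers sized with the kernel-side types (below) line up with the
    // host tensors element-for-element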

    using namespace ck::literals;

    auto M       = problem_size.M;
    auto N       = problem_size.N;
    auto K       = problem_size.K;
    auto StrideA = problem_size.StrideA;
    auto StrideB = problem_size.StrideB;
    auto StrideC = problem_size.StrideC;
    auto KBatch  = problem_size.KBatch;

    auto f_host_tensor_descriptor =
        [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
            if constexpr(std::is_same_v<decltype(layout), ck::tensor_layout::gemm::RowMajor>)
            {
                return HostTensorDescriptor({row, col}, {stride, 1_uz});
            }
            else
            {
                return HostTensorDescriptor({row, col}, {1_uz, stride});
            }
        };
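
    // worked example with hypothetical sizes: a 3 x 4 row-major tensor with
    // stride 4 places element (i, j) at offset i * 4 + j; the column-major
    // variant places it at offset i + j * 4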

    auto f_get_default_stride =
        [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
            if(stride == 0)
            {
                // if no stride was specified, fall back to a packed (contiguous) stride
                if constexpr(std::is_same_v<decltype(layout), ck::tensor_layout::gemm::RowMajor>)
                {
                    return col;
                }
                else
                {
                    return row;
                }
            }
            else
                return stride;
        };
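
    // e.g. (hypothetical sizes): with M = 128, K = 64 and StrideA == 0, a
    // row-major A falls back to StrideA = K = 64, a column-major A to M = 128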

    StrideA = f_get_default_stride(M, K, StrideA, ALayout{});
    StrideB = f_get_default_stride(K, N, StrideB, BLayout{});
    StrideC = f_get_default_stride(M, N, StrideC, CLayout{});

    Tensor<ADataType> a_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
    Tensor<BDataType> b_k_n(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));
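
    // init_method selects the input distributions: 0 fills A and B with ones,
    // 1 fills both with small random integers, 2 and 3 mix the two, and any
    // other value uses random floating-point values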

    switch(config.init_method)
    {
    case 0:
        a_m_k.GenerateTensorValue(GeneratorTensor_1<ADataType>{1});
        b_k_n.GenerateTensorValue(GeneratorTensor_1<BDataType>{1});
        break;
    case 1:
        a_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-2, 2});
        b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-2, 2});
        break;
    case 2:
        a_m_k.GenerateTensorValue(GeneratorTensor_1<ADataType>{1});
        b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-2, 2});
        break;
    case 3:
        a_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-2, 2});
        b_k_n.GenerateTensorValue(GeneratorTensor_1<BDataType>{1});
        break;
    default:
        a_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
        b_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5});
    }

#if 0
    // debug dump of the raw bytes of B
    printf("B matrix:\n");
    for(int in = 0; in < N; in++)
    {
        for(int ik = 0; ik < K; ik++)
        {
            printf("%02x ", *(reinterpret_cast<uint8_t*>(&b_k_n(ik, in))));
            if(ik % 8 == 7)
                printf("|");
        }
        printf("\n");
    }
#endif

    Tensor<CDataType> c_m_n_host_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
    Tensor<CDataType> c_m_n_device_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));

    std::cout << "a_m_k: " << a_m_k.mDesc << std::endl;
    std::cout << "b_k_n: " << b_k_n.mDesc << std::endl;
    std::cout << "c_m_n: " << c_m_n_host_result.mDesc << std::endl;
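
    // In the int4 build, host tensors hold one ck::int4_t per byte; they are
    // converted to the kernel-side integer types before upload so the device
    // buffers match what the kernel expects.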
#ifdef BUILD_INT4_EXAMPLE
    DeviceMem a_m_k_device_buf(sizeof(KernelADataType) * a_m_k.mDesc.GetElementSpaceSize());
    DeviceMem b_k_n_device_buf(sizeof(KernelBDataType) * b_k_n.mDesc.GetElementSpaceSize());
    DeviceMem c_m_n_device_buf(sizeof(KernelCDataType) *
                               c_m_n_device_result.mDesc.GetElementSpaceSize());

    const Tensor<KernelADataType> a_m_k_converted(a_m_k);
    const Tensor<KernelBDataType> b_k_n_converted(b_k_n);

    a_m_k_device_buf.ToDevice(a_m_k_converted.mData.data());
    b_k_n_device_buf.ToDevice(b_k_n_converted.mData.data());
#else
    DeviceMem a_m_k_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpaceSize());
    DeviceMem b_k_n_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpaceSize());
    DeviceMem c_m_n_device_buf(sizeof(CDataType) * c_m_n_device_result.mDesc.GetElementSpaceSize());

    a_m_k_device_buf.ToDevice(a_m_k.mData.data());
    b_k_n_device_buf.ToDevice(b_k_n.mData.data());
#endif
    DeviceMem workspace;

    auto a_element_op = AElementOp{};
    auto b_element_op = BElementOp{};
    auto c_element_op = CElementOp{};

    // do GEMM
    auto gemm    = DeviceGemmV2Instance{};
    auto invoker = gemm.MakeInvoker();
    float ave_time = 0;

    auto argument = gemm.MakeArgument(
#ifdef BUILD_INT4_EXAMPLE
        static_cast<KernelADataType*>(a_m_k_device_buf.GetDeviceBuffer()),
        static_cast<KernelBDataType*>(b_k_n_device_buf.GetDeviceBuffer()),
        static_cast<KernelCDataType*>(c_m_n_device_buf.GetDeviceBuffer()),
#else
        static_cast<ADataType*>(a_m_k_device_buf.GetDeviceBuffer()),
        static_cast<BDataType*>(b_k_n_device_buf.GetDeviceBuffer()),
        static_cast<CDataType*>(c_m_n_device_buf.GetDeviceBuffer()),
#endif
        M,
        N,
        K,
        StrideA,
        StrideB,
        StrideC,
        KBatch,
        a_element_op,
        b_element_op,
        c_element_op);
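
    // KBatch is the split-K factor: values > 1 split the K-dimension reduction
    // across workgroups (the "splitk" in this example's name)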

    if(!gemm.IsSupportedArgument(argument))
    {
        std::cerr << gemm.GetTypeString() << " does not support this problem" << std::endl;
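
        // treat an unsupported configuration as a skip rather than a failure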
        return true;
    }

    bool pass = true;
    if(config.do_verification)
    {
        auto ref_gemm    = ReferenceGemmInstance{};
        auto ref_invoker = ref_gemm.MakeInvoker();

        auto ref_argument = ref_gemm.MakeArgument(
            a_m_k, b_k_n, c_m_n_host_result, PassThrough{}, PassThrough{}, PassThrough{});

        ref_invoker.Run(ref_argument);

        // run the device kernel once, untimed, to produce the result to check
        ave_time = invoker.Run(argument, StreamConfig{nullptr, false, 1});
#ifdef BUILD_INT4_EXAMPLE
        Tensor<CDataType> c_m_n_device_result_converted(c_m_n_host_result.mDesc);

        c_m_n_device_buf.FromDevice(c_m_n_device_result_converted.mData.data());

        c_m_n_device_result = c_m_n_device_result_converted.CopyAsType<CDataType>();

        return ck::utils::check_err(c_m_n_device_result_converted, c_m_n_host_result);
#else
        c_m_n_device_buf.FromDevice(c_m_n_device_result.mData.data());

        pass &= ck::utils::check_err(c_m_n_device_result, c_m_n_host_result);
#endif
    }

    if(config.time_kernel)
    {
        ave_time = invoker.Run(argument, StreamConfig{nullptr, config.time_kernel});

        std::size_t flop = 2_uz * M * N * K;
        std::size_t num_btype =
            sizeof(ADataType) * M * K + sizeof(BDataType) * K * N + sizeof(CDataType) * M * N;
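
        // ave_time is in ms, so flop / 1e9 / ms gives TFLOP/s and
        // bytes / 1e6 / ms gives GB/s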
        float tflops = static_cast<float>(flop) / 1.E9 / ave_time;

        float gb_per_sec = num_btype / 1.E6 / ave_time;

        std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec
                  << " GB/s, " << gemm.GetTypeString() << std::endl;
    }

    return pass;
}

bool run_gemm_splitk_example(int argc, char* argv[])
{
    ProblemSizeSplitK problem_size;
    ExecutionConfig config;
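
    // if parsing fails, the short-circuit below skips the run and returns true;
    // parse_cmd_args is presumed to report the problem itself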
    return !parse_cmd_args(argc, argv, problem_size, config) || run_gemm(problem_size, config);
}
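
// Usage sketch with hypothetical values (field names taken from how run_gemm
// reads ProblemSizeSplitK and ExecutionConfig above):
//
//   ProblemSizeSplitK problem_size;
//   problem_size.M       = 3840;
//   problem_size.N       = 4096;
//   problem_size.K       = 4096;
//   problem_size.StrideA = 0; // 0 = derive packed stride
//   problem_size.StrideB = 0;
//   problem_size.StrideC = 0;
//   problem_size.KBatch  = 4; // split-K factor
//
//   ExecutionConfig config;
//   config.do_verification = true;
//   config.init_method     = 1;
//   config.time_kernel     = true;
//
//   bool pass = run_gemm(problem_size, config);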