#pragma once
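
// Shared driver for the split-K GEMM examples. The including translation unit is
// expected to define the data types (ADataType, BDataType, CDataType, AccDataType),
// layouts (ALayout, BLayout, CLayout), element-wise ops (AElementOp, BElementOp,
// CElementOp) and the DeviceGemmInstance before including this file. k_batch is the
// split-K factor: the K dimension is partitioned into k_batch chunks whose partial
// results are accumulated into C.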

struct ProblemSize final
{
    ck::index_t M = 3840;
    ck::index_t N = 4096;
    ck::index_t K = 4096;

    ck::index_t stride_A = K;
    ck::index_t stride_B = K;
    ck::index_t stride_C = N;

    ck::index_t k_batch = 4;
};

struct ExecutionConfig final
{
    bool do_verification = true;
    int init_method = 1;
    bool time_kernel = false;
};

bool run_splitK_gemm(const ProblemSize& problem_size, const ExecutionConfig& config)
{
    using namespace ck::literals;

    auto& [M, N, K, StrideA, StrideB, StrideC, KBatch] = problem_size;
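
    // Helper: build a HostTensorDescriptor whose strides match the requested layout
    // (row-major => {stride, 1}, column-major => {1, stride}).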
    auto f_host_tensor_descriptor =
        [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
            if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value)
            {
                return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
                                            std::vector<std::size_t>({stride, 1}));
            }
            else
            {
                return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
                                            std::vector<std::size_t>({1, stride}));
            }
        };

    Tensor<ADataType> a_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
    Tensor<BDataType> b_k_n(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));
    Tensor<CDataType> c_m_n_host_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
    Tensor<CDataType> c_m_n_device_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));

    std::cout << "a_m_k: " << a_m_k.mDesc << std::endl;
    std::cout << "b_k_n: " << b_k_n.mDesc << std::endl;
    std::cout << "c_m_n: " << c_m_n_host_result.mDesc << std::endl;
    switch(config.init_method)
    {
    case 0: break;
    case 1:
        a_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5});
        b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5});
        break;
    case 2:
        a_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
        b_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5});
        break;
    default:
        a_m_k.GenerateTensorValue(GeneratorTensor_Sequential<0>{});
        b_k_n.GenerateTensorValue(GeneratorTensor_Sequential<1>{});
    }
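
    // Copy A and B to the device. C is zeroed up front because split-K kernels
    // may accumulate the partial results of each K-chunk directly into it.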
    DeviceMem a_m_k_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpaceSize());
    DeviceMem b_k_n_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpaceSize());
    DeviceMem c_m_n_device_buf(sizeof(CDataType) * c_m_n_device_result.mDesc.GetElementSpaceSize());

    a_m_k_device_buf.ToDevice(a_m_k.mData.data());
    b_k_n_device_buf.ToDevice(b_k_n.mData.data());
    c_m_n_device_buf.SetZero();

    auto a_element_op = AElementOp{};
    auto b_element_op = BElementOp{};
    auto c_element_op = CElementOp{};

    // do GEMM: the trailing KBatch argument selects the split-K factor.
    auto gemm = DeviceGemmInstance{};
    auto invoker = gemm.MakeInvoker();
    auto argument = gemm.MakeArgument(static_cast<ADataType*>(a_m_k_device_buf.GetDeviceBuffer()),
                                      static_cast<BDataType*>(b_k_n_device_buf.GetDeviceBuffer()),
                                      static_cast<CDataType*>(c_m_n_device_buf.GetDeviceBuffer()),
                                      M,
                                      N,
                                      K,
                                      StrideA,
                                      StrideB,
                                      StrideC,
                                      a_element_op,
                                      b_element_op,
                                      c_element_op,
                                      KBatch);
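
    // The selected device instance may not support every combination of sizes and
    // strides; report and bail out if it cannot run this problem.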
    if(!gemm.IsSupportedArgument(argument))
    {
        std::cout << gemm.GetTypeString() << " does not support this problem" << std::endl;

        return false;
    }

    float ave_time = invoker.Run(argument, StreamConfig{nullptr, config.time_kernel});
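
    // ave_time is reported in ms, so flop / 1e9 / ms gives TFLOPS and
    // bytes / 1e6 / ms gives GB/s.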
    std::size_t flop = std::size_t(2) * M * N * K;
    std::size_t num_btype =
        sizeof(ADataType) * M * K + sizeof(BDataType) * K * N + sizeof(CDataType) * M * N;

    float tflops = static_cast<float>(flop) / 1.E9 / ave_time;

    float gb_per_sec = num_btype / 1.E6 / ave_time;

    std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec << " GB/s, "
              << gemm.GetTypeString() << std::endl;

    c_m_n_device_buf.FromDevice(c_m_n_device_result.mData.data());
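
    // Verify the device result against a straightforward host reference GEMM.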
    if(config.do_verification)
    {
        using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataType,
                                                                                BDataType,
                                                                                CDataType,
                                                                                AccDataType,
                                                                                AElementOp,
                                                                                BElementOp,
                                                                                CElementOp>;

        auto ref_gemm = ReferenceGemmInstance{};
        auto ref_invoker = ref_gemm.MakeInvoker();

        auto ref_argument = ref_gemm.MakeArgument(
            a_m_k, b_k_n, c_m_n_host_result, a_element_op, b_element_op, c_element_op);

        ref_invoker.Run(ref_argument);
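
        // fp16 results are checked with looser tolerances (rtol = 3e-3, atol = 1e-3):
        // split-K changes the accumulation order, so bit-for-bit agreement with the
        // host reference is not expected.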
        if(std::is_same<CDataType, ck::half_t>::value)
        {
            return ck::utils::check_err(c_m_n_device_result.mData,
                                        c_m_n_host_result.mData,
                                        "fp16 incorrect result",
                                        3e-3,
                                        1e-3);
        }
        else
        {
            return ck::utils::check_err(c_m_n_device_result.mData, c_m_n_host_result.mData);
        }
    }

    return true;
}

bool run_splitK_gemm_example(int argc, char* argv[])
{
    ProblemSize problem_size;
    ExecutionConfig config;
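
    // Three accepted invocations: no arguments (defaults), 4 arguments
    // (flags + KBatch), or 10 arguments (flags + KBatch + problem size and strides).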
    if(argc == 1)
    {
        // use default case
    }
    else if(argc == 5)
    {
        config.do_verification = std::stoi(argv[1]);
        config.init_method = std::stoi(argv[2]);
        config.time_kernel = std::stoi(argv[3]);
        problem_size.k_batch = std::stoi(argv[4]);
    }
    else if(argc == 11)
    {
        config.do_verification = std::stoi(argv[1]);
        config.init_method = std::stoi(argv[2]);
        config.time_kernel = std::stoi(argv[3]);
        problem_size.k_batch = std::stoi(argv[4]);

        problem_size.M = std::stoi(argv[5]);
        problem_size.N = std::stoi(argv[6]);
        problem_size.K = std::stoi(argv[7]);

        problem_size.stride_A = std::stoi(argv[8]);
        problem_size.stride_B = std::stoi(argv[9]);
        problem_size.stride_C = std::stoi(argv[10]);
    }
    else
    {
        printf("arg1: verification (0=no, 1=yes)\n");
        printf("arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n");
        printf("arg3: time kernel (0=no, 1=yes)\n");
        printf("arg4: KBatch\n");
        printf("arg5 to 10: M (256x), N (128x), K (32x), StrideA, StrideB, StrideC\n");
        exit(0);
    }

    return run_splitK_gemm(problem_size, config);
}
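
// Example invocation (the binary name depends on the example that includes this
// file; the one below is hypothetical):
//   ./example_splitK_gemm 1 1 0 4 3840 4096 4096 4096 4096 4096
//   (verify=1, integer init, no timing, KBatch=4, then M N K StrideA StrideB StrideC)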