mirror of
https://github.com/ROCm/composable_kernel.git
synced 2026-05-14 02:02:46 +00:00
* add proper GEMM layout verification
* Handle "auto" strides.
CalculateStrides only called when tensor's strides are empty or all of them are <=0 (auto strides).
CalculateStrides now supports GEMM::ColumnsMajor order. The assumption is still that it applies only to the inner two dims.
ValidateStrides throws if any of the tensor's strides is <=0.
profile_gemm_multiply_add updated to support "auto" strides for tensors.
Manual tests for profile_gemm_multiply_add (matrix B in Row and Col modes)
auto-strides
bin/ckProfiler gemm_multiply_add 0 0 1 1 0 1 128 128 128 0 0 0 0 0
bin/ckProfiler gemm_multiply_add 0 1 1 1 0 1 128 128 128 0 0 0 0 0
bin/ckProfiler gemm_multiply_add 0 0 1 1 0 1 128 128 128 -1 -1 -1 -1 -1
Note, -1 should be deprecated (use 0 instead)
explicit strides (same as auto)
bin/ckProfiler gemm_multiply_add 0 0 1 1 0 1 128 128 128 128 128 128 128 128
bin/ckProfiler gemm_multiply_add 0 1 1 1 0 1 128 128 128 128 128 128 128 128
explicit strides (not the same as auto)
bin/ckProfiler gemm_multiply_add 0 0 1 1 0 1 128 128 128 130 132 134 136 138
bin/ckProfiler gemm_multiply_add 0 1 1 1 0 1 128 128 128 130 132 134 136 138
mix of explicit and auto strides
bin/ckProfiler gemm_multiply_add 0 0 1 1 0 1 128 128 128 128 128 128 128 0
invalid stride
bin/ckProfiler gemm_multiply_add 0 0 1 1 0 1 128 128 128 0 0 0 0 64
terminate called after throwing an instance of 'std::runtime_error'
what(): Invalid strides for RowMajor: mLens: 128 128 , mStrides: 64 1
Aborted (core dumped)
* - add more names to ck::tensor_layout for easier namespace hierarchy checking
- updated convolutional layouts to use explicit ones or BaseConvolutionalLayout where it is not clear which layout to use (TBD) - see include/ck/library/utility/convolution_host_tensor_descriptor_helper.hpp
* added handling of partially initialized strides for GEMM. fixed more tests.
* clang-format and more fixes
* replace long dash by a simple hyphen - causes build failure in CK codegen.
* increase sizeof input, otherwise output size becomes zero or negative with large filter size
* select stride based on layout
* specify layout explicitly to avoid errors in HostTensorDescriptor creation
* add validation for higher GEMM tensor dimensions.; Add docstring to `HostTensorDescriptor`
* Not clear why permute test in test/permute_scale/test_permute_scale.cpp uses a lot of invalid strides. Setting layout to BypassLayoutVerification to avoid a lot of errors
* fix test (incl removing invalid config)
* fix moe examples:
- (in .cpp) add layout argument to non-2D tensors
- (in .hpp) fix asserts/failures that show up in Debug mode, specifically addressing 2D tensor by a single index (and 3D tensor by 2d index)
* fix moe_gemm2 example.
* fix profile and wmma examples
* clean-up early mods for ckprofile. verified with:
```
ckProfiler gemm_multiply_add 0 0 1 1 0 1 128 128 128 0 0 0 0 0
ckProfiler gemm_multiply_add 0 1 1 1 0 1 128 128 128 0 0 0 0 0
ckProfiler gemm_multiply_add 0 0 1 1 0 1 128 128 128 130 132 134 136 138
ckProfiler gemm_multiply_add 0 1 1 1 0 1 128 128 128 130 132 134 136 138
#
ckProfiler gemm_fastgelu 1 0 1 2 0 1 128 128 128 0 0 0
ckProfiler gemm_fastgelu 1 1 1 2 0 1 128 128 128 0 0 0
ckProfiler gemm_fastgelu 1 2 1 2 0 1 128 128 128 0 0 0
ckProfiler gemm_fastgelu 1 3 1 2 0 1 128 128 128 0 0 0
ckProfiler gemm_fastgelu 1 0 1 2 0 1 128 128 128 128 128 128
#
ckProfiler gemm_add_relu 0 0 1 1 0 1 128 128 128 0 0 0 0
# ckProfiler gemm_add_relu 0 1 1 1 0 1 128 128 128 0 0 0 0 # not implemented
# ckProfiler gemm_add_relu 0 2 1 1 0 1 128 128 128 0 0 0 0 # not implemented
# ckProfiler gemm_add_relu 0 3 1 1 0 1 128 128 128 0 0 0 0 # not implemented
ckProfiler gemm_add_relu 0 0 1 1 0 1 128 128 128 128 128 128 128
#
ckProfiler gemm_add_relu_add_layernorm 1 0 1 1 0 0 128 128 128 0 0 0 0 0
ckProfiler gemm_add_relu_add_layernorm 1 1 1 1 0 0 128 128 128 0 0 0 0 0
ckProfiler gemm_add_relu_add_layernorm 1 2 1 1 0 0 128 128 128 0 0 0 0 0
ckProfiler gemm_add_relu_add_layernorm 1 3 1 1 0 0 128 128 128 0 0 0 0 0
ckProfiler gemm_add_relu_add_layernorm 1 0 1 1 0 0 128 128 128 130 132 134 136 138
#
example_gemm_add_multiply_dl_fp16
example_gemm_add_multiply_xdl_fp16
#
ckProfiler gemm_blockscale_wp 7 1 1 1 1 0 1 128 128 128 0 0 0
ckProfiler gemm_blockscale_wp 7 1 1 1 1 0 1 128 128 128 128 128 128
```
* temporary skip first 8 test configs - they throw error
* temporary skip first 8 test configs in wmma too - they throw error
---------
Co-authored-by: Illia Silin <98187287+illsilin@users.noreply.github.com>
[ROCm/composable_kernel commit: db2524be2d]
259 lines
9.7 KiB
C++
259 lines
9.7 KiB
C++
// SPDX-License-Identifier: MIT
|
|
// Copyright (c) 2018-2025, Advanced Micro Devices, Inc. All rights reserved.
|
|
|
|
#pragma once
|
|
|
|
// use macro to minimize code change
|
|
#ifndef EXAMPLE_WITH_COMPUTE_DATATYPE
|
|
using ComputeDataType = AccDataType;
|
|
#endif
|
|
|
|
template <typename ProblemType>
|
|
bool run_gemm(const ProblemType& problem_size, const ExecutionConfig& config)
|
|
{
|
|
#if defined(BUILD_INT4_EXAMPLE) && defined(CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4)
|
|
static_assert(sizeof(ck::int4_t) == sizeof(int8_t));
|
|
#endif
|
|
|
|
using namespace ck::literals;
|
|
|
|
auto M = problem_size.M;
|
|
auto N = problem_size.N;
|
|
auto K = problem_size.K;
|
|
auto StrideA = problem_size.StrideA;
|
|
auto StrideB = problem_size.StrideB;
|
|
auto StrideC = problem_size.StrideC;
|
|
|
|
auto f_host_tensor_descriptor =
|
|
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
|
|
if constexpr(std::is_same_v<decltype(layout), ck::tensor_layout::gemm::RowMajor>)
|
|
{
|
|
return HostTensorDescriptor({row, col}, {stride, 1_uz}, layout);
|
|
}
|
|
else
|
|
{
|
|
return HostTensorDescriptor({row, col}, {1_uz, stride}, layout);
|
|
}
|
|
};
|
|
|
|
auto f_get_default_stride =
|
|
[](std::size_t row, std::size_t col, ck::index_t stride, auto layout) {
|
|
if(stride == -1 || stride == 0)
|
|
{
|
|
// give a chance if stride is -1, return a default packed stride
|
|
if constexpr(std::is_same_v<decltype(layout), ck::tensor_layout::gemm::RowMajor>)
|
|
{
|
|
return static_cast<std::size_t>(col);
|
|
}
|
|
else
|
|
{
|
|
return static_cast<std::size_t>(row);
|
|
}
|
|
}
|
|
else
|
|
return static_cast<std::size_t>(stride);
|
|
};
|
|
|
|
StrideA = f_get_default_stride(M, K, StrideA, ALayout{});
|
|
StrideB = f_get_default_stride(K, N, StrideB, BLayout{});
|
|
StrideC = f_get_default_stride(M, N, StrideC, CLayout{});
|
|
|
|
Tensor<ADataType> a_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
|
|
Tensor<BDataType> b_k_n(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));
|
|
|
|
switch(config.init_method)
|
|
{
|
|
case 0:
|
|
ck::utils::FillConstant<ADataType>{ck::type_convert<ADataType>(1.f)}(a_m_k);
|
|
ck::utils::FillConstant<BDataType>{ck::type_convert<BDataType>(1.f)}(b_k_n);
|
|
break;
|
|
case 1:
|
|
ck::utils::FillUniformDistributionIntegerValue<ADataType>{-5.f, 5.f}(a_m_k);
|
|
ck::utils::FillUniformDistributionIntegerValue<BDataType>{-5.f, 5.f}(b_k_n);
|
|
break;
|
|
case 2:
|
|
ck::utils::FillUniformDistribution<ADataType>{-1.f, 1.f}(a_m_k);
|
|
ck::utils::FillUniformDistribution<BDataType>{-1.f, 1.f}(b_k_n);
|
|
break;
|
|
case 3:
|
|
ck::utils::FillUniformDistributionIntegerValue<ADataType>{1.f, 1.f}(a_m_k);
|
|
ck::utils::FillUniformDistributionIntegerValue<BDataType>{-5.f, 5.f}(b_k_n);
|
|
break;
|
|
case 4:
|
|
ck::utils::FillUniformDistributionIntegerValue<ADataType>{-5.f, 5.f}(a_m_k);
|
|
ck::utils::FillUniformDistributionIntegerValue<BDataType>{1.f, 1.f}(b_k_n);
|
|
break;
|
|
case 5:
|
|
ck::utils::FillUniformDistributionIntegerValue<ADataType>{-2.f, 2.f}(a_m_k);
|
|
ck::utils::FillUniformDistributionIntegerValue<BDataType>{-2.f, 2.f}(b_k_n);
|
|
break;
|
|
default:
|
|
ck::utils::FillUniformDistribution<ADataType>{-0.1f, 0.1f}(a_m_k);
|
|
ck::utils::FillUniformDistribution<BDataType>{-0.1f, 0.1f}(b_k_n);
|
|
}
|
|
|
|
Tensor<CDataType> c_m_n_host_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
|
|
Tensor<CDataType> c_m_n_device_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
|
|
Tensor<CDataType> c_m_n_device_ref_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
|
|
|
|
std::cout << "a_m_k: " << a_m_k.mDesc << std::endl;
|
|
std::cout << "b_k_n: " << b_k_n.mDesc << std::endl;
|
|
std::cout << "c_m_n: " << c_m_n_host_result.mDesc << std::endl;
|
|
|
|
#ifdef BUILD_INT4_EXAMPLE
|
|
DeviceMem a_m_k_device_buf(sizeof(KernelADataType) * a_m_k.mDesc.GetElementSpaceSize());
|
|
DeviceMem b_k_n_device_buf(sizeof(KernelBDataType) * b_k_n.mDesc.GetElementSpaceSize());
|
|
DeviceMem c_m_n_device_buf(sizeof(KernelCDataType) *
|
|
c_m_n_device_result.mDesc.GetElementSpaceSize());
|
|
|
|
const Tensor<KernelADataType> a_m_k_converted(a_m_k);
|
|
const Tensor<KernelBDataType> b_k_n_converted(b_k_n);
|
|
|
|
a_m_k_device_buf.ToDevice(a_m_k_converted.mData.data());
|
|
b_k_n_device_buf.ToDevice(b_k_n_converted.mData.data());
|
|
#else
|
|
DeviceMem a_m_k_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpaceSize());
|
|
DeviceMem b_k_n_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpaceSize());
|
|
DeviceMem c_m_n_device_buf(sizeof(CDataType) * c_m_n_device_result.mDesc.GetElementSpaceSize());
|
|
DeviceMem c_m_n_device_ref_buf(sizeof(CDataType) *
|
|
c_m_n_device_ref_result.mDesc.GetElementSpaceSize());
|
|
|
|
a_m_k_device_buf.ToDevice(a_m_k.mData.data());
|
|
b_k_n_device_buf.ToDevice(b_k_n.mData.data());
|
|
#endif
|
|
DeviceMem workspace;
|
|
|
|
auto a_element_op = AElementOp{};
|
|
auto b_element_op = BElementOp{};
|
|
auto c_element_op = CElementOp{};
|
|
|
|
// do GEMM
|
|
auto gemm = DeviceGemmInstance{};
|
|
auto invoker = gemm.MakeInvoker();
|
|
float ave_time = 0;
|
|
|
|
if constexpr(std::is_same<ProblemType, ProblemSize>::value)
|
|
{
|
|
auto argument = gemm.MakeArgument(
|
|
#ifdef BUILD_INT4_EXAMPLE
|
|
static_cast<KernelADataType*>(a_m_k_device_buf.GetDeviceBuffer()),
|
|
static_cast<KernelBDataType*>(b_k_n_device_buf.GetDeviceBuffer()),
|
|
static_cast<KernelCDataType*>(c_m_n_device_buf.GetDeviceBuffer()),
|
|
#else
|
|
static_cast<ADataType*>(a_m_k_device_buf.GetDeviceBuffer()),
|
|
static_cast<BDataType*>(b_k_n_device_buf.GetDeviceBuffer()),
|
|
static_cast<CDataType*>(c_m_n_device_buf.GetDeviceBuffer()),
|
|
#endif
|
|
M,
|
|
N,
|
|
K,
|
|
StrideA,
|
|
StrideB,
|
|
StrideC,
|
|
a_element_op,
|
|
b_element_op,
|
|
c_element_op);
|
|
|
|
if(!gemm.IsSupportedArgument(argument))
|
|
{
|
|
std::cerr << gemm.GetTypeString() << " does not support this problem" << std::endl;
|
|
|
|
return true;
|
|
}
|
|
|
|
ave_time = invoker.Run(argument, StreamConfig{nullptr, config.time_kernel});
|
|
}
|
|
else
|
|
{
|
|
// When the Problem Type and Problem Size does not fit.
|
|
|
|
std::cerr << gemm.GetTypeString() << ": the instance does not support the problem config."
|
|
<< std::endl;
|
|
return true;
|
|
}
|
|
|
|
std::size_t flop = 2_uz * M * N * K;
|
|
std::size_t num_btype =
|
|
sizeof(ADataType) * M * K + sizeof(BDataType) * K * N + sizeof(CDataType) * M * N;
|
|
|
|
float tflops = static_cast<float>(flop) / 1.E9 / ave_time;
|
|
|
|
float gb_per_sec = num_btype / 1.E6 / ave_time;
|
|
|
|
std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec << " GB/s, "
|
|
<< gemm.GetTypeString() << std::endl;
|
|
|
|
bool pass = true;
|
|
|
|
if((config.do_verification == 1) || (config.do_verification == 3))
|
|
{
|
|
// CPU verification
|
|
auto ref_gemm = ReferenceGemmInstance{};
|
|
auto ref_invoker = ref_gemm.MakeInvoker();
|
|
|
|
auto ref_argument = ref_gemm.MakeArgument(
|
|
a_m_k, b_k_n, c_m_n_host_result, a_element_op, b_element_op, c_element_op);
|
|
|
|
std::cout << "Running verification on CPU." << std::endl;
|
|
ref_invoker.Run(ref_argument);
|
|
|
|
#ifdef BUILD_INT4_EXAMPLE
|
|
Tensor<CDataType> c_m_n_device_result_converted(c_m_n_host_result.mDesc);
|
|
|
|
c_m_n_device_buf.FromDevice(c_m_n_device_result_converted.mData.data());
|
|
|
|
c_m_n_device_result = c_m_n_device_result_converted.CopyAsType<CDataType>();
|
|
|
|
return ck::utils::check_err(c_m_n_device_result_converted, c_m_n_host_result);
|
|
#else
|
|
c_m_n_device_buf.FromDevice(c_m_n_device_result.mData.data());
|
|
|
|
pass &= ck::utils::check_err(c_m_n_device_result,
|
|
c_m_n_host_result,
|
|
"Error: Incorrect results!",
|
|
get_rtol<CDataType, ComputeDataType>(),
|
|
get_atol<CDataType, ComputeDataType>());
|
|
#endif
|
|
}
|
|
|
|
if((config.do_verification == 2) || (config.do_verification == 3))
|
|
{
|
|
// GPU verification
|
|
auto ref_gemm_gpu = ReferenceGemmInstanceGPU{};
|
|
auto ref_invoker_gpu = ref_gemm_gpu.MakeInvoker();
|
|
|
|
auto ref_argument_gpu = ref_gemm_gpu.MakeArgument(
|
|
static_cast<ADataType*>(a_m_k_device_buf.GetDeviceBuffer()),
|
|
static_cast<BDataType*>(b_k_n_device_buf.GetDeviceBuffer()),
|
|
static_cast<CDataType*>(c_m_n_device_ref_buf.GetDeviceBuffer()),
|
|
M,
|
|
N,
|
|
K,
|
|
a_element_op,
|
|
b_element_op,
|
|
c_element_op);
|
|
|
|
std::cout << "Running verification on GPU." << std::endl;
|
|
ref_invoker_gpu.Run(ref_argument_gpu, StreamConfig{});
|
|
|
|
c_m_n_device_ref_buf.FromDevice(c_m_n_device_ref_result.mData.data());
|
|
c_m_n_device_buf.FromDevice(c_m_n_device_result.mData.data());
|
|
|
|
pass &= ck::utils::check_err(c_m_n_device_result,
|
|
c_m_n_device_ref_result,
|
|
"Error: Incorrect results!",
|
|
get_rtol<CDataType, ComputeDataType>(),
|
|
get_atol<CDataType, ComputeDataType>());
|
|
}
|
|
|
|
return pass == true;
|
|
}
|
|
|
|
bool run_gemm_example(int argc, char* argv[])
|
|
{
|
|
ProblemSize problem_size;
|
|
ExecutionConfig config;
|
|
|
|
return !parse_cmd_args(argc, argv, problem_size, config) || run_gemm(problem_size, config);
|
|
}
|