Mirror of https://github.com/ROCm/composable_kernel.git (synced 2026-05-14 10:09:41 +00:00)
[CK] Add command option instance_index and param_mask to run partial ck test (#2889)
* [CK] Add command option instance_index and param_mask to run partial ck test
Many CK tests are instance tests: they loop over every instance in the instance library, which often makes a test run out of time on a simulator/emulator.
This PR adds the options instance_index and param_mask to reduce the workload of instance tests (a hedged sketch of the masking idea follows after the commit reference below):
instance_index: run only the single available instance with the specified index.
param_mask: filter the embedded parameters with the specified mask.
* fix CI error
* fix clang format
---------
Co-authored-by: illsilin_amdeng <Illia.Silin@amd.com>
[ROCm/composable_kernel commit: e78a897ec0]
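The intent of param_mask is easiest to see in isolation. Below is a minimal, hypothetical sketch (not code from this commit) of the bit-mask filtering idea; it assumes the same dim list and the same base-0 strtol parsing that the modified emb layernorm example in the diff uses.

// Illustrative sketch only (not part of the commit): shows how a mask such as
// param_mask/dim_mask can select a subset of the embedded parameters, mirroring
// the `if(dim_mask & (1 << I.value))` gating added in the example below.
#include <cstdio>
#include <cstdlib>

int main(int argc, char* argv[])
{
    // Same dim list as the example's ck::Sequence<256, 512, ..., 8192>.
    const int dims[] = {256, 512, 768, 1024, 1536, 2048, 4096, 8192};

    // Accept the mask in hex or decimal (base 0), as the example does with
    // strtol(argv[2], nullptr, 0); default to "all bits set".
    const long dim_mask = (argc > 1) ? std::strtol(argv[1], nullptr, 0) : 0xffff;

    for(int i = 0; i < 8; ++i)
    {
        // Bit i of the mask enables the i-th embedded parameter.
        if(dim_mask & (1L << i))
            std::printf("would run dim %d\n", dims[i]);
    }
    return 0;
}

For instance, a mask of 0x3 would exercise only dims 256 and 512, which is how a simulator/emulator run can be shortened.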
@@ -100,13 +100,13 @@ int main(int argc, char* argv[])
const std::array<int, 2> reduceDims = {3, 4};
// const std::array<int, 3> invariantDims = {0, 1, 2};

const std::vector<size_t> inLengths_1 = {64, 320, 80, 4, 128};
std::vector<size_t> inLengths_1 = {64, 320, 80, 4, 128};

// input lengths of the second reduction, which is also the output lengths of the first
// reduction
const std::vector<size_t> inLengths_2 = {64, 320, 80, 4};
std::vector<size_t> inLengths_2 = {64, 320, 80, 4};

const std::vector<size_t> outLengths = {64, 320, 80};
std::vector<size_t> outLengths = {64, 320, 80};

if(argc == 1)
{
@@ -114,11 +114,26 @@ int main(int argc, char* argv[])
init_method = 2;
time_kernel = true;
}
else if(argc == 4)
else if((argc == 4) || (argc == 9))
{
do_verify = static_cast<bool>(argv[1]);
init_method = atoi(argv[2]);
time_kernel = static_cast<bool>(atoi(argv[3]));
if(argc == 9)
{
inLengths_1[0] = atoi(argv[4]);
inLengths_1[1] = atoi(argv[5]);
inLengths_1[2] = atoi(argv[6]);
inLengths_1[3] = atoi(argv[7]);
inLengths_1[4] = atoi(argv[8]);
inLengths_2[0] = inLengths_1[0];
inLengths_2[1] = inLengths_1[1];
inLengths_2[2] = inLengths_1[2];
inLengths_2[3] = inLengths_1[3];
outLengths[0] = inLengths_1[0];
outLengths[1] = inLengths_1[1];
outLengths[2] = inLengths_1[2];
}
}
else
{
@@ -50,14 +50,14 @@ template<> struct emb_kernel<ck::half_t, 8192> { using kernel_type = DeviceInsta

// clang-format on

int main()
int main(int argc, char* argv[])
{
bool time_kernel = true;

constexpr auto num_rows = 65536;
constexpr auto dims = ck::Sequence<256, 512, 768, 1024, 1536, 2048, 4096, 8192>{};
// constexpr auto dims = ck::Sequence<256, 512>{};
constexpr auto index_length = 2048;
ck::index_t num_rows = 65536;
constexpr auto dims = ck::Sequence<256, 512, 768, 1024, 1536, 2048, 4096, 8192>{};
ck::index_t index_length = 2048;
ck::index_t dim_mask = 0xffff;
constexpr AccDataType epsilon = 1e-4;

auto f_host_tensor_desc_1d = [](std::size_t len_) { return HostTensorDescriptor({len_}); };
@@ -73,121 +73,140 @@ int main()
BetaDataType,
AccDataType,
OutType>;

if(argc == 1)
{
// Use default value
}
else if(argc == 4)
{
num_rows = atoi(argv[1]);
dim_mask = strtol(argv[2], nullptr, 0);
index_length = atoi(argv[3]);
}
else
{
std::cout << "Usage of " << argv[0] << std::endl;
std::cout << "Arg1-3: num_rows dim_mask index_length" << std::endl;
}
ck::static_for<0, dims.Size(), 1>{}([&](auto I) {
std::srand(std::time(nullptr));
constexpr auto current_dim = dims.At(I);
Tensor<EmbType> emb_a(f_host_tensor_desc_2d(num_rows, current_dim));
Tensor<EmbType> emb_b(f_host_tensor_desc_2d(num_rows, current_dim));
Tensor<EmbType> emb_c(f_host_tensor_desc_2d(num_rows, current_dim));

Tensor<IndexType> index_a(f_host_tensor_desc_1d(index_length));
Tensor<IndexType> index_b(f_host_tensor_desc_1d(index_length));
Tensor<IndexType> index_c(f_host_tensor_desc_1d(index_length));

Tensor<GammaDataType> gamma(f_host_tensor_desc_1d(current_dim));
Tensor<BetaDataType> beta(f_host_tensor_desc_1d(current_dim));

Tensor<OutType> out(f_host_tensor_desc_2d(index_length, current_dim));

emb_a.GenerateTensorValue(GeneratorTensor_3<EmbType>{0.0, 1.0});
emb_b.GenerateTensorValue(GeneratorTensor_3<EmbType>{0.0, 1.0});
emb_c.GenerateTensorValue(GeneratorTensor_3<EmbType>{0.0, 1.0});

index_a.GenerateTensorValue(GeneratorTensor_2<IndexType>{0, num_rows});
index_b.GenerateTensorValue(GeneratorTensor_2<IndexType>{0, num_rows});
index_c.GenerateTensorValue(GeneratorTensor_2<IndexType>{0, num_rows});

gamma.GenerateTensorValue(GeneratorTensor_3<GammaDataType>{0.0, 1.0});
beta.GenerateTensorValue(GeneratorTensor_3<BetaDataType>{0.0, 1.0});

DeviceMem emb_a_dev(sizeof(EmbType) * emb_a.mDesc.GetElementSpaceSize());
DeviceMem emb_b_dev(sizeof(EmbType) * emb_b.mDesc.GetElementSpaceSize());
DeviceMem emb_c_dev(sizeof(EmbType) * emb_c.mDesc.GetElementSpaceSize());

DeviceMem index_a_dev(sizeof(IndexType) * index_a.mDesc.GetElementSpaceSize());
DeviceMem index_b_dev(sizeof(IndexType) * index_b.mDesc.GetElementSpaceSize());
DeviceMem index_c_dev(sizeof(IndexType) * index_c.mDesc.GetElementSpaceSize());

DeviceMem gamma_dev(sizeof(GammaDataType) * gamma.mDesc.GetElementSpaceSize());
DeviceMem beta_dev(sizeof(BetaDataType) * beta.mDesc.GetElementSpaceSize());

DeviceMem out_dev(sizeof(OutType) * out.mDesc.GetElementSpaceSize());

emb_a_dev.ToDevice(emb_a.mData.data());
emb_b_dev.ToDevice(emb_b.mData.data());
emb_c_dev.ToDevice(emb_c.mData.data());

index_a_dev.ToDevice(index_a.mData.data());
index_b_dev.ToDevice(index_b.mData.data());
index_c_dev.ToDevice(index_c.mData.data());

gamma_dev.ToDevice(gamma.mData.data());
beta_dev.ToDevice(beta.mData.data());

auto device_instance = typename emb_kernel<EmbType, current_dim>::kernel_type{};
auto argument_ptr = device_instance.MakeArgumentPointer(
out_dev.GetDeviceBuffer(),
{ck::type_convert<EmbType*>(emb_a_dev.GetDeviceBuffer()),
ck::type_convert<EmbType*>(emb_b_dev.GetDeviceBuffer()),
ck::type_convert<EmbType*>(emb_c_dev.GetDeviceBuffer())},
{ck::type_convert<IndexType*>(index_a_dev.GetDeviceBuffer()),
ck::type_convert<IndexType*>(index_b_dev.GetDeviceBuffer()),
ck::type_convert<IndexType*>(index_c_dev.GetDeviceBuffer())},
gamma_dev.GetDeviceBuffer(),
beta_dev.GetDeviceBuffer(),
current_dim,
index_length,
epsilon,
EmbElementwiseOperation{});
std::cout << "Dim:" << current_dim << ", kernel:" << device_instance.GetTypeString()
<< std::endl
<< std::flush;

bool is_supported = device_instance.IsSupportedArgument(argument_ptr.get());

if(!is_supported)
if(dim_mask & (1 << I.value))
{
std::cout << "Runtime parameters are not supported" << std::endl;
return;
std::srand(std::time(nullptr));
constexpr auto current_dim = dims.At(I);
Tensor<EmbType> emb_a(f_host_tensor_desc_2d(num_rows, current_dim));
Tensor<EmbType> emb_b(f_host_tensor_desc_2d(num_rows, current_dim));
Tensor<EmbType> emb_c(f_host_tensor_desc_2d(num_rows, current_dim));

Tensor<IndexType> index_a(f_host_tensor_desc_1d(index_length));
Tensor<IndexType> index_b(f_host_tensor_desc_1d(index_length));
Tensor<IndexType> index_c(f_host_tensor_desc_1d(index_length));

Tensor<GammaDataType> gamma(f_host_tensor_desc_1d(current_dim));
Tensor<BetaDataType> beta(f_host_tensor_desc_1d(current_dim));

Tensor<OutType> out(f_host_tensor_desc_2d(index_length, current_dim));

emb_a.GenerateTensorValue(GeneratorTensor_3<EmbType>{0.0, 1.0});
emb_b.GenerateTensorValue(GeneratorTensor_3<EmbType>{0.0, 1.0});
emb_c.GenerateTensorValue(GeneratorTensor_3<EmbType>{0.0, 1.0});

index_a.GenerateTensorValue(GeneratorTensor_2<IndexType>{0, num_rows});
index_b.GenerateTensorValue(GeneratorTensor_2<IndexType>{0, num_rows});
index_c.GenerateTensorValue(GeneratorTensor_2<IndexType>{0, num_rows});

gamma.GenerateTensorValue(GeneratorTensor_3<GammaDataType>{0.0, 1.0});
beta.GenerateTensorValue(GeneratorTensor_3<BetaDataType>{0.0, 1.0});

DeviceMem emb_a_dev(sizeof(EmbType) * emb_a.mDesc.GetElementSpaceSize());
DeviceMem emb_b_dev(sizeof(EmbType) * emb_b.mDesc.GetElementSpaceSize());
DeviceMem emb_c_dev(sizeof(EmbType) * emb_c.mDesc.GetElementSpaceSize());

DeviceMem index_a_dev(sizeof(IndexType) * index_a.mDesc.GetElementSpaceSize());
DeviceMem index_b_dev(sizeof(IndexType) * index_b.mDesc.GetElementSpaceSize());
DeviceMem index_c_dev(sizeof(IndexType) * index_c.mDesc.GetElementSpaceSize());

DeviceMem gamma_dev(sizeof(GammaDataType) * gamma.mDesc.GetElementSpaceSize());
DeviceMem beta_dev(sizeof(BetaDataType) * beta.mDesc.GetElementSpaceSize());

DeviceMem out_dev(sizeof(OutType) * out.mDesc.GetElementSpaceSize());

emb_a_dev.ToDevice(emb_a.mData.data());
emb_b_dev.ToDevice(emb_b.mData.data());
emb_c_dev.ToDevice(emb_c.mData.data());

index_a_dev.ToDevice(index_a.mData.data());
index_b_dev.ToDevice(index_b.mData.data());
index_c_dev.ToDevice(index_c.mData.data());

gamma_dev.ToDevice(gamma.mData.data());
beta_dev.ToDevice(beta.mData.data());

auto device_instance = typename emb_kernel<EmbType, current_dim>::kernel_type{};
auto argument_ptr = device_instance.MakeArgumentPointer(
out_dev.GetDeviceBuffer(),
{ck::type_convert<EmbType*>(emb_a_dev.GetDeviceBuffer()),
ck::type_convert<EmbType*>(emb_b_dev.GetDeviceBuffer()),
ck::type_convert<EmbType*>(emb_c_dev.GetDeviceBuffer())},
{ck::type_convert<IndexType*>(index_a_dev.GetDeviceBuffer()),
ck::type_convert<IndexType*>(index_b_dev.GetDeviceBuffer()),
ck::type_convert<IndexType*>(index_c_dev.GetDeviceBuffer())},
gamma_dev.GetDeviceBuffer(),
beta_dev.GetDeviceBuffer(),
current_dim,
index_length,
epsilon,
EmbElementwiseOperation{});
std::cout << "Dim:" << current_dim << ", kernel:" << device_instance.GetTypeString()
<< std::endl
<< std::flush;

bool is_supported = device_instance.IsSupportedArgument(argument_ptr.get());

if(!is_supported)
{
std::cout << "Runtime parameters are not supported" << std::endl;
return;
}

auto invoker_ptr = device_instance.MakeInvokerPointer();
float time_ms =
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});

bool pass = true;
{
Tensor<OutType> out_from_dev(f_host_tensor_desc_2d(index_length, current_dim));
ReferenceInstance ref;
auto ref_argument = ref.MakeArgument(out,
emb_a,
emb_b,
emb_c,
index_a,
index_b,
index_c,
gamma,
beta,
num_rows,
current_dim,
index_length,
epsilon);
auto ref_invoker = ref.MakeInvoker();
ref_invoker.Run(ref_argument);

out_dev.FromDevice(out_from_dev.mData.data());
pass &=
ck::utils::check_err(out_from_dev, out, "Error: Incorrect results", 1e-3, 1e-3);
}

double total_read = current_dim * index_length * 3 * sizeof(EmbType) +
current_dim * sizeof(GammaDataType) +
current_dim * sizeof(BetaDataType);
double total_write = current_dim * index_length * sizeof(OutType);
double gbps = (total_read + total_write) / time_ms / 1e6;

std::cout << ", total bytes:" << (total_read + total_write) << ", time:" << time_ms
<< ", gbps:" << gbps << ", valid:" << (pass ? "y" : "n") << std::endl
<< std::flush;
}

auto invoker_ptr = device_instance.MakeInvokerPointer();
float time_ms = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});

bool pass = true;
{
Tensor<OutType> out_from_dev(f_host_tensor_desc_2d(index_length, current_dim));
ReferenceInstance ref;
auto ref_argument = ref.MakeArgument(out,
emb_a,
emb_b,
emb_c,
index_a,
index_b,
index_c,
gamma,
beta,
num_rows,
current_dim,
index_length,
epsilon);
auto ref_invoker = ref.MakeInvoker();
ref_invoker.Run(ref_argument);

out_dev.FromDevice(out_from_dev.mData.data());
pass &= ck::utils::check_err(out_from_dev, out, "Error: Incorrect results", 1e-3, 1e-3);
}

double total_read = current_dim * index_length * 3 * sizeof(EmbType) +
current_dim * sizeof(GammaDataType) +
current_dim * sizeof(BetaDataType);
double total_write = current_dim * index_length * sizeof(OutType);
double gbps = (total_read + total_write) / time_ms / 1e6;

std::cout << ", total bytes:" << (total_read + total_write) << ", time:" << time_ms
<< ", gbps:" << gbps << ", valid:" << (pass ? "y" : "n") << std::endl
<< std::flush;
});

return 0;
@@ -68,6 +68,24 @@ int main(int argc, char* argv[])
}

std::vector<std::size_t> nchw = {16, 128, 32, 64};
if(argc == 1)
{
// use default case
}
else if(argc == 5)
{
nchw[0] = std::stoi(argv[1]);
nchw[1] = std::stoi(argv[2]);
nchw[2] = std::stoi(argv[3]);
nchw[3] = std::stoi(argv[4]);
}
else
{
std::cerr << "arg1 to 4: N, C, H, W" << std::endl;

return 1;
}

std::array<ck::index_t, 4> ab_lengths;
std::array<ck::index_t, 4> ab_strides = {static_cast<int>(nchw[1] * nchw[2] * nchw[3]),
static_cast<int>(nchw[2] * nchw[3]),
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
// Copyright (c) 2018-2025, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <numeric>
@@ -98,8 +98,23 @@ int main(int argc, char* argv[])
exit(0);
}

ck::index_t M = 48 * 256;
ck::index_t N = 1024;
ck::index_t M = 48 * 256;
ck::index_t N = 1024;
if(argc == 1)
{
// use default case
}
else if(argc == 3)
{
M = std::stoi(argv[1]);
N = std::stoi(argv[2]);
}
else
{
std::cerr << "arg1 to 2: M, N" << std::endl;
return 1;
}

ck::index_t Stride = N;

auto f_host_tensor_descriptor1d = [](std::size_t len, std::size_t stride) {
@@ -100,7 +100,7 @@ using GammaBetaDeviceInstance = ck::tensor_operation::device::DeviceNormalizatio
4, // DGammaDstVectorSize
4>; // DBetaDstVectorSize

int main()
int main(int argc, char* argv[])
{
bool time_kernel = false;
@@ -110,6 +110,25 @@ int main()
ck::index_t G = 32;
ck::index_t C = 64;

if(argc == 1)
{
// use default case
}
else if(argc == 6)
{
N = std::stoi(argv[1]);
H = std::stoi(argv[2]);
W = std::stoi(argv[3]);
G = std::stoi(argv[4]);
C = std::stoi(argv[5]);
}
else
{
std::cerr << "arg1 to 5: N, H, W, G, C" << std::endl;

return 1;
}

Tensor<DYDataType> dy({N, H, W, G, C});
Tensor<XDataType> x({N, H, W, G, C});
Tensor<GammaDataType> gamma({G, C});