mirror of
https://github.com/ROCm/composable_kernel.git
synced 2026-05-14 02:02:46 +00:00
[CK] Add command option instance_index and param_mask to run partial ck test (#2889)
* [CK] Add command option instance_index and param_mask to run partial ck test
Many CK tests are instance tests: they loop over every instance in the instance library, which often causes a test to run out of time when executed on a simulator/emulator.
This PR adds the options instance_index and param_mask to reduce the workload of instance tests.
instance_index: run only the single available instance with the specified index.
param_mask: filter the embedded parameters with the specified mask.
* fix CI error
* fix clang format
---------
Co-authored-by: illsilin_amdeng <Illia.Silin@amd.com>
[ROCm/composable_kernel commit: e78a897ec0]
This commit is contained in:
@@ -50,14 +50,14 @@ template<> struct emb_kernel<ck::half_t, 8192> { using kernel_type = DeviceInsta
|
||||
|
||||
// clang-format on
|
||||
|
||||
int main()
|
||||
int main(int argc, char* argv[])
|
||||
{
|
||||
bool time_kernel = true;
|
||||
|
||||
constexpr auto num_rows = 65536;
|
||||
constexpr auto dims = ck::Sequence<256, 512, 768, 1024, 1536, 2048, 4096, 8192>{};
|
||||
// constexpr auto dims = ck::Sequence<256, 512>{};
|
||||
constexpr auto index_length = 2048;
|
||||
ck::index_t num_rows = 65536;
|
||||
constexpr auto dims = ck::Sequence<256, 512, 768, 1024, 1536, 2048, 4096, 8192>{};
|
||||
ck::index_t index_length = 2048;
|
||||
ck::index_t dim_mask = 0xffff;
|
||||
constexpr AccDataType epsilon = 1e-4;
|
||||
|
||||
auto f_host_tensor_desc_1d = [](std::size_t len_) { return HostTensorDescriptor({len_}); };
|
||||
@@ -73,121 +73,140 @@ int main()
|
||||
BetaDataType,
|
||||
AccDataType,
|
||||
OutType>;
|
||||
|
||||
if(argc == 1)
|
||||
{
|
||||
// Use default value
|
||||
}
|
||||
else if(argc == 4)
|
||||
{
|
||||
num_rows = atoi(argv[1]);
|
||||
dim_mask = strtol(argv[2], nullptr, 0);
|
||||
index_length = atoi(argv[3]);
|
||||
}
|
||||
else
|
||||
{
|
||||
std::cout << "Usage of " << argv[0] << std::endl;
|
||||
std::cout << "Arg1-3: num_rows dim_mask index_length" << std::endl;
|
||||
}
|
||||
ck::static_for<0, dims.Size(), 1>{}([&](auto I) {
|
||||
std::srand(std::time(nullptr));
|
||||
constexpr auto current_dim = dims.At(I);
|
||||
Tensor<EmbType> emb_a(f_host_tensor_desc_2d(num_rows, current_dim));
|
||||
Tensor<EmbType> emb_b(f_host_tensor_desc_2d(num_rows, current_dim));
|
||||
Tensor<EmbType> emb_c(f_host_tensor_desc_2d(num_rows, current_dim));
|
||||
|
||||
Tensor<IndexType> index_a(f_host_tensor_desc_1d(index_length));
|
||||
Tensor<IndexType> index_b(f_host_tensor_desc_1d(index_length));
|
||||
Tensor<IndexType> index_c(f_host_tensor_desc_1d(index_length));
|
||||
|
||||
Tensor<GammaDataType> gamma(f_host_tensor_desc_1d(current_dim));
|
||||
Tensor<BetaDataType> beta(f_host_tensor_desc_1d(current_dim));
|
||||
|
||||
Tensor<OutType> out(f_host_tensor_desc_2d(index_length, current_dim));
|
||||
|
||||
emb_a.GenerateTensorValue(GeneratorTensor_3<EmbType>{0.0, 1.0});
|
||||
emb_b.GenerateTensorValue(GeneratorTensor_3<EmbType>{0.0, 1.0});
|
||||
emb_c.GenerateTensorValue(GeneratorTensor_3<EmbType>{0.0, 1.0});
|
||||
|
||||
index_a.GenerateTensorValue(GeneratorTensor_2<IndexType>{0, num_rows});
|
||||
index_b.GenerateTensorValue(GeneratorTensor_2<IndexType>{0, num_rows});
|
||||
index_c.GenerateTensorValue(GeneratorTensor_2<IndexType>{0, num_rows});
|
||||
|
||||
gamma.GenerateTensorValue(GeneratorTensor_3<GammaDataType>{0.0, 1.0});
|
||||
beta.GenerateTensorValue(GeneratorTensor_3<BetaDataType>{0.0, 1.0});
|
||||
|
||||
DeviceMem emb_a_dev(sizeof(EmbType) * emb_a.mDesc.GetElementSpaceSize());
|
||||
DeviceMem emb_b_dev(sizeof(EmbType) * emb_b.mDesc.GetElementSpaceSize());
|
||||
DeviceMem emb_c_dev(sizeof(EmbType) * emb_c.mDesc.GetElementSpaceSize());
|
||||
|
||||
DeviceMem index_a_dev(sizeof(IndexType) * index_a.mDesc.GetElementSpaceSize());
|
||||
DeviceMem index_b_dev(sizeof(IndexType) * index_b.mDesc.GetElementSpaceSize());
|
||||
DeviceMem index_c_dev(sizeof(IndexType) * index_c.mDesc.GetElementSpaceSize());
|
||||
|
||||
DeviceMem gamma_dev(sizeof(GammaDataType) * gamma.mDesc.GetElementSpaceSize());
|
||||
DeviceMem beta_dev(sizeof(BetaDataType) * beta.mDesc.GetElementSpaceSize());
|
||||
|
||||
DeviceMem out_dev(sizeof(OutType) * out.mDesc.GetElementSpaceSize());
|
||||
|
||||
emb_a_dev.ToDevice(emb_a.mData.data());
|
||||
emb_b_dev.ToDevice(emb_b.mData.data());
|
||||
emb_c_dev.ToDevice(emb_c.mData.data());
|
||||
|
||||
index_a_dev.ToDevice(index_a.mData.data());
|
||||
index_b_dev.ToDevice(index_b.mData.data());
|
||||
index_c_dev.ToDevice(index_c.mData.data());
|
||||
|
||||
gamma_dev.ToDevice(gamma.mData.data());
|
||||
beta_dev.ToDevice(beta.mData.data());
|
||||
|
||||
auto device_instance = typename emb_kernel<EmbType, current_dim>::kernel_type{};
|
||||
auto argument_ptr = device_instance.MakeArgumentPointer(
|
||||
out_dev.GetDeviceBuffer(),
|
||||
{ck::type_convert<EmbType*>(emb_a_dev.GetDeviceBuffer()),
|
||||
ck::type_convert<EmbType*>(emb_b_dev.GetDeviceBuffer()),
|
||||
ck::type_convert<EmbType*>(emb_c_dev.GetDeviceBuffer())},
|
||||
{ck::type_convert<IndexType*>(index_a_dev.GetDeviceBuffer()),
|
||||
ck::type_convert<IndexType*>(index_b_dev.GetDeviceBuffer()),
|
||||
ck::type_convert<IndexType*>(index_c_dev.GetDeviceBuffer())},
|
||||
gamma_dev.GetDeviceBuffer(),
|
||||
beta_dev.GetDeviceBuffer(),
|
||||
current_dim,
|
||||
index_length,
|
||||
epsilon,
|
||||
EmbElementwiseOperation{});
|
||||
std::cout << "Dim:" << current_dim << ", kernel:" << device_instance.GetTypeString()
|
||||
<< std::endl
|
||||
<< std::flush;
|
||||
|
||||
bool is_supported = device_instance.IsSupportedArgument(argument_ptr.get());
|
||||
|
||||
if(!is_supported)
|
||||
if(dim_mask & (1 << I.value))
|
||||
{
|
||||
std::cout << "Runtime parameters are not supported" << std::endl;
|
||||
return;
|
||||
std::srand(std::time(nullptr));
|
||||
constexpr auto current_dim = dims.At(I);
|
||||
Tensor<EmbType> emb_a(f_host_tensor_desc_2d(num_rows, current_dim));
|
||||
Tensor<EmbType> emb_b(f_host_tensor_desc_2d(num_rows, current_dim));
|
||||
Tensor<EmbType> emb_c(f_host_tensor_desc_2d(num_rows, current_dim));
|
||||
|
||||
Tensor<IndexType> index_a(f_host_tensor_desc_1d(index_length));
|
||||
Tensor<IndexType> index_b(f_host_tensor_desc_1d(index_length));
|
||||
Tensor<IndexType> index_c(f_host_tensor_desc_1d(index_length));
|
||||
|
||||
Tensor<GammaDataType> gamma(f_host_tensor_desc_1d(current_dim));
|
||||
Tensor<BetaDataType> beta(f_host_tensor_desc_1d(current_dim));
|
||||
|
||||
Tensor<OutType> out(f_host_tensor_desc_2d(index_length, current_dim));
|
||||
|
||||
emb_a.GenerateTensorValue(GeneratorTensor_3<EmbType>{0.0, 1.0});
|
||||
emb_b.GenerateTensorValue(GeneratorTensor_3<EmbType>{0.0, 1.0});
|
||||
emb_c.GenerateTensorValue(GeneratorTensor_3<EmbType>{0.0, 1.0});
|
||||
|
||||
index_a.GenerateTensorValue(GeneratorTensor_2<IndexType>{0, num_rows});
|
||||
index_b.GenerateTensorValue(GeneratorTensor_2<IndexType>{0, num_rows});
|
||||
index_c.GenerateTensorValue(GeneratorTensor_2<IndexType>{0, num_rows});
|
||||
|
||||
gamma.GenerateTensorValue(GeneratorTensor_3<GammaDataType>{0.0, 1.0});
|
||||
beta.GenerateTensorValue(GeneratorTensor_3<BetaDataType>{0.0, 1.0});
|
||||
|
||||
DeviceMem emb_a_dev(sizeof(EmbType) * emb_a.mDesc.GetElementSpaceSize());
|
||||
DeviceMem emb_b_dev(sizeof(EmbType) * emb_b.mDesc.GetElementSpaceSize());
|
||||
DeviceMem emb_c_dev(sizeof(EmbType) * emb_c.mDesc.GetElementSpaceSize());
|
||||
|
||||
DeviceMem index_a_dev(sizeof(IndexType) * index_a.mDesc.GetElementSpaceSize());
|
||||
DeviceMem index_b_dev(sizeof(IndexType) * index_b.mDesc.GetElementSpaceSize());
|
||||
DeviceMem index_c_dev(sizeof(IndexType) * index_c.mDesc.GetElementSpaceSize());
|
||||
|
||||
DeviceMem gamma_dev(sizeof(GammaDataType) * gamma.mDesc.GetElementSpaceSize());
|
||||
DeviceMem beta_dev(sizeof(BetaDataType) * beta.mDesc.GetElementSpaceSize());
|
||||
|
||||
DeviceMem out_dev(sizeof(OutType) * out.mDesc.GetElementSpaceSize());
|
||||
|
||||
emb_a_dev.ToDevice(emb_a.mData.data());
|
||||
emb_b_dev.ToDevice(emb_b.mData.data());
|
||||
emb_c_dev.ToDevice(emb_c.mData.data());
|
||||
|
||||
index_a_dev.ToDevice(index_a.mData.data());
|
||||
index_b_dev.ToDevice(index_b.mData.data());
|
||||
index_c_dev.ToDevice(index_c.mData.data());
|
||||
|
||||
gamma_dev.ToDevice(gamma.mData.data());
|
||||
beta_dev.ToDevice(beta.mData.data());
|
||||
|
||||
auto device_instance = typename emb_kernel<EmbType, current_dim>::kernel_type{};
|
||||
auto argument_ptr = device_instance.MakeArgumentPointer(
|
||||
out_dev.GetDeviceBuffer(),
|
||||
{ck::type_convert<EmbType*>(emb_a_dev.GetDeviceBuffer()),
|
||||
ck::type_convert<EmbType*>(emb_b_dev.GetDeviceBuffer()),
|
||||
ck::type_convert<EmbType*>(emb_c_dev.GetDeviceBuffer())},
|
||||
{ck::type_convert<IndexType*>(index_a_dev.GetDeviceBuffer()),
|
||||
ck::type_convert<IndexType*>(index_b_dev.GetDeviceBuffer()),
|
||||
ck::type_convert<IndexType*>(index_c_dev.GetDeviceBuffer())},
|
||||
gamma_dev.GetDeviceBuffer(),
|
||||
beta_dev.GetDeviceBuffer(),
|
||||
current_dim,
|
||||
index_length,
|
||||
epsilon,
|
||||
EmbElementwiseOperation{});
|
||||
std::cout << "Dim:" << current_dim << ", kernel:" << device_instance.GetTypeString()
|
||||
<< std::endl
|
||||
<< std::flush;
|
||||
|
||||
bool is_supported = device_instance.IsSupportedArgument(argument_ptr.get());
|
||||
|
||||
if(!is_supported)
|
||||
{
|
||||
std::cout << "Runtime parameters are not supported" << std::endl;
|
||||
return;
|
||||
}
|
||||
|
||||
auto invoker_ptr = device_instance.MakeInvokerPointer();
|
||||
float time_ms =
|
||||
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
|
||||
|
||||
bool pass = true;
|
||||
{
|
||||
Tensor<OutType> out_from_dev(f_host_tensor_desc_2d(index_length, current_dim));
|
||||
ReferenceInstance ref;
|
||||
auto ref_argument = ref.MakeArgument(out,
|
||||
emb_a,
|
||||
emb_b,
|
||||
emb_c,
|
||||
index_a,
|
||||
index_b,
|
||||
index_c,
|
||||
gamma,
|
||||
beta,
|
||||
num_rows,
|
||||
current_dim,
|
||||
index_length,
|
||||
epsilon);
|
||||
auto ref_invoker = ref.MakeInvoker();
|
||||
ref_invoker.Run(ref_argument);
|
||||
|
||||
out_dev.FromDevice(out_from_dev.mData.data());
|
||||
pass &=
|
||||
ck::utils::check_err(out_from_dev, out, "Error: Incorrect results", 1e-3, 1e-3);
|
||||
}
|
||||
|
||||
double total_read = current_dim * index_length * 3 * sizeof(EmbType) +
|
||||
current_dim * sizeof(GammaDataType) +
|
||||
current_dim * sizeof(BetaDataType);
|
||||
double total_write = current_dim * index_length * sizeof(OutType);
|
||||
double gbps = (total_read + total_write) / time_ms / 1e6;
|
||||
|
||||
std::cout << ", total bytes:" << (total_read + total_write) << ", time:" << time_ms
|
||||
<< ", gbps:" << gbps << ", valid:" << (pass ? "y" : "n") << std::endl
|
||||
<< std::flush;
|
||||
}
|
||||
|
||||
auto invoker_ptr = device_instance.MakeInvokerPointer();
|
||||
float time_ms = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
|
||||
|
||||
bool pass = true;
|
||||
{
|
||||
Tensor<OutType> out_from_dev(f_host_tensor_desc_2d(index_length, current_dim));
|
||||
ReferenceInstance ref;
|
||||
auto ref_argument = ref.MakeArgument(out,
|
||||
emb_a,
|
||||
emb_b,
|
||||
emb_c,
|
||||
index_a,
|
||||
index_b,
|
||||
index_c,
|
||||
gamma,
|
||||
beta,
|
||||
num_rows,
|
||||
current_dim,
|
||||
index_length,
|
||||
epsilon);
|
||||
auto ref_invoker = ref.MakeInvoker();
|
||||
ref_invoker.Run(ref_argument);
|
||||
|
||||
out_dev.FromDevice(out_from_dev.mData.data());
|
||||
pass &= ck::utils::check_err(out_from_dev, out, "Error: Incorrect results", 1e-3, 1e-3);
|
||||
}
|
||||
|
||||
double total_read = current_dim * index_length * 3 * sizeof(EmbType) +
|
||||
current_dim * sizeof(GammaDataType) +
|
||||
current_dim * sizeof(BetaDataType);
|
||||
double total_write = current_dim * index_length * sizeof(OutType);
|
||||
double gbps = (total_read + total_write) / time_ms / 1e6;
|
||||
|
||||
std::cout << ", total bytes:" << (total_read + total_write) << ", time:" << time_ms
|
||||
<< ", gbps:" << gbps << ", valid:" << (pass ? "y" : "n") << std::endl
|
||||
<< std::flush;
|
||||
});
|
||||
|
||||
return 0;
|
||||
|
||||
Reference in New Issue
Block a user