// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2025, Advanced Micro Devices, Inc. All rights reserved.

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <ctime>
#include <iostream>
#include <string>
#include <tuple>
#include <type_traits>
#include <vector>

#include "ck_tile/host.hpp"
#include "transpose_example.hpp"

#if 0
// Debug helper: dump a 4D host tensor to stdout (disabled by default).
template <typename T>
void dump_host_tensor_4d(const ck_tile::HostTensor<T>& x)
{
    auto len = x.get_lengths();
    assert(len.size() == 4);
    std::cout << "[";
    for(size_t i = 0; i < len[0]; i++)
    {
        std::cout << i << ": [";
        for(size_t j = 0; j < len[1]; j++)
        {
            std::cout << j << ": [";
            for(size_t k = 0; k < len[2]; k++)
            {
                std::cout << k << ": [";
                for(size_t v = 0; v < len[3]; v++)
                {
                    // fp8 values do not stream as numbers, so convert to float first
                    if constexpr(std::is_same_v<T, ck_tile::fp8_t>)
                    {
                        auto m = ck_tile::type_convert<float>(x(std::vector<size_t>{i, j, k, v}));
                        std::cout << m;
                        if(v != len[3] - 1)
                            std::cout << ",";
                    }
                    else
                    {
                        std::cout << x(std::vector<size_t>{i, j, k, v}) << " ";
                    }
                }
                std::cout << "]" << std::endl;
            }
            std::cout << "]" << std::endl;
        }
        std::cout << std::endl;
    }
    std::cout << "--------------------" << std::endl;
}
#endif

// different error thresholds for different data types
template <typename DataType>
auto get_elimit(std::string /*init_method*/)
{
    double rtol = 1e-3;
    double atol = 1e-3;
    return ck_tile::make_tuple(rtol, atol);
}

template <>
auto get_elimit<ck_tile::bf16_t>(std::string /*init_method*/)
{
    double rtol = 1e-2;
    double atol = 1e-2;
    return ck_tile::make_tuple(rtol, atol);
}

template <>
auto get_elimit<ck_tile::fp8_t>(std::string init_method)
{
    if(init_method == "ui" || init_method == "ni")
    {
        unsigned max_rounding_point_distance = 0;
        double atol                          = 2e-3;
        return ck_tile::make_tuple(max_rounding_point_distance, atol);
    }
    else
    {
        unsigned max_rounding_point_distance = 1;
        double atol                          = 0.0625;
        return ck_tile::make_tuple(max_rounding_point_distance, atol);
    }
}

auto create_args(int argc, char* argv[])
{
    ck_tile::ArgParser arg_parser;
    arg_parser.insert("v", "1", "whether to do CPU validation or not")
        .insert("pr", "fp16", "input data type. fp16/fp8")
        .insert("N", "2", "input batch size")
        .insert("C", "64", "input channel size")
        .insert("H", "1", "input height size")
        .insert("W", "64", "input width size")
        .insert("layout_in", "NCHW", "input tensor data layout - NCHW by default")
        .insert("layout_out", "NHWC", "output tensor data layout - NHWC by default")
        .insert("seed", "-1", "seed to be used, -1 means random every time")
        .insert("kname", "0", "set to 1 to print the kernel name");

    bool result = arg_parser.parse(argc, argv);
    return std::make_tuple(result, arg_parser);
}

template <typename Type>
bool run_batched_transpose(ck_tile::ArgParser args)
{
    int validate           = args.get_int("v");
    std::string prec       = args.get_str("pr");
    int N                  = args.get_int("N");
    int C                  = args.get_int("C");
    int H                  = args.get_int("H");
    int W                  = args.get_int("W");
    std::string layout_in  = args.get_str("layout_in");
    std::string layout_out = args.get_str("layout_out");
    int seed               = args.get_int("seed");

    int dim_in[4], dim_out[4];
    int stride_dim_in[4], stride_dim_out[4];

    bool nchw2nhwc = layout_in == "NCHW" && layout_out == "NHWC";
    bool nhwc2nchw = layout_in == "NHWC" && layout_out == "NCHW";
    // exactly one of the two supported layout conversions must be requested
    assert(nchw2nhwc != nhwc2nchw);
    (void)nhwc2nchw;

    // tensor extents in memory order for the requested layouts
    dim_in[0] = N;
    dim_in[1] = nchw2nhwc ? C : H;
    dim_in[2] = nchw2nhwc ? H : W;
    dim_in[3] = nchw2nhwc ? W : C;

    dim_out[0] = N;
    dim_out[1] = nchw2nhwc ? H : C;
    dim_out[2] = nchw2nhwc ? W : H;
    dim_out[3] = nchw2nhwc ? C : W;

    // packed strides matching those extents
    stride_dim_in[0] = C * H * W;
    stride_dim_in[1] = nchw2nhwc ? H * W : C * W;
    stride_dim_in[2] = nchw2nhwc ? W : C;
    stride_dim_in[3] = 1;

    stride_dim_out[0] = C * H * W;
    stride_dim_out[1] = nchw2nhwc ? C * W : H * W;
    stride_dim_out[2] = nchw2nhwc ? C : W;
    stride_dim_out[3] = 1;
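
    // A small worked example of the extent/stride math above, assuming
    // N=1, C=2, H=2, W=3 and the NCHW -> NHWC direction:
    //   input  extents {1, 2, 2, 3}, strides {12, 6, 3, 1}
    //   output extents {1, 2, 3, 2}, strides {12, 6, 2, 1}
    // so the input element (n, c, h, w) at offset c*6 + h*3 + w lands at
    // output offset h*6 + w*2 + c.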

    if(seed < 0)
    {
        seed = std::time(nullptr);
    }

    ck_tile::HostTensor<Type> x_host(
        {dim_in[0], dim_in[1], dim_in[2], dim_in[3]},
        {stride_dim_in[0], stride_dim_in[1], stride_dim_in[2], stride_dim_in[3]});
    ck_tile::HostTensor<Type> y_host(
        {dim_out[0], dim_out[1], dim_out[2], dim_out[3]},
        {stride_dim_out[0], stride_dim_out[1], stride_dim_out[2], stride_dim_out[3]});

    ck_tile::FillUniformDistribution<Type>{-.5f, .5f}(x_host);

    ck_tile::DeviceMem x_dev(x_host.get_element_space_size_in_bytes());
    ck_tile::DeviceMem y_dev(y_host.get_element_space_size_in_bytes());

    x_dev.ToDevice(x_host.data());

    auto trait = batched_transpose_trait{prec, layout_in};

    // the kernel treats each batch as a 2D (height x width) -> (width x height) transpose
    uint32_t height = nchw2nhwc ? C : H * W;
    uint32_t width  = nchw2nhwc ? H * W : C;

    batched_transpose_kargs karg = [&]() {
        batched_transpose_kargs a_;
        a_.p_input  = x_dev.GetDeviceBuffer();
        a_.p_output = y_dev.GetDeviceBuffer();
        a_.batch    = N;
        a_.height   = height;
        a_.width    = width;
        return a_;
    }();

    ck_tile::stream_config sc{nullptr, true};
    auto ms = batched_transpose(trait, karg, sc);

    // nominal operation count used for the TFlops figure
    std::size_t num_operations = N * C * H * (W - 1);
    // bytes moved: the input is read once and the output written once
    std::size_t num_bytes = 2 * N * C * H * W * sizeof(Type);

    float ave_time   = ms * 1E-3; // average kernel time in seconds
    float gb_per_sec = num_bytes / ms * 1.E-6;
    float tflops     = static_cast<float>(num_operations) / ms * 1.E-9;

    std::cout << "Run Batched Transpose kernel with N=" << N << ", C=" << C << ", H=" << H
              << ", W=" << W << ", layout_in=" << layout_in << ", layout_out=" << layout_out
              << " : " << ms << " ms (" << ave_time << " s average), " << tflops << " TFlops, "
              << gb_per_sec << " GB/s" << std::endl;
    printf("[%s] N:%d, C:%d, H:%d, W:%d, layout_in:%s, %f\n",
           prec.c_str(), N, C, H, W, layout_in.c_str(), ms);
    fflush(stdout);

    if(ms < 0)
    {
        printf("not supported\n");
        fflush(stdout);
        return false;
    }

    y_dev.FromDevice(y_host.data());

    bool rtn = true;
    if(validate)
    {
        // the reference tensor never leaves the host; its strides simply mirror
        // the device output layout so check_err compares element by element
        ck_tile::HostTensor<Type> y_ref(
            {dim_out[0], dim_out[1], dim_out[2], dim_out[3]},
            {stride_dim_out[0], stride_dim_out[1], stride_dim_out[2], stride_dim_out[3]});
        ck_tile::reference_batched_transpose(x_host, y_ref, layout_in, layout_out);

        auto [rtol, atol] = get_elimit<Type>("");
        rtn &= ck_tile::check_err(
            y_host, y_ref, std::string("y Error: Incorrect results!"), rtol, atol);
    }

    printf("valid:%s\n", rtn ? "y" : "n");
    fflush(stdout);
    return rtn;
}

int main(int argc, char** argv)
{
    auto [result, args] = create_args(argc, argv);
    if(!result)
        return -1;

    std::string prec = args.get_str("pr");

    bool r = true;
    if(prec.compare("fp16") == 0)
    {
        r &= run_batched_transpose<ck_tile::half_t>(args);
    }
    else if(prec.compare("fp8") == 0)
    {
        r &= run_batched_transpose<ck_tile::fp8_t>(args);
    }
    else
    {
        std::cerr << "Unsupported data type: " << prec << std::endl;
        r = false;
    }

    return r ? 0 : -1;
}
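
// A minimal usage sketch (not part of the build). The binary name below is an
// assumption (it depends on the CMake target); the flags map directly to the
// options registered in create_args() above:
//
//   ./tile_example_batched_transpose -pr=fp16 -N=2 -C=64 -H=1 -W=64 \
//       -layout_in=NCHW -layout_out=NHWC -v=1
//
// On success this prints the timing line from run_batched_transpose() followed
// by "valid:y" once the CPU reference check passes.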