Mirror of https://github.com/ROCm/composable_kernel.git (synced 2026-05-12 09:16:52 +00:00)
* Add remaining flavors for grouped conv fwd. The following variants are added:
  - grouped_conv2d_fwd_dynamic_op
  - grouped_conv3d_fwd_dynamic_op
  - grouped_conv3d_fwd_bilinear
  - grouped_conv3d_fwd_convscale
  - grouped_conv3d_fwd_convinvscale
  - grouped_conv3d_fwd_convscale_add
  - grouped_conv3d_fwd_convscale_relu
  - grouped_conv3d_fwd_scale
  - grouped_conv3d_fwd_combconvscale
  - grouped_conv3d_fwd_scaleadd_scaleadd_relu
* Fix incomplete parsing of types from source names in the add_instance_library() CMake function so we don't build f8 on RDNA3.
* Do not build f8/bf8-only flavor tests on RDNA3.
* Make sure we have proper generic instances (scalarPerVector = 1) for all instance lists related to the post-CES extra flavors, then disable all but one generic instance per instance list to reduce compile time.
* Post-rebase fix: template parameters for the Grouped Conv Fwd device implementation were tweaked upstream.
* Add int8 and fp16 overloads to the elementwise operations.
* Fix Copilot nits.
* Address review comments:
  - removed unnecessary examples for dynamic op
  - removed unnecessary conv specializations for all the flavors
  - removed spurious bilinear and scale source files
* clang-format
* Reduce the number of tests.

Co-authored-by: Wojciech Laskowski <wojciech.laskowski@streamhpc.com>
208 lines | 7.2 KiB | C++
// Copyright (c) Advanced Micro Devices, Inc., or its affiliates.
// SPDX-License-Identifier: MIT

#include <iostream>
#include <string> // std::stoi

#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/utility/data_type.hpp"
#include "ck/utility/ignore.hpp"

#include "profiler/profile_grouped_conv_fwd_impl.hpp"
#include "profiler_operation_registry.hpp"

// (input, weight, output) tensor layout, selected by arg3 of the profiler CLI.
enum struct ConvLayout
{
    GNHWC_GKYXC_GNHWK, // 0
    NHWGC_GKYXC_NHWGK, // 1
    NGCHW_GKYXC_NGKHW, // 2
    NGCHW_GKCYX_NGKHW, // 3
};

// (input, weight, output) data type, selected by arg2 of the profiler CLI.
enum struct ConvDataType
{
    F32_F32_F32,    // 0
    F16_F16_F16,    // 1
    BF16_BF16_BF16, // 2
    INT8_INT8_INT8, // 3
    F8_F8_F8,       // 4
    BF8_BF8_F8,     // 5
    F8_BF8_F8,      // 6
    BF8_F8_F8,      // 7
};

// Indexing data type for tensor coordinates, selected by arg4 of the profiler CLI.
enum struct IndexType
{
    INDEX_T,      // 0: 32-bit
    LONG_INDEX_T, // 1: 64-bit
};

#define OP_NAME "grouped_conv_fwd_dynamic_op"
#define OP_DESC "Grouped Convolution Forward+DynamicUnaryOp"

static void print_helper_msg()
{
    std::cout
        // clang-format off
        << "arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n"
        << "arg2: data type (0: Input fp32, Weight fp32, Output fp32\n"
        << "                 1: Input fp16, Weight fp16, Output fp16\n"
        << "                 2: Input bf16, Weight bf16, Output bf16\n"
        << "                 3: Input int8, Weight int8, Output int8\n"
        << "                 4: Input fp8, Weight fp8, Output fp8\n"
        << "                 5: Input bf8, Weight bf8, Output fp8\n"
        << "                 6: Input fp8, Weight bf8, Output fp8\n"
        << "                 7: Input bf8, Weight fp8, Output fp8)\n"
        << "arg3: tensor layout (0: Input[G, N, Hi, Wi, C], Weight[G, K, Y, X, C], Output[G, N, Ho, Wo, K]\n"
        << "                     1: Input[N, Hi, Wi, G, C], Weight[G, K, Y, X, C], Output[N, Ho, Wo, G, K]\n"
        << "                     2: Input[N, G, C, Hi, Wi], Weight[G, K, Y, X, C], Output[N, G, K, Ho, Wo]\n"
        << "                     3: Input[N, G, C, Hi, Wi], Weight[G, K, C, Y, X], Output[N, G, K, Ho, Wo])\n"
        << "arg4: indexing data type (0: 32-bit, 1: 64-bit)\n"
        << "arg5: verification (0: no, 1: yes)\n"
        << "arg6: initialization (0: no init, 1: integer value, 2: decimal value)\n"
        << "arg7: print tensor value (0: no, 1: yes)\n"
        << "arg8: time kernel (0: no, 1: yes)\n"
        << ck::utils::conv::get_conv_param_parser_helper_msg() << std::endl;
    // clang-format on
}
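
// Illustrative invocation (a sketch: the binary name ckProfiler and the exact order of the
// trailing conv parameters are taken from the helper message printed by
// get_conv_param_parser_helper_msg(), which is the authoritative reference). This profiles a
// 2D fp16 NHWGC forward conv with 32-bit indexing, verification on, integer init, no tensor
// dump, and kernel timing:
//   ckProfiler grouped_conv_fwd_dynamic_op 1 1 0 1 1 0 1 2 <G> <N> <K> <C> <6 * num_dim_spatial conv params...>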
int grouped_conv_fwd_dynamic_op(int argc, char* argv[])
{
    // argv[0] plus 8 control args (arg1..arg8) plus 1 for num_dim_spatial
    if(argc < 10)
    {
        print_helper_msg();
        return 1;
    }

    const auto data_type = static_cast<ConvDataType>(std::stoi(argv[2]));
    const auto layout = static_cast<ConvLayout>(std::stoi(argv[3]));
    const auto index_type = static_cast<IndexType>(std::stoi(argv[4]));
    const bool do_verification = std::stoi(argv[5]);
    const int init_method = std::stoi(argv[6]);
    const bool do_log = std::stoi(argv[7]);
    const bool time_kernel = std::stoi(argv[8]);
    const int num_dim_spatial = std::stoi(argv[9]);

    // 9 for argv[0] + control, 1 for num_dim_spatial, 4 for G/N/K/C, and
    // 6 * num_dim_spatial per-dimension params; e.g. num_dim_spatial == 2 needs
    // argc == 9 + 1 + 4 + 12 == 26.
    if(argc != 9 + 1 + 4 + 6 * num_dim_spatial)
    {
        print_helper_msg();
        return 1;
    }

    // Conv sizes start at argv[10]: G, N, K, C, followed by the per-dimension groups.
    const auto params = ck::utils::conv::parse_conv_param(num_dim_spatial, 10, argv);

    if(index_type != IndexType::INDEX_T)
    {
        std::cout << "this indexing data type is not implemented" << std::endl;
        return 1;
    }

    using F32  = float;
    using BF16 = ck::bhalf_t;
    using F16  = ck::half_t;

    // 3D layouts
    using GKZYXC = ck::tensor_layout::convolution::GKZYXC;
    using NDHWGC = ck::tensor_layout::convolution::NDHWGC;
    using NDHWGK = ck::tensor_layout::convolution::NDHWGK;

    // 2D layouts
    using GKYXC = ck::tensor_layout::convolution::GKYXC;
    using NHWGC = ck::tensor_layout::convolution::NHWGC;
    using NHWGK = ck::tensor_layout::convolution::NHWGK;

    constexpr auto I2 = ck::Number<2>{};
    constexpr auto I3 = ck::Number<3>{};

    // Generic profiling entry: the value parameters exist only to carry their types
    // into the template instantiation below.
    auto profile = [&](auto num_dim_spatial_tmp,
                       auto in_layout,
                       auto wei_layout,
                       auto out_layout,
                       auto in_type,
                       auto wei_type,
                       auto out_type,
                       auto a_compute_type,
                       auto b_compute_type) {
        constexpr ck::index_t NDimSpatial = num_dim_spatial_tmp.value;

        using InLayout  = decltype(in_layout);
        using WeiLayout = decltype(wei_layout);
        using OutLayout = decltype(out_layout);

        using InDataType  = decltype(in_type);
        using WeiDataType = decltype(wei_type);
        using OutDataType = decltype(out_type);

        using AComputeType = decltype(a_compute_type);
        using BComputeType = decltype(b_compute_type);

        // DynamicUnaryOp selects the elementwise operation at run time; here it wraps a
        // plain PassThrough so the measurement reflects the convolution itself.
        const auto dynamic_op = ck::tensor_operation::element_wise::DynamicUnaryOp{
            ck::tensor_operation::element_wise::PassThrough{}};

        bool pass = ck::profiler::profile_grouped_conv_fwd_impl<
            NDimSpatial,
            InLayout,
            WeiLayout,
            OutLayout,
            InDataType,
            WeiDataType,
            OutDataType,
            AComputeType,
            BComputeType,
            ck::index_t,
            ck::tensor_operation::element_wise::DynamicUnaryOp>(
            do_verification, init_method, do_log, time_kernel, params, dynamic_op);

        return pass ? 0 : 1;
    };

    if(num_dim_spatial == 2 && layout == ConvLayout::NHWGC_GKYXC_NHWGK)
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
            return profile(I2, NHWGC{}, GKYXC{}, NHWGK{}, F32{}, F32{}, F32{}, F32{}, F32{});
        }
        else if(data_type == ConvDataType::F16_F16_F16)
        {
            return profile(I2, NHWGC{}, GKYXC{}, NHWGK{}, F16{}, F16{}, F16{}, F16{}, F16{});
        }
        else if(data_type == ConvDataType::BF16_BF16_BF16)
        {
            return profile(I2, NHWGC{}, GKYXC{}, NHWGK{}, BF16{}, BF16{}, BF16{}, BF16{}, BF16{});
        }
        else if(data_type == ConvDataType::INT8_INT8_INT8)
        {
            return profile(
                I2, NHWGC{}, GKYXC{}, NHWGK{}, int8_t{}, int8_t{}, int8_t{}, int8_t{}, int8_t{});
        }
    }
    else if(num_dim_spatial == 3 && layout == ConvLayout::NHWGC_GKYXC_NHWGK)
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
            return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, F32{}, F32{}, F32{}, F32{}, F32{});
        }
        else if(data_type == ConvDataType::F16_F16_F16)
        {
            return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, F16{}, F16{}, F16{}, F16{}, F16{});
        }
        else if(data_type == ConvDataType::BF16_BF16_BF16)
        {
            return profile(
                I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, BF16{}, BF16{}, BF16{}, BF16{}, BF16{});
        }
        else if(data_type == ConvDataType::INT8_INT8_INT8)
        {
            return profile(
                I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, int8_t{}, int8_t{}, int8_t{}, int8_t{}, int8_t{});
        }
    }

    std::cout << "this data_type & layout combination is not implemented" << std::endl;

    return 1;
}

REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, grouped_conv_fwd_dynamic_op);
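// The macro above (declared in profiler_operation_registry.hpp) registers this entry point
// with the profiler's operation registry under OP_NAME, which is how the ckProfiler driver
// dispatches "grouped_conv_fwd_dynamic_op" on the command line to this function.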