mirror of
https://github.com/ROCm/composable_kernel.git
synced 2026-05-17 03:19:48 +00:00
* Adding remaining flavors for grouped conv fwd
As titled. The following variants are added:
- grouped_conv2d_fwd_dynamic_op
- grouped_conv3d_fwd_dynamic_op
- grouped_conv3d_fwd_bilinear
- grouped_conv3d_fwd_convscale
- grouped_conv3d_fwd_convinvscale
- grouped_conv3d_fwd_convscale_add
- grouped_conv3d_fwd_convscale_relu
- grouped_conv3d_fwd_scale
- grouped_conv3d_fwd_combconvscale
- grouped_conv3d_fwd_scaleadd_scaleadd_relu
* Fix incomplete parsing of types from source names in the add_instance_library() CMakeLists function so we don't build f8 on RDNA3.
* Do not build f8 / bf8 only flavor tests on RDNA3
* Make sure we have proper generic instances for all instance lists related to the post-ces extra flavors, with scalarPerVector = 1. Then disable all but one generic instance per instance list to reduce compile time.
* Post rebase fix: Template parameters for Grouped Conv Fwd Device Impl got tweaked upstream.
* Adding int8 and fp16 overloads to the elementwise operations (see the sketch after this message)
* Fixed Copilot nits
* Addressing review comments:
- removed unnecessary examples for dynamic op
- removed unnecessary conv specializations for all the flavors
- removed spurious bilinear and scale source files
* clang-format
* Reduced the number of tests
---------
Co-authored-by: Wojciech Laskowski <wojciech.laskowski@streamhpc.com>
[ROCm/composable_kernel commit: 2377a62837]
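For context on the "int8 and fp16 overloads" item above: CK element-wise operators are functors whose operator() is specialized per data-type pair. Below is a minimal, self-contained sketch of what such overloads look like; the namespace, struct, and exact signatures are illustrative assumptions (the committed overloads were added to CK's existing operators such as Scale), and the __host__ __device__ qualifiers assume HIP compilation as in CK.

// Hypothetical sketch; NOT the code from this commit.
#include <cstdint>

#include "ck/utility/data_type.hpp"    // ck::half_t
#include "ck/utility/type_convert.hpp" // ck::type_convert

namespace sketch {

struct Scale
{
    __host__ __device__ Scale(float scale = 1.f) : scale_(scale) {}

    // Generic declaration; each supported type pair gets a specialization.
    template <typename Y, typename X>
    __host__ __device__ void operator()(Y& y, const X& x) const;

    float scale_;
};

// int8 overload: scale in fp32, then convert back.
template <>
__host__ __device__ inline void Scale::operator()<int8_t, int8_t>(int8_t& y,
                                                                  const int8_t& x) const
{
    y = ck::type_convert<int8_t>(scale_ * ck::type_convert<float>(x));
}

// fp16 overload: same pattern with ck::half_t.
template <>
__host__ __device__ inline void
Scale::operator()<ck::half_t, ck::half_t>(ck::half_t& y, const ck::half_t& x) const
{
    y = ck::type_convert<ck::half_t>(scale_ * ck::type_convert<float>(x));
}

} // namespace sketch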
322 lines
12 KiB
C++
// Copyright (c) Advanced Micro Devices, Inc., or its affiliates.
// SPDX-License-Identifier: MIT

#include <iostream>

#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/utility/data_type.hpp"

#include "profiler/profile_grouped_conv_fwd_outelementop_impl.hpp"
#include "profiler_operation_registry.hpp"
enum struct ConvLayout
{
    GNHWC_GKYXC_GNHWK = 0,
    NHWGC_GKYXC_NHWGK = 1
};

enum struct OutElementOp
{
    ConvScale         = 0,
    ConvInvScale      = 1,
    CombConvScale     = 2,
    ConvScaleRelu     = 3,
    Scale             = 4,
    CombConvScaleRelu = 5
};

enum struct ConvDataType
{
    F8_F8_F8       = 0,
    BF8_BF8_F8     = 1,
    F8_BF8_F8      = 2,
    BF8_F8_F8      = 3,
    F16_F16_F16    = 4,
    BF16_BF16_BF16 = 5,
    I8_I8_I8       = 6,
    F8_F8_F32      = 7
};
#define OP_NAME "grouped_conv_fwd_outelementop"
#define OP_DESC "Grouped Convolution Forward+Elementwise Operation"
static void print_helper_msg()
{
    // clang-format off
    std::cout
        << "arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n"
        << "arg2: data type (0: Input fp8, Weight fp8, Output fp8\n"
        << "                 1: Input bf8, Weight bf8, Output fp8\n"
        << "                 2: Input fp8, Weight bf8, Output fp8\n"
        << "                 3: Input bf8, Weight fp8, Output fp8\n"
        << "                 4: Input f16, Weight f16, Output f16\n"
        << "                 5: Input bf16, Weight bf16, Output bf16\n"
        << "                 6: Input i8, Weight i8, Output i8\n"
        << "                 7: Input f8, Weight f8, Output f32)\n"
        << "arg3: element-wise operation (0: ConvScale\n"
        << "                              1: ConvInvScale\n"
        << "                              2: CombConvScale\n"
        << "                              3: ConvScaleRelu\n"
        << "                              4: Scale\n"
        << "                              5: CombConvScaleRelu)\n"
        << "arg4: tensor layout (0: Input[G, N, Hi, Wi, C], Weight[G, K, Y, X, C], Output[G, N, Ho, Wo, K]\n"
        << "                     1: Input[N, Hi, Wi, G, C], Weight[G, K, Y, X, C], Output[N, Ho, Wo, G, K])\n"
        << "arg5: verification (0: no, 1: yes)\n"
        << "arg6: initialization (0: no init, 1: integer value, 2: decimal value)\n"
        << "arg7: print tensor value (0: no; 1: yes)\n"
        << "arg8: time kernel (0: no, 1: yes)\n"
        << ck::utils::conv::get_conv_param_parser_helper_msg() << std::endl;
    // clang-format on
}
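
// A hypothetical invocation for reference (the "ckProfiler" binary name and
// all sizes below are illustrative, not prescriptive; the argument order
// follows the help text above):
//
//   ckProfiler grouped_conv_fwd_outelementop 0 0 1 1 1 0 1 3 \
//       2 16 64 32  3 3 3  28 28 28  1 1 1  1 1 1  1 1 1  1 1 1
//
// i.e. fp8 in/wei/out with ConvScale, NDHWGC/GKZYXC/NDHWGK layouts,
// verification and kernel timing enabled, 3 spatial dimensions, G=2, N=16,
// K=64, C=32, a 3x3x3 filter on a 28x28x28 input, unit strides/dilations,
// and padding of 1 on each side.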
int grouped_conv_fwd_outelementop(int argc, char* argv[])
{
    // Need at least 9 arguments after the executable name (8 control
    // arguments plus num_dim_spatial) before the conv sizes can be read.
    if(argc < 10)
    {
        print_helper_msg();
        return 1;
    }

    const auto data_type       = static_cast<ConvDataType>(std::stoi(argv[2]));
    const auto op              = static_cast<OutElementOp>(std::stoi(argv[3]));
    const auto layout          = static_cast<ConvLayout>(std::stoi(argv[4]));
    const bool do_verification = std::stoi(argv[5]);
    const int init_method      = std::stoi(argv[6]);
    const bool do_log          = std::stoi(argv[7]);
    const bool time_kernel     = std::stoi(argv[8]);
    const int num_dim_spatial  = std::stoi(argv[9]);

    // 8 control arguments, 1 for num_dim_spatial, 4 for G/N/K/C,
    // 6 * num_dim_spatial for filter/input/stride/dilation/pad sizes, and
    // 1 for argv[0]; e.g. num_dim_spatial = 3 gives 8 + 1 + 4 + 18 + 1 = 32.
    if(argc != 8 + 1 + 4 + 6 * num_dim_spatial + 1)
    {
        print_helper_msg();
        return 1;
    }

    const auto params = ck::utils::conv::parse_conv_param(num_dim_spatial, 10, argv);
    using F8   = ck::f8_t;
    using F16  = ck::half_t;
    using F32  = float;
    using BF8  = ck::bf8_t;
    using BF16 = ck::bhalf_t;
    using I8   = int8_t;

    using GKZYXC = ck::tensor_layout::convolution::GKZYXC;
    using NDHWGC = ck::tensor_layout::convolution::NDHWGC;
    using NDHWGK = ck::tensor_layout::convolution::NDHWGK;

    using ConvScale         = ck::tensor_operation::element_wise::ConvScale;
    using ConvInvScale      = ck::tensor_operation::element_wise::ConvInvscale;
    using CombConvScale     = ck::tensor_operation::element_wise::ScaleScalePass;
    using ConvScaleRelu     = ck::tensor_operation::element_wise::ConvScaleRelu;
    using Scale             = ck::tensor_operation::element_wise::Scale;
    using CombConvScaleRelu = ck::tensor_operation::element_wise::ScaleScaleRelu;

    constexpr auto I3 = ck::Number<3>{};
    // Dispatches to the templated profiler implementation; the tag arguments
    // carry layouts, data types, and the output element-wise op as
    // compile-time types, and ck::Number<3> carries NDimSpatial as a
    // compile-time constant via its ::value member.
    auto profile = [&](auto num_dim_spatial_tmp,
                       auto in_layout,
                       auto wei_layout,
                       auto out_layout,
                       auto in_type,
                       auto wei_type,
                       auto out_type,
                       auto out_element_op,
                       auto a_compute_type,
                       auto b_compute_type) {
        constexpr ck::index_t NDimSpatial = num_dim_spatial_tmp.value;

        using InLayout  = decltype(in_layout);
        using WeiLayout = decltype(wei_layout);
        using OutLayout = decltype(out_layout);

        using InDataType  = decltype(in_type);
        using WeiDataType = decltype(wei_type);
        using OutDataType = decltype(out_type);

        using OutElementOp = decltype(out_element_op);

        using AComputeType = decltype(a_compute_type);
        using BComputeType = decltype(b_compute_type);

        bool pass = ck::profiler::profile_grouped_conv_fwd_outelementop_impl<NDimSpatial,
                                                                             InLayout,
                                                                             WeiLayout,
                                                                             OutLayout,
                                                                             InDataType,
                                                                             WeiDataType,
                                                                             OutDataType,
                                                                             OutElementOp,
                                                                             AComputeType,
                                                                             BComputeType>(
            do_verification, init_method, do_log, time_kernel, params);

        return pass ? 0 : 1;
    };
    if(num_dim_spatial == 3 && layout == ConvLayout::NHWGC_GKYXC_NHWGK)
    {
        if(op == OutElementOp::ConvScale)
        {
            if(data_type == ConvDataType::F8_F8_F8)
            {
                return profile(
                    I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, F8{}, F8{}, F8{}, ConvScale{}, F8{}, F8{});
            }
            else if(data_type == ConvDataType::BF8_BF8_F8)
            {
                return profile(
                    I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, BF8{}, BF8{}, F8{}, ConvScale{}, BF8{}, BF8{});
            }
            else if(data_type == ConvDataType::F8_BF8_F8)
            {
                return profile(
                    I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, F8{}, BF8{}, F8{}, ConvScale{}, F8{}, BF8{});
            }
            else if(data_type == ConvDataType::BF8_F8_F8)
            {
                return profile(
                    I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, BF8{}, F8{}, F8{}, ConvScale{}, BF8{}, F8{});
            }
        }
        else if(op == OutElementOp::ConvInvScale)
        {
            if(data_type == ConvDataType::F8_F8_F8)
            {
                return profile(
                    I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, F8{}, F8{}, F8{}, ConvInvScale{}, F8{}, F8{});
            }
        }
        else if(op == OutElementOp::CombConvScale)
        {
            if(data_type == ConvDataType::F8_F8_F8)
            {
                return profile(
                    I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, F8{}, F8{}, F8{}, CombConvScale{}, F8{}, F8{});
            }
            else if(data_type == ConvDataType::BF8_BF8_F8)
            {
                return profile(
                    I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, BF8{}, BF8{}, F8{}, CombConvScale{}, BF8{}, BF8{});
            }
            else if(data_type == ConvDataType::F8_BF8_F8)
            {
                return profile(
                    I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, F8{}, BF8{}, F8{}, CombConvScale{}, F8{}, BF8{});
            }
            else if(data_type == ConvDataType::BF8_F8_F8)
            {
                return profile(
                    I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, BF8{}, F8{}, F8{}, CombConvScale{}, BF8{}, F8{});
            }
        }
        else if(op == OutElementOp::ConvScaleRelu)
        {
            if(data_type == ConvDataType::F8_F8_F8)
            {
                return profile(
                    I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, F8{}, F8{}, F8{}, ConvScaleRelu{}, F8{}, F8{});
            }
        }
        else if(op == OutElementOp::Scale)
        {
            if(data_type == ConvDataType::F16_F16_F16)
            {
                return profile(
                    I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, F16{}, F16{}, F16{}, Scale{}, F16{}, F16{});
            }
            else if(data_type == ConvDataType::BF16_BF16_BF16)
            {
                return profile(
                    I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, BF16{}, BF16{}, BF16{}, Scale{}, BF16{}, BF16{});
            }
            else if(data_type == ConvDataType::I8_I8_I8)
            {
                return profile(
                    I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, I8{}, I8{}, I8{}, Scale{}, I8{}, I8{});
            }
        }
        else if(op == OutElementOp::CombConvScaleRelu)
        {
            if(data_type == ConvDataType::F8_F8_F32)
            {
                return profile(
                    I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, F8{}, F8{}, F32{}, CombConvScaleRelu{}, F8{}, F8{});
            }
        }
    }
std::cout << "this data_type & layout is not implemented" << std::endl;
|
|
|
|
return 1;
|
|
}
|
|
|
|
REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, grouped_conv_fwd_outelementop);
|