mirror of
https://github.com/ROCm/composable_kernel.git
synced 2026-05-15 18:42:06 +00:00
* Convolution ND
* Code unification across dimensions for generating tensor descriptors.
* Example
* Instances
* Move convnd f32 instance file to comply with repo structure.
* Conv 1D tensor layouts.
* Formatting and use ReferenceConv
* Reference ConvFwd supporting 1D and 2D convolution.
* Debug printing TensorLayout name.
* Conv fwd 1D instance f32
* Refactor conv ND example.
Needed to support various conv dimensions.
* Rename conv nd example directory to prevent conflicts.
* Refactor some common utility to single file.
Plus some tests.
* Refactor GetHostTensorDescriptor + UT.
* Add 1D test case.
* Test reference convolution 1d/2d
* Remove some leftovers.
* Fix convolution example error for 1D
* Refactor test check errors utility function.
* Test Conv2D Fwd XDL
* More UT for 1D case.
* Parameterize input & weight initializers.
* Rename example to prevent conflicts.
* Split convnd instance into separate files for 1d/2d
* Address review comments.
* Fix data type for flops/gbytes calculations.
* Assign example number 11.
* 3D cases for convolution utility functions.
* 3D reference convolution.
* Add support for 3D convolution.
* Check for inputs bigger than 2GB.
* Formatting
* Support for bf16/f16/f32/i8 - conv instances + UT.
* Use check_err from test_util.hpp.
* Split convnd test into separate files for each dim.
* Fix data generation and use proper instances.
* Formatting
* Skip tensor initialization if not necessary.
* Fix CMakefiles.
* Remove redundant conv2d_fwd test.
* Lower problem size for conv3D UT.
* 3D case for convnd example.
* Remove leftovers after merge.
* Add Conv Specialization string to GetTypeString
* Skip instance causing numerical errors.
* Small fixes.
* Remove redundant includes.
* Fix namespace name error.
* Script for automatic testing and logging convolution fwd UTs
* Comment out numactl cmd.
* Refine weights initialization and relax rtol for fp16
* Move test_util.hpp to check_err.hpp
* Refine weights initialization and relax rtol for fp16
* Refactor common part of test conv utils.
* Move utility function to single common place.
* Add additional common functions to utility.
* Refactor convnd_fwd_xdl examples.
* Remove redundant files.
* Unify structure.
* Add constructor to ConvParams.
* And add input parameters validation.
* Modify conv examples to use single utility file.
* Remove check_error from host_tensor.hpp
* Get rid of check_indices function.
* Remove bf16_to_f32 function overload for scalars.
* Fix namespace.
* Add half_float::half for check_err.
* Fix conv params size in UT.
* Fix weights initialization for int8.
* Fix weights initialization for int8.
* Add type_convert when store output in ref conv 1D.
* Get back old conv2d_fwd_xdl operation.
* Silence conv debug print.
* format
* clean
* clean
* Fix merge.
* Fix namespace for check_err
* Formatting.
* Fix merge artifacts.
* Remove deleted header.
* Fix some includes and use ck::utils::check_err.
* Remove unused check_indices restored by previous merge.
* Fix namespaces after merge.
* Fix compilation error.
* Small fixes.
* Use common functions.
* Fix filename
* Fix namespaces.
* Fix merge artifact - retrieve removed by accident fun.
* Fix ConvForwardSpecialization.
* Working example of OpInstanceRunEngine for conv2dfwd UT.
* Adhere to coding style rules.
* Formatting and adhere to coding style rules.
* Fix merge artifacts.
* Utility for collecting conv fwd instances.
+ Plus common part for parsing cmdline params.
* Refactor FillUniform because of segfault for int8_t.
* Naming convention.
* Elegant version of device mem allocation.
* Use OpInstanceRunEngine in conv fwd nd tests.
* Multiple refinements.
* conditional init
* don't run reference op if not provided.
* Use OpInstanceRunEngine for ckProfiler conv_fwd
* Refactor common tensor fill function to separate file.
* Clean up unused functions.
* Support different init methods.
* Create CMake target for conv_fwd_util.
* Add header for profile_convnd_fwd.cpp
* Fix CMakefiles to link with conv_fwd_util where needed.
* Fix some clutter.
Co-authored-by: Adam Osewski <aosewski@amd.com>
Co-authored-by: Chao Liu <chao.liu2@amd.com>
[ROCm/composable_kernel commit: 1a0cd5d160]
229 lines
8.5 KiB
C++
229 lines
8.5 KiB
C++
#include <cstdlib>
#include <initializer_list>
#include <iostream>
#include <numeric>
#include <stdlib.h>
#include <string>

#include <half.hpp>

#include "profile_convnd_bwd_data_impl.hpp"
|
|
|
|
namespace {
|
|
|
|
// Data-type combination (input_weight_output) selected by cmdline arg2.
// Numeric comments give the integer value expected on the command line.
enum struct ConvDataType
{
    F32_F32_F32,    // 0
    F16_F16_F16,    // 1
    BF16_BF16_BF16, // 2
    INT8_INT8_INT8, // 3
};
|
|
|
|
// Input tensor memory layout selected by cmdline arg3.
// Names use the 2D convention (N=batch, C=channels, H/W=spatial); the 1D/3D
// analogues (NWC/NDHWC) are chosen from num_dim_spatial at dispatch time.
enum struct ConvInputLayout
{
    NCHW, // 0
    NHWC, // 1
};
|
|
|
|
// Weight tensor memory layout selected by cmdline arg4.
// K=output channels, C=input channels, Y/X=filter spatial dims (2D naming).
enum struct ConvWeightLayout
{
    KCYX, // 0
    KYXC, // 1
};
|
|
|
|
// Output tensor memory layout selected by cmdline arg5.
// N=batch, K=output channels, H/W=output spatial dims (2D naming).
enum struct ConvOutputLayout
{
    NKHW, // 0
    NHWK, // 1
};
|
|
// Parse the convolution geometry from the command line into a ConvParams.
//
// Expected argv layout starting at arg_idx:
//   N, K, C, then num_dim_spatial values for each of: filter lengths,
//   input lengths, strides, dilations, left pads, right pads
//   — i.e. 3 + num_dim_spatial * 6 integers in total.
//
// @param num_dim_spatial  number of spatial dimensions (1, 2 or 3)
// @param argv             full program argv; only indices >= arg_idx are read
// @param arg_idx          index of the first geometry argument
// @return populated ConvParams
// NOTE(review): std::stoi throws std::invalid_argument on non-numeric input;
// callers rely on argc having been validated beforehand.
ck::utils::conv::ConvParams parse_conv_params(int num_dim_spatial, char* argv[], int arg_idx)
{
    ck::utils::conv::ConvParams params;

    params.num_dim_spatial = num_dim_spatial;
    params.N               = std::stoi(argv[arg_idx++]);
    params.K               = std::stoi(argv[arg_idx++]);
    params.C               = std::stoi(argv[arg_idx++]);

    // Read num_dim_spatial consecutive integers into one spatial-parameter
    // vector. Generic lambda so it works regardless of the vector's element
    // type (int vs ck::index_t).
    auto read_spatial = [&](auto& vec) {
        vec.resize(num_dim_spatial);
        for(int i = 0; i < num_dim_spatial; ++i)
        {
            vec[i] = std::stoi(argv[arg_idx++]);
        }
    };

    // Order must match the cmdline layout documented above.
    read_spatial(params.filter_spatial_lengths);
    read_spatial(params.input_spatial_lengths);
    read_spatial(params.conv_filter_strides);
    read_spatial(params.conv_filter_dilations);
    read_spatial(params.input_left_pads);
    read_spatial(params.input_right_pads);

    return params;
}
|
|
|
|
} // namespace
|
|
|
|
int profile_convnd_bwd_data(int argc, char* argv[], int num_dim_spatial)
|
|
{
|
|
const int preParams = 10;
|
|
int conv_args = 3 + num_dim_spatial * 6;
|
|
int cmdline_nargs = conv_args + preParams;
|
|
if(cmdline_nargs != argc)
|
|
{
|
|
printf("arg1: tensor operation (conv[1|2|3]d_bwd_data: BackwardConvolution)\n");
|
|
printf("arg2: data type (0: fp32; 1: fp16)\n");
|
|
printf("arg3: input tensor layout (0: NCHW; 1: NHWC)\n");
|
|
printf("arg4: weight tensor layout (0: KCYX; 1: KYXC)\n");
|
|
printf("arg5: output tensor layout (0: NKHW; 1: NHWK)\n");
|
|
printf("arg6: verification (0: no; 1: yes)\n");
|
|
printf("arg7: initialization (0: no init; 1: integer value; 2: decimal value)\n");
|
|
printf("arg8: print tensor value (0: no; 1: yes)\n");
|
|
printf("arg9: run kernel # of times (>1)\n");
|
|
printf("arg10 to 24: N, K, C, Y, X, Hi, Wi, Sy, Sx, Dy, Dx, LeftPy, LeftPx, RightPy, "
|
|
"RightPx\n");
|
|
return 1;
|
|
}
|
|
|
|
const auto data_type = static_cast<ConvDataType>(std::stoi(argv[2]));
|
|
const auto in_layout = static_cast<ConvInputLayout>(std::stoi(argv[3]));
|
|
const auto wei_layout = static_cast<ConvWeightLayout>(std::stoi(argv[4]));
|
|
const auto out_layout = static_cast<ConvOutputLayout>(std::stoi(argv[5]));
|
|
const bool do_verification = std::stoi(argv[6]);
|
|
const int init_method = std::stoi(argv[7]);
|
|
const bool do_log = std::stoi(argv[8]);
|
|
const int nrepeat = std::stoi(argv[9]);
|
|
|
|
ck::utils::conv::ConvParams params = parse_conv_params(num_dim_spatial, argv, preParams);
|
|
|
|
auto Run = [&](auto input_type, auto wei_type, auto out_type, auto acc_type) {
|
|
using InDataType = decltype(input_type);
|
|
using WeiDataType = decltype(wei_type);
|
|
using OutDataType = decltype(out_type);
|
|
using AccDataType = decltype(acc_type);
|
|
|
|
switch(num_dim_spatial)
|
|
{
|
|
case 1:
|
|
ck::profiler::profile_convnd_bwd_data_impl<1,
|
|
InDataType,
|
|
WeiDataType,
|
|
OutDataType,
|
|
AccDataType,
|
|
ck::tensor_layout::convolution::NWC,
|
|
ck::tensor_layout::convolution::KXC,
|
|
ck::tensor_layout::convolution::NWK>(
|
|
do_verification,
|
|
init_method,
|
|
do_log,
|
|
nrepeat,
|
|
params.N,
|
|
params.K,
|
|
params.C,
|
|
params.input_spatial_lengths,
|
|
params.filter_spatial_lengths,
|
|
params.GetOutputSpatialLengths(),
|
|
params.conv_filter_strides,
|
|
params.conv_filter_dilations,
|
|
params.input_left_pads,
|
|
params.input_right_pads);
|
|
break;
|
|
|
|
case 2:
|
|
ck::profiler::profile_convnd_bwd_data_impl<2,
|
|
InDataType,
|
|
WeiDataType,
|
|
OutDataType,
|
|
AccDataType,
|
|
ck::tensor_layout::convolution::NHWC,
|
|
ck::tensor_layout::convolution::KYXC,
|
|
ck::tensor_layout::convolution::NHWK>(
|
|
do_verification,
|
|
init_method,
|
|
do_log,
|
|
nrepeat,
|
|
params.N,
|
|
params.K,
|
|
params.C,
|
|
params.input_spatial_lengths,
|
|
params.filter_spatial_lengths,
|
|
params.GetOutputSpatialLengths(),
|
|
params.conv_filter_strides,
|
|
params.conv_filter_dilations,
|
|
params.input_left_pads,
|
|
params.input_right_pads);
|
|
break;
|
|
|
|
case 3:
|
|
ck::profiler::profile_convnd_bwd_data_impl<3,
|
|
InDataType,
|
|
WeiDataType,
|
|
OutDataType,
|
|
AccDataType,
|
|
ck::tensor_layout::convolution::NDHWC,
|
|
ck::tensor_layout::convolution::KZYXC,
|
|
ck::tensor_layout::convolution::NDHWK>(
|
|
do_verification,
|
|
init_method,
|
|
do_log,
|
|
nrepeat,
|
|
params.N,
|
|
params.K,
|
|
params.C,
|
|
params.input_spatial_lengths,
|
|
params.filter_spatial_lengths,
|
|
params.GetOutputSpatialLengths(),
|
|
params.conv_filter_strides,
|
|
params.conv_filter_dilations,
|
|
params.input_left_pads,
|
|
params.input_right_pads);
|
|
break;
|
|
|
|
default: break;
|
|
}
|
|
};
|
|
if(data_type == ConvDataType::F32_F32_F32 && in_layout == ConvInputLayout::NHWC &&
|
|
wei_layout == ConvWeightLayout::KYXC && out_layout == ConvOutputLayout::NHWK)
|
|
{
|
|
Run(float{}, float{}, float{}, float{});
|
|
}
|
|
else if(data_type == ConvDataType::F16_F16_F16 && in_layout == ConvInputLayout::NHWC &&
|
|
wei_layout == ConvWeightLayout::KYXC && out_layout == ConvOutputLayout::NHWK)
|
|
{
|
|
Run(ck::half_t{}, ck::half_t{}, ck::half_t{}, float{});
|
|
}
|
|
else if(data_type == ConvDataType::BF16_BF16_BF16 && in_layout == ConvInputLayout::NHWC &&
|
|
wei_layout == ConvWeightLayout::KYXC && out_layout == ConvOutputLayout::NHWK)
|
|
{
|
|
Run(ck::bhalf_t{}, ck::bhalf_t{}, ck::bhalf_t{}, float{});
|
|
}
|
|
else if(data_type == ConvDataType::INT8_INT8_INT8 && in_layout == ConvInputLayout::NHWC &&
|
|
wei_layout == ConvWeightLayout::KYXC && out_layout == ConvOutputLayout::NHWK)
|
|
{
|
|
Run(int8_t{}, int8_t{}, int8_t{}, int32_t{});
|
|
}
|
|
else
|
|
{
|
|
std::cout << "wrong! this Conv data_type & layout is not implemented" << std::endl;
|
|
return 1;
|
|
}
|
|
|
|
return 0;
|
|
}
|