mirror of
https://github.com/ROCm/composable_kernel.git
synced 2026-05-17 03:19:48 +00:00
* Convolution ND
* Code unification across dimensions for generating tensor descriptors.
* Example
* Instances
* Move convnd f32 instance file to comply with repo structure.
* Conv 1D tensor layouts.
* Formatting and use ReferenceConv
* Reference ConvFwd supporting 1D and 2D convolution.
* Debug printing TensorLayout name.
* Conv fwd 1D instance f32
* Refactor conv ND example.
Needed to support various conv dimensions.
* Rename conv nd example directory to prevent conflicts.
* Refactor some common utility to single file.
Plus some tests.
* Refactor GetHostTensorDescriptor + UT.
* Add 1D test case.
* Test reference convolution 1d/2d
* Remove some leftovers.
* Fix convolution example error for 1D
* Refactor test check errors utility function.
* Test Conv2D Fwd XDL
* More UT for 1D case.
* Parameterize input & weight initializers.
* Rename example to prevent conflicts.
* Split convnd instance into separate files for 1d/2d
* Address review comments.
* Fix data type for flops/gbytes calculations.
* Assign example number 11.
* 3D cases for convolution utility functions.
* 3D reference convolution.
* Add support for 3D convolution.
* Check for inputs bigger than 2GB.
* Formatting
* Support for bf16/f16/f32/i8 - conv instances + UT.
* Use check_err from test_util.hpp.
* Split convnd test into separate files for each dim.
* Fix data generation and use proper instances.
* Formatting
* Skip tensor initialization if not necessary.
* Fix CMakefiles.
* Remove redundant conv2d_fwd test.
* Lower problem size for conv3D UT.
* 3D case for convnd example.
* Remove leftovers after merge.
* Add Conv Specialization string to GetTypeString
* Skip instance causing numerical errors.
* Small fixes.
* Remove redundant includes.
* Fix namespace name error.
* Script for automatic testing and logging convolution fwd UTs
* Comment out numactl cmd.
* Refine weights initialization and relax rtol for fp16
* Move test_util.hpp to check_err.hpp
* Refine weights initialization and relax rtol for fp16
* Refactor common part of test conv utils.
* Move utility function to single common place.
* Add additional common functions to utility.
* Refactor convnd_fwd_xdl examples.
* Remove redundant files.
* Unify structure.
* Add constructor to ConvParams.
* And add input parameters validation.
* Modify conv examples to use single utility file.
* Remove check_error from host_tensor.hpp
* Get rid of check_indices function.
* Remove bf16_to_f32 function overload for scalars.
* Fix namespace.
* Add half_float::half for check_err.
* Fix conv params size in UT.
* Fix weights initialization for int8.
* Fix weights initialization for int8.
* Add type_convert when store output in ref conv 1D.
* Get back old conv2d_fwd_xdl operation.
* Silence conv debug print.
* format
* clean
* clean
* Fix merge.
* Fix namespace for check_err
* Formatting.
* Fix merge artifacts.
* Remove deleted header.
* Fix some includes and use ck::utils::check_err.
* Remove unused check_indices restored by previous merge.
* Fix namespaces after merge.
* Fix compilation error.
* Small fixes.
* Use common functions.
* Fix filename
* Fix namespaces.
* Fix merge artifact - retrieve removed by accident fun.
* Fix ConvForwardSpecialization.
* Working example of OpInstanceRunEngine for conv2dfwd UT.
* Adhere to coding style rules.
* Formatting and adhere to coding style rules.
* Fix merge artifacts.
* Utility for collecting conv fwd instances.
+ Plus common part for parsing cmdline params.
* Refactor FillUniform because of segfault for int8_t.
* Naming convention.
* Elegant version of device mem allocation.
* Use OpInstanceRunEngine in conv fwd nd tests.
* Multiple refinements.
* conditional init
* don't run reference op if not provided.
* Use OpInstanceRunEngine for ckProfiler conv_fwd
* Refactor common tensor fill function to separate file.
* Clean up unused functions.
* Support different init methods.
* Create CMake target for conv_fwd_util.
* Add header for profile_convnd_fwd.cpp
* Fix CMakefiles to link with conv_fwd_util where needed.
* Fix some clutter.
Co-authored-by: Adam Osewski <aosewski@amd.com>
Co-authored-by: Chao Liu <chao.liu2@amd.com>
[ROCm/composable_kernel commit: 1a0cd5d160]
352 lines
12 KiB
C++
352 lines
12 KiB
C++
#include <cstdlib>
|
|
#include <iostream>
|
|
#include <memory>
|
|
#include <string>
|
|
#include <vector>
|
|
#include <half.hpp>
|
|
|
|
#include "conv_fwd_util.hpp"
|
|
#include "element_wise_operation.hpp"
|
|
#include "fill.hpp"
|
|
#include "profile_convnd_fwd.hpp"
|
|
#include "tensor_layout.hpp"
|
|
|
|
namespace {
|
|
|
|
// Data-type triple (input/weight/output) selected by the `arg2` command-line
// value; numeric values must match the help text in print_use_msg().
enum class ConvDataType
{
    F32_F32_F32,    // 0: fp32 input, weight and output
    F16_F16_F16,    // 1: fp16
    BF16_BF16_BF16, // 2: bf16
    INT8_INT8_INT8, // 3: int8
};
|
|
|
|
// Tensor memory layout selected by the `arg3` command-line value; numeric
// values must match the help text in print_use_msg().
enum class ConvDataLayout
{
    NCHW, // 0: channels-first
    NHWC, // 1: channels-last
};
|
|
|
|
namespace ctl = ck::tensor_layout::convolution;
|
|
|
|
template <int NDim, ConvDataLayout DataLayout>
|
|
struct ConvolutionLayouts;
|
|
|
|
template <>
|
|
struct ConvolutionLayouts<1, ConvDataLayout::NHWC>
|
|
{
|
|
typedef ctl::NWC Input;
|
|
typedef ctl::KXC Weight;
|
|
typedef ctl::NWK Output;
|
|
};
|
|
template <>
|
|
struct ConvolutionLayouts<2, ConvDataLayout::NHWC>
|
|
{
|
|
typedef ctl::NHWC Input;
|
|
typedef ctl::KYXC Weight;
|
|
typedef ctl::NHWK Output;
|
|
};
|
|
template <>
|
|
struct ConvolutionLayouts<3, ConvDataLayout::NHWC>
|
|
{
|
|
typedef ctl::NDHWC Input;
|
|
typedef ctl::KZYXC Weight;
|
|
typedef ctl::NDHWK Output;
|
|
};
|
|
template <>
|
|
struct ConvolutionLayouts<1, ConvDataLayout::NCHW>
|
|
{
|
|
typedef ctl::NCW Input;
|
|
typedef ctl::KCX Weight;
|
|
typedef ctl::NKW Output;
|
|
};
|
|
template <>
|
|
struct ConvolutionLayouts<2, ConvDataLayout::NCHW>
|
|
{
|
|
typedef ctl::NCHW Input;
|
|
typedef ctl::KCYX Weight;
|
|
typedef ctl::NKHW Output;
|
|
};
|
|
template <>
|
|
struct ConvolutionLayouts<3, ConvDataLayout::NCHW>
|
|
{
|
|
typedef ctl::NCDHW Input;
|
|
typedef ctl::KCZYX Weight;
|
|
typedef ctl::NKDHW Output;
|
|
};
|
|
|
|
// Prints the command-line usage for the conv-fwd profiler to stdout.
// The numeric selectors listed here must stay in sync with ConvDataType
// and ConvDataLayout above.
void print_use_msg()
{
    static const char usage[] =
        "arg1: tensor operation (conv_fwd: ForwardConvolution)\n"
        "arg2: data type (0: fp32; 1: fp16, 2: bf16, 3: int8)\n"
        "arg3: data layout (0: NCHW; 1: NHWC)\n"
        "arg4: verification (0=no, 1=yes)\n"
        "arg5: initialization (0=no init, 1=integer value, 2=decimal value)\n"
        "arg6: print tensor value (0: no; 1: yes)\n"
        "arg7: run kernel # of times (>1)\n"
        "arg8: N spatial dimensions (default 2)\n"
        "Following arguments (depending on number of spatial dims):\n"
        " N, K, C, \n"
        " <filter spatial dimensions>, (ie Y, X for 2D)\n"
        " <input image spatial dimensions>, (ie Hi, Wi for 2D)\n"
        " <strides>, (ie Sy, Sx for 2D)\n"
        " <dilations>, (ie Dy, Dx for 2D)\n"
        " <left padding>, (ie LeftPy, LeftPx for 2D)\n"
        " <right padding>, (ie RightPy, RightPx for 2D)\n";
    std::cout << usage << std::endl;
}
|
|
|
|
// Validates the argument count and parses the convolution problem description
// from the command line. Prints usage and exits the process when the count
// does not match; otherwise delegates to ck::utils::conv::parse_conv_params.
ck::utils::conv::ConvParams parse_params(int num_dim_spatial, int argc, char* argv[])
{
    // Problem description: N, K, C plus six per-dimension groups
    // (filter, input, strides, dilations, left padding, right padding).
    const int conv_args     = 3 + num_dim_spatial * 6;
    const int expected_argc = conv_args + 9;

    if(argc != expected_argc)
    {
        print_use_msg();
        exit(1);
    }

    // Conv-specific values begin after argv[0] and the eight generic
    // profiler arguments.
    int arg_idx = 9;

    return ck::utils::conv::parse_conv_params(num_dim_spatial, arg_idx, argv);
}
|
|
|
|
// Builds a ConvFwdOpInstance for the requested initialization method, wires it
// to a host reference convolution, profiles every registered device instance
// for this (data types, NDim) combination, and prints the best configuration.
//
// params           convolution problem description (sizes, strides, dilations, padding)
// do_verification  compare each instance's output against the host reference
// do_log           forward tensor-value logging to the run engine
// nrepeat          number of kernel timing repetitions
// init_method      0 = no init, 1 = uniform integers, 2 = uniform values of the
//                  tensors' own data types (see print_use_msg)
// ConvLayouts      tag struct bundling Input/Weight/Output layout types
//                  (an instance of ConvolutionLayouts<...>); passed by value
//                  purely for template argument deduction
template <int NDim,
          typename InDataType,
          typename WeiDataType,
          typename OutDataType,
          typename ConvLayouts>
void profile_convnd_instances_impl(const ck::utils::conv::ConvParams& params,
                                   bool do_verification,
                                   bool do_log,
                                   int nrepeat,
                                   int init_method,
                                   ConvLayouts)
{
    using namespace std::placeholders;
    using namespace ck::utils;

    // NOTE(review): OpInstance is parameterized <Out, In, Wei> here while
    // OpInstanceRunEngine below uses <In, Wei, Out> — presumably both orders
    // are correct per their declarations, but worth confirming.
    std::unique_ptr<OpInstance<OutDataType, InDataType, WeiDataType>> conv_instance;

    switch(init_method)
    {
    // 0: skip tensor initialization entirely (second ctor arg = false).
    case 0:
        conv_instance =
            std::make_unique<conv::ConvFwdOpInstance<InDataType,
                                                     WeiDataType,
                                                     OutDataType,
                                                     typename ConvLayouts::Input,
                                                     typename ConvLayouts::Weight,
                                                     typename ConvLayouts::Output>>(params, false);
        break;
    // 1: fill input and weights with uniformly distributed integer values.
    case 1:
        conv_instance = std::make_unique<
            conv::ConvFwdOpInstance<InDataType,
                                    WeiDataType,
                                    OutDataType,
                                    typename ConvLayouts::Input,
                                    typename ConvLayouts::Weight,
                                    typename ConvLayouts::Output,
                                    ck::tensor_operation::element_wise::PassThrough,
                                    ck::tensor_operation::element_wise::PassThrough,
                                    ck::tensor_operation::element_wise::PassThrough,
                                    ck::utils::FillUniform<int>,
                                    ck::utils::FillUniform<int>>>(
            params, true, ck::utils::FillUniform<int>{}, ck::utils::FillUniform<int>{});
        break;
    // 2: fill input and weights with uniform values of their own data types.
    case 2:
        conv_instance = std::make_unique<
            conv::ConvFwdOpInstance<InDataType,
                                    WeiDataType,
                                    OutDataType,
                                    typename ConvLayouts::Input,
                                    typename ConvLayouts::Weight,
                                    typename ConvLayouts::Output,
                                    ck::tensor_operation::element_wise::PassThrough,
                                    ck::tensor_operation::element_wise::PassThrough,
                                    ck::tensor_operation::element_wise::PassThrough,
                                    ck::utils::FillUniform<InDataType>,
                                    ck::utils::FillUniform<WeiDataType>>>(
            params,
            true,
            ck::utils::FillUniform<InDataType>{},
            ck::utils::FillUniform<WeiDataType>{});
        break;
    default: throw std::runtime_error("Unsupported init method!");
    }

    // Bind the problem description so the reference convolution only needs the
    // three tensors supplied by the run engine.
    auto reference_conv_fwd_fun = std::bind(
        conv::run_reference_convolution_forward<NDim, InDataType, WeiDataType, OutDataType>,
        params,
        _1,
        _2,
        _3);
    OpInstanceRunEngine<InDataType, WeiDataType, OutDataType> run_engine(*conv_instance,
                                                                         reference_conv_fwd_fun);
    // Profile all device instances registered for these data types and NDim.
    auto best_conf = run_engine.Profile(
        conv::ConvolutionFwdInstances<InDataType, WeiDataType, OutDataType>::template Get<NDim>(),
        nrepeat,
        do_verification,
        do_log);

    std::cout << "Best configuration parameters:"
              << "\nname: " << best_conf.best_op_name << "\navg_time: " << best_conf.best_avg_time
              << "\ntflops: " << best_conf.best_tflops << "\nGB/s: " << best_conf.best_gb_per_sec
              << std::endl;
}
|
|
|
|
template <int NDim>
|
|
void profile_convnd_instances(ConvDataType data_type,
|
|
ConvDataLayout data_layout,
|
|
const ck::utils::conv::ConvParams& params,
|
|
bool do_verification,
|
|
bool do_log,
|
|
int nrepeat,
|
|
int init_method)
|
|
{
|
|
switch(data_layout)
|
|
{
|
|
case ConvDataLayout::NHWC: {
|
|
switch(data_type)
|
|
{
|
|
case ConvDataType::F32_F32_F32:
|
|
profile_convnd_instances_impl<NDim, float, float, float>(
|
|
params,
|
|
do_verification,
|
|
do_log,
|
|
nrepeat,
|
|
init_method,
|
|
ConvolutionLayouts<NDim, ConvDataLayout::NHWC>{});
|
|
break;
|
|
case ConvDataType::F16_F16_F16:
|
|
profile_convnd_instances_impl<NDim, ck::half_t, ck::half_t, ck::half_t>(
|
|
params,
|
|
do_verification,
|
|
do_log,
|
|
nrepeat,
|
|
init_method,
|
|
ConvolutionLayouts<NDim, ConvDataLayout::NHWC>{});
|
|
break;
|
|
case ConvDataType::BF16_BF16_BF16:
|
|
profile_convnd_instances_impl<NDim, ck::bhalf_t, ck::bhalf_t, ck::bhalf_t>(
|
|
params,
|
|
do_verification,
|
|
do_log,
|
|
nrepeat,
|
|
init_method,
|
|
ConvolutionLayouts<NDim, ConvDataLayout::NHWC>{});
|
|
break;
|
|
case ConvDataType::INT8_INT8_INT8:
|
|
profile_convnd_instances_impl<NDim, int8_t, int8_t, int8_t>(
|
|
params,
|
|
do_verification,
|
|
do_log,
|
|
nrepeat,
|
|
init_method,
|
|
ConvolutionLayouts<NDim, ConvDataLayout::NHWC>{});
|
|
break;
|
|
}
|
|
break;
|
|
}
|
|
case ConvDataLayout::NCHW: {
|
|
switch(data_type)
|
|
{
|
|
case ConvDataType::F32_F32_F32:
|
|
profile_convnd_instances_impl<NDim, float, float, float>(
|
|
params,
|
|
do_verification,
|
|
do_log,
|
|
nrepeat,
|
|
init_method,
|
|
ConvolutionLayouts<NDim, ConvDataLayout::NCHW>{});
|
|
break;
|
|
case ConvDataType::F16_F16_F16:
|
|
profile_convnd_instances_impl<NDim, ck::half_t, ck::half_t, ck::half_t>(
|
|
params,
|
|
do_verification,
|
|
do_log,
|
|
nrepeat,
|
|
init_method,
|
|
ConvolutionLayouts<NDim, ConvDataLayout::NCHW>{});
|
|
break;
|
|
case ConvDataType::BF16_BF16_BF16:
|
|
profile_convnd_instances_impl<NDim, ck::bhalf_t, ck::bhalf_t, ck::bhalf_t>(
|
|
params,
|
|
do_verification,
|
|
do_log,
|
|
nrepeat,
|
|
init_method,
|
|
ConvolutionLayouts<NDim, ConvDataLayout::NCHW>{});
|
|
break;
|
|
case ConvDataType::INT8_INT8_INT8:
|
|
profile_convnd_instances_impl<NDim, int8_t, int8_t, int8_t>(
|
|
params,
|
|
do_verification,
|
|
do_log,
|
|
nrepeat,
|
|
init_method,
|
|
ConvolutionLayouts<NDim, ConvDataLayout::NCHW>{});
|
|
break;
|
|
}
|
|
break;
|
|
}
|
|
}
|
|
}
|
|
|
|
} // namespace
|
|
|
|
int ck::profiler::profile_convnd_fwd(int argc, char* argv[])
|
|
{
|
|
using namespace ck::utils::conv;
|
|
|
|
ConvDataType data_type{ConvDataType::F32_F32_F32};
|
|
ConvDataLayout data_layout{ConvDataLayout::NHWC};
|
|
bool do_verification{true};
|
|
int init_method{2};
|
|
bool do_log{false};
|
|
int nrepeat{100};
|
|
int num_dim_spatial{2};
|
|
ConvParams params;
|
|
|
|
if(argc >= 4)
|
|
{
|
|
data_type = static_cast<ConvDataType>(std::stoi(argv[2]));
|
|
data_layout = static_cast<ConvDataLayout>(std::stoi(argv[3]));
|
|
}
|
|
if(argc >= 9)
|
|
{
|
|
do_verification = std::stoi(argv[4]);
|
|
init_method = std::stoi(argv[5]);
|
|
do_log = std::stoi(argv[6]);
|
|
nrepeat = std::stoi(argv[7]);
|
|
num_dim_spatial = std::stoi(argv[8]);
|
|
}
|
|
if(argc >= 10)
|
|
{
|
|
params = parse_params(num_dim_spatial, argc, argv);
|
|
}
|
|
|
|
// TODO Print nice message what is being profiled.
|
|
|
|
switch(num_dim_spatial)
|
|
{
|
|
case 1:
|
|
profile_convnd_instances<1>(
|
|
data_type, data_layout, params, do_verification, do_log, nrepeat, init_method);
|
|
break;
|
|
case 2:
|
|
profile_convnd_instances<2>(
|
|
data_type, data_layout, params, do_verification, do_log, nrepeat, init_method);
|
|
break;
|
|
case 3:
|
|
profile_convnd_instances<3>(
|
|
data_type, data_layout, params, do_verification, do_log, nrepeat, init_method);
|
|
break;
|
|
default:
|
|
throw std::runtime_error("profile_conv_fwd: unsupported num_dim_spatial value: " +
|
|
std::to_string(num_dim_spatial));
|
|
}
|
|
|
|
return 1;
|
|
}
|