Mirror of https://github.com/ROCm/composable_kernel.git, synced 2026-05-14 10:09:41 +00:00
Reduction in Composable Kernel (#82)
* Initial adding of generic reduction
* Initial adding of generic reduction ...
* Updates to make the code compile
* clang-format all files
* clang-format some files again
* Renaming in profiler/include/profile_reduce.hpp
* Updates to make BlockWise cases pass
* Updates to make ThreadWise and MultiBlockTwoCall cases pass
* Remove the support for MUL and NORM1 reduceOp from the profiler and the device instances
* Change to replace the dim0_max_vector_size/dim1_max_vector_size template argument in the device reduce classes
* format
* adding pooling
* added max and average pooling
* comment out cout and kernel timing
* Tiny simplification in profiler/reduce_profiler.cpp
* Add example for reduce_blockwise
* Tiny updates
* Change to pass the ElementWiseOp from device layer to kernel
* Fix the vectorDim and vectorSize in Device layer
* Enable vector load on both dim0 and dim1 for Threadwise method
* Tiny updates
* Change to let the user pass the preUnaryOp and posUnaryOp
* Make pooling example work
* split device_reduce_instance into two libraries
* Tiny update
* Replace nanPropaOpt enum by boolean propagate_nan
* Simplification in DeviceReduce layer codes
* update build
* Change to clarify the difference between ck::half_t and half_float::half
* Renaming in all the reduction codes
* Add VectorSize as template parameter for device layer
* Add BetaIsZero as kernel template and as AccDataType for alpha
* print
* Small updates for pooling
* Updates for host_generic_reduction for reference
* Update to make AVG pooling pass
* Update to make MAX pooling with indices output pass
* fix
* add OutDst vector store to threadwise reduction and pooling
* tweak
* turn off check_indices, which caused a build issue
* refactor pooling
* clean up
* turn off check_indices due to a build issue with php-compiler
* add more tile size for odd C
* tweak conv for odd C
* update script
* clean up elementwise op
* add hack in reduction_operator.hpp to avoid a compile error; the proper fix is to use element_wise_op in the reduction op
* Add OutVectorSize as device and kernel tunable, also update to Elementwise Operations
* Move reduce operator mapping to host layer file reduction_operator_mapping.hpp from reduction_operator.hpp
* Change to the unary operators
* Move the definitions of unary operations to element_wise_operation.hpp
* re-org files
* Refine in device interfaces and multiblock kernels
* Split the reduction configurations into instances for specific methods
* Update in getTypeString() of device pool2d
* Renaming in host and kernel
* Tiny update in profiler/src/profiler.cpp
* Uncomment in device_operation/CMakeLists.txt to enable the building of all operations
* Make check_indices a templated function to resolve a linking issue
* Renaming in the profiler reduce module
* Add support for double Reduction (but disable MultiblockAtomicAdd for double)
* Tiny correction of literal string
* Rename DevicePoolFwd to DevicePool2dFwd
* Split device_reduce_instance_xxx.cpp files according to the data types to speed up compiling
* Add comments for lists of configurations, lists of instances and references of add_reduce_instances_xxx
* Remove un-used header file gridwise_generic_reduction_wrapper_common.hpp
* Renaming and refining in the Reduction codes
* Tiny change in the unary operators
* Renaming symbols and files
* Renaming symbols in the kernels
* Move kernel kernel_set_buffer_value to separate file
* Add IndexDataType template parameter for kernels and use int32_t as index data type in device layer
* Tiny update in the kernels
* Remove definition of sqrtf()/isnan()/abs() for half_t due to some ADL issue
* Simplify a helper function in device layer
* Tiny adjustment in testing data initialization
* Renaming in kernel/device/host
* Add two testing scripts for reduction
* Refine the Unary operators in element_wise_operation.hpp
* Update in the reduce profiler module
* Update to the reduction testing scripts
* reduce compile parallelism
* change CI docker to rocm5.0
* remove unused variables
* fix build
Co-authored-by: Chao Liu <chao.liu2@amd.com>
[ROCm/composable_kernel commit: e17c0d8008]
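Several of the changes above revolve around NaN handling: the nanPropaOpt enum is replaced by a boolean propagate_nan, and the host verification below accumulates through binop_with_nan_check. Below is a minimal sketch of that accumulation pattern, assuming opReduce is an in-place binary reduce functor such as MAX; the helper name and signature here are illustrative assumptions, not the library's exact API.

#include <cmath>

// Illustrative only: accumulate currVal into accuVal, optionally propagating NaN.
template <typename AccDataType, bool PropagateNan, typename ReduceOp>
void accumulate_with_nan_check(ReduceOp opReduce, AccDataType& accuVal, AccDataType currVal)
{
    if constexpr(PropagateNan)
    {
        if(std::isnan(currVal))
        {
            accuVal = currVal; // once a NaN is seen, the result stays NaN
            return;
        }
    }
    opReduce(accuVal, currVal); // e.g. for MAX: accuVal = accuVal > currVal ? accuVal : currVal
}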
example/12_pool2d_fwd/pool2d_fwd.cpp (new file, 311 lines)
@@ -0,0 +1,311 @@
#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>
#include <stdlib.h>
#include "config.hpp"
#include "print.hpp"
#include "device.hpp"
#include "host_tensor.hpp"
#include "host_tensor_generator.hpp"
#include "host_reduce_util.hpp"
#include "device_tensor.hpp"
#include "tensor_layout.hpp"
#include "reduction_operator.hpp"
#include "device_operation/include/device_pool2d_fwd_nhwc_nhwc.hpp"

using InDataType = ck::half_t;
using OutDataType = ck::half_t;
using AccDataType = float;

using InLayout = ck::tensor_layout::convolution::NHWC;
using OutLayout = ck::tensor_layout::convolution::NHWC;

#if 1
static constexpr auto ReduceOpId = ck::ReduceTensorOp_t::MAX;
#else
static constexpr auto ReduceOpId = ck::ReduceTensorOp_t::AVG;
#endif

static constexpr bool NeedIndices = false;
static constexpr bool PropagateNan = false;

using DevicePoolFwdInstance =
ck::tensor_operation::device::DevicePool2dFwd_Input_N_Hi_Wi_C_Output_N_Ho_Wo_C<
InDataType, // InDataType
OutDataType, // OutDataType
AccDataType, // AccDataType
ReduceOpId,
NeedIndices,
64, // BlockSize
64, // ReduceMThreadClusterSize
1, // ReduceKThreadClusterSize
4, // ReduceMThreadSliceSize
1, // ReduceKThreadSliceSize
4>; // InSrcOutDstVectorSize

template <typename InDataType,
typename OutDataType,
typename AccDataType,
ck::ReduceTensorOp_t ReduceOpId,
bool PropagateNan,
bool NeedIndices>
static void pool_host_verify(const Tensor<InDataType>& in,
Tensor<OutDataType>& out,
Tensor<int>& out_indices,
const std::array<ck::index_t, 2>& window_spatial_lengths,
const std::array<ck::index_t, 2>& window_strides,
const std::array<ck::index_t, 2>& in_left_pads,
const std::array<ck::index_t, 2>& /*in_right_pads*/)
{
using namespace ck::host_reduce;

const int divider = window_spatial_lengths[0] * window_spatial_lengths[1];

const auto PreUnaryOp = PreUnaryOpFn<AccDataType, ReduceOpId>(divider);
const auto PosUnaryOp = PosUnaryOpFn<AccDataType, ReduceOpId>(divider);

if constexpr(!NeedIndices)
{
auto opReduce = ReduceOpFn<AccDataType, ReduceOpId>();

auto f_nchw = [&](auto n, auto c, auto ho, auto wo) {
auto accuVal = ReduceOpZeroVal<AccDataType, ReduceOpId>();

for(int y = 0; y < window_spatial_lengths[0]; ++y)
{
int hi = ho * window_strides[0] + y - in_left_pads[0];
for(int x = 0; x < window_spatial_lengths[1]; ++x)
{
int wi = wo * window_strides[1] + x - in_left_pads[1];
if(hi >= 0 && hi < in.mDesc.GetLengths()[2] && wi >= 0 &&
wi < in.mDesc.GetLengths()[3])
{
AccDataType currVal = static_cast<AccDataType>(in(n, c, hi, wi));

PreUnaryOp(currVal);

binop_with_nan_check<AccDataType, PropagateNan>(opReduce, accuVal, currVal);
}
}
}

PosUnaryOp(accuVal);

out(n, c, ho, wo) = accuVal;
};

make_ParallelTensorFunctor(f_nchw,
out.mDesc.GetLengths()[0],
out.mDesc.GetLengths()[1],
out.mDesc.GetLengths()[2],
out.mDesc.GetLengths()[3])(std::thread::hardware_concurrency());
}
else
{
auto opReduce = ReduceOpFn2<AccDataType, ReduceOpId>();

auto f_nchw = [&](auto n, auto c, auto ho, auto wo) {
auto accuVal = ReduceOpZeroVal<AccDataType, ReduceOpId>();
int accuIndex = 0;

for(int y = 0; y < window_spatial_lengths[0]; ++y)
{
int hi = ho * window_strides[0] + y - in_left_pads[0];
for(int x = 0; x < window_spatial_lengths[1]; ++x)
{
int wi = wo * window_strides[1] + x - in_left_pads[1];
if(hi >= 0 && hi < in.mDesc.GetLengths()[2] && wi >= 0 &&
wi < in.mDesc.GetLengths()[3])
{
AccDataType currVal = static_cast<AccDataType>(in(n, c, hi, wi));
int currIndex = y * window_spatial_lengths[1] + x;
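// currIndex is the flattened position inside the pooling window, row-major: y * window-width + x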

PreUnaryOp(currVal);

binop_with_nan_check2<AccDataType, PropagateNan>(
opReduce, accuVal, currVal, accuIndex, currIndex);
}
}
}

PosUnaryOp(accuVal);

out(n, c, ho, wo) = accuVal;
out_indices(n, c, ho, wo) = accuIndex;
};

make_ParallelTensorFunctor(f_nchw,
out.mDesc.GetLengths()[0],
out.mDesc.GetLengths()[1],
out.mDesc.GetLengths()[2],
out.mDesc.GetLengths()[3])(std::thread::hardware_concurrency());
};
}

int main(int argc, char* argv[])
{
using namespace ck::host_reduce;

bool do_verification = 0;
int init_method = 0;
int nrepeat = 5;

// Pool shape
ck::index_t N = 128;
ck::index_t C = 192;
ck::index_t Y = 3;
ck::index_t X = 3;
ck::index_t Hi = 71;
ck::index_t Wi = 71;
ck::index_t window_stride_h = 2;
ck::index_t window_stride_w = 2;
ck::index_t in_left_pad_h = 1;
ck::index_t in_left_pad_w = 1;
ck::index_t in_right_pad_h = 1;
ck::index_t in_right_pad_w = 1;

if(argc == 4)
{
do_verification = std::stoi(argv[1]);
init_method = std::stoi(argv[2]);
nrepeat = std::stoi(argv[3]);
}
else if(argc == 16)
{
do_verification = std::stoi(argv[1]);
init_method = std::stoi(argv[2]);
nrepeat = std::stoi(argv[3]);

N = std::stoi(argv[4]);
C = std::stoi(argv[5]);
Y = std::stoi(argv[6]);
X = std::stoi(argv[7]);
Hi = std::stoi(argv[8]);
Wi = std::stoi(argv[9]);
window_stride_h = std::stoi(argv[10]);
window_stride_w = std::stoi(argv[11]);
in_left_pad_h = std::stoi(argv[12]);
in_left_pad_w = std::stoi(argv[13]);
in_right_pad_h = std::stoi(argv[14]);
in_right_pad_w = std::stoi(argv[15]);
}
else
{
printf("arg1: verification (0=no, 1=yes)\n");
printf("arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n");
printf("arg3: run kernel # of times (>1)\n");
printf("arg4 to 15: N, C, Y, X, Hi, Wi, Sy, Sx, LeftPy, LeftPx, RightPy, "
"RightPx\n");
exit(0);
}

const ck::index_t Ho = (Hi + in_left_pad_h + in_right_pad_h - Y) / window_stride_h + 1;
const ck::index_t Wo = (Wi + in_left_pad_w + in_right_pad_w - X) / window_stride_w + 1;
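// e.g. with the default sizes above: Ho = Wo = (71 + 1 + 1 - 3) / 2 + 1 = 36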

const std::array<ck::index_t, 2> window_spatial_lengths{{Y, X}};
const std::array<ck::index_t, 2> window_strides{{window_stride_h, window_stride_w}};
const std::array<ck::index_t, 2> input_left_pads{{in_left_pad_h, in_left_pad_w}};
const std::array<ck::index_t, 2> input_right_pads{{in_right_pad_h, in_right_pad_w}};

// tensor layout
auto f_host_tensor_descriptor =
[](std::size_t N_, std::size_t C_, std::size_t H, std::size_t W, auto layout) {
if constexpr(ck::is_same<decltype(layout), ck::tensor_layout::convolution::NCHW>::value)
{
return HostTensorDescriptor(std::vector<std::size_t>({N_, C_, H, W}),
std::vector<std::size_t>({C_ * H * W, H * W, W, 1}));
}
else if constexpr(ck::is_same<decltype(layout),
ck::tensor_layout::convolution::NHWC>::value)
{
return HostTensorDescriptor(std::vector<std::size_t>({N_, C_, H, W}),
std::vector<std::size_t>({C_ * H * W, 1, W * C_, C_}));
}
};

Tensor<InDataType> in_n_c_hi_wi(f_host_tensor_descriptor(N, C, Hi, Wi, InLayout{}));
Tensor<OutDataType> out_n_c_ho_wo_host(f_host_tensor_descriptor(N, C, Ho, Wo, OutLayout{}));
Tensor<int> out_indices_n_c_ho_wo_host(f_host_tensor_descriptor(N, C, Ho, Wo, OutLayout{}));
Tensor<OutDataType> out_n_c_ho_wo_device(f_host_tensor_descriptor(N, C, Ho, Wo, OutLayout{}));
Tensor<int> out_indices_n_c_ho_wo_device(f_host_tensor_descriptor(N, C, Ho, Wo, OutLayout{}));

std::cout << "in_n_c_hi_wi: " << in_n_c_hi_wi.mDesc << std::endl;
std::cout << "out_n_c_ho_wo: " << out_n_c_ho_wo_host.mDesc << std::endl;

switch(init_method)
{
case 0: break;
case 1: in_n_c_hi_wi.GenerateTensorValue(GeneratorTensor_2<InDataType>{-5, 5}); break;
default: in_n_c_hi_wi.GenerateTensorValue(GeneratorTensor_3<InDataType>{0.0, 1.0});
}

DeviceMem in_device_buf(sizeof(InDataType) * in_n_c_hi_wi.mDesc.GetElementSpace());
DeviceMem out_device_buf(sizeof(OutDataType) * out_n_c_ho_wo_device.mDesc.GetElementSpace());
DeviceMem out_indices_device_buf(sizeof(int) *
out_indices_n_c_ho_wo_device.mDesc.GetElementSpace());

in_device_buf.ToDevice(in_n_c_hi_wi.mData.data());

auto pool = DevicePoolFwdInstance{};
auto invoker_ptr = pool.MakeInvokerPointer();
auto argument_ptr =
pool.MakeArgumentPointer(static_cast<InDataType*>(in_device_buf.GetDeviceBuffer()),
static_cast<OutDataType*>(out_device_buf.GetDeviceBuffer()),
static_cast<int*>(out_indices_device_buf.GetDeviceBuffer()),
N,
C,
std::array<ck::index_t, 2>{{Hi, Wi}},
std::array<ck::index_t, 2>{{Y, X}},
std::array<ck::index_t, 2>{{Ho, Wo}},
window_strides,
input_left_pads,
input_right_pads);

if(!pool.IsSupportedArgument(argument_ptr.get()))
{
throw std::runtime_error("wrong! device_op with the specified compilation parameters does "
"not support this problem");
}

float ave_time = invoker_ptr->Run(argument_ptr.get(), nrepeat);

std::size_t flop = std::size_t(2) * N * C * Ho * Wo * Y * X;

std::size_t num_btype =
sizeof(InDataType) * (N * C * Hi * Wi) + sizeof(OutDataType) * (N * C * Ho * Wo);

float tflops = static_cast<float>(flop) / 1.E9 / ave_time;

float gb_per_sec = num_btype / 1.E6 / ave_time;

std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec << " GB/s"
<< std::endl;

if(do_verification)
{
pool_host_verify<InDataType,
OutDataType,
AccDataType,
ReduceOpId,
PropagateNan,
NeedIndices>(in_n_c_hi_wi,
out_n_c_ho_wo_host,
out_indices_n_c_ho_wo_host,
window_spatial_lengths,
window_strides,
input_left_pads,
input_right_pads);

out_device_buf.FromDevice(out_n_c_ho_wo_device.mData.data());

check_error(out_n_c_ho_wo_host, out_n_c_ho_wo_device);

if constexpr(NeedIndices)
{
out_indices_device_buf.FromDevice(out_indices_n_c_ho_wo_device.mData.data());

// check_indices(out_indices_n_c_ho_wo_host, out_indices_n_c_ho_wo_device);
};
}
}
example/13_reduce_blockwise/reduce_blockwise.cpp (new file, 395 lines)
@@ -0,0 +1,395 @@
#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>
#include <getopt.h>
#include <half.hpp>
#include "config.hpp"
#include "print.hpp"
#include "device.hpp"
#include "host_tensor.hpp"
#include "host_tensor_generator.hpp"
#include "device_tensor.hpp"
#include "device_base.hpp"
#include "device_reduce_blockwise.hpp"
#include "host_reduce_util.hpp"
#include "host_generic_reduction.hpp"

#include "reduction_enums.hpp"
#include "reduction_operator_mapping.hpp"

using namespace ck;
using namespace ck::tensor_operation::device;

using InDataType = half_float::half;
using OutDataType = half_float::half;
using AccDataType = float;

using kInDataType = ck::half_t;
using kOutDataType = ck::half_t;
using kAccDataType = float;

constexpr int Rank = 4;
using ReduceDims_ = ck::Sequence<0, 1, 2>;
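// reduce over dimensions 0, 1 and 2 of the rank-4 input; dimension 3 is kept (invariant)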

constexpr ReduceTensorOp_t ReduceOpId = ReduceTensorOp_t::NORM2;
constexpr NanPropagation_t NanOpt = NanPropagation_t::PROPAGATE_NAN;
constexpr bool PropagateNan = (NanOpt == NanPropagation_t::NOT_PROPAGATE_NAN) ? false : true;
constexpr ReduceTensorIndices_t IndicesOpt = ReduceTensorIndices_t::NO_INDICES;

using ReduceOperation = typename reduce_binary_operator<AccDataType, ReduceOpId>::opType;
using InElementwiseOperation =
typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::InElementwiseOperation;
using AccElementwiseOperation =
typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::AccElementwiseOperation;

using DeviceReduceInstance = DeviceReduceBlockWise<kInDataType,
kAccDataType,
kOutDataType,
Rank,
ReduceDims_,
ReduceOperation,
InElementwiseOperation,
AccElementwiseOperation,
PropagateNan,
false,
256,
4,
64,
1,
1,
0,
1,
1>;
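// The trailing integer template arguments are the blockwise kernel's compile-time tunables
// (block size, then thread cluster/slice sizes and vector dim/size choices); see
// device_reduce_blockwise.hpp for the exact parameter order.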

static struct option long_options[] = {{"inLengths", required_argument, nullptr, 'D'},
{"scales", required_argument, nullptr, 'S'},
{"verify", required_argument, nullptr, 'v'},
{"help", no_argument, nullptr, '?'},
{nullptr, 0, nullptr, 0}};

class SimpleAppArgs
{
template <typename T>
static T getSingleValueFromString(const std::string& valueStr)
{
std::istringstream iss(valueStr);

T ret;

iss >> ret;

return (ret);
};

template <typename T>
static std::vector<T> getTypeValuesFromString(const char* cstr_values)
{
std::string valuesStr(cstr_values);

std::vector<T> values;
std::size_t pos = 0;
std::size_t new_pos;

new_pos = valuesStr.find(',', pos);
while(new_pos != std::string::npos)
{
const std::string sliceStr = valuesStr.substr(pos, new_pos - pos);

T val = getSingleValueFromString<T>(sliceStr);

values.push_back(val);

pos = new_pos + 1;
new_pos = valuesStr.find(',', pos);
};

std::string sliceStr = valuesStr.substr(pos);
T val = getSingleValueFromString<T>(sliceStr);

values.push_back(val);

return (values);
};

private:
int option_index = 0;

public:
std::vector<size_t> inLengths;
std::vector<float> scales;

bool do_verification = false;

int init_method = 1;
int nrepeat = 5;

public:
void show_usage(const char* cmd)
{
std::cout << "Usage of " << cmd << std::endl;
std::cout << "--inLengths or -D, comma separated list of input tensor dimension lengths"
<< std::endl;
std::cout << "--scales or -S, comma separated two float values for alpha and beta"
<< std::endl;
std::cout << "--verify or -v, 1/0 to indicate whether to verify the reduction result by "
"comparing with the host-based reduction"
<< std::endl;
};

int processArgs(int argc, char* argv[])
{
unsigned int ch;

while(1)
{
ch = getopt_long(argc, argv, "D:S:v:l:", long_options, &option_index);
if(ch == -1)
break;
switch(ch)
{
case 'D':
if(!optarg)
throw std::runtime_error("Invalid option format!");

inLengths = getTypeValuesFromString<size_t>(optarg);
break;
case 'S':
if(!optarg)
throw std::runtime_error("Invalid option format!");

scales = getTypeValuesFromString<float>(optarg);
break;
case 'v':
if(!optarg)
throw std::runtime_error("Invalid option format!");

do_verification = static_cast<bool>(std::atoi(optarg));
break;
case '?':
if(std::string(long_options[option_index].name) == "help")
{
show_usage(argv[0]);
return (-1);
};
break;
default: show_usage(argv[0]); return (-1);
};
};

if(optind + 2 > argc)
throw std::runtime_error("Invalid cmd-line arguments, more arguments are needed!");

init_method = std::atoi(argv[optind++]);
nrepeat = std::atoi(argv[optind]);

if(scales.empty())
{
scales.push_back(1.0f);
scales.push_back(0.0f);
};

return (0);
};
};
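// Hypothetical invocation (example values only): the -D/-S/-v options are consumed by
// processArgs() above, followed by two positional arguments <init_method> <nrepeat>, e.g.
//   reduce_blockwise -D 16,64,32,960 -S 1.0,0.0 -v 1 2 5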

template <int Rank, typename ReduceDims>
static std::vector<int> get_reduce_dims()
{
std::vector<int> resDims;

static_for<0, ReduceDims::Size(), 1>{}([&](auto i) { resDims.push_back(ReduceDims::At(i)); });

return (resDims);
};

template <int Rank, typename ReduceDims>
static std::vector<int> get_invariant_dims()
{
std::vector<int> resDims;
unsigned int incFlag = 0;

static_for<0, ReduceDims::Size(), 1>{}(
[&](auto i) { incFlag = incFlag | (0x1 << ReduceDims::At(i)); });
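// incFlag now has one bit set for every reduced dimension; the dimensions whose bits are
// not set are the invariant (kept) dimensions collected below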

for(int dim = 0; dim < Rank; dim++)
{
if(incFlag & (0x1 << dim))
continue;
resDims.push_back(dim);
};

return (resDims);
};

int main(int argc, char* argv[])
{
using namespace ck::host_reduce;

SimpleAppArgs args;

if(args.processArgs(argc, argv) < 0)
return (-1);

constexpr bool op_support_indices =
(ReduceOpId == ReduceTensorOp_t::MIN || ReduceOpId == ReduceTensorOp_t::MAX ||
ReduceOpId == ReduceTensorOp_t::AMAX);

constexpr bool NeedIndices =
(op_support_indices && (IndicesOpt != ReduceTensorIndices_t::NO_INDICES));

// if the input is half type, there is no reason to use float for indexed reduction operations,
// and float must be used for non-indexed reduction operations for accuracy
constexpr bool invalid_reduce_1 =
std::is_same<InDataType, ck::half_t>::value &&
((!op_support_indices && !std::is_same<AccDataType, float>::value) ||
(op_support_indices && !std::is_same<AccDataType, ck::half_t>::value));

// if the input is float type, there is no reason to use double for indexed reduction operations
constexpr bool invalid_reduce_2 =
std::is_same<InDataType, float>::value &&
(op_support_indices && !std::is_same<AccDataType, float>::value);

// indices option can only be used when it is really needed
constexpr bool invalid_reduce_3 =
(!op_support_indices && IndicesOpt != ReduceTensorIndices_t::NO_INDICES);

constexpr bool invalid_reduce = (invalid_reduce_1 || invalid_reduce_2 || invalid_reduce_3);

if constexpr(invalid_reduce)
std::cout << "Reduction setting is not supported, exiting!" << std::endl;

Tensor<InDataType> in(args.inLengths);

const std::vector<int> InvariantDims = get_invariant_dims<Rank, ReduceDims_>();
const std::vector<int> ReduceDims = get_reduce_dims<Rank, ReduceDims_>();

std::vector<size_t> outLengths;

if(InvariantDims.empty())
outLengths.push_back(1);
else
for(auto dim : InvariantDims)
outLengths.push_back(args.inLengths[dim]);

Tensor<OutDataType> out_ref(outLengths);
Tensor<OutDataType> out(outLengths);
Tensor<int> out_indices_ref(outLengths);
Tensor<int> out_indices(outLengths);

auto inStrides = in.mDesc.GetStrides();
auto outStrides = out.mDesc.GetStrides();

size_t invariant_total_length = out.mDesc.GetElementSize();
size_t reduce_total_length = in.mDesc.GetElementSize() / invariant_total_length;

float alpha = args.scales[0];
float beta = args.scales[1];

std::size_t num_thread = std::thread::hardware_concurrency();

if(args.do_verification)
{
switch(args.init_method)
{
case 0:
in.GenerateTensorValue(GeneratorTensor_1<InDataType>{}, num_thread);
if(beta != 0.0f)
out_ref.GenerateTensorValue(GeneratorTensor_1<InDataType>{}, num_thread);
break;
case 1:
in.GenerateTensorValue(GeneratorTensor_2<InDataType>{-5, 5}, num_thread);
if(beta != 0.0f)
out_ref.GenerateTensorValue(GeneratorTensor_2<InDataType>{-5, 5}, num_thread);
break;
default:
in.GenerateTensorValue(GeneratorTensor_2<InDataType>{1, 5}, num_thread);
if(beta != 0.0f)
out_ref.GenerateTensorValue(GeneratorTensor_2<InDataType>{1, 5}, num_thread);
}

if(beta != 0.0f)
for(size_t i = 0; i < out_ref.mDesc.GetElementSpace(); i++)
out.mData[i] = out_ref.mData[i];
};

// these buffers are usually provided by the user application
DeviceMem in_dev(sizeof(InDataType) * in.mDesc.GetElementSpace());
DeviceMem out_dev(sizeof(OutDataType) * out.mDesc.GetElementSpace());

in_dev.ToDevice(in.mData.data());

if(beta != 0.0f)
out_dev.ToDevice(out.mData.data());

size_t indicesSizeInBytes = NeedIndices ? out.mDesc.GetElementSize() * sizeof(int) : 0;

DeviceMem out_indices_dev(indicesSizeInBytes);

if(args.do_verification)
{
ReductionHost<InDataType, AccDataType, OutDataType, ReduceOpId, PropagateNan, NeedIndices>
hostReduce(in.mDesc, out_ref.mDesc, InvariantDims, ReduceDims);

hostReduce.Run(
alpha, in.mData.data(), beta, out_ref.mData.data(), out_indices_ref.mData.data());
};

const auto i_inLengths = to_int_vector(args.inLengths);
const auto i_inStrides = to_int_vector(inStrides);
const auto i_outLengths = to_int_vector(outLengths);
const auto i_outStrides = to_int_vector(outStrides);

auto reduce = DeviceReduceInstance{};

auto wsSizeInBytes = reduce.GetWorkspaceSizeInBytes(i_inLengths);

DeviceMem ws_dev(wsSizeInBytes);

auto argument_ptr =
reduce.MakeArgumentPointer(i_inLengths,
i_inStrides,
i_outLengths,
i_outStrides,
alpha,
beta,
in_dev.GetDeviceBuffer(),
out_dev.GetDeviceBuffer(),
out_indices_dev.GetDeviceBuffer(),
ws_dev.GetDeviceBuffer(),
InElementwiseOperation{static_cast<int>(reduce_total_length)},
AccElementwiseOperation{static_cast<int>(reduce_total_length)});

if(!reduce.IsSupportedArgument(argument_ptr.get()))
{
std::cout
<< "The runtime parameters seem not to be supported by the DeviceReduce instance, exiting!"
<< std::endl;
};

std::string reduce_name = reduce.GetTypeString();

auto invoker_ptr = reduce.MakeInvokerPointer();

float avg_time = invoker_ptr->Run(argument_ptr.get(), args.nrepeat);

std::size_t num_bytes = invariant_total_length * reduce_total_length * sizeof(InDataType) +
invariant_total_length * sizeof(OutDataType);

float gb_per_sec = num_bytes / 1.E6 / avg_time;

std::cout << "Perf: " << avg_time << " ms, " << gb_per_sec << " GB/s, " << reduce_name
<< std::endl;

if(args.do_verification)
{
out_dev.FromDevice(out.mData.data());
check_error(out_ref, out);

if(NeedIndices)
{
out_indices_dev.FromDevice(out_indices.mData.data());
check_indices(out_indices_ref, out_indices);
};
};
}
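For reference, the NORM2 reduction configured in this example computes, for each invariant index, the square root of the sum of squares over the reduced elements, with alpha scaling the result and beta blending in the prior output. Below is a minimal host-side sketch of that arithmetic, independent of the ReductionHost helper used above; the flat invariant-major layout and the function name are simplifying assumptions.

#include <cmath>
#include <cstddef>
#include <vector>

// Illustrative only: out[i] = alpha * sqrt(sum_j in[i*reduce_len + j]^2) + beta * prior_out[i]
std::vector<float> norm2_reference(const std::vector<float>& in,
                                   std::size_t invariant_len,
                                   std::size_t reduce_len,
                                   float alpha,
                                   float beta,
                                   const std::vector<float>& prior_out)
{
    std::vector<float> out(invariant_len);
    for(std::size_t i = 0; i < invariant_len; ++i)
    {
        float acc = 0.0f;
        for(std::size_t j = 0; j < reduce_len; ++j)
        {
            const float v = in[i * reduce_len + j];
            acc += v * v;
        }
        out[i] = alpha * std::sqrt(acc) + beta * prior_out[i];
    }
    return out;
}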
example/CMakeLists.txt
@@ -28,6 +28,8 @@ set(CONV2D_WRW_XDL_SOURCE 13_conv2d_backward_weight_xdl/main.cpp)
set(CONV3D_FWD_XDL_SOURCE 10_conv3d_fwd_xdl/conv3d_fwd_xdl.cpp)
set(CONVND_FWD_XDL_SOURCE 11_convnd_fwd_xdl/convnd_fwd_xdl.cpp)
set(CONV2D_BWD_DATA_XDL_SOURCE 12_conv2d_bwd_data_xdl/conv2d_bwd_data_xdl.cpp)
set(POOL2D_FWD_SOURCE 12_pool2d_fwd/pool2d_fwd.cpp)
set(REDUCE_BLOCKWISE_SOURCE 13_reduce_blockwise/reduce_blockwise.cpp)

add_executable(gemm_xdl ${GEMM_XDL_SOURCE})
add_executable(gemm_xdl_int8 ${GEMM_XDL_INT8_SOURCE})
@@ -44,6 +46,8 @@ add_executable(conv2d_wrw_xdl ${CONV2D_WRW_XDL_SOURCE})
add_executable(conv3d_fwd_xdl ${CONV3D_FWD_XDL_SOURCE})
add_executable(convnd_fwd_xdl ${CONVND_FWD_XDL_SOURCE})
add_executable(conv2d_bwd_data_xdl ${CONV2D_BWD_DATA_XDL_SOURCE})
add_executable(pool2d_fwd ${POOL2D_FWD_SOURCE})
add_executable(reduce_blockwise ${REDUCE_BLOCKWISE_SOURCE})

target_link_libraries(gemm_xdl PRIVATE host_tensor)
target_link_libraries(gemm_xdl_int8 PRIVATE host_tensor)
@@ -60,4 +64,6 @@ target_link_libraries(conv2d_wrw_xdl PRIVATE host_tensor)
target_link_libraries(conv3d_fwd_xdl PRIVATE host_tensor)
target_link_libraries(convnd_fwd_xdl PRIVATE host_tensor)
target_link_libraries(conv2d_bwd_data_xdl PRIVATE host_tensor)
target_link_libraries(pool2d_fwd PRIVATE host_tensor)
target_link_libraries(reduce_blockwise PRIVATE host_tensor)