mirror of https://github.com/ROCm/composable_kernel.git
synced 2026-05-15 18:42:06 +00:00
* Initial adding of generic reduction
* Initial adding of generic reduction ...
* Updates to make compilation work
* clang-format all files
* clang-format some files again
* Renaming in profiler/include/profile_reduce.hpp
* Updates to make BlockWise cases pass
* Updates to make ThreadWise and MultiBlockTwoCall cases pass
* Remove the support for MUL and NORM1 reduceOp from the profiler and the device instances
* Change to replace the dim0_max_vector_size/dim1_max_vector_size template arguments in the device reduce classes
* format
* adding pooling
* added max and average pooling
* comment out cout and kernel timing
* Tiny simplification in profiler/reduce_profiler.cpp
* Add example for reduce_blockwise
* Tiny updates
* Change to pass the ElementWiseOp from device layer to kernel
* Fix the vectorDim and vectorSize in Device layer
* Enable vector load on both dim0 and dim1 for Threadwise method
* Tiny updates
* Change to let the user pass the preUnaryOp and posUnaryOp
* Make pooling example work
* split device_reduce_instance into two libraries
* Tiny update
* Replace nanPropaOpt enum by boolean propagate_nan
* Simplification in DeviceReduce layer codes
* update build
* Change to clarify the difference between ck::half_t and half_float::half
* Renaming in all the reduction codes
* Add VectorSize as template parameter for device layer
* Add BetaIsZero as a kernel template parameter and use AccDataType for alpha
* print
* Small updates for pooling
* Updates for host_generic_reduction for reference
* Update to make AVG pooling pass
* Update to make MAX pooling with indices output pass
* fix
* add OutDst vector store to threadwise reduction and pooling
* tweak
* turn off check_indices, which caused a build issue
* refactor pooling
* clean up
* turn off check_indices due to a build issue with php-compiler
* add more tile size for odd C
* tweak conv for odd C
* update script
* clean up elementwise op
* add a hack in reduction_operator.hpp to avoid a compile error; the proper fix is to use element_wise_op in the reduction op
* Add OutVectorSize as device and kernel tunable, also update to Elementwise Operations
* Move reduce operator mapping to host layer file reduction_operator_mapping.hpp from reduction_operator.hpp
* Change to the unary operators
* Move the definitions of unary operations to element_wise_operation.hpp
* re-org files
* Refine in device interfaces and multiblock kernels
* Split the reduction configurations into instances for specific methods
* Update in getTypeString() of device pool2d
* Renaming in host and kernel
* Tiny update in profiler/src/profiler.cpp
* Uncomment in device_operation/CMakeLists.txt to enable the building of all operations
* Make check_indices a templated function to fix a linking issue
* Renaming in the profiler reduce module
* Add support for double Reduction (but disable MultiblockAtomicAdd for double)
* Tiny correction of literal string
* Rename DevicePoolFwd to DevicePool2dFwd
* Split device_reduce_instance_xxx.cpp files according to the data types to speed up compiling
* Add comments for lists of configurations, lists of instances and references of add_reduce_instances_xxx
* Remove un-used header file gridwise_generic_reduction_wrapper_common.hpp
* Renaming and refining in the Reduction codes
* Tiny change in the unary operators
* Renaming symbols and files
* Renaming symbols in the kernels
* Move kernel kernel_set_buffer_value to separate file
* Add IndexDataType template parameter for kernels and use int32_t as index data type in device layer
* Tiny update in the kernels
* Remove definition of sqrtf()/isnan()/abs() for half_t due to some ADL issue
* Simplify a helper function in device layer
* Tiny adjustment in testing data initialization
* Renaming in kernel/device/host
* Add two testing scripts for reduction
* Refine the Unary operators in element_wise_operation.hpp
* Update in the reduce profiler module
* Update to the reduction testing scripts
* reduce compile parallelism
* change CI docker to rocm5.0
* remove unused variables
* fix build
Co-authored-by: Chao Liu <chao.liu2@amd.com>
[ROCm/composable_kernel commit: e17c0d8008]
627 lines
25 KiB
C++
#pragma once
#include "device_reduce.hpp"
#include "device_reduce_instance.hpp"
#include "reduction_enums.hpp"
#include "host_generic_reduction.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace device_reduce_instance {

template <int Rank, typename ReduceDims, int ReduceOpId, int NanOpt, int IndicesOpt>
struct ReduceDescription
{
    static constexpr int Rank_       = Rank;
    static constexpr int ReduceOpId_ = ReduceOpId;
    static constexpr int NanOpt_     = NanOpt;
    static constexpr int IndicesOpt_ = IndicesOpt;

    using ReduceDims_ = ReduceDims;
};

using reduce_description_instances =
    std::tuple<ReduceDescription<4, Sequence<0, 1, 2>, 0, 0, 0>, // for ADD
               ReduceDescription<4, Sequence<0>, 0, 0, 0>,
               ReduceDescription<2, Sequence<1>, 0, 0, 0>,

               ReduceDescription<4, Sequence<0, 1, 2>, 5, 0, 0>, // for AVG
               ReduceDescription<4, Sequence<0>, 5, 0, 0>,
               ReduceDescription<2, Sequence<1>, 5, 0, 0>,

               ReduceDescription<4, Sequence<0, 1, 2>, 7, 0, 0>, // for NORM2
               ReduceDescription<4, Sequence<0>, 7, 0, 0>,
               ReduceDescription<2, Sequence<1>, 7, 0, 0>,

               ReduceDescription<4, Sequence<0, 1, 2>, 2, 0, 0>, // for MIN
               ReduceDescription<4, Sequence<0>, 2, 0, 0>,
               ReduceDescription<2, Sequence<1>, 2, 0, 0>,
               ReduceDescription<4, Sequence<0, 1, 2>, 3, 0, 0>, // for MAX
               ReduceDescription<4, Sequence<0>, 3, 0, 0>,
               ReduceDescription<2, Sequence<1>, 3, 0, 0>,
               ReduceDescription<4, Sequence<0, 1, 2>, 4, 0, 0>, // for AMAX
               ReduceDescription<4, Sequence<0>, 4, 0, 0>,
               ReduceDescription<2, Sequence<1>, 4, 0, 0>,

               ReduceDescription<4, Sequence<0, 1, 2>, 2, 0, 1>, // for MIN
               ReduceDescription<4, Sequence<0>, 2, 0, 1>,
               ReduceDescription<2, Sequence<1>, 2, 0, 1>,
               ReduceDescription<4, Sequence<0, 1, 2>, 3, 0, 1>, // for MAX
               ReduceDescription<4, Sequence<0>, 3, 0, 1>,
               ReduceDescription<2, Sequence<1>, 3, 0, 1>,
               ReduceDescription<4, Sequence<0, 1, 2>, 4, 0, 1>, // for AMAX
               ReduceDescription<4, Sequence<0>, 4, 0, 1>,
               ReduceDescription<2, Sequence<1>, 4, 0, 1>>;

template <typename DescriptionType>
bool description_match(const DescriptionType& description,
                       int Rank,
                       const std::vector<int>& ReduceDims,
                       ReduceTensorOp_t ReduceOpId,
                       NanPropagation_t NanOpt,
                       ReduceTensorIndices_t IndicesOpt)
{
    if(description.Rank_ != Rank || description.ReduceOpId_ != static_cast<int>(ReduceOpId) ||
       description.NanOpt_ != static_cast<int>(NanOpt) ||
       description.IndicesOpt_ != static_cast<int>(IndicesOpt))
        return (false);

    if(DescriptionType::ReduceDims_::Size() != ReduceDims.size())
        return (false);

    bool result = true;

    static_for<0, DescriptionType::ReduceDims_::Size(), 1>{}([&](auto i) {
        if(DescriptionType::ReduceDims_::At(i) != ReduceDims[i])
            result = false;
    });

    return (result);
};
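
// Illustrative only: profile_reduce_impl (further below) walks reduce_description_instances
// with static_for and uses description_match to select the one description whose static
// parameters agree with the runtime request. For instance, using the integer codes from the
// tuple above (0 == ADD per the comments),
//   description_match(ReduceDescription<4, Sequence<0, 1, 2>, 0, 0, 0>{},
//                     4, {0, 1, 2},
//                     static_cast<ReduceTensorOp_t>(0),
//                     static_cast<NanPropagation_t>(0),
//                     static_cast<ReduceTensorIndices_t>(0))
// returns true, while changing any argument makes it return false.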

} // namespace device_reduce_instance
} // namespace device
} // namespace tensor_operation
} // namespace ck

namespace ck {
namespace profiler {

template <int Rank, typename ReduceDims>
static std::vector<int> get_reduce_dims()
{
    std::vector<int> resDims;

    static_for<0, ReduceDims::Size(), 1>{}([&](auto i) { resDims.push_back(ReduceDims::At(i)); });

    return (resDims);
};

template <int Rank, typename ReduceDims>
static std::vector<int> get_invariant_dims()
{
    std::vector<int> resDims;
    unsigned int incFlag = 0;

    static_for<0, ReduceDims::Size(), 1>{}(
        [&](auto i) { incFlag = incFlag | (0x1 << ReduceDims::At(i)); });

    for(int dim = 0; dim < Rank; dim++)
    {
        if(incFlag & (0x1 << dim))
            continue;
        resDims.push_back(dim);
    };

    return (resDims);
};
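
// For example, with Rank = 4 and ReduceDims = Sequence<0, 1, 2>, incFlag ends up as 0b0111,
// so get_invariant_dims() returns {3} and get_reduce_dims() returns {0, 1, 2}; the invariant
// (outer) dims are simply those not being reduced.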

template <typename T>
static void dumpBufferToFile(const char* fileName, T* data, size_t dataNumItems)
{
    std::ofstream outFile(fileName, std::ios::binary);
    if(outFile)
    {
        outFile.write(reinterpret_cast<char*>(data), dataNumItems * sizeof(T));
        outFile.close();
        std::cout << "Write output to file " << fileName << std::endl;
    }
    else
    {
        std::cout << "Could not open file " << fileName << " for writing" << std::endl;
    }
};

// map the data type used by the GPU kernels to the corresponding type used by the host codes
template <typename inDataType>
struct type_mapping
{
    using outDataType = inDataType;
};

template <>
struct type_mapping<ck::half_t>
{
    using outDataType = half_float::half;
};
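
// Illustrative only: with these mappings,
//   type_mapping<ck::half_t>::outDataType  -> half_float::half  (host-side half type)
//   type_mapping<float>::outDataType       -> float             (identity)
// which is what lets the host-side ReductionHost below reinterpret the device buffers.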

template <typename InDataType,
          typename AccDataType,
          typename OutDataType,
          int Rank,
          typename ReduceDims_,
          ReduceTensorOp_t ReduceOpId,
          NanPropagation_t NanOpt,
          ReduceTensorIndices_t IndicesOpt>
void profile_reduce_impl_impl(bool do_verification,
                              int init_method,
                              bool do_log,
                              bool do_dumpout,
                              int nrepeat,
                              const std::vector<size_t>& inLengths,
                              float alpha,
                              float beta)
{
    using namespace ck::tensor_operation::device;
    using namespace ck::tensor_operation::device::device_reduce_instance;
    using namespace ck::host_reduce;

    constexpr bool op_support_indices =
        (ReduceOpId == ReduceTensorOp_t::MIN || ReduceOpId == ReduceTensorOp_t::MAX ||
         ReduceOpId == ReduceTensorOp_t::AMAX);

    constexpr bool NeedIndices =
        (op_support_indices && (IndicesOpt != ReduceTensorIndices_t::NO_INDICES));

    constexpr bool PropagateNan = (NanOpt == NanPropagation_t::PROPAGATE_NAN);

    constexpr bool out_support_atomic_add = std::is_same<OutDataType, float>::value;
    constexpr bool op_support_atomic_add =
        !op_support_indices && ReduceOpId != ReduceTensorOp_t::NORM2;
    constexpr bool use_atomic_add = (out_support_atomic_add && op_support_atomic_add);
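
    // Consequently the single-pass MultiBlockAtomicAdd path is only taken when the output is
    // float and the op accumulates by plain addition: indexed ops (MIN/MAX/AMAX) cannot be
    // merged by atomic adds, and NORM2 needs its final sqrt applied after the whole
    // accumulation finishes.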

    // 1) If InDataType is half_t, must use half_t as AccDataType for indexable reduction
    //    operations
    // 2) If InDataType is half_t, must use float as AccDataType for non-indexable reduction
    //    operations
    constexpr bool invalid_reduce_1 =
        std::is_same<InDataType, half_t>::value &&
        ((!op_support_indices && !std::is_same<AccDataType, float>::value) ||
         (op_support_indices && !std::is_same<AccDataType, half_t>::value));

    // 1) If InDataType is float, must use float as AccDataType for indexable reduction
    //    operations
    constexpr bool invalid_reduce_2 =
        std::is_same<InDataType, float>::value &&
        (op_support_indices && !std::is_same<AccDataType, float>::value);

    // 1) The indices can only be used when the reduction operation is indexable
    constexpr bool invalid_reduce_3 =
        (!op_support_indices && IndicesOpt != ReduceTensorIndices_t::NO_INDICES);

    constexpr bool invalid_reduce = (invalid_reduce_1 || invalid_reduce_2 || invalid_reduce_3);

    if constexpr(!invalid_reduce)
    {
        Tensor<InDataType> in(inLengths);

        const std::vector<int> OuterDims  = get_invariant_dims<Rank, ReduceDims_>();
        const std::vector<int> ReduceDims = get_reduce_dims<Rank, ReduceDims_>();

        std::vector<size_t> outLengths;

        if(OuterDims.empty())
            outLengths.push_back(1);
        else
            for(auto dim : OuterDims)
                outLengths.push_back(inLengths[dim]);

        Tensor<OutDataType> out_ref(outLengths);
        Tensor<OutDataType> out(outLengths);
        Tensor<int> out_indices_ref(outLengths);
        Tensor<int> out_indices(outLengths);

        auto inStrides  = in.mDesc.GetStrides();
        auto outStrides = out.mDesc.GetStrides();

        size_t invariant_total_length = out.mDesc.GetElementSize();
        size_t reduce_total_length    = in.mDesc.GetElementSize() / invariant_total_length;

        std::size_t num_thread = std::thread::hardware_concurrency();

        if(do_verification)
        {
            switch(init_method)
            {
            case 0:
                in.GenerateTensorValue(GeneratorTensor_1<InDataType>{}, num_thread);
                if(beta != 0.0f)
                    out_ref.GenerateTensorValue(GeneratorTensor_1<InDataType>{}, num_thread);
                break;
            case 1:
                in.GenerateTensorValue(GeneratorTensor_2<InDataType>{-5, 5}, num_thread);
                if(beta != 0.0f)
                    out_ref.GenerateTensorValue(GeneratorTensor_2<InDataType>{-5, 5}, num_thread);
                break;
            default:
                in.GenerateTensorValue(GeneratorTensor_2<InDataType>{1, 5}, num_thread);
                if(beta != 0.0f)
                    out_ref.GenerateTensorValue(GeneratorTensor_2<InDataType>{1, 5}, num_thread);
            }

            if(beta != 0.0f)
                for(size_t i = 0; i < out_ref.mDesc.GetElementSpace(); i++)
                    out.mData[i] = out_ref.mData[i];
        };

        // these buffers are usually provided by the user application
        DeviceMem in_dev(sizeof(InDataType) * in.mDesc.GetElementSpace());
        DeviceMem out_dev(sizeof(OutDataType) * out.mDesc.GetElementSpace());

        in_dev.ToDevice(in.mData.data());

        if(beta != 0.0f)
            out_dev.ToDevice(out.mData.data());

        size_t indicesSizeInBytes = NeedIndices ? out.mDesc.GetElementSize() * sizeof(int) : 0;

        DeviceMem out_indices_dev(indicesSizeInBytes);

        float best_avg_time   = 0;
        float best_gb_per_sec = 0;

        using InElementwiseOperation_0 =
            typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::
                InElementwiseOperation;
        using AccElementwiseOperation_0 =
            typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::
                AccElementwiseOperation;
        using InElementwiseOperation_1 =
            typename reduce_unary_operator<AccDataType, ReduceOpId, true, false>::
                InElementwiseOperation;
        using AccElementwiseOperation_1 =
            typename reduce_unary_operator<AccDataType, ReduceOpId, true, false>::
                AccElementwiseOperation;
        using InElementwiseOperation_2 =
            typename reduce_unary_operator<AccDataType, ReduceOpId, false, true>::
                InElementwiseOperation;
        using AccElementwiseOperation_2 =
            typename reduce_unary_operator<AccDataType, ReduceOpId, false, true>::
                AccElementwiseOperation;

        using DeviceReduceInstPtr0 =
            DeviceReducePtr<InElementwiseOperation_0, AccElementwiseOperation_0>;
        using DeviceReduceInstPtr1 =
            DeviceReducePtr<InElementwiseOperation_1, AccElementwiseOperation_1>;
        using DeviceReduceInstPtr2 =
            DeviceReducePtr<InElementwiseOperation_2, AccElementwiseOperation_2>;

        std::vector<DeviceReduceInstPtr0> reduce0_ptrs;
        std::vector<DeviceReduceInstPtr1> reduce1_ptrs;
        std::vector<DeviceReduceInstPtr2> reduce2_ptrs;
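
        // Three instance families are collected below: reduce0_ptrs holds single-kernel
        // methods (ThreadWise, BlockWise, and MultiBlockAtomicAdd when usable), reduce1_ptrs
        // holds the first call of the two-call MultiBlockPartialReduce method, and
        // reduce2_ptrs holds the BlockWise second call that reduces its workspace.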

        add_device_reduce_instance_threadwise<InDataType,
                                              AccDataType,
                                              OutDataType,
                                              Rank,
                                              ReduceDims_,
                                              ReduceOpId,
                                              NanOpt,
                                              IndicesOpt>(reduce0_ptrs);

        add_device_reduce_instance_blockwise<InDataType,
                                             AccDataType,
                                             OutDataType,
                                             Rank,
                                             ReduceDims_,
                                             ReduceOpId,
                                             NanOpt,
                                             IndicesOpt>(reduce0_ptrs);

        if constexpr(use_atomic_add)
            add_device_reduce_instance_multiblock_atomic_add<InDataType,
                                                             AccDataType,
                                                             OutDataType,
                                                             Rank,
                                                             ReduceDims_,
                                                             ReduceOpId,
                                                             NanOpt,
                                                             IndicesOpt>(reduce0_ptrs);
        else
            add_device_reduce_instance_multiblock_partial_reduce<InDataType,
                                                                 AccDataType,
                                                                 OutDataType,
                                                                 Rank,
                                                                 ReduceDims_,
                                                                 ReduceOpId,
                                                                 NanOpt,
                                                                 IndicesOpt>(reduce1_ptrs);

        // used for secondary reduction
        if constexpr(!use_atomic_add)
            add_device_reduce_instance_blockwise_second_call<AccDataType,
                                                             AccDataType,
                                                             OutDataType,
                                                             Rank,
                                                             ReduceDims_,
                                                             ReduceOpId,
                                                             NanOpt,
                                                             IndicesOpt>(reduce2_ptrs);

        if(reduce0_ptrs.empty() && reduce1_ptrs.empty())
        {
            throw std::runtime_error("Wrong! No device REDUCE instance found");
        };

        if(do_verification)
        {
            using hInType   = typename type_mapping<InDataType>::outDataType;
            using hOutType  = typename type_mapping<OutDataType>::outDataType;
            using hCompType = typename type_mapping<AccDataType>::outDataType;

            ReductionHost<hInType, hCompType, hOutType, ReduceOpId, PropagateNan, NeedIndices>
                hostReduce(in.mDesc, out_ref.mDesc, OuterDims, ReduceDims);

            hostReduce.Run(alpha,
                           reinterpret_cast<const hInType*>(in.mData.data()),
                           beta,
                           reinterpret_cast<hOutType*>(out_ref.mData.data()),
                           out_indices_ref.mData.data());
        };

        const auto i_inLengths  = to_int_vector(inLengths);
        const auto i_inStrides  = to_int_vector(inStrides);
        const auto i_outLengths = to_int_vector(outLengths);
        const auto i_outStrides = to_int_vector(outStrides);

        for(auto& reduce_ptr : reduce0_ptrs)
        {
            auto wsSizeInBytes = reduce_ptr->GetWorkspaceSizeInBytes(i_inLengths);

            DeviceMem ws_dev(wsSizeInBytes);

            auto argument_ptr = reduce_ptr->MakeArgumentPointer(
                i_inLengths,
                i_inStrides,
                i_outLengths,
                i_outStrides,
                alpha,
                beta,
                in_dev.GetDeviceBuffer(),
                out_dev.GetDeviceBuffer(),
                out_indices_dev.GetDeviceBuffer(),
                ws_dev.GetDeviceBuffer(),
                InElementwiseOperation_0{static_cast<int32_t>(reduce_total_length)},
                AccElementwiseOperation_0{static_cast<int32_t>(reduce_total_length)});

            if(!reduce_ptr->IsSupportedArgument(argument_ptr.get()))
                continue;

            std::string reduce_name = reduce_ptr->GetTypeString();

            auto invoker_ptr = reduce_ptr->MakeInvokerPointer();

            float avg_time = invoker_ptr->Run(argument_ptr.get(), nrepeat);

            std::size_t num_bytes =
                invariant_total_length * reduce_total_length * sizeof(InDataType) +
                invariant_total_length * sizeof(OutDataType);

            float gb_per_sec = num_bytes / 1.E6 / avg_time;
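            // avg_time is in milliseconds, so bytes / 1.E6 / ms equals GB/s (1 GB = 1.E9 bytes)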

            std::cout << "Perf: " << avg_time << " ms, " << gb_per_sec << " GB/s, " << reduce_name
                      << std::endl;

            if(gb_per_sec > best_gb_per_sec)
            {
                best_avg_time   = avg_time;
                best_gb_per_sec = gb_per_sec;
            }

            if(do_verification)
            {
                out_dev.FromDevice(out.mData.data());
                check_error(out_ref, out);

                if(NeedIndices)
                {
                    out_indices_dev.FromDevice(out_indices.mData.data());
                    check_indices(out_indices_ref, out_indices);
                };

                if(do_log)
                {
                    LogRangeAsType<float>(std::cout << "out_host : ", out_ref.mData, ",")
                        << std::endl;
                    LogRangeAsType<float>(std::cout << "out_device: ", out.mData, ",") << std::endl;
                };
            };

            if(do_dumpout)
            {
                dumpBufferToFile("dump_in.bin", in.mData.data(), in.mDesc.GetElementSize());
                dumpBufferToFile("dump_out.bin", out.mData.data(), out.mDesc.GetElementSize());
                dumpBufferToFile(
                    "dump_out_host.bin", out_ref.mData.data(), out_ref.mDesc.GetElementSize());
                if(NeedIndices)
                {
                    dumpBufferToFile("dump_indices.bin",
                                     out_indices.mData.data(),
                                     out_indices.mDesc.GetElementSize());
                    dumpBufferToFile("dump_indices_host.bin",
                                     out_indices_ref.mData.data(),
                                     out_indices_ref.mDesc.GetElementSize());
                };
            };
        };

        for(auto& reduce_ptr : reduce1_ptrs)
        {
            auto wsSizeInBytes = reduce_ptr->GetWorkspaceSizeInBytes(i_inLengths);

            DeviceMem ws_dev(wsSizeInBytes);

            auto argument_ptr = reduce_ptr->MakeArgumentPointer(
                i_inLengths,
                i_inStrides,
                i_outLengths,
                i_outStrides,
                alpha,
                beta,
                in_dev.GetDeviceBuffer(),
                out_dev.GetDeviceBuffer(),
                out_indices_dev.GetDeviceBuffer(),
                ws_dev.GetDeviceBuffer(),
                InElementwiseOperation_1{static_cast<int32_t>(reduce_total_length)},
                AccElementwiseOperation_1{static_cast<int32_t>(reduce_total_length)});

            if(!reduce_ptr->IsSupportedArgument(argument_ptr.get()))
                continue;

            std::string reduce_name = reduce_ptr->GetTypeString();

            auto invoker_ptr = reduce_ptr->MakeInvokerPointer();

            float avg_time = invoker_ptr->Run(argument_ptr.get(), nrepeat);

            std::size_t num_bytes =
                invariant_total_length * reduce_total_length * sizeof(InDataType) +
                invariant_total_length * sizeof(OutDataType);

            std::vector<int> inLengths2 = reduce_ptr->GetWorkspace2dLengths(argument_ptr.get());
            std::vector<int> inStrides2{inLengths2[1], 1};
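            // The partial-reduce workspace is treated as a row-major 2-D tensor of accumulated
            // values (inLengths2[0] invariant rows of inLengths2[1] partial results each); the
            // second-call instances below reduce it along dimension 1 into the final output.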

            for(auto& reduce2_ptr : reduce2_ptrs)
            {
                auto argument2_ptr = reduce2_ptr->MakeArgumentPointer(
                    inLengths2,
                    inStrides2,
                    i_outLengths,
                    i_outStrides,
                    alpha,
                    beta,
                    ws_dev.GetDeviceBuffer(),
                    out_dev.GetDeviceBuffer(),
                    out_indices_dev.GetDeviceBuffer(),
                    ws_dev.GetDeviceBuffer(),
                    InElementwiseOperation_2{static_cast<int32_t>(reduce_total_length)},
                    AccElementwiseOperation_2{static_cast<int32_t>(reduce_total_length)});

                if(!reduce2_ptr->IsSupportedArgument(argument2_ptr.get()))
                    continue;

                std::string reduce2_name = reduce2_ptr->GetTypeString();

                auto invoker2_ptr = reduce2_ptr->MakeInvokerPointer();

                float avg_time_2 = invoker2_ptr->Run(argument2_ptr.get(), nrepeat);

                std::size_t num_bytes_2 =
                    static_cast<size_t>(inLengths2[0]) * inLengths2[1] * sizeof(AccDataType);

                float gb_per_sec = (num_bytes + num_bytes_2) / 1.E6 / (avg_time + avg_time_2);

                std::cout << "Perf: " << (avg_time + avg_time_2) << " ms, " << gb_per_sec
                          << " GB/s, " << reduce_name << " => " << reduce2_name << std::endl;

                if(gb_per_sec > best_gb_per_sec)
                {
                    best_avg_time   = avg_time + avg_time_2;
                    best_gb_per_sec = gb_per_sec;
                }

                if(do_verification)
                {
                    out_dev.FromDevice(out.mData.data());
                    check_error(out_ref, out);

                    if(NeedIndices)
                    {
                        out_indices_dev.FromDevice(out_indices.mData.data());
                        check_indices(out_indices_ref, out_indices);
                    };

                    if(do_log)
                    {
                        LogRangeAsType<float>(std::cout << "out_host : ", out_ref.mData, ",")
                            << std::endl;
                        LogRangeAsType<float>(std::cout << "out_device: ", out.mData, ",")
                            << std::endl;
                    }
                }

                if(do_dumpout)
                {
                    dumpBufferToFile("dump_in.bin", in.mData.data(), in.mDesc.GetElementSize());
                    dumpBufferToFile("dump_out.bin", out.mData.data(), out.mDesc.GetElementSize());
                    dumpBufferToFile(
                        "dump_out_host.bin", out_ref.mData.data(), out_ref.mDesc.GetElementSize());
                    if(NeedIndices)
                    {
                        dumpBufferToFile("dump_indices.bin",
                                         out_indices.mData.data(),
                                         out_indices.mDesc.GetElementSize());
                        dumpBufferToFile("dump_indices_host.bin",
                                         out_indices_ref.mData.data(),
                                         out_indices_ref.mDesc.GetElementSize());
                    };
                };
            };
        };

        std::cout << "Best Perf: " << best_avg_time << " ms, " << best_gb_per_sec << " GB/s"
                  << std::endl;
    }
    else
    {
        std::cout << "The requested reduction operation is not supported, please check !!!"
                  << std::endl;
    };
};

template <typename InDataType, typename AccDataType, typename OutDataType>
void profile_reduce_impl(bool do_verification,
                         int init_method,
                         bool do_log,
                         bool do_dumpout,
                         int nrepeat,
                         const std::vector<size_t>& inLengths,
                         const std::vector<int>& ReduceDims,
                         ReduceTensorOp_t ReduceOpId,
                         NanPropagation_t NanOpt,
                         ReduceTensorIndices_t IndicesOpt,
                         float alpha,
                         float beta)
{
    bool matched = false;

    using tuple_of_description_instances =
        tensor_operation::device::device_reduce_instance::reduce_description_instances;

    const auto tuple_object = tuple_of_description_instances{};

    static_for<0, std::tuple_size<tuple_of_description_instances>::value, 1>{}([&](auto i) {
        if(matched)
            return;

        using descType = remove_cvref_t<decltype(std::get<i>(tuple_object))>;

        if(!description_match(
               descType{}, inLengths.size(), ReduceDims, ReduceOpId, NanOpt, IndicesOpt))
            return;

        profile_reduce_impl_impl<InDataType,
                                 AccDataType,
                                 OutDataType,
                                 descType::Rank_,
                                 typename descType::ReduceDims_,
                                 static_cast<ReduceTensorOp_t>(descType::ReduceOpId_),
                                 static_cast<NanPropagation_t>(descType::NanOpt_),
                                 static_cast<ReduceTensorIndices_t>(descType::IndicesOpt_)>(
            do_verification, init_method, do_log, do_dumpout, nrepeat, inLengths, alpha, beta);

        matched = true;
    });
};
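
// Illustrative only (argument plumbing assumed from the profiler module): profiling a 4-D
// half -> half ADD reduction over dims {0, 1, 2} with float accumulation could look like
//   profile_reduce_impl<ck::half_t, float, ck::half_t>(
//       true,                              // do_verification
//       2,                                 // init_method
//       false, false,                      // do_log, do_dumpout
//       10,                                // nrepeat
//       {64, 4, 280, 82},                  // inLengths
//       {0, 1, 2},                         // ReduceDims
//       static_cast<ReduceTensorOp_t>(0),  // 0 == ADD per reduce_description_instances
//       static_cast<NanPropagation_t>(0),
//       static_cast<ReduceTensorIndices_t>(0),
//       1.0f, 0.0f);                       // alpha, beta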

} // namespace profiler
} // namespace ck