mirror of https://github.com/ROCm/composable_kernel.git
synced 2026-05-14 02:02:46 +00:00
Regulate reduction accumulator operations and Element-wise operations (#274)
* Remove the class-level data type template from the Reduction operation classes and add templates to their operator() and GetIdentityValue() interfaces
* Apply the same change to the unary element-wise operators and to reduce_unary_operator (the mapping class), and update dependent code in all host layers
* Remove the data type template parameter from reduce_binary_operator (the mapping class) and update dependent code in the host layers
* Add InMemoryDataOperatonSupportedOnDataType to check the match between a data type and an InMemoryDataOperation
* Use struct-scope operator template instantiation for binary and unary element-wise operations
* Change a few more element-wise operations to use a template for operator()
* Tiny correction in the Normalize operator
* Add static_assert to check data type applicability for some reduction accumulator and element-wise operations
* Correct some examples with regard to using ReduceAccDataType
* Use static_assert for UnaryDivide
* Update merged code to use element-wise operations and reduction accumulator operations correctly
* Tiny fix with regard to SetWorkSpacePointer()
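For orientation before the diff: below is a minimal compilable sketch of the calling convention this commit standardizes on. The ck:: names mirror the diff, but the simplified bodies and the main() scaffolding are illustrative assumptions, not the library's full definitions.

```cpp
// Sketch only: reduction accumulator ops lose their class-level data type
// parameter; the type is now supplied per call via templated interfaces.
#include <cstdint>
#include <tuple>

namespace ck { namespace reduce {
struct Add // before this commit: template <typename T> struct Add { ... };
{
    // The identity value is requested for an explicit accumulator type.
    template <typename T>
    static constexpr T GetIdentityValue() { return static_cast<T>(0); }

    // The binary accumulate step is likewise templated at the call site.
    template <typename T>
    constexpr void operator()(T& acc, T v) const { acc += v; }
};
}} // namespace ck::reduce

int main()
{
    ck::reduce::Add add{};
    float acc = add.GetIdentityValue<float>(); // type chosen here, not at class scope
    add(acc, 2.0f);
    add(acc, 3.0f); // acc == 5.0f
    return acc == 5.0f ? 0 : 1;
}
```

The same pattern drives the host-side change visible throughout the examples below: instead of constructing InElementwiseOperation/AccElementwiseOperation with a divider by hand, callers obtain both functors from reduce_unary_operator<Op, IsFirstReduce, IsLastReduce>::GetElementwiseOperator(reduceLength) via std::tie.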
[ROCm/composable_kernel commit: 1f543bfa79]
@@ -33,11 +33,11 @@ constexpr ReduceTensorOp ReduceOpId = ReduceTensorOp::NORM2;
 constexpr bool PropagateNan = true;
 constexpr bool OutputIndex = false;
 
-using ReduceOperation = typename reduce_binary_operator<AccDataType, ReduceOpId>::opType;
+using ReduceOperation = typename reduce_binary_operator<ReduceOpId>::opType;
 using InElementwiseOperation =
-    typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::InElementwiseOperation;
+    typename reduce_unary_operator<ReduceOpId, true, true>::InElementwiseOperation;
 using AccElementwiseOperation =
-    typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::AccElementwiseOperation;
+    typename reduce_unary_operator<ReduceOpId, true, true>::AccElementwiseOperation;
 
 using DeviceReduceInstance = DeviceReduceMultiBlock<InDataType,
                                                     AccDataType,
@@ -247,6 +247,13 @@ int main(int argc, char* argv[])
 
     DeviceMem out_index_dev(indicesSizeInBytes);
 
+    InElementwiseOperation in_elementwise_op;
+    AccElementwiseOperation acc_elementwise_op;
+
+    std::tie(in_elementwise_op, acc_elementwise_op) =
+        reduce_unary_operator<ReduceOpId, true, true>::GetElementwiseOperator(
+            static_cast<int32_t>(reduce_total_length));
+
     if(args.do_verification)
     {
         ReductionHost<InDataType,
@@ -261,8 +268,13 @@ int main(int argc, char* argv[])
                       OutputIndex>
             hostReduce(in.mDesc, out_ref.mDesc, invariantDims, reduceDims);
 
-        hostReduce.Run(
-            alpha, in.mData.data(), beta, out_ref.mData.data(), out_indices_ref.mData.data());
+        hostReduce.Run(alpha,
+                       in.mData.data(),
+                       beta,
+                       out_ref.mData.data(),
+                       out_indices_ref.mData.data(),
+                       in_elementwise_op,
+                       acc_elementwise_op);
     };
 
     std::vector<ck::index_t> i_inLengths;
@@ -277,20 +289,19 @@ int main(int argc, char* argv[])
 
     auto reduce = DeviceReduceInstance{};
 
-    auto argument_ptr = reduce.MakeArgumentPointer(
-        i_inLengths,
-        i_inStrides,
-        i_outLengths,
-        i_outStrides,
-        reduceDims,
-        alpha,
-        beta,
-        in_dev.GetDeviceBuffer(),
-        nullptr,
-        out_dev.GetDeviceBuffer(),
-        out_index_dev.GetDeviceBuffer(),
-        InElementwiseOperation{static_cast<int32_t>(reduce_total_length)},
-        AccElementwiseOperation{static_cast<int32_t>(reduce_total_length)});
+    auto argument_ptr = reduce.MakeArgumentPointer(i_inLengths,
+                                                   i_inStrides,
+                                                   i_outLengths,
+                                                   i_outStrides,
+                                                   reduceDims,
+                                                   alpha,
+                                                   beta,
+                                                   in_dev.GetDeviceBuffer(),
+                                                   nullptr,
+                                                   out_dev.GetDeviceBuffer(),
+                                                   out_index_dev.GetDeviceBuffer(),
+                                                   in_elementwise_op,
+                                                   acc_elementwise_op);
 
     if(!reduce.IsSupportedArgument(argument_ptr.get()))
     {
 
@@ -31,13 +31,13 @@ constexpr ReduceTensorOp ReduceOpId = ReduceTensorOp::NORM2;
 constexpr bool PropagateNan = true;
 constexpr bool OutputIndex = false;
 
-using ReduceOperation = typename reduce_binary_operator<AccDataType, ReduceOpId>::opType;
+using ReduceOperation = typename reduce_binary_operator<ReduceOpId>::opType;
 using InElementwiseOperation =
-    typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::InElementwiseOperation;
+    typename reduce_unary_operator<ReduceOpId, true, true>::InElementwiseOperation;
 using AccElementwiseOperation =
-    typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::AccElementwiseOperation;
+    typename reduce_unary_operator<ReduceOpId, true, true>::AccElementwiseOperation;
 
-using PassThroughOp = tensor_operation::element_wise::UnaryIdentic<AccDataType, AccDataType>;
+using PassThroughOp = tensor_operation::element_wise::PassThrough;
 
 using DeviceReduceInstance_1 = DeviceReduceMultiBlock<InOutDataType,
                                                       AccDataType,
@@ -184,6 +184,13 @@ int main(int argc, char* argv[])
     if(beta != 0.0f)
         out_dev.ToDevice(out.mData.data());
 
+    InElementwiseOperation in_elementwise_op;
+    AccElementwiseOperation acc_elementwise_op;
+
+    std::tie(in_elementwise_op, acc_elementwise_op) =
+        reduce_unary_operator<ReduceOpId, true, true>::GetElementwiseOperator(
+            static_cast<int32_t>(reduce_total_length));
+
     if(do_verify)
     {
         ReductionHost<InOutDataType,
@@ -198,7 +205,13 @@ int main(int argc, char* argv[])
                       OutputIndex>
             hostReduce(in_1.mDesc, out_ref.mDesc, invariantDims, reduceDims);
 
-        hostReduce.Run(alpha, in_1.mData.data(), beta, out_ref.mData.data(), nullptr);
+        hostReduce.Run(alpha,
+                       in_1.mData.data(),
+                       beta,
+                       out_ref.mData.data(),
+                       nullptr,
+                       in_elementwise_op,
+                       acc_elementwise_op);
     };
 
     std::vector<ck::index_t> i_inLengths_1;
@@ -217,20 +230,19 @@ int main(int argc, char* argv[])
 
     auto reduce_1 = DeviceReduceInstance_1{};
 
-    auto argument_ptr_1 = reduce_1.MakeArgumentPointer(
-        i_inLengths_1,
-        i_inStrides_1,
-        i_inLengths_2,
-        i_inStrides_2,
-        reduceDims_1,
-        1.0f,
-        0.0f,
-        in_1_dev.GetDeviceBuffer(),
-        nullptr,
-        in_2_dev.GetDeviceBuffer(),
-        nullptr,
-        InElementwiseOperation{static_cast<int32_t>(reduce_total_length)},
-        PassThroughOp{});
+    auto argument_ptr_1 = reduce_1.MakeArgumentPointer(i_inLengths_1,
+                                                       i_inStrides_1,
+                                                       i_inLengths_2,
+                                                       i_inStrides_2,
+                                                       reduceDims_1,
+                                                       1.0f,
+                                                       0.0f,
+                                                       in_1_dev.GetDeviceBuffer(),
+                                                       nullptr,
+                                                       in_2_dev.GetDeviceBuffer(),
+                                                       nullptr,
+                                                       in_elementwise_op,
+                                                       PassThroughOp{});
 
     if(!reduce_1.IsSupportedArgument(argument_ptr_1.get()))
     {
@@ -243,20 +255,19 @@ int main(int argc, char* argv[])
 
     auto reduce_2 = DeviceReduceInstance_2{};
 
-    auto argument_ptr_2 = reduce_2.MakeArgumentPointer(
-        i_inLengths_2,
-        i_inStrides_2,
-        i_outLengths,
-        i_outStrides,
-        reduceDims_2,
-        alpha,
-        beta,
-        in_2_dev.GetDeviceBuffer(),
-        nullptr,
-        out_dev.GetDeviceBuffer(),
-        nullptr,
-        PassThroughOp{},
-        AccElementwiseOperation{static_cast<int32_t>(reduce_total_length)});
+    auto argument_ptr_2 = reduce_2.MakeArgumentPointer(i_inLengths_2,
+                                                       i_inStrides_2,
+                                                       i_outLengths,
+                                                       i_outStrides,
+                                                       reduceDims_2,
+                                                       alpha,
+                                                       beta,
+                                                       in_2_dev.GetDeviceBuffer(),
+                                                       nullptr,
+                                                       out_dev.GetDeviceBuffer(),
+                                                       nullptr,
+                                                       PassThroughOp{},
+                                                       acc_elementwise_op);
 
     if(!reduce_2.IsSupportedArgument(argument_ptr_2.get()))
     {
 
@@ -31,16 +31,15 @@ static void pool_host_verify(const Tensor<InDataType>& in,
                              const std::array<ck::index_t, 2>& in_left_pads,
                              const std::array<ck::index_t, 2>& /*in_right_pads*/)
 {
-    const int32_t divider = window_spatial_lengths[0] * window_spatial_lengths[1];
+    const int32_t reduceLength = window_spatial_lengths[0] * window_spatial_lengths[1];
 
-    using ReduceOperation = typename ck::reduce_binary_operator<AccDataType, ReduceOpId>::opType;
-    using InElementwiseOperation = typename ck::
-        reduce_unary_operator<AccDataType, ReduceOpId, true, true>::InElementwiseOperation;
-    using AccElementwiseOperation = typename ck::
-        reduce_unary_operator<AccDataType, ReduceOpId, true, true>::AccElementwiseOperation;
+    using ReduceOperation = typename ck::reduce_binary_operator<ReduceOpId>::opType;
 
-    const InElementwiseOperation in_elementwise_op(divider);
-    const AccElementwiseOperation acc_elementwise_op(divider);
+    auto elementwise_ops =
+        ck::reduce_unary_operator<ReduceOpId, true, true>::GetElementwiseOperator(reduceLength);
+
+    auto in_elementwise_op = std::get<0>(elementwise_ops);
+    auto acc_elementwise_op = std::get<1>(elementwise_ops);
 
     if constexpr(!OutputIndex)
     {
@@ -48,7 +47,7 @@ static void pool_host_verify(const Tensor<InDataType>& in,
             ck::detail::AccumulateWithNanCheck<PropagateNan, ReduceOperation, AccDataType>;
 
         auto f_nchw = [&](auto n, auto c, auto ho, auto wo) {
-            auto accuVal = ReduceOperation::GetIdentityValue();
+            auto accuVal = ReduceOperation::template GetIdentityValue<AccDataType>();
 
             for(ck::index_t y = 0; y < window_spatial_lengths[0]; ++y)
             {
@@ -86,7 +85,7 @@ static void pool_host_verify(const Tensor<InDataType>& in,
                                                        AccDataType,
                                                        IndexDataType>;
         auto f_nchw = [&](auto n, auto c, auto ho, auto wo) {
-            auto accuVal = ReduceOperation::GetIdentityValue();
+            auto accuVal = ReduceOperation::template GetIdentityValue<AccDataType>();
             IndexDataType accuIndex = 0;
 
             for(ck::index_t y = 0; y < window_spatial_lengths[0]; ++y)
 
@@ -41,9 +41,8 @@ using CLayout = ck::tensor_layout::gemm::RowMajor;
 using AElementOp = ck::tensor_operation::element_wise::PassThrough;
 using BElementOp = ck::tensor_operation::element_wise::PassThrough;
 using CElementOp = ck::tensor_operation::element_wise::PassThrough;
-using DsReduceOp = ck::Tuple<ck::reduce::Max<ReduceAccDataType>>;
-using DsElementOp = ck::Tuple<
-    ck::tensor_operation::element_wise::UnaryIdentic<ReduceAccDataType, ReduceAccDataType, false>>;
+using DsReduceOp = ck::Tuple<ck::reduce::Max>;
+using DsElementOp = ck::Tuple<ck::tensor_operation::element_wise::PassThrough>;
 using DGlobalMemOp =
     ck::InMemoryDataOperationEnumSequence<ck::InMemoryDataOperationEnum::AtomicMax>;
 
@@ -236,10 +235,14 @@ int main(int argc, char* argv[])
 
         for(int m = 0; m < M; ++m)
         {
-            ReduceAccDataType d_acc = d_reduce_op.GetIdentityValue();
+            ReduceAccDataType d_acc = d_reduce_op.GetIdentityValue<ReduceAccDataType>();
 
             for(int n = 0; n < N; ++n)
-                d_reduce_op(d_acc, c_m_n_host_result(m, n));
+            {
+                ReduceAccDataType curr_val =
+                    ck::type_convert<ReduceAccDataType>(c_m_n_host_result(m, n));
+                d_reduce_op(d_acc, curr_val);
+            };
+
             d_m_host_result(m) = d_acc;
         }
 
@@ -41,18 +41,15 @@ using CLayout = ck::tensor_layout::gemm::RowMajor;
 using AElementOp = ck::tensor_operation::element_wise::PassThrough;
 using BElementOp = ck::tensor_operation::element_wise::PassThrough;
 using CElementOp = ck::tensor_operation::element_wise::PassThrough;
-using D0ReduceOp = ck::reduce::Add<ReduceAccDataType>;
-using D1ReduceOp = ck::reduce::Add<ReduceAccDataType>;
+using D0ReduceOp = ck::reduce::Add;
+using D1ReduceOp = ck::reduce::Add;
 using DxsReduceOp = ck::Tuple<D0ReduceOp, D1ReduceOp>;
 
-using UnaryIdenticElementOp =
-    ck::tensor_operation::element_wise::UnaryIdentic<ReduceAccDataType, ReduceAccDataType, false>;
-using UnaryDivElementOp =
-    ck::tensor_operation::element_wise::UnaryIdentic<ReduceAccDataType, ReduceAccDataType, true>;
-using UnarySquareElementOp =
-    ck::tensor_operation::element_wise::UnarySquare<ReduceAccDataType, ReduceAccDataType, false>;
-using DxsInElementOps = ck::Tuple<UnaryIdenticElementOp, UnarySquareElementOp>;
-using DxsOutElementOps = ck::Tuple<UnaryDivElementOp, UnaryDivElementOp>;
+using UnaryIdenticElementOp = ck::tensor_operation::element_wise::PassThrough;
+using UnaryDivElementOp = ck::tensor_operation::element_wise::UnaryDivide;
+using UnarySquareElementOp = ck::tensor_operation::element_wise::UnarySquare;
+using DxsInElementOps = ck::Tuple<UnaryIdenticElementOp, UnarySquareElementOp>;
+using DxsOutElementOps = ck::Tuple<UnaryDivElementOp, UnaryDivElementOp>;
 
 using DGlobalMemOp =
     ck::InMemoryDataOperationEnumSequence<ck::InMemoryDataOperationEnum::AtomicAdd,
@@ -261,15 +258,14 @@ int main(int argc, char* argv[])
 
         for(int m = 0; m < M; ++m)
         {
-            ReduceAccDataType d0_acc = d0_reduce_op.GetIdentityValue();
-            ReduceAccDataType d1_acc = d1_reduce_op.GetIdentityValue();
+            auto d0_acc = d0_reduce_op.GetIdentityValue<ReduceAccDataType>();
+            auto d1_acc = d1_reduce_op.GetIdentityValue<ReduceAccDataType>();
 
             for(int n = 0; n < N; ++n)
             {
-                ReduceAccDataType c_val =
-                    ck::type_convert<ReduceAccDataType>(c_m_n_host_result(m, n));
-                ReduceAccDataType d0_val = 0;
-                ReduceAccDataType d1_val = 0;
+                auto c_val = ck::type_convert<ReduceAccDataType>(c_m_n_host_result(m, n));
+                ReduceAccDataType d0_val;
+                ReduceAccDataType d1_val;
 
                 dxs_in_element_op(ck::Number<0>{})(d0_val, c_val);
                 dxs_in_element_op(ck::Number<1>{})(d1_val, c_val);
 
@@ -39,16 +39,14 @@ using CLayout = ck::tensor_layout::gemm::RowMajor;
 using AElementOp = ck::tensor_operation::element_wise::PassThrough;
 using BElementOp = ck::tensor_operation::element_wise::PassThrough;
 using CElementOp = ck::tensor_operation::element_wise::PassThrough;
-using D0ReduceOp = ck::reduce::Add<ReduceAccDataType>;
-using D1ReduceOp = ck::reduce::Add<ReduceAccDataType>;
+using D0ReduceOp = ck::reduce::Add;
+using D1ReduceOp = ck::reduce::Add;
 using DxsReduceOp = ck::Tuple<D0ReduceOp, D1ReduceOp>;
 
-using UnaryIdenticElementOp =
-    ck::tensor_operation::element_wise::UnaryIdentic<ReduceAccDataType, ReduceAccDataType, false>;
-using UnarySquareElementOp =
-    ck::tensor_operation::element_wise::UnarySquare<ReduceAccDataType, ReduceAccDataType, false>;
-using DxsInElementOps = ck::Tuple<UnaryIdenticElementOp, UnarySquareElementOp>;
-using DxsOutElementOps = ck::Tuple<UnaryIdenticElementOp, UnaryIdenticElementOp>;
+using UnaryIdenticElementOp = ck::tensor_operation::element_wise::PassThrough;
+using UnarySquareElementOp = ck::tensor_operation::element_wise::UnarySquare;
+using DxsInElementOps = ck::Tuple<UnaryIdenticElementOp, UnarySquareElementOp>;
+using DxsOutElementOps = ck::Tuple<UnaryIdenticElementOp, UnaryIdenticElementOp>;
 
 using DGlobalMemOp =
     ck::InMemoryDataOperationEnumSequence<ck::InMemoryDataOperationEnum::AtomicAdd,
@@ -259,14 +257,15 @@ int main(int argc, char* argv[])
     {
         for(int m = 0; m < M; ++m)
         {
-            float d0_acc = d0_reduce_op.GetIdentityValue();
-            float d1_acc = d1_reduce_op.GetIdentityValue();
+            auto d0_acc = d0_reduce_op.GetIdentityValue<ReduceAccDataType>();
+            auto d1_acc = d1_reduce_op.GetIdentityValue<ReduceAccDataType>();
 
             for(int n = 0; n < N; ++n)
             {
-                float c_val = ck::type_convert<float>(c_g_m_n_host_result(batch, m, n));
-                float d0_val = 0;
-                float d1_val = 0;
+                auto c_val =
+                    ck::type_convert<ReduceAccDataType>(c_g_m_n_host_result(batch, m, n));
+                ReduceAccDataType d0_val;
+                ReduceAccDataType d1_val;
 
                 UnaryIdenticElementOp{}(d0_val, c_val);
                 UnarySquareElementOp{}(d1_val, c_val);
 
@@ -42,8 +42,7 @@ using ABDataType = F16;
 using CDataType = F16;
 using EltwiseComputeDataType = F32;
 
-using Add = ck::tensor_operation::binary_element_wise::
-    Add<EltwiseComputeDataType, EltwiseComputeDataType, EltwiseComputeDataType>;
+using Add = ck::tensor_operation::element_wise::Add;
 
 using DeviceElementwiseAddInstance =
     ck::tensor_operation::device::DeviceBinaryElementwise<ABDataType,
 
@@ -17,8 +17,7 @@ using ABDataType = F16;
 using CDataType = F16;
 using EltwiseComputeDataType = F32;
 
-using Add = ck::tensor_operation::binary_element_wise::
-    Add<EltwiseComputeDataType, EltwiseComputeDataType, EltwiseComputeDataType>;
+using Add = ck::tensor_operation::element_wise::Add;
 
 using DeviceElementwiseAddInstance =
     ck::tensor_operation::device::DeviceBinaryElementwise<ABDataType,
 
@@ -42,8 +42,7 @@ using ABDataType = F16;
 using CDataType = F16;
 using EltwiseComputeDataType = F32;
 
-using Add = ck::tensor_operation::binary_element_wise::
-    Add<EltwiseComputeDataType, EltwiseComputeDataType, EltwiseComputeDataType>;
+using Add = ck::tensor_operation::element_wise::Add;
 
 using DeviceElementwiseAddInstance =
     ck::tensor_operation::device::DeviceBinaryElementwise<ABDataType,
 
@@ -42,8 +42,7 @@ using ABDataType = F16;
 using CDataType = F16;
 using EltwiseComputeDataType = F32;
 
-using Add = ck::tensor_operation::binary_element_wise::
-    Add<EltwiseComputeDataType, EltwiseComputeDataType, EltwiseComputeDataType>;
+using Add = ck::tensor_operation::element_wise::Add;
 
 using DeviceElementwiseAddInstance =
     ck::tensor_operation::device::DeviceBinaryElementwise<ABDataType,
 
@@ -48,17 +48,14 @@ using AElementOp = PassThrough;
 using BElementOp = PassThrough;
 using CElementOp = ck::tensor_operation::element_wise::Relu;
 using C1ElementOp = PassThrough;
-using ReduceSumOp = ck::reduce::Add<ReduceAccDataType>;
+using ReduceSumOp = ck::reduce::Add;
 using DxsReduceOp = ck::Tuple<ReduceSumOp, ReduceSumOp>;
 
-using UnaryIdenticElementOp =
-    ck::tensor_operation::element_wise::UnaryIdentic<ReduceAccDataType, ReduceAccDataType, false>;
-using UnaryDivElementOp =
-    ck::tensor_operation::element_wise::UnaryIdentic<ReduceAccDataType, ReduceAccDataType, true>;
-using UnarySquareElementOp =
-    ck::tensor_operation::element_wise::UnarySquare<ReduceAccDataType, ReduceAccDataType, false>;
-using DxsInElementOps = ck::Tuple<UnaryIdenticElementOp, UnarySquareElementOp>;
-using DxsOutElementOps = ck::Tuple<UnaryDivElementOp, UnaryDivElementOp>;
+using UnaryIdenticElementOp = ck::tensor_operation::element_wise::PassThrough;
+using UnaryDivElementOp = ck::tensor_operation::element_wise::UnaryDivide;
+using UnarySquareElementOp = ck::tensor_operation::element_wise::UnarySquare;
+using DxsInElementOps = ck::Tuple<UnaryIdenticElementOp, UnarySquareElementOp>;
+using DxsOutElementOps = ck::Tuple<UnaryDivElementOp, UnaryDivElementOp>;
 
 using DxsGlobalMemOp =
     ck::InMemoryDataOperationEnumSequence<ck::InMemoryDataOperationEnum::AtomicAdd,
@@ -181,8 +178,8 @@ void host_gemm_layernorm(Tensor<LayerNormOutDataType>& out_m_n,
     auto reduceSumOpInst = ReduceSumOp{};
     for(int m = 0; m < M; ++m)
     {
-        AccDataType mean_acc = reduceSumOpInst.GetIdentityValue();
-        AccDataType square_mean_acc = reduceSumOpInst.GetIdentityValue();
+        auto mean_acc = reduceSumOpInst.GetIdentityValue<AccDataType>();
+        auto square_mean_acc = reduceSumOpInst.GetIdentityValue<AccDataType>();
 
         for(int n = 0; n < N; ++n)
         {
@@ -207,7 +204,12 @@ void host_gemm_layernorm(Tensor<LayerNormOutDataType>& out_m_n,
         for(int n = 0; n < N; ++n)
         {
             AccDataType out_acc = 0;
-            layerNormInst(out_acc, c_m_n(m, n), mean_m(m), meanSquare_m(m), gamma_n(n), beta_n(n));
+            layerNormInst(out_acc,
+                          static_cast<AccDataType>(c_m_n(m, n)),
+                          static_cast<AccDataType>(mean_m(m)),
+                          static_cast<AccDataType>(meanSquare_m(m)),
+                          static_cast<AccDataType>(gamma_n(n)),
+                          static_cast<AccDataType>(beta_n(n)));
             out_m_n(m, n) = static_cast<DDataType>(out_acc);
         }
     }
 
@@ -44,17 +44,14 @@ using CLayout = ck::tensor_layout::gemm::RowMajor;
 using AElementOp = ck::tensor_operation::element_wise::PassThrough;
 using BElementOp = ck::tensor_operation::element_wise::PassThrough;
 using CElementOp = ck::tensor_operation::element_wise::PassThrough;
-using ReduceSumOp = ck::reduce::Add<ReduceAccDataType>;
+using ReduceSumOp = ck::reduce::Add;
 using DxsReduceOp = ck::Tuple<ReduceSumOp, ReduceSumOp>;
 
-using UnaryIdenticElementOp =
-    ck::tensor_operation::element_wise::UnaryIdentic<ReduceAccDataType, ReduceAccDataType, false>;
-using UnaryDivElementOp =
-    ck::tensor_operation::element_wise::UnaryIdentic<ReduceAccDataType, ReduceAccDataType, true>;
-using UnarySquareElementOp =
-    ck::tensor_operation::element_wise::UnarySquare<ReduceAccDataType, ReduceAccDataType, false>;
-using DxsInElementOps = ck::Tuple<UnaryIdenticElementOp, UnarySquareElementOp>;
-using DxsOutElementOps = ck::Tuple<UnaryDivElementOp, UnaryDivElementOp>;
+using UnaryIdenticElementOp = ck::tensor_operation::element_wise::PassThrough;
+using UnaryDivElementOp = ck::tensor_operation::element_wise::UnaryDivide;
+using UnarySquareElementOp = ck::tensor_operation::element_wise::UnarySquare;
+using DxsInElementOps = ck::Tuple<UnaryIdenticElementOp, UnarySquareElementOp>;
+using DxsOutElementOps = ck::Tuple<UnaryDivElementOp, UnaryDivElementOp>;
 
 using DxsGlobalMemOp =
     ck::InMemoryDataOperationEnumSequence<ck::InMemoryDataOperationEnum::AtomicAdd,
@@ -156,13 +153,14 @@ void host_gemm_layernorm(Tensor<LayerNormOutDataType>& out_m_n,
     auto reduceSumOpInst = ReduceSumOp{};
     for(int m = 0; m < M; ++m)
    {
-        float mean_acc = reduceSumOpInst.GetIdentityValue();
-        float square_mean_acc = reduceSumOpInst.GetIdentityValue();
+        auto mean_acc = reduceSumOpInst.GetIdentityValue<ReduceAccDataType>();
+        auto square_mean_acc = reduceSumOpInst.GetIdentityValue<ReduceAccDataType>();
 
         for(int n = 0; n < N; ++n)
         {
-            ReduceAccDataType c_val = ck::type_convert<ReduceAccDataType>(c_m_n(m, n));
-            ReduceAccDataType square_c_val = 0;
+            auto c_val = ck::type_convert<ReduceAccDataType>(c_m_n(m, n));
+            auto square_c_val = reduceSumOpInst.GetIdentityValue<ReduceAccDataType>();
 
             UnarySquareElementOp{}(square_c_val, c_val);
 
             reduceSumOpInst(mean_acc, c_val);
@@ -182,7 +180,12 @@ void host_gemm_layernorm(Tensor<LayerNormOutDataType>& out_m_n,
         for(int n = 0; n < N; ++n)
         {
             float out_f32 = 0;
-            layerNormInst(out_f32, c_m_n(m, n), mean_m(m), meanSquare_m(m), gamma_n(n), beta_n(n));
+            layerNormInst(out_f32,
+                          static_cast<float>(c_m_n(m, n)),
+                          static_cast<float>(mean_m(m)),
+                          static_cast<float>(meanSquare_m(m)),
+                          static_cast<float>(gamma_n(n)),
+                          static_cast<float>(beta_n(n)));
             out_m_n(m, n) = static_cast<out_type>(out_f32);
         }
     }
 
@@ -44,7 +44,7 @@ struct BaseOperator
 
     virtual size_t GetWorkSpaceSize(const BaseArgument*) const { return 0; }
 
-    virtual void SetWorkSpacePointer(BaseArgument* p_arg, void* p_workspace) const final
+    virtual void SetWorkSpacePointer(BaseArgument* p_arg, void* p_workspace) const
     {
         assert(p_arg);
         p_arg->p_workspace_ = p_workspace;
 
@@ -557,11 +557,9 @@ struct DeviceCGemm_4Gemm_Xdl_CShuffle
 
            float ave_time = 0;
 
-            using Add =
-                ck::tensor_operation::binary_element_wise::Add<CDataType, CDataType, CDataType>;
-            using Substract = ck::tensor_operation::binary_element_wise::
-                Substract<CDataType, CDataType, CDataType>;
+            using Add = ck::tensor_operation::element_wise::Add;
+            using Subtract = ck::tensor_operation::element_wise::Subtract;
 
            using GridwiseBinAdd = GridwiseBinaryElementwise_1D<CDataType,
                                                                CDataType,
                                                                CDataType,
                                                                CDataType,
@@ -573,19 +571,19 @@ struct DeviceCGemm_4Gemm_Xdl_CShuffle
                                                                AScalarPerVector,
                                                                BScalarPerVector,
                                                                CScalarPerVector>;
-            using GridwiseBinSubstract = GridwiseBinaryElementwise_1D<CDataType,
-                                                                      CDataType,
-                                                                      CDataType,
-                                                                      CDataType,
-                                                                      CGridDesc_M,
-                                                                      CGridDesc_M,
-                                                                      CGridDesc_M,
-                                                                      Substract,
-                                                                      MPerThread,
-                                                                      AScalarPerVector,
-                                                                      BScalarPerVector,
-                                                                      CScalarPerVector>;
+            using GridwiseBinSubtract = GridwiseBinaryElementwise_1D<CDataType,
+                                                                     CDataType,
+                                                                     CDataType,
+                                                                     CDataType,
+                                                                     CGridDesc_M,
+                                                                     CGridDesc_M,
+                                                                     CGridDesc_M,
+                                                                     Subtract,
+                                                                     MPerThread,
+                                                                     AScalarPerVector,
+                                                                     BScalarPerVector,
+                                                                     CScalarPerVector>;
            const auto add_kernel = kernel_binary_elementwise_1d<GridwiseBinAdd,
                                                                 CDataType,
                                                                 CDataType,
                                                                 CDataType,
@@ -593,14 +591,14 @@ struct DeviceCGemm_4Gemm_Xdl_CShuffle
                                                                 CGridDesc_M,
                                                                 CGridDesc_M,
                                                                 Add>;
-            const auto substract_kernel = kernel_binary_elementwise_1d<GridwiseBinSubstract,
-                                                                       CDataType,
-                                                                       CDataType,
-                                                                       CDataType,
-                                                                       CGridDesc_M,
-                                                                       CGridDesc_M,
-                                                                       CGridDesc_M,
-                                                                       Substract>;
+            const auto subtract_kernel = kernel_binary_elementwise_1d<GridwiseBinSubtract,
+                                                                      CDataType,
+                                                                      CDataType,
+                                                                      CDataType,
+                                                                      CGridDesc_M,
+                                                                      CGridDesc_M,
+                                                                      CGridDesc_M,
+                                                                      Subtract>;
 
             if(GridwiseGemm::CalculateHasMainKBlockLoop(K))
             {
@@ -653,7 +651,7 @@ struct DeviceCGemm_4Gemm_Xdl_CShuffle
 
                 // c_real = aux - aux_2
                 ave_time += launch_and_time_kernel(stream_config,
-                                                   substract_kernel,
+                                                   subtract_kernel,
                                                    dim3(grid_size),
                                                    dim3(BlockSize),
                                                    0,
@@ -663,7 +661,7 @@ struct DeviceCGemm_4Gemm_Xdl_CShuffle
                                                    arg.c_grid_desc_m_,
                                                    arg.c_grid_desc_m_,
                                                    arg.c_grid_desc_m_,
-                                                   Substract{});
+                                                   Subtract{});
 
                 ave_time +=
                     launch_and_time_kernel(stream_config,
@@ -764,7 +762,7 @@ struct DeviceCGemm_4Gemm_Xdl_CShuffle
 
                 // c_real = aux - aux_2
                 ave_time += launch_and_time_kernel(stream_config,
-                                                   substract_kernel,
+                                                   subtract_kernel,
                                                    dim3(grid_size),
                                                    dim3(BlockSize),
                                                    0,
@@ -774,7 +772,7 @@ struct DeviceCGemm_4Gemm_Xdl_CShuffle
                                                    arg.c_grid_desc_m_,
                                                    arg.c_grid_desc_m_,
                                                    arg.c_grid_desc_m_,
-                                                   Substract{});
+                                                   Subtract{});
 
                 ave_time +=
                     launch_and_time_kernel(stream_config,
 
@@ -35,14 +35,13 @@ struct DevicePool2dFwd_Input_N_Hi_Wi_C_Output_N_Ho_Wo_C : public DevicePool2dFwd
 
     using IndexDataType = int32_t;
 
-    using ReduceOperation = typename reduce_binary_operator<AccDataType, ReduceOpId>::opType;
+    using ReduceOperation = typename reduce_binary_operator<ReduceOpId>::opType;
 
     using InElementwiseOperation =
-        typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::InElementwiseOperation;
+        typename reduce_unary_operator<ReduceOpId, true, true>::InElementwiseOperation;
 
     using AccElementwiseOperation =
-        typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::
-            AccElementwiseOperation;
+        typename reduce_unary_operator<ReduceOpId, true, true>::AccElementwiseOperation;
 
     static constexpr index_t InSrcOutDstVectorDim =
         0; // for NHWC, the dim C is the vector Dim for both input and output in memory, which is
@@ -178,13 +177,10 @@ struct DevicePool2dFwd_Input_N_Hi_Wi_C_Output_N_Ho_Wo_C : public DevicePool2dFwd
             invariant_lowest_length_ = C;
             reduce_lowest_length_ = window_spatial_lengths[1];
 
-            // TODO: is this correct?
-            if constexpr(ReduceOpId == ck::ReduceTensorOp::AVG)
-            {
-                ck::index_t divider = window_spatial_lengths[0] * window_spatial_lengths[1];
-                in_element_op_ = InElementwiseOperation{divider};
-                acc_element_op_ = AccElementwiseOperation{divider};
-            }
+            int32_t reduceLength = window_spatial_lengths[0] * window_spatial_lengths[1];
+
+            std::tie(in_element_op_, acc_element_op_) =
+                reduce_unary_operator<ReduceOpId, true, true>::GetElementwiseOperator(reduceLength);
         }
 
         const InDataType* p_in_dev_;
 
@@ -61,12 +61,9 @@ struct DeviceReduceMultiBlock : public DeviceReduce<InElementwiseOperation, AccE
     static constexpr bool use_multiblock =
         (OutMemoryDataOperation == InMemoryDataOperationEnum::AtomicAdd);
 
-    static constexpr bool out_type_compatible_with_atomic_op =
-        std::is_same<OutDataType, float>::value || std::is_same<OutDataType, double>::value;
-
-    static_assert(
-        !use_multiblock || (use_multiblock && out_type_compatible_with_atomic_op),
-        "The OutDataType must support the atomic operation for using MultiBlock reduction");
+    static_assert(ck::reduce::InMemoryDataOperatonSupportedOnDataType<OutMemoryDataOperation,
+                                                                      OutDataType>::value,
+                  "The OutDataType must support the specified OutMemoryDataOperation!");
 
     static_assert(!use_multiblock || (use_multiblock && !OutputIndex),
                   "MultiBlock reduction can only be used when outputing index is not required");
@@ -349,7 +346,7 @@ struct DeviceReduceMultiBlock : public DeviceReduce<InElementwiseOperation, AccE
         if constexpr(use_multiblock)
         {
             const auto identityVal =
-                ck::reduce::GetIdentityValueueForInMemoryDataOperation<OutDataType>(
+                ck::reduce::GetIdentityValueForInMemoryDataOperation<OutDataType>(
                     OutMemoryDataOperation);
 
             const auto kernel_pre =
@@ -492,7 +489,7 @@ struct DeviceReduceMultiBlock : public DeviceReduce<InElementwiseOperation, AccE
         auto str = std::stringstream();
 
         // clang-format off
-        str << "DeviceReduceMultiBlockAtomicAdd<" << BlockSize << ",";
+        str << (OutMemoryDataOperation == InMemoryDataOperationEnum::Set? "DeviceReduceBlockWise<" : "DeviceReduceMultiBlock<") << BlockSize << ",";
        str << "M_C" << MThreadClusterSize << "_S" << MThreadSliceSize << ",";
        str << "K_C" << KThreadClusterSize << "_S" << KThreadSliceSize << ",";
        str << "InSrcVectorDim_" << InSrcVectorDim << "_InSrcVectorSize_" << InSrcVectorSize << "_OutDstVectorSize_" << OutDstVectorSize << ">";
 
@@ -29,6 +29,7 @@
 #include "reduction_operator.hpp"
 #include "reduction_enums.hpp"
 #include "element_wise_operation.hpp"
+#include <tuple>
 
 namespace ck {
 
@@ -37,77 +38,69 @@ namespace ck {
 // The boolean member "indexable" are also provided in reduce_binary_operactor for
 // easier checking by the upper-layer codes in the kernels.
 
-template <typename T, ReduceTensorOp Op>
+template <ReduceTensorOp Op>
 struct reduce_binary_operator;
 
-template <typename T>
-struct reduce_binary_operator<T, ReduceTensorOp::ADD>
+template <>
+struct reduce_binary_operator<ReduceTensorOp::ADD>
 {
-    using opType = reduce::Add<T>;
-    using dataType = T;
+    using opType = reduce::Add;
 
     static constexpr bool indexable = false;
 };
 
-template <typename T>
-struct reduce_binary_operator<T, ReduceTensorOp::MUL>
+template <>
+struct reduce_binary_operator<ReduceTensorOp::MUL>
 {
-    using opType = reduce::Mul<T>;
-    using dataType = T;
+    using opType = reduce::Mul;
 
     static constexpr bool indexable = false;
 };
 
-template <typename T>
-struct reduce_binary_operator<T, ReduceTensorOp::MIN>
+template <>
+struct reduce_binary_operator<ReduceTensorOp::MIN>
 {
-    using opType = reduce::Min<T>;
-    using dataType = T;
+    using opType = reduce::Min;
 
     static constexpr bool indexable = true;
 };
 
-template <typename T>
-struct reduce_binary_operator<T, ReduceTensorOp::MAX>
+template <>
+struct reduce_binary_operator<ReduceTensorOp::MAX>
 {
-    using opType = reduce::Max<T>;
-    using dataType = T;
+    using opType = reduce::Max;
 
     static constexpr bool indexable = true;
 };
 
-template <typename T>
-struct reduce_binary_operator<T, ReduceTensorOp::AMAX>
+template <>
+struct reduce_binary_operator<ReduceTensorOp::AMAX>
 {
-    using opType = reduce::AMax<T>;
-    using dataType = T;
+    using opType = reduce::AMax;
 
     static constexpr bool indexable = true;
 };
 
-template <typename T>
-struct reduce_binary_operator<T, ReduceTensorOp::AVG>
+template <>
+struct reduce_binary_operator<ReduceTensorOp::AVG>
 {
-    using opType = reduce::Add<T>;
-    using dataType = T;
+    using opType = reduce::Add;
 
     static constexpr bool indexable = false;
 };
 
-template <typename T>
-struct reduce_binary_operator<T, ReduceTensorOp::NORM1>
+template <>
+struct reduce_binary_operator<ReduceTensorOp::NORM1>
 {
-    using opType = reduce::Add<T>;
-    using dataType = T;
+    using opType = reduce::Add;
 
     static constexpr bool indexable = false;
 };
 
-template <typename T>
-struct reduce_binary_operator<T, ReduceTensorOp::NORM2>
+template <>
+struct reduce_binary_operator<ReduceTensorOp::NORM2>
 {
-    using opType = reduce::Add<T>;
-    using dataType = T;
+    using opType = reduce::Add;
 
     static constexpr bool indexable = false;
 };
@@ -115,53 +108,101 @@ struct reduce_binary_operator<T, ReduceTensorOp::NORM2>
 // The templated struct reduce_unary_operator maps the enum Ids of Reduce operators to two unary
 // functor classes.
 // The two unary functors are called before and afer the Reduction is executed respectively
-template <typename T, ReduceTensorOp Op, bool IsFirstReduce, bool IsLastReduce>
+template <ReduceTensorOp Op, bool IsFirstReduce, bool IsLastReduce>
 struct reduce_unary_operator
 {
-    using InElementwiseOperation = tensor_operation::element_wise::UnaryIdentic<T, T>;
-    using AccElementwiseOperation = tensor_operation::element_wise::UnaryIdentic<T, T>;
+    using InElementwiseOperation = tensor_operation::element_wise::PassThrough;
+    using AccElementwiseOperation = tensor_operation::element_wise::PassThrough;
+
+    static std::tuple<InElementwiseOperation, AccElementwiseOperation>
+    GetElementwiseOperator(int32_t reduceLength)
+    {
+        (void)reduceLength;
+        return std::make_tuple(InElementwiseOperation{}, AccElementwiseOperation{});
+    };
 };
 
-template <typename T, bool IsFirstReduce>
-struct reduce_unary_operator<T, ReduceTensorOp::AVG, IsFirstReduce, true>
+template <bool IsFirstReduce>
+struct reduce_unary_operator<ReduceTensorOp::AVG, IsFirstReduce, true>
 {
-    using InElementwiseOperation = tensor_operation::element_wise::UnaryIdentic<T, T>;
-    using AccElementwiseOperation = tensor_operation::element_wise::UnaryIdentic<T, T, true>;
+    using InElementwiseOperation = tensor_operation::element_wise::PassThrough;
+    using AccElementwiseOperation = tensor_operation::element_wise::UnaryDivide;
+
+    static std::tuple<InElementwiseOperation, AccElementwiseOperation>
+    GetElementwiseOperator(int32_t reduceLength)
+    {
+        return std::make_tuple(InElementwiseOperation{}, AccElementwiseOperation{reduceLength});
+    };
 };
 
-template <typename T, bool IsLastReduce>
-struct reduce_unary_operator<T, ReduceTensorOp::NORM1, true, IsLastReduce>
+template <bool IsLastReduce>
+struct reduce_unary_operator<ReduceTensorOp::NORM1, true, IsLastReduce>
 {
-    using InElementwiseOperation = tensor_operation::element_wise::UnaryAbs<T, T>;
-    using AccElementwiseOperation = tensor_operation::element_wise::UnaryIdentic<T, T>;
+    using InElementwiseOperation = tensor_operation::element_wise::UnaryAbs;
+    using AccElementwiseOperation = tensor_operation::element_wise::PassThrough;
+
+    static std::tuple<InElementwiseOperation, AccElementwiseOperation>
+    GetElementwiseOperator(int32_t reduceLength)
+    {
+        (void)reduceLength;
+        return std::make_tuple(InElementwiseOperation{}, AccElementwiseOperation{});
+    };
 };
 
-template <typename T, bool IsLastReduce>
-struct reduce_unary_operator<T, ReduceTensorOp::AMAX, true, IsLastReduce>
+template <bool IsLastReduce>
+struct reduce_unary_operator<ReduceTensorOp::AMAX, true, IsLastReduce>
 {
-    using InElementwiseOperation = tensor_operation::element_wise::UnaryAbs<T, T>;
-    using AccElementwiseOperation = tensor_operation::element_wise::UnaryIdentic<T, T>;
+    using InElementwiseOperation = tensor_operation::element_wise::UnaryAbs;
+    using AccElementwiseOperation = tensor_operation::element_wise::PassThrough;
+
+    static std::tuple<InElementwiseOperation, AccElementwiseOperation>
+    GetElementwiseOperator(int32_t reduceLength)
+    {
+        (void)reduceLength;
+        return std::make_tuple(InElementwiseOperation{}, AccElementwiseOperation{});
+    };
 };
 
-template <typename T>
-struct reduce_unary_operator<T, ReduceTensorOp::NORM2, true, false>
+template <>
+struct reduce_unary_operator<ReduceTensorOp::NORM2, true, false>
 {
-    using InElementwiseOperation = tensor_operation::element_wise::UnarySquare<T, T>;
-    using AccElementwiseOperation = tensor_operation::element_wise::UnaryIdentic<T, T>;
+    using InElementwiseOperation = tensor_operation::element_wise::UnarySquare;
+    using AccElementwiseOperation = tensor_operation::element_wise::PassThrough;
+
+    static std::tuple<InElementwiseOperation, AccElementwiseOperation>
+    GetElementwiseOperator(int32_t reduceLength)
+    {
+        (void)reduceLength;
+        return std::make_tuple(InElementwiseOperation{}, AccElementwiseOperation{});
+    };
 };
 
-template <typename T>
-struct reduce_unary_operator<T, ReduceTensorOp::NORM2, true, true>
+template <>
+struct reduce_unary_operator<ReduceTensorOp::NORM2, true, true>
 {
-    using InElementwiseOperation = tensor_operation::element_wise::UnarySquare<T, T>;
-    using AccElementwiseOperation = tensor_operation::element_wise::UnarySqrt<T, T>;
+    using InElementwiseOperation = tensor_operation::element_wise::UnarySquare;
+    using AccElementwiseOperation = tensor_operation::element_wise::UnarySqrt;
+
+    static std::tuple<InElementwiseOperation, AccElementwiseOperation>
+    GetElementwiseOperator(int32_t reduceLength)
+    {
+        (void)reduceLength;
+        return std::make_tuple(InElementwiseOperation{}, AccElementwiseOperation{});
+    };
 };
 
-template <typename T>
-struct reduce_unary_operator<T, ReduceTensorOp::NORM2, false, true>
+template <>
+struct reduce_unary_operator<ReduceTensorOp::NORM2, false, true>
 {
-    using InElementwiseOperation = tensor_operation::element_wise::UnaryIdentic<T, T>;
-    using AccElementwiseOperation = tensor_operation::element_wise::UnarySqrt<T, T>;
+    using InElementwiseOperation = tensor_operation::element_wise::PassThrough;
+    using AccElementwiseOperation = tensor_operation::element_wise::UnarySqrt;
+
+    static std::tuple<InElementwiseOperation, AccElementwiseOperation>
+    GetElementwiseOperator(int32_t reduceLength)
+    {
+        (void)reduceLength;
+        return std::make_tuple(InElementwiseOperation{}, AccElementwiseOperation{});
+    };
 };
 
 } // end of namespace ck
 
@@ -28,100 +28,189 @@
 
 namespace ck {
 namespace tensor_operation {
-namespace binary_element_wise {
+namespace element_wise {
 
-template <typename Y, typename X1, typename X2>
-struct Add;
-
-template <>
-struct Add<double, double, double>
+struct Add
 {
+    template <typename T>
+    __host__ __device__ constexpr void operator()(T& y, const T& x0, const T& x1) const;
+
+    template <>
     __host__ __device__ constexpr void
-    operator()(double& dst, const double& src1, const double& src2) const
+    operator()<float>(float& y, const float& x0, const float& x1) const
     {
-        dst = src1 + src2;
+        y = x0 + x1;
     };
+
+    template <>
+    __host__ __device__ constexpr void
+    operator()<double>(double& y, const double& x0, const double& x1) const
+    {
+        y = x0 + x1;
+    };
+
+    // Question: should half_t be supported ?
+    template <>
+    __host__ __device__ constexpr void
+    operator()<half_t>(half_t& y, const half_t& x0, const half_t& x1) const
+    {
+        y = x0 + x1;
+    };
+
+    // Question: should bhalf_t be supported ?
+    template <>
+    __host__ __device__ constexpr void
+    operator()<bhalf_t>(bhalf_t& y, const bhalf_t& x0, const bhalf_t& x1) const
+    {
+        const float x1_tmp = ck::type_convert<float>(x0);
+        const float x2_tmp = ck::type_convert<float>(x1);
+        const float y_tmp = x1_tmp + x2_tmp;
+        y = ck::type_convert<bhalf_t>(y_tmp);
+    }
 };
 
-template <>
-struct Add<float, float, float>
+struct Subtract
 {
+    template <typename T>
+    __host__ __device__ constexpr void operator()(T& y, const T& x0, const T& x1) const;
+
+    template <>
     __host__ __device__ constexpr void
-    operator()(float& dst, const float& src1, const float& src2) const
+    operator()<float>(float& y, const float& x0, const float& x1) const
     {
-        dst = src1 + src2;
+        y = x0 - x1;
     };
+
+    template <>
+    __host__ __device__ constexpr void
+    operator()<double>(double& y, const double& x0, const double& x1) const
+    {
+        y = x0 - x1;
+    };
+
+    // Question: should half_t be supported ?
+    template <>
+    __host__ __device__ constexpr void
+    operator()<half_t>(half_t& y, const half_t& x0, const half_t& x1) const
+    {
+        y = x0 - x1;
+    };
+
+    // Question: should bhalf_t be supported ?
+    template <>
+    __host__ __device__ constexpr void
+    operator()<bhalf_t>(bhalf_t& y, const bhalf_t& x0, const bhalf_t& x1) const
+    {
+        const float x1_tmp = ck::type_convert<float>(x0);
+        const float x2_tmp = ck::type_convert<float>(x1);
+        const float y_tmp = x1_tmp - x2_tmp;
+        y = ck::type_convert<bhalf_t>(y_tmp);
+    }
 };
 
-template <>
-struct Add<half_t, half_t, half_t>
+struct AlphaBetaAdd
 {
+    AlphaBetaAdd(float alpha, float beta) : alpha_(alpha), beta_(beta){};
+
+    template <typename T>
+    __host__ __device__ constexpr void operator()(T& y, const T& x0, const T& x1) const;
+
+    template <>
     __host__ __device__ constexpr void
-    operator()(half_t& dst, const half_t& src1, const half_t& src2) const
+    operator()<float>(float& y, const float& x0, const float& x1) const
     {
-        dst = src1 + src2;
-    }
+        y = alpha_ * x0 + beta_ * x1;
+    };
+
+    template <>
+    __host__ __device__ constexpr void
+    operator()<double>(double& y, const double& x0, const double& x1) const
+    {
+        y = static_cast<double>(alpha_) * x0 + static_cast<double>(beta_) * x1;
+    };
+
+    // Question: should half_t be supported ?
+    template <>
+    __host__ __device__ constexpr void
+    operator()<half_t>(half_t& y, const half_t& x0, const half_t& x1) const
+    {
+        y = static_cast<half_t>(alpha_ * static_cast<float>(x0) + beta_ * static_cast<float>(x1));
+    };
+
+    float alpha_;
+    float beta_;
 };
 
-template <>
-struct Add<bhalf_t, bhalf_t, bhalf_t>
+struct AddRelu
 {
+    template <typename T>
+    __host__ __device__ constexpr void operator()(T& y, const T& x0, const T& x1) const;
+
+    template <>
     __host__ __device__ constexpr void
-    operator()(bhalf_t& dst, const bhalf_t& src1, const bhalf_t& src2) const
+    operator()<float>(float& y, const float& x0, const float& x1) const
     {
-        const float x1 = ck::type_convert<float>(src1);
-        const float x2 = ck::type_convert<float>(src2);
-        const float y = x1 + x2;
-        dst = ck::type_convert<bhalf_t>(y);
-    }
+        const float a = x0 + x1;
+        y = a > 0.0f ? a : 0.0f;
+    };
+
+    template <>
+    __host__ __device__ constexpr void
+    operator()<double>(double& y, const double& x0, const double& x1) const
+    {
+        const double a = x0 + x1;
+        y = a > 0.0 ? a : 0.0;
+    };
+
+    // Question: should half_t be supported ?
+    template <>
+    __host__ __device__ constexpr void
+    operator()<half_t>(half_t& y, const half_t& x0, const half_t& x1) const
+    {
+        const half_t a = x0 + x1;
+        y = a > static_cast<half_t>(0.0f) ? a : static_cast<half_t>(0.0f);
+    };
 };
 
-template <typename Y, typename X1, typename X2>
-struct Substract;
-
-template <>
-struct Substract<double, double, double>
+struct AddHardswish
 {
+    template <typename T>
+    __host__ __device__ constexpr void operator()(T& y, const T& x0, const T& x1) const;
+
+    template <>
     __host__ __device__ constexpr void
-    operator()(double& dst, const double& src1, const double& src2) const
+    operator()<float>(float& y, const float& x0, const float& x1) const
     {
-        dst = src1 - src2;
-    }
+        float a = x0 + x1;
+        float b = a + float{3};
+        float c = (b > 0) * (b > 6.0f ? 6.0f : b) * a * 0.166667f;
+        y = c;
+    };
+
+    template <>
+    __host__ __device__ constexpr void
+    operator()<double>(double& y, const double& x0, const double& x1) const
+    {
+        double a = x0 + x1;
+        double b = a + 3.0;
+        double c = (b > 0) * (b > 6.0 ? 6.0 : b) * a * 0.166667;
+        y = c;
+    };
+
+    // Question: should half_t be supported ?
+    template <>
+    __host__ __device__ constexpr void
+    operator()<half_t>(half_t& y, const half_t& x0, const half_t& x1) const
+    {
+        float a = x0 + x1;
+        float b = a + 3.0f;
+        float c = (b > 0) * (b > 6.0f ? 6.0f : b) * a * 0.166667f;
+        y = c;
+    };
 };
 
-template <>
-struct Substract<float, float, float>
-{
-    __host__ __device__ constexpr void
-    operator()(float& dst, const float& src1, const float& src2) const
-    {
-        dst = src1 - src2;
-    }
-};
+} // namespace element_wise
 
-template <>
-struct Substract<half_t, half_t, half_t>
-{
-    __host__ __device__ constexpr void
-    operator()(half_t& dst, const half_t& src1, const half_t& src2) const
-    {
-        dst = src1 - src2;
-    }
-};
-
-template <>
-struct Substract<bhalf_t, bhalf_t, bhalf_t>
-{
-    __host__ __device__ constexpr void
-    operator()(bhalf_t& dst, const bhalf_t& src1, const bhalf_t& src2) const
-    {
-        const float x1 = ck::type_convert<float>(src1);
-        const float x2 = ck::type_convert<float>(src2);
-        const float y = x1 - x2;
-        dst = ck::type_convert<bhalf_t>(y);
-    }
-};
-
-} // namespace binary_element_wise
 } // namespace tensor_operation
 } // namespace ck
 
@@ -1,97 +1,13 @@
|
||||
#pragma once
|
||||
#include "data_type.hpp"
|
||||
#include "math_v2.hpp"
|
||||
#include "unary_element_wise_operation.hpp"
|
||||
#include "binary_element_wise_operation.hpp"
|
||||
|
||||
namespace ck {
|
||||
namespace tensor_operation {
|
||||
namespace element_wise {
|
||||
|
||||
struct PassThrough
|
||||
{
|
||||
__host__ __device__ void operator()(float& y, const float& x) const { y = x; }
|
||||
|
||||
__host__ __device__ void operator()(half_t& y, const half_t& x) const { y = x; }
|
||||
|
||||
__host__ __device__ void operator()(bhalf_t& y, const bhalf_t& x) const { y = x; }
|
||||
|
||||
__host__ __device__ void operator()(int32_t& y, const int32_t& x) const { y = x; }
|
||||
|
||||
__host__ __device__ void operator()(int8_t& y, const int8_t& x) const { y = x; }
|
||||
|
||||
__host__ __device__ void operator()(double& y, const double& x) const { y = x; }
|
||||
};
|
||||
|
||||
struct Add
|
||||
{
|
||||
__host__ __device__ constexpr void operator()(float& y, const float& x0, const float& x1) const
|
||||
{
|
||||
y = x0 + x1;
|
||||
}
|
||||
|
||||
__host__ __device__ constexpr void
|
||||
operator()(half_t& y, const half_t& x0, const half_t& x1) const
|
||||
{
|
||||
// FIXME - Use float (acc type) bias in the future.
|
||||
y = x0 + x1;
|
||||
}
|
||||
};
|
||||
|
||||
struct AlphaBetaAdd
|
||||
{
|
||||
AlphaBetaAdd(float alpha, float beta) : alpha_(alpha), beta_(beta) {}
|
||||
|
||||
__host__ __device__ constexpr void operator()(float& y, const float& x0, const float& x1) const
|
||||
{
|
||||
y = alpha_ * x0 + beta_ * x1;
|
||||
}
|
||||
|
||||
__host__ __device__ constexpr void
|
||||
operator()(half_t& y, const half_t& x0, const half_t& x1) const
|
||||
{
|
||||
// FIXME - Let x0 be acc type
|
||||
y = static_cast<half_t>(alpha_ * static_cast<float>(x0) + beta_ * static_cast<float>(x1));
|
||||
}
|
||||
|
||||
float alpha_;
|
||||
float beta_;
|
||||
};
|
||||
|
||||
struct AddRelu
|
||||
{
|
||||
__host__ __device__ constexpr void operator()(float& y, const float& x0, const float& x1) const
|
||||
{
|
||||
const float a = x0 + x1;
|
||||
y = a > 0 ? a : 0;
|
||||
}
|
||||
|
||||
__host__ __device__ constexpr void
|
||||
operator()(half_t& y, const half_t& x0, const half_t& x1) const
|
||||
{
|
||||
const half_t a = x0 + x1;
|
||||
y = a > 0 ? a : 0;
|
||||
}
|
||||
};
|
||||
|
||||
struct AddHardswish
|
||||
{
|
||||
__host__ __device__ constexpr void operator()(float& y, const float& x0, const float& x1) const
|
||||
{
|
||||
float a = x0 + x1;
|
||||
float b = a + float{3};
|
||||
float c = (b > 0) * (b > float{6} ? float{6} : b) * a * float{0.166667};
|
||||
y = c;
|
||||
}
|
||||
|
||||
__host__ __device__ constexpr void
|
||||
operator()(half_t& y, const half_t& x0, const half_t& x1) const
|
||||
{
|
||||
float a = x0 + x1;
|
||||
float b = a + float{3};
|
||||
float c = (b > 0) * (b > float{6} ? float{6} : b) * a * float{0.166667};
|
||||
y = c;
|
||||
}
|
||||
};
|
||||
|
||||
struct AddReluAdd
|
||||
{
|
||||
__host__ __device__ constexpr void
|
||||
@@ -167,204 +83,41 @@ struct Relu
|
||||
|
||||
struct Normalize
|
||||
{
|
||||
Normalize(float epsilon = 1e-4) : epsilon_(epsilon) {}
|
||||
Normalize(double epsilon = 1e-4) : epsilon_(epsilon) {}
|
||||
|
||||
__host__ __device__ constexpr void operator()(float& y,
|
||||
const float& x,
|
||||
const float& mean,
|
||||
const float& mean_square,
|
||||
const float& gamma,
|
||||
const float& beta) const
|
||||
template <typename T>
|
||||
__host__ __device__ constexpr void operator()(
|
||||
T& y, const T& x, const T& mean, const T& mean_square, const T& gamma, const T& beta) const;
|
||||
|
||||
template <>
|
||||
__host__ __device__ constexpr void operator()<float>(float& y,
|
||||
const float& x,
|
||||
const float& mean,
|
||||
const float& mean_square,
|
||||
const float& gamma,
|
||||
const float& beta) const
|
||||
{
|
||||
using ck::math::sqrt;
|
||||
|
||||
float variance = mean_square - (mean * mean);
|
||||
y = ((x - mean) / sqrtf(variance + epsilon_)) * gamma + beta;
|
||||
}
|
||||
|
||||
float epsilon_;
|
||||
};
|
||||
|
||||
// Unary operators are usually called element-wisely before/after the reduction is executed on the
|
||||
// elements. They are needed for easy implementation of reduction types of AVG, NRM1, NRM2
|
||||
|
||||
template <typename Y, typename X, bool HasDividing = false>
|
||||
struct UnaryIdentic;
|
||||
|
||||
template <>
|
||||
struct UnaryIdentic<float, float, false>
|
||||
{
|
||||
__host__ __device__ UnaryIdentic(const int32_t divider = 1) { (void)divider; };
|
||||
|
||||
__host__ __device__ void operator()(float& y, const float& x) const { y = x; };
|
||||
};
|
||||
|
||||
template <>
|
||||
struct UnaryIdentic<float, float, true>
|
||||
{
|
||||
__host__ __device__ UnaryIdentic(const int32_t divider = 1) { divider_ = divider; };
|
||||
|
||||
__host__ __device__ void operator()(float& y, const float& x) const
|
||||
{
|
||||
y = x / type_convert<float>(divider_);
|
||||
y = ((x - mean) / sqrt(variance + static_cast<float>(epsilon_))) * gamma + beta;
|
||||
};
|
||||
|
||||
int32_t divider_ = 1;
|
||||
};
|
||||
|
||||
template <>
|
||||
struct UnaryIdentic<half_t, half_t, false>
|
||||
{
|
||||
__host__ __device__ UnaryIdentic(const int32_t divider = 1) { (void)divider; };
|
||||
|
||||
__host__ __device__ void operator()(half_t& y, const half_t& x) const { y = x; };
|
||||
};
|
||||
|
||||
template <>
|
||||
struct UnaryIdentic<double, double, false>
|
||||
{
|
||||
__host__ __device__ UnaryIdentic(const int32_t divider = 1) { (void)divider; };
|
||||
|
||||
__host__ __device__ void operator()(double& y, const double& x) const { y = x; };
|
||||
};
|
||||
|
||||
template <>
|
||||
struct UnaryIdentic<double, double, true>
|
||||
{
|
||||
__host__ __device__ UnaryIdentic(const int32_t divider = 1) { divider_ = divider; };
|
||||
|
||||
__host__ __device__ void operator()(double& y, const double& x) const
|
||||
template <>
|
||||
__host__ __device__ constexpr void operator()<double>(double& y,
|
||||
const double& x,
|
||||
const double& mean,
|
||||
const double& mean_square,
|
||||
const double& gamma,
|
||||
const double& beta) const
|
||||
{
|
||||
y = x / type_convert<double>(divider_);
|
||||
using ck::math::sqrt;
|
||||
|
||||
double variance = mean_square - (mean * mean);
|
||||
y = ((x - mean) / sqrt(variance + epsilon_)) * gamma + beta;
|
||||
};
|
||||
|
||||
int32_t divider_ = 1;
|
||||
};
|
||||
|
||||
template <>
|
||||
struct UnaryIdentic<int32_t, int32_t, false>
|
||||
{
|
||||
__host__ __device__ UnaryIdentic(const int32_t divider = 1) { (void)divider; };
|
||||
|
||||
__host__ __device__ void operator()(int32_t& y, const int32_t& x) const { y = x; };
|
||||
};
|
||||
|
||||
template <>
|
||||
struct UnaryIdentic<int32_t, int32_t, true>
|
||||
{
|
||||
__host__ __device__ UnaryIdentic(const int32_t divider = 1) { divider_ = divider; };
|
||||
|
||||
__host__ __device__ void operator()(int32_t& y, const int32_t& x) const { y = x / divider_; };
|
||||
|
||||
int32_t divider_ = 1;
|
||||
};
|
||||
|
||||
template <>
|
||||
struct UnaryIdentic<int8_t, int8_t, false>
|
||||
{
|
||||
__host__ __device__ UnaryIdentic(const int8_t divider = 1) { (void)divider; };
|
||||
|
||||
__host__ __device__ void operator()(int8_t& y, const int8_t& x) const { y = x; };
|
||||
};
|
||||
|
||||
template <typename Y, typename X, bool HasDividing = false>
struct UnarySquare;

template <>
struct UnarySquare<float, float, false>
{
    __host__ __device__ UnarySquare(const int32_t divider = 1) { (void)divider; };

    __host__ __device__ void operator()(float& y, const float& x) const { y = x * x; };
};

template <>
struct UnarySquare<float, float, true>
{
    __host__ __device__ UnarySquare(const int32_t divider = 1) { divider_ = divider; };

    __host__ __device__ void operator()(float& y, const float& x) const
    {
        y = x * x / type_convert<float>(divider_);
    };

    int32_t divider_ = 1;
};

template <>
struct UnarySquare<double, double, false>
{
    __host__ __device__ UnarySquare(const int32_t divider = 1) { (void)divider; };

    __host__ __device__ void operator()(double& y, const double& x) const { y = x * x; };
};

template <>
struct UnarySquare<double, double, true>
{
    __host__ __device__ UnarySquare(const int32_t divider = 1) { divider_ = divider; };

    __host__ __device__ void operator()(double& y, const double& x) const
    {
        y = x * x / type_convert<double>(divider_);
    };

    int32_t divider_ = 1;
};

template <typename Y, typename X>
struct UnaryAbs;

template <>
struct UnaryAbs<float, float>
{
    __host__ __device__ UnaryAbs(const int32_t divider = 1) { (void)divider; };

    __host__ __device__ void operator()(float& y, const float& x) const { y = ck::math::abs(x); };
};

template <>
struct UnaryAbs<half_t, half_t>
{
    __host__ __device__ UnaryAbs(const int32_t divider = 1) { (void)divider; };

    __host__ __device__ void operator()(half_t& y, const half_t& x) const { y = ck::math::abs(x); };
};

template <>
struct UnaryAbs<double, double>
{
    __host__ __device__ UnaryAbs(const int32_t divider = 1) { (void)divider; };

    __host__ __device__ void operator()(double& y, const double& x) const { y = ck::math::abs(x); };
};

template <>
struct UnaryAbs<int8_t, int8_t>
{
    __host__ __device__ UnaryAbs(const int32_t divider = 1) { (void)divider; };

    __host__ __device__ void operator()(int8_t& y, const int8_t& x) const { y = ck::math::abs(x); };
};

template <typename Y, typename X>
struct UnarySqrt;

template <>
struct UnarySqrt<float, float>
{
    __host__ __device__ UnarySqrt(const int32_t divider = 1) { (void)divider; };

    __host__ __device__ void operator()(float& y, const float& x) const { y = ck::math::sqrt(x); };
};

template <>
struct UnarySqrt<double, double>
{
    __host__ __device__ UnarySqrt(const int32_t divider = 1) { (void)divider; };

    __host__ __device__ void operator()(double& y, const double& x) const
    {
        y = ck::math::sqrt(x);
    };
};

template <typename Y, typename X>

@@ -0,0 +1,80 @@
#pragma once
#include "data_type.hpp"
#include "math_v2.hpp"

namespace ck {
namespace tensor_operation {
namespace element_wise {

struct PassThrough
{
    template <typename T>
    __host__ __device__ void operator()(T& y, const T& x) const
    {
        static_assert(is_same<T, float>::value || is_same<T, double>::value ||
                          is_same<T, half_t>::value || is_same<T, bhalf_t>::value ||
                          is_same<T, int32_t>::value || is_same<T, int8_t>::value,
                      "Data type is not supported by this operation!");

        y = x;
    };
};

struct UnaryDivide
{
    __host__ __device__ UnaryDivide(const int32_t divider = 1) : divider_(divider){};

    template <typename T>
    __host__ __device__ void operator()(T& y, const T& x) const
    {
        static_assert(is_same<T, float>::value || is_same<T, double>::value ||
                          is_same<T, int32_t>::value,
                      "Data type is not supported by this operation!");

        y = x / type_convert<T>(divider_);
    };

    int32_t divider_ = 1;
};

struct UnarySquare
{
    template <typename T>
    __host__ __device__ void operator()(T& y, const T& x) const
    {
        static_assert(is_same<T, float>::value || is_same<T, double>::value,
                      "Data type is not supported by this operation!");

        y = x * x;
    };
};

struct UnaryAbs
{
    template <typename T>
    __host__ __device__ void operator()(T& y, const T& x) const
    {
        static_assert(is_same<T, float>::value || is_same<T, double>::value ||
                          is_same<T, half_t>::value || is_same<T, int32_t>::value ||
                          is_same<T, int8_t>::value,
                      "Data type is not supported by this operation!");

        y = ck::math::abs(x);
    };
};

struct UnarySqrt
{
    template <typename T>
    __host__ __device__ void operator()(T& y, const T& x) const
    {
        static_assert(is_same<T, float>::value || is_same<T, double>::value,
                      "Data type is not supported by this operation!");

        y = ck::math::sqrt(x);
    };
};

} // namespace element_wise
} // namespace tensor_operation
} // namespace ck
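The structs above carry no data-type template parameters; unsupported types are rejected inside operator() by static_assert. A minimal host-only sketch of the same pattern follows, showing how an AVG reduction composes an Add accumulation with a dividing accumulator-side op. PassThroughSketch and UnaryDivideSketch are stand-ins written for this sketch, not the ck definitions.

#include <cstdint>
#include <iostream>
#include <type_traits>

// Stand-in mirroring the new non-templated element-wise operators: the struct
// itself is type-agnostic; operator() is a member template that gates the
// allowed data types at compile time via static_assert.
struct PassThroughSketch
{
    template <typename T>
    void operator()(T& y, const T& x) const
    {
        static_assert(std::is_same<T, float>::value || std::is_same<T, double>::value,
                      "Data type is not supported by this operation!");
        y = x;
    }
};

struct UnaryDivideSketch
{
    explicit UnaryDivideSketch(int32_t divider = 1) : divider_(divider) {}

    template <typename T>
    void operator()(T& y, const T& x) const
    {
        static_assert(std::is_same<T, float>::value || std::is_same<T, double>::value,
                      "Data type is not supported by this operation!");
        y = x / static_cast<T>(divider_);
    }

    int32_t divider_ = 1;
};

int main()
{
    // AVG over 4 values = Add reduction + UnaryDivide(4) applied to the accumulator.
    float data[4] = {1.0f, 2.0f, 3.0f, 4.0f};
    float acc     = 0.0f; // identity value of Add

    PassThroughSketch in_op;     // pre-reduction element-wise op (identity for AVG)
    UnaryDivideSketch acc_op{4}; // post-reduction op (divide by element count)

    for(float v : data)
    {
        float t;
        in_op(t, v);
        acc += t;
    }

    float avg;
    acc_op(avg, acc);
    std::cout << "avg = " << avg << '\n'; // prints 2.5
}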
@@ -171,15 +171,15 @@ struct GridwiseReduction_mk_to_m_multiblock
                                  AccDataType beta,
                                  OutDataType* const __restrict__ p_out_value_global)
    {
        const auto identityVal = ReduceOperation::GetIdentityValue();
        const auto identityVal = ReduceOperation::template GetIdentityValue<AccDataType>();

        // LDS
        __shared__ AccDataType p_reduce_work_buffer[BlockSize];

        const auto in_global_val_buf =
            make_dynamic_buffer<AddressSpaceEnum::Global>(p_in_value_global,
                                                          in_grid_desc_m_k.GetElementSpaceSize(),
                                                          type_convert<InDataType>(identityVal));
        const auto in_global_val_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
            p_in_value_global,
            in_grid_desc_m_k.GetElementSpaceSize(),
            ReduceOperation::template GetIdentityValue<InDataType>());
        auto out_global_val_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
            p_out_value_global, out_grid_desc_m.GetElementSpaceSize());

@@ -358,12 +358,12 @@ struct GridwiseReduction_mk_to_m_multiblock
        __shared__ AccDataType p_reduce_work_val_buffer[BlockSize];
        __shared__ IndexDataType p_reduce_work_idx_buffer[BlockSize];

        const auto identityVal = ReduceOperation::GetIdentityValue();
        const auto identityVal = ReduceOperation::template GetIdentityValue<AccDataType>();

        const auto in_global_val_buf =
            make_dynamic_buffer<AddressSpaceEnum::Global>(p_in_value_global,
                                                          in_grid_desc_m_k.GetElementSpaceSize(),
                                                          type_convert<InDataType>(identityVal));
        const auto in_global_val_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
            p_in_value_global,
            in_grid_desc_m_k.GetElementSpaceSize(),
            ReduceOperation::template GetIdentityValue<InDataType>());
        const auto in_global_idx_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
            p_in_index_global, in_grid_desc_m_k.GetElementSpaceSize());
        auto out_global_val_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(

@@ -135,12 +135,12 @@ struct GridwiseReduction_mk_to_m_threadwise
                                       ReduceOperation,
                                       PropagateNan>;

        const auto identityVal = ReduceOperation::GetIdentityValue();
        const auto identityVal = ReduceOperation::template GetIdentityValue<AccDataType>();

        const auto in_global_val_buf =
            make_dynamic_buffer<AddressSpaceEnum::Global>(p_in_value_global,
                                                          in_grid_desc_m_k.GetElementSpaceSize(),
                                                          type_convert<InDataType>(identityVal));
        const auto in_global_val_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
            p_in_value_global,
            in_grid_desc_m_k.GetElementSpaceSize(),
            ReduceOperation::template GetIdentityValue<InDataType>());
        auto dst_global_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
            p_out_value_global, out_grid_desc_m.GetElementSpaceSize());

@@ -276,12 +276,12 @@ struct GridwiseReduction_mk_to_m_threadwise

        (void)acc_elementwise_op;

        const auto identityVal = ReduceOperation::GetIdentityValue();
        const auto identityVal = ReduceOperation::template GetIdentityValue<AccDataType>();

        const auto in_global_val_buf =
            make_dynamic_buffer<AddressSpaceEnum::Global>(p_in_value_global,
                                                          in_grid_desc_m_k.GetElementSpaceSize(),
                                                          type_convert<InDataType>(identityVal));
        const auto in_global_val_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
            p_in_value_global,
            in_grid_desc_m_k.GetElementSpaceSize(),
            ReduceOperation::template GetIdentityValue<InDataType>());
        const auto in_global_idx_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
            p_in_index_global, in_grid_desc_m_k.GetElementSpaceSize());

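The hunks above all make the same substitution: the identity value is now requested in the type each buffer needs, instead of being fixed by a class-level template parameter. A host-only sketch of why that matters follows; MaxSketch is a stand-in written for this sketch, not the ck::reduce::Max definition.

#include <iostream>
#include <limits>
#include <vector>

// One reduction operation, two identity types: the input buffer is padded in
// float while the accumulator runs in double, both from the same function.
struct MaxSketch
{
    template <typename T>
    static constexpr T GetIdentityValue()
    {
        return std::numeric_limits<T>::lowest();
    }

    template <typename T>
    void operator()(T& a, T b) const
    {
        if(a < b)
            a = b;
    }
};

int main()
{
    // Pad the input in InDataType (float)...
    std::vector<float> in(8, MaxSketch::GetIdentityValue<float>());
    in[3] = 7.0f;

    // ...and accumulate in AccDataType (double).
    double acc = MaxSketch::GetIdentityValue<double>();
    MaxSketch op;
    for(float v : in)
        op(acc, static_cast<double>(v));

    std::cout << acc << '\n'; // prints 7
}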
@@ -927,7 +927,8 @@ struct GridwiseGemmBiasAddReduce_k0mk1_k0nk1_mn_xdl_cshuffle_v1
                                                           false>;

                    // Global write Gemm shuffle + reduction
                    const auto d_zeroVal = DReduceOperation::GetIdentityValue();
                    const auto d_zeroVal =
                        DReduceOperation::template GetIdentityValue<FloatReduceAcc>();

                    static_for<0, mreduce_per_thread, 1>{}(
                        [&](auto I) { d_thread_buf(I) = d_zeroVal; });

@@ -816,7 +816,8 @@ struct GridwiseGemmReduce_k0mk1_k0nk1_mn_xdl_cshuffle_v1
                                                           false>;

                    // Global write Gemm shuffle + reduction
                    const auto d_identityVal = DReduceOperation::GetIdentityValue();
                    const auto d_identityVal =
                        DReduceOperation::template GetIdentityValue<FloatReduceAcc>();

                    static_for<0, mreduce_per_thread, 1>{}(
                        [&](auto I) { d_thread_buf(I) = d_identityVal; });

@@ -37,7 +37,7 @@ __global__ void kernel_buffer_set_value(const Grid1dBufferDescType grid_1d_buffe

{

    using PassThroughOp = tensor_operation::element_wise::UnaryIdentic<DataType, DataType>;
    using PassThroughOp = tensor_operation::element_wise::PassThrough;

    constexpr auto I0 = Number<0>{};

@@ -28,6 +28,7 @@

#include "config.hpp"
#include "data_type.hpp"
#include "type.hpp"

namespace ck {

@@ -54,64 +55,92 @@ namespace reduce {
// accumulated index also need be
// changed.

template <class T>
struct Add
{
    using dataType = T;
    template <typename T>
    __host__ __device__ static constexpr T GetIdentityValue()
    {
        return type_convert<T>(0.0f);
    };

    __host__ __device__ static constexpr T GetIdentityValue() { return static_cast<T>(0.0f); };

    __device__ static constexpr bool
    __host__ __device__ static constexpr bool
    IsCompatibleInMemoryDataOperation(InMemoryDataOperationEnum operation)
    {
        return operation == InMemoryDataOperationEnum::AtomicAdd ||
               operation == InMemoryDataOperationEnum::Set;
    };

    __host__ __device__ inline constexpr void operator()(T& a, T b) const { a = a + b; }
    template <typename T>
    __host__ __device__ inline constexpr void operator()(T& a, T b) const
    {
        static_assert(is_same<T, float>::value || is_same<T, double>::value ||
                          is_same<T, int32_t>::value,
                      "The data type is not supported by the Add accumulator!");

        a = a + b;
    }
};

template <class T>
struct Mul
{
    using dataType = T;
    template <typename T>
    __host__ __device__ static constexpr T GetIdentityValue()
    {
        return type_convert<T>(1.0f);
    };

    __host__ __device__ static constexpr T GetIdentityValue() { return static_cast<T>(1.0f); };

    __device__ static constexpr bool
    __host__ __device__ static constexpr bool
    IsCompatibleInMemoryDataOperation(InMemoryDataOperationEnum operation)
    {
        return operation == InMemoryDataOperationEnum::Set;
    };

    __host__ __device__ inline constexpr void operator()(T& a, T b) const { a = a * b; }
    template <typename T>
    __host__ __device__ inline constexpr void operator()(T& a, T b) const
    {
        static_assert(is_same<T, float>::value || is_same<T, double>::value ||
                          is_same<T, int32_t>::value,
                      "The data type is not supported by the Mul accumulator!");

        a = a * b;
    }
};

template <class T>
struct Max
{
    using dataType = T;

    template <typename T>
    __host__ __device__ static constexpr T GetIdentityValue()
    {
        return NumericLimits<T>::Lowest();
    };

    __device__ static constexpr bool
    __host__ __device__ static constexpr bool
    IsCompatibleInMemoryDataOperation(InMemoryDataOperationEnum operation)
    {
        // ToChange: atomic_max to be added
        return operation == InMemoryDataOperationEnum::Set;
    };

    template <typename T>
    __host__ __device__ inline constexpr void operator()(T& a, T b) const
    {
        static_assert(is_same<T, float>::value || is_same<T, double>::value ||
                          is_same<T, half_t>::value || is_same<T, int32_t>::value ||
                          is_same<T, int8_t>::value,
                      "The data type is not supported by the Max accumulator!");

        if(a < b)
            a = b;
    }

    template <typename T>
    __host__ __device__ inline constexpr void operator()(T& a, T b, bool& changed) const
    {
        static_assert(is_same<T, float>::value || is_same<T, double>::value ||
                          is_same<T, half_t>::value || is_same<T, int32_t>::value ||
                          is_same<T, int8_t>::value,
                      "The data type is not supported by the Max accumulator!");

        if(a < b)
        {
            a = b;
@@ -120,28 +149,41 @@ struct Max
    }
};

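The second operator() overload reports through `changed` whether the running value was replaced, which is what index-returning reductions (MIN/MAX/AMAX with OutputIndex) need to latch the winning position. A host-only sketch of that driving loop follows; MaxSketch is illustrative, not the ck definition.

#include <cstdint>
#include <iostream>
#include <vector>

struct MaxSketch
{
    template <typename T>
    void operator()(T& a, T b, bool& changed) const
    {
        if(a < b)
        {
            a       = b;
            changed = true;
        }
        else
            changed = false;
    }
};

int main()
{
    std::vector<float> data{3.0f, 9.0f, 1.0f, 9.5f, 2.0f};

    float   best  = data[0];
    int32_t index = 0;
    MaxSketch op;

    for(int32_t i = 1; i < static_cast<int32_t>(data.size()); ++i)
    {
        bool changed = false;
        op(best, data[i], changed);
        if(changed)
            index = i; // latch the index of the new maximum
    }

    std::cout << "max " << best << " at " << index << '\n'; // max 9.5 at 3
}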
template <class T>
struct Min
{
    using dataType = T;
    template <typename T>
    __host__ __device__ static constexpr T GetIdentityValue()
    {
        return NumericLimits<T>::Max();
    };

    __host__ __device__ static constexpr T GetIdentityValue() { return NumericLimits<T>::Max(); };

    __device__ static constexpr bool
    __host__ __device__ static constexpr bool
    IsCompatibleInMemoryDataOperation(InMemoryDataOperationEnum operation)
    {
        // ToChange: atomic_min to be added
        return operation == InMemoryDataOperationEnum::Set;
    };

    template <typename T>
    __host__ __device__ inline constexpr void operator()(T& a, T b) const
    {
        static_assert(is_same<T, float>::value || is_same<T, double>::value ||
                          is_same<T, half_t>::value || is_same<T, int32_t>::value ||
                          is_same<T, int8_t>::value,
                      "The data type is not supported by the Min accumulator!");

        if(a > b)
            a = b;
    }

    template <typename T>
    __host__ __device__ inline constexpr void operator()(T& a, T b, bool& changed) const
    {
        static_assert(is_same<T, float>::value || is_same<T, double>::value ||
                          is_same<T, half_t>::value || is_same<T, int32_t>::value ||
                          is_same<T, int8_t>::value,
                      "The data type is not supported by the Min accumulator!");

        if(a > b)
        {
            a = b;
@@ -150,28 +192,41 @@ struct Min
    }
};

template <class T>
struct AMax
{
    using dataType = T;
    template <typename T>
    __host__ __device__ static constexpr T GetIdentityValue()
    {
        return type_convert<T>(0.0f);
    };

    __host__ __device__ static constexpr T GetIdentityValue() { return static_cast<T>(0.0f); };

    __device__ static constexpr bool
    __host__ __device__ static constexpr bool
    IsCompatibleInMemoryDataOperation(InMemoryDataOperationEnum operation)
    {
        // ToChange: atomic_max to be added
        return operation == InMemoryDataOperationEnum::Set;
    };

    template <typename T>
    __host__ __device__ inline constexpr void operator()(T& a, T b) const
    {
        static_assert(is_same<T, float>::value || is_same<T, double>::value ||
                          is_same<T, half_t>::value || is_same<T, int32_t>::value ||
                          is_same<T, int8_t>::value,
                      "The data type is not supported by the AMax accumulator!");

        if(a < b)
            a = b;
    }

    template <typename T>
    __host__ __device__ inline constexpr void operator()(T& a, T b, bool& changed) const
    {
        static_assert(is_same<T, float>::value || is_same<T, double>::value ||
                          is_same<T, half_t>::value || is_same<T, int32_t>::value ||
                          is_same<T, int8_t>::value,
                      "The data type is not supported by the AMax accumulator!");

        if(a < b)
        {
            a = b;
@@ -181,7 +236,7 @@ struct AMax
};

template <typename T>
T GetIdentityValueueForInMemoryDataOperation(InMemoryDataOperationEnum operation)
constexpr T GetIdentityValueForInMemoryDataOperation(InMemoryDataOperationEnum operation)
{
    T result = ck::type_convert<T>(0.0f);

@@ -191,6 +246,44 @@ T GetIdentityValueueForInMemoryDataOperation(InMemoryDataOperationEnum operation
    return (result);
};

template <InMemoryDataOperationEnum Operation, typename DataType>
struct InMemoryDataOperatonSupportedOnDataType
{
    static constexpr bool value = false;
};

template <typename DataType>
struct InMemoryDataOperatonSupportedOnDataType<InMemoryDataOperationEnum::AtomicAdd, DataType>
{
    static constexpr bool value =
        is_same<DataType, float>::value || is_same<DataType, double>::value;
};

template <typename DataType>
struct InMemoryDataOperatonSupportedOnDataType<InMemoryDataOperationEnum::AtomicMax, DataType>
{
    static constexpr bool value =
        is_same<DataType, float>::value || is_same<DataType, double>::value;
};

template <typename DataType>
struct InMemoryDataOperatonSupportedOnDataType<InMemoryDataOperationEnum::Set, DataType>
{
    static constexpr bool value =
        is_same<DataType, float>::value || is_same<DataType, double>::value ||
        is_same<DataType, half_t>::value || is_same<DataType, bhalf_t>::value ||
        is_same<DataType, int8_t>::value || is_same<DataType, int32_t>::value;
};

template <typename DataType>
struct InMemoryDataOperatonSupportedOnDataType<InMemoryDataOperationEnum::Add, DataType>
{
    static constexpr bool value =
        is_same<DataType, float>::value || is_same<DataType, double>::value ||
        is_same<DataType, half_t>::value || is_same<DataType, int8_t>::value ||
        is_same<DataType, int32_t>::value;
};

}; // end of namespace reduce

} // end of namespace ck

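The trait defaults to false and whitelists (operation, data type) pairs per specialization, so an unsupported combination can be rejected at compile time. A self-contained sketch of that gating pattern follows; the enum, trait, and launch_store function here are stand-ins, not the ck API.

#include <cstdint>
#include <type_traits>

enum class MemOp
{
    Set,
    AtomicAdd
};

// Default: nothing is supported until a specialization says otherwise.
template <MemOp Op, typename T>
struct MemOpSupported : std::false_type
{
};

// AtomicAdd is only whitelisted for float/double, mirroring the new trait.
template <typename T>
struct MemOpSupported<MemOp::AtomicAdd, T>
    : std::integral_constant<bool,
                             std::is_same<T, float>::value || std::is_same<T, double>::value>
{
};

template <typename T>
struct MemOpSupported<MemOp::Set, T> : std::true_type
{
};

template <MemOp Op, typename T>
void launch_store()
{
    static_assert(MemOpSupported<Op, T>::value,
                  "This InMemoryDataOperation is not supported on this data type!");
    // ... issue the store ...
}

int main()
{
    launch_store<MemOp::AtomicAdd, float>(); // ok
    launch_store<MemOp::Set, int32_t>();     // ok
    // launch_store<MemOp::AtomicAdd, int32_t>(); // would fail to compile
}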
@@ -174,15 +174,18 @@ struct ReductionHost
             const InDataType* in_data,
             float beta,
             OutDataType* out_data,
             IndexDataType* out_indices)
             IndexDataType* out_indices,
             InElementwiseOperation in_elementwise_op,
             AccElementwiseOperation acc_elementwise_op)
    {
        if constexpr(OutputIndex)
        {
            RunImpl_with_index(alpha, in_data, beta, out_data, out_indices);
            RunImpl_with_index(
                alpha, in_data, beta, out_data, out_indices, in_elementwise_op, acc_elementwise_op);
        }
        else
        {
            RunImpl_no_index(alpha, in_data, beta, out_data);
            RunImpl_no_index(alpha, in_data, beta, out_data, in_elementwise_op, acc_elementwise_op);
        };
    };

@@ -190,7 +193,9 @@ struct ReductionHost
                            const InDataType* in_data,
                            float beta,
                            OutDataType* out_data,
                            IndexDataType* out_indices)
                            IndexDataType* out_indices,
                            InElementwiseOperation in_elementwise_op,
                            AccElementwiseOperation acc_elementwise_op)
    {
        using ck::float_equal_one;
        using ck::float_equal_zero;
@@ -200,12 +205,10 @@ struct ReductionHost
                                                          ReduceOperation,
                                                          AccDataType,
                                                          IndexDataType>;
        InElementwiseOperation in_elementwise_op(divider);
        AccElementwiseOperation acc_elementwise_op(divider);

        if constexpr(NumInvariantDim == 0)
        {
            AccDataType accuVal = ReduceOperation::GetIdentityValue();
            AccDataType accuVal = ReduceOperation::template GetIdentityValue<AccDataType>();
            IndexDataType accuIndex = 0;

            for(std::size_t i = 0; i < reduce_dim_indexes.size(); i++)
@@ -236,7 +239,7 @@ struct ReductionHost
        else
        {
            auto thread_reduce_func = [&](auto invariant_index) {
                AccDataType accuVal = ReduceOperation::GetIdentityValue();
                AccDataType accuVal = ReduceOperation::template GetIdentityValue<AccDataType>();
                IndexDataType accuIndex = 0;

                auto offset_invariant =
@@ -297,7 +300,12 @@ struct ReductionHost
        };
    };

    void RunImpl_no_index(float alpha, const InDataType* in_data, float beta, OutDataType* out_data)
    void RunImpl_no_index(float alpha,
                          const InDataType* in_data,
                          float beta,
                          OutDataType* out_data,
                          InElementwiseOperation in_elementwise_op,
                          AccElementwiseOperation acc_elementwise_op)
    {
        using ck::float_equal_one;
        using ck::float_equal_zero;
@@ -306,12 +314,9 @@ struct ReductionHost
        using Accumulation =
            ck::detail::AccumulateWithNanCheck<PropagateNan, ReduceOperation, AccDataType>;

        InElementwiseOperation in_elementwise_op(divider);
        AccElementwiseOperation acc_elementwise_op(divider);

        if constexpr(NumInvariantDim == 0)
        {
            AccDataType accuVal = ReduceOperation::GetIdentityValue();
            AccDataType accuVal = ReduceOperation::template GetIdentityValue<AccDataType>();

            for(const auto& reduce_index : reduce_dim_indexes)
            {
@@ -338,7 +343,7 @@ struct ReductionHost
        else
        {
            auto thread_reduce_func = [&](auto invariant_index) {
                AccDataType accuVal = ReduceOperation::GetIdentityValue();
                AccDataType accuVal = ReduceOperation::template GetIdentityValue<AccDataType>();

                auto offset_invariant =
                    get_offset_from_index<NumInvariantDim>(invariantStrides, invariant_index);

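After this change the host reference no longer builds the element-wise functors from a divider internally; the caller constructs them and passes them into Run(). A minimal host-only sketch of that calling convention follows; run_host_reduce and its names are illustrative, not the ReductionHost API.

#include <iostream>
#include <vector>

// The element-wise functors arrive as ready-made arguments; the reduction loop
// applies in_op before accumulating and acc_op once at the end.
template <typename InOp, typename AccOp>
float run_host_reduce(const std::vector<float>& in, InOp in_op, AccOp acc_op)
{
    float acc = 0.0f; // identity value of Add
    for(float v : in)
    {
        float t;
        in_op(t, v); // pre-reduction element-wise op
        acc += t;
    }
    float out;
    acc_op(out, acc); // post-reduction element-wise op
    return out;
}

int main()
{
    std::vector<float> in{1.0f, 2.0f, 3.0f, 4.0f};

    auto square       = [](float& y, const float& x) { y = x * x; };
    auto pass_through = [](float& y, const float& x) { y = x; };

    // Sum of squares (the accumulation part of NRM2) with caller-supplied ops.
    std::cout << run_host_reduce(in, square, pass_through) << '\n'; // prints 30
}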
@@ -106,9 +106,8 @@ struct ReferenceConvBwdData : public device::BaseOperator
                    }
                }

                float v_in;
                arg.in_element_op_(v_in, v_acc);
                arg.input_(n, c, wi) = ck::type_convert<InDataType>(v_in);
                arg.in_element_op_(v_acc, v_acc);
                arg.input_(n, c, wi) = ck::type_convert<InDataType>(v_acc);
            };

            make_ParallelTensorFunctor(f_ncw,

@@ -66,8 +66,8 @@ struct ReferenceGemmBias2D : public device::BaseOperator

                for(int k = 0; k < K; ++k)
                {
                    arg.a_element_op_(a, arg.a_m_k_(m, k));
                    arg.b_element_op_(b, arg.b_k_n_(k, n));
                    arg.a_element_op_(a, static_cast<AccDataType>(arg.a_m_k_(m, k)));
                    arg.b_element_op_(b, static_cast<AccDataType>(arg.b_k_n_(k, n)));
                    acc += a * b;
                }

@@ -61,10 +61,10 @@ using reduce_configuration_2_instances_blockwise = std::tuple<
    >;
#endif

template <typename AccDataType, ReduceTensorOp ReduceOpId>
template <ReduceTensorOp ReduceOpId>
using deviceReduceBlockWisePtrType = DeviceReducePtr<
    typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::InElementwiseOperation,
    typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::AccElementwiseOperation>;
    typename reduce_unary_operator<ReduceOpId, true, true>::InElementwiseOperation,
    typename reduce_unary_operator<ReduceOpId, true, true>::AccElementwiseOperation>;

template <typename InDataType,
          typename AccDataType,
@@ -75,14 +75,13 @@ template <typename InDataType,
          bool PropagateNan,
          bool UseIndex>
void add_device_reduce_instance_blockwise(
    std::vector<deviceReduceBlockWisePtrType<AccDataType, ReduceOpId>>& device_op_instances)
    std::vector<deviceReduceBlockWisePtrType<ReduceOpId>>& device_op_instances)
{
    using ReduceOperation = typename reduce_binary_operator<AccDataType, ReduceOpId>::opType;
    using ReduceOperation = typename reduce_binary_operator<ReduceOpId>::opType;
    using InElementwiseOperation =
        typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::InElementwiseOperation;
        typename reduce_unary_operator<ReduceOpId, true, true>::InElementwiseOperation;
    using AccElementwiseOperation =
        typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::
            AccElementwiseOperation;
        typename reduce_unary_operator<ReduceOpId, true, true>::AccElementwiseOperation;

    constexpr bool Indexable =
        (ReduceOpId == ReduceTensorOp::MIN || ReduceOpId == ReduceTensorOp::MAX ||
@@ -137,7 +136,7 @@ void add_device_reduce_instance_blockwise(
                                              ReduceOpId, \
                                              PropagateNan, \
                                              UseIndex>( \
        std::vector<deviceReduceBlockWisePtrType<compT, ReduceOpId>> & device_op_instances)
        std::vector<deviceReduceBlockWisePtrType<ReduceOpId>> & device_op_instances)

#define ADD_BLOCKWISE_INST_BY_ID( \
    inT, compT, outT, ReduceOpId, NanOpt, IndicesOpt, Rank, NumReduceDim) \
@@ -150,21 +149,17 @@ void add_device_reduce_instance_blockwise(
                                              Rank, \
                                              NumReduceDim)

#define ADD_BLOCKWISE_INST_REF_BY_TYPE( \
    inT, compT, outT, ReduceOpId, PropagateNan, UseIndex, Rank, NumReduceDim) \
    extern template void add_device_reduce_instance_blockwise<inT, \
                                                              compT, \
                                                              outT, \
                                                              Rank, \
                                                              NumReduceDim, \
                                                              ReduceOpId, \
                                                              PropagateNan, \
                                                              UseIndex>( \
        std::vector<DeviceReducePtr< \
            typename reduce_unary_operator<compT, ReduceOpId, true, true>::InElementwiseOperation, \
            typename reduce_unary_operator<compT, ReduceOpId, true, true>:: \
                AccElementwiseOperation>> & \
            device_op_instances)
#define ADD_BLOCKWISE_INST_REF_BY_TYPE( \
    inT, compT, outT, ReduceOpId, PropagateNan, UseIndex, Rank, NumReduceDim) \
    extern template void add_device_reduce_instance_blockwise<inT, \
                                                              compT, \
                                                              outT, \
                                                              Rank, \
                                                              NumReduceDim, \
                                                              ReduceOpId, \
                                                              PropagateNan, \
                                                              UseIndex>( \
        std::vector<deviceReduceBlockWisePtrType<ReduceOpId>> & device_op_instances)

#define ADD_BLOCKWISE_INST_REF_BY_ID( \
    inT, compT, outT, ReduceOpId, NanOpt, IndicesOpt, Rank, NumReduceDim) \

@@ -61,12 +61,10 @@ using reduce_configuration_2_instances_multiblock_atomic_add = std::tuple<
    >;
#endif

template <typename AccDataType, ReduceTensorOp ReduceOperation>
using deviceReduceMultiBlockAtomicAddPtrType =
    DeviceReducePtr<typename reduce_unary_operator<AccDataType, ReduceOperation, true, true>::
                        InElementwiseOperation,
                    typename reduce_unary_operator<AccDataType, ReduceOperation, true, true>::
                        AccElementwiseOperation>;
template <ReduceTensorOp ReduceOperation>
using deviceReduceMultiBlockAtomicAddPtrType = DeviceReducePtr<
    typename reduce_unary_operator<ReduceOperation, true, true>::InElementwiseOperation,
    typename reduce_unary_operator<ReduceOperation, true, true>::AccElementwiseOperation>;

template <typename InDataType,
          typename AccDataType,
@@ -77,15 +75,13 @@ template <typename InDataType,
          bool PropagateNan,
          bool UseIndex>
void add_device_reduce_instance_multiblock_atomic_add(
    std::vector<deviceReduceMultiBlockAtomicAddPtrType<AccDataType, ReduceOpId>>&
        device_op_instances)
    std::vector<deviceReduceMultiBlockAtomicAddPtrType<ReduceOpId>>& device_op_instances)
{
    using ReduceOperation = typename reduce_binary_operator<AccDataType, ReduceOpId>::opType;
    using ReduceOperation = typename reduce_binary_operator<ReduceOpId>::opType;
    using InElementwiseOperation =
        typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::InElementwiseOperation;
        typename reduce_unary_operator<ReduceOpId, true, true>::InElementwiseOperation;
    using AccElementwiseOperation =
        typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::
            AccElementwiseOperation;
        typename reduce_unary_operator<ReduceOpId, true, true>::AccElementwiseOperation;

    constexpr bool Indexable =
        (ReduceOpId == ReduceTensorOp::MIN || ReduceOpId == ReduceTensorOp::MAX ||
@@ -158,8 +154,7 @@ void add_device_reduce_instance_multiblock_atomic_add(
                                                ReduceOpId, \
                                                PropagateNan, \
                                                UseIndex>( \
        std::vector<deviceReduceMultiBlockAtomicAddPtrType<compT, ReduceOpId>> & \
            device_op_instances)
        std::vector<deviceReduceMultiBlockAtomicAddPtrType<ReduceOpId>> & device_op_instances)

#define ADD_MULTIBLOCK_ATOMIC_ADD_INST_BY_ID( \
    inT, compT, outT, ReduceOpId, NanOpt, IndicesOpt, Rank, NumReduceDim) \
@@ -172,21 +167,17 @@ void add_device_reduce_instance_multiblock_atomic_add(
                                                Rank, \
                                                NumReduceDim)

#define ADD_MULTIBLOCK_ATOMIC_ADD_INST_REF_BY_TYPE( \
    inT, compT, outT, ReduceOpId, PropagateNan, UseIndex, Rank, NumReduceDim) \
    extern template void add_device_reduce_instance_multiblock_atomic_add<inT, \
                                                                          compT, \
                                                                          outT, \
                                                                          Rank, \
                                                                          NumReduceDim, \
                                                                          ReduceOpId, \
                                                                          PropagateNan, \
                                                                          UseIndex>( \
        std::vector<DeviceReducePtr< \
            typename reduce_unary_operator<compT, ReduceOpId, true, true>::InElementwiseOperation, \
            typename reduce_unary_operator<compT, ReduceOpId, true, true>:: \
                AccElementwiseOperation>> & \
            device_op_instances)
#define ADD_MULTIBLOCK_ATOMIC_ADD_INST_REF_BY_TYPE( \
    inT, compT, outT, ReduceOpId, PropagateNan, UseIndex, Rank, NumReduceDim) \
    extern template void add_device_reduce_instance_multiblock_atomic_add<inT, \
                                                                          compT, \
                                                                          outT, \
                                                                          Rank, \
                                                                          NumReduceDim, \
                                                                          ReduceOpId, \
                                                                          PropagateNan, \
                                                                          UseIndex>( \
        std::vector<deviceReduceMultiBlockAtomicAddPtrType<ReduceOpId>> & device_op_instances)

#define ADD_MULTIBLOCK_ATOMIC_ADD_INST_REF_BY_ID( \
    inT, compT, outT, ReduceOpId, NanOpt, IndicesOpt, Rank, NumReduceDim) \

@@ -47,10 +47,10 @@ using reduce_configuration_2_instances_threadwise = std::tuple<
    >;
#endif

template <typename AccDataType, ReduceTensorOp ReduceOpId>
template <ReduceTensorOp ReduceOpId>
using deviceReduceThreadWisePtrType = DeviceReducePtr<
    typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::InElementwiseOperation,
    typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::AccElementwiseOperation>;
    typename reduce_unary_operator<ReduceOpId, true, true>::InElementwiseOperation,
    typename reduce_unary_operator<ReduceOpId, true, true>::AccElementwiseOperation>;

template <typename InDataType,
          typename AccDataType,
@@ -61,14 +61,13 @@ template <typename InDataType,
          bool PropagateNan,
          bool UseIndex>
void add_device_reduce_instance_threadwise(
    std::vector<deviceReduceThreadWisePtrType<AccDataType, ReduceOpId>>& device_op_instances)
    std::vector<deviceReduceThreadWisePtrType<ReduceOpId>>& device_op_instances)
{
    using ReduceOperation = typename reduce_binary_operator<AccDataType, ReduceOpId>::opType;
    using ReduceOperation = typename reduce_binary_operator<ReduceOpId>::opType;
    using InElementwiseOperation =
        typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::InElementwiseOperation;
        typename reduce_unary_operator<ReduceOpId, true, true>::InElementwiseOperation;
    using AccElementwiseOperation =
        typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::
            AccElementwiseOperation;
        typename reduce_unary_operator<ReduceOpId, true, true>::AccElementwiseOperation;

    constexpr bool Indexable =
        (ReduceOpId == ReduceTensorOp::MIN || ReduceOpId == ReduceTensorOp::MAX ||
@@ -114,7 +113,7 @@ void add_device_reduce_instance_threadwise(
                                               ReduceOpId, \
                                               PropagateNan, \
                                               UseIndex>( \
        std::vector<deviceReduceThreadWisePtrType<compT, ReduceOpId>> & device_op_instances)
        std::vector<deviceReduceThreadWisePtrType<ReduceOpId>> & device_op_instances)

#define ADD_THREADWISE_INST_BY_ID( \
    inT, compT, outT, ReduceOpId, NanOpt, IndicesOpt, Rank, NumReduceDim) \
@@ -127,21 +126,17 @@ void add_device_reduce_instance_threadwise(
                                               Rank, \
                                               NumReduceDim)

#define ADD_THREADWISE_INST_REF_BY_TYPE( \
    inT, compT, outT, ReduceOpId, PropagateNan, UseIndex, Rank, NumReduceDim) \
    extern template void add_device_reduce_instance_threadwise<inT, \
                                                               compT, \
                                                               outT, \
                                                               Rank, \
                                                               NumReduceDim, \
                                                               ReduceOpId, \
                                                               PropagateNan, \
                                                               UseIndex>( \
        std::vector<DeviceReducePtr< \
            typename reduce_unary_operator<compT, ReduceOpId, true, true>::InElementwiseOperation, \
            typename reduce_unary_operator<compT, ReduceOpId, true, true>:: \
                AccElementwiseOperation>> & \
            device_op_instances)
#define ADD_THREADWISE_INST_REF_BY_TYPE( \
    inT, compT, outT, ReduceOpId, PropagateNan, UseIndex, Rank, NumReduceDim) \
    extern template void add_device_reduce_instance_threadwise<inT, \
                                                               compT, \
                                                               outT, \
                                                               Rank, \
                                                               NumReduceDim, \
                                                               ReduceOpId, \
                                                               PropagateNan, \
                                                               UseIndex>( \
        std::vector<deviceReduceThreadWisePtrType<ReduceOpId>> & device_op_instances)

#define ADD_THREADWISE_INST_REF_BY_ID( \
    inT, compT, outT, ReduceOpId, NanOpt, IndicesOpt, Rank, NumReduceDim) \

@@ -21,11 +21,11 @@ template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using ReduceSum = ck::reduce::Add<F32>;
using ReduceSum = ck::reduce::Add;
using ReduceOps = ck::Tuple<ReduceSum, ReduceSum>;

using Identity = ck::tensor_operation::element_wise::UnaryIdentic<F32, F32, false>;
using Square = ck::tensor_operation::element_wise::UnarySquare<F32, F32, false>;
using Identity = ck::tensor_operation::element_wise::PassThrough;
using Square = ck::tensor_operation::element_wise::UnarySquare;
using DInElementOps = ck::Tuple<Identity, Square>;
using DOutElementOps = ck::Tuple<Identity, Identity>;

@@ -21,11 +21,11 @@ template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using ReduceSum = ck::reduce::Add<F32>;
using ReduceSum = ck::reduce::Add;
using ReduceOps = ck::Tuple<ReduceSum, ReduceSum>;

using Identity = ck::tensor_operation::element_wise::UnaryIdentic<F32, F32, false>;
using Square = ck::tensor_operation::element_wise::UnarySquare<F32, F32, false>;
using Identity = ck::tensor_operation::element_wise::PassThrough;
using Square = ck::tensor_operation::element_wise::UnarySquare;
using DInElementOps = ck::Tuple<Identity, Square>;
using DOutElementOps = ck::Tuple<Identity, Identity>;

@@ -21,11 +21,11 @@ template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using ReduceSum = ck::reduce::Add<F32>;
using ReduceSum = ck::reduce::Add;
using ReduceOps = ck::Tuple<ReduceSum, ReduceSum>;

using Identity = ck::tensor_operation::element_wise::UnaryIdentic<F32, F32, false>;
using Square = ck::tensor_operation::element_wise::UnarySquare<F32, F32, false>;
using Identity = ck::tensor_operation::element_wise::PassThrough;
using Square = ck::tensor_operation::element_wise::UnarySquare;
using DInElementOps = ck::Tuple<Identity, Square>;
using DOutElementOps = ck::Tuple<Identity, Identity>;

@@ -21,11 +21,11 @@ template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using ReduceSum = ck::reduce::Add<F32>;
using ReduceSum = ck::reduce::Add;
using ReduceOps = ck::Tuple<ReduceSum, ReduceSum>;

using Identity = ck::tensor_operation::element_wise::UnaryIdentic<F32, F32, false>;
using Square = ck::tensor_operation::element_wise::UnarySquare<F32, F32, false>;
using Identity = ck::tensor_operation::element_wise::PassThrough;
using Square = ck::tensor_operation::element_wise::UnarySquare;
using DInElementOps = ck::Tuple<Identity, Square>;
using DOutElementOps = ck::Tuple<Identity, Identity>;

@@ -21,12 +21,12 @@ template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using ReduceSum = ck::reduce::Add<F32>;
using ReduceSum = ck::reduce::Add;
using ReduceOps = ck::Tuple<ReduceSum, ReduceSum>;

using Div = ck::tensor_operation::element_wise::UnaryIdentic<F32, F32, true>;
using Identity = ck::tensor_operation::element_wise::UnaryIdentic<F32, F32, false>;
using Square = ck::tensor_operation::element_wise::UnarySquare<F32, F32, false>;
using Div = ck::tensor_operation::element_wise::UnaryDivide;
using Identity = ck::tensor_operation::element_wise::PassThrough;
using Square = ck::tensor_operation::element_wise::UnarySquare;
using DInElementOps = ck::Tuple<Identity, Square>;
using DOutElementOps = ck::Tuple<Div, Div>;

@@ -21,12 +21,12 @@ template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using ReduceSum = ck::reduce::Add<F32>;
using ReduceSum = ck::reduce::Add;
using ReduceOps = ck::Tuple<ReduceSum, ReduceSum>;

using Div = ck::tensor_operation::element_wise::UnaryIdentic<F32, F32, true>;
using Identity = ck::tensor_operation::element_wise::UnaryIdentic<F32, F32, false>;
using Square = ck::tensor_operation::element_wise::UnarySquare<F32, F32, false>;
using Div = ck::tensor_operation::element_wise::UnaryDivide;
using Identity = ck::tensor_operation::element_wise::PassThrough;
using Square = ck::tensor_operation::element_wise::UnarySquare;
using DInElementOps = ck::Tuple<Identity, Square>;
using DOutElementOps = ck::Tuple<Div, Div>;

@@ -21,12 +21,12 @@ template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using ReduceSum = ck::reduce::Add<F32>;
using ReduceSum = ck::reduce::Add;
using ReduceOps = ck::Tuple<ReduceSum, ReduceSum>;

using Div = ck::tensor_operation::element_wise::UnaryIdentic<F32, F32, true>;
using Identity = ck::tensor_operation::element_wise::UnaryIdentic<F32, F32, false>;
using Square = ck::tensor_operation::element_wise::UnarySquare<F32, F32, false>;
using Div = ck::tensor_operation::element_wise::UnaryDivide;
using Identity = ck::tensor_operation::element_wise::PassThrough;
using Square = ck::tensor_operation::element_wise::UnarySquare;
using DInElementOps = ck::Tuple<Identity, Square>;
using DOutElementOps = ck::Tuple<Div, Div>;

@@ -21,12 +21,12 @@ template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using ReduceSum = ck::reduce::Add<F32>;
using ReduceSum = ck::reduce::Add;
using ReduceOps = ck::Tuple<ReduceSum, ReduceSum>;

using Div = ck::tensor_operation::element_wise::UnaryIdentic<F32, F32, true>;
using Identity = ck::tensor_operation::element_wise::UnaryIdentic<F32, F32, false>;
using Square = ck::tensor_operation::element_wise::UnarySquare<F32, F32, false>;
using Div = ck::tensor_operation::element_wise::UnaryDivide;
using Identity = ck::tensor_operation::element_wise::PassThrough;
using Square = ck::tensor_operation::element_wise::UnarySquare;
using DInElementOps = ck::Tuple<Identity, Square>;
using DOutElementOps = ck::Tuple<Div, Div>;

@@ -21,12 +21,12 @@ template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using ReduceSum = ck::reduce::Add<F32>;
using ReduceSum = ck::reduce::Add;
using ReduceOps = ck::Tuple<ReduceSum, ReduceSum>;

using Div = ck::tensor_operation::element_wise::UnaryIdentic<F32, F32, true>;
using Identity = ck::tensor_operation::element_wise::UnaryIdentic<F32, F32, false>;
using Square = ck::tensor_operation::element_wise::UnarySquare<F32, F32, false>;
using Div = ck::tensor_operation::element_wise::UnaryDivide;
using Identity = ck::tensor_operation::element_wise::PassThrough;
using Square = ck::tensor_operation::element_wise::UnarySquare;
using DInElementOps = ck::Tuple<Identity, Square>;
using DOutElementOps = ck::Tuple<Div, Div>;

@@ -21,12 +21,12 @@ template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using ReduceSum = ck::reduce::Add<F32>;
using ReduceSum = ck::reduce::Add;
using ReduceOps = ck::Tuple<ReduceSum, ReduceSum>;

using Div = ck::tensor_operation::element_wise::UnaryIdentic<F32, F32, true>;
using Identity = ck::tensor_operation::element_wise::UnaryIdentic<F32, F32, false>;
using Square = ck::tensor_operation::element_wise::UnarySquare<F32, F32, false>;
using Div = ck::tensor_operation::element_wise::UnaryDivide;
using Identity = ck::tensor_operation::element_wise::PassThrough;
using Square = ck::tensor_operation::element_wise::UnarySquare;
using DInElementOps = ck::Tuple<Identity, Square>;
using DOutElementOps = ck::Tuple<Div, Div>;

@@ -21,12 +21,12 @@ template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using ReduceSum = ck::reduce::Add<F32>;
using ReduceSum = ck::reduce::Add;
using ReduceOps = ck::Tuple<ReduceSum, ReduceSum>;

using Div = ck::tensor_operation::element_wise::UnaryIdentic<F32, F32, true>;
using Identity = ck::tensor_operation::element_wise::UnaryIdentic<F32, F32, false>;
using Square = ck::tensor_operation::element_wise::UnarySquare<F32, F32, false>;
using Div = ck::tensor_operation::element_wise::UnaryDivide;
using Identity = ck::tensor_operation::element_wise::PassThrough;
using Square = ck::tensor_operation::element_wise::UnarySquare;
using DInElementOps = ck::Tuple<Identity, Square>;
using DOutElementOps = ck::Tuple<Div, Div>;

@@ -21,12 +21,12 @@ template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using ReduceSum = ck::reduce::Add<F32>;
using ReduceSum = ck::reduce::Add;
using ReduceOps = ck::Tuple<ReduceSum, ReduceSum>;

using Div = ck::tensor_operation::element_wise::UnaryIdentic<F32, F32, true>;
using Identity = ck::tensor_operation::element_wise::UnaryIdentic<F32, F32, false>;
using Square = ck::tensor_operation::element_wise::UnarySquare<F32, F32, false>;
using Div = ck::tensor_operation::element_wise::UnaryDivide;
using Identity = ck::tensor_operation::element_wise::PassThrough;
using Square = ck::tensor_operation::element_wise::UnarySquare;
using DInElementOps = ck::Tuple<Identity, Square>;
using DOutElementOps = ck::Tuple<Div, Div>;

@@ -20,8 +20,8 @@ namespace device_gemm_instance {
using F32 = float;
using F16 = ck::half_t;
using DPtrsGlobal = ck::Tuple<F32*, F32*>;
using Identity = ck::tensor_operation::element_wise::UnaryIdentic<F32, F32, false>;
using Square = ck::tensor_operation::element_wise::UnarySquare<F32, F32, false>;
using Identity = ck::tensor_operation::element_wise::PassThrough;
using Square = ck::tensor_operation::element_wise::UnarySquare;
using DInElementOps = ck::Tuple<Identity, Square>;
using DOutElementOps = ck::Tuple<Identity, Identity>;

@@ -128,17 +128,15 @@ bool profile_batched_gemm_reduce_impl(int do_verification,
        b_g_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5}, num_thread);
    }

    using AElementOp = ck::tensor_operation::element_wise::PassThrough;
    using BElementOp = ck::tensor_operation::element_wise::PassThrough;
    using CElementOp = ck::tensor_operation::element_wise::PassThrough;
    using D0ReduceOp = ck::reduce::Add<float>;
    using D1ReduceOp = ck::reduce::Add<float>;
    using UnaryIdenticElementOp =
        ck::tensor_operation::element_wise::UnaryIdentic<float, float, false>;
    using UnarySquareElementOp =
        ck::tensor_operation::element_wise::UnarySquare<float, float, false>;
    using DxsInElementOps = ck::Tuple<UnaryIdenticElementOp, UnarySquareElementOp>;
    using DxsOutElementOps = ck::Tuple<UnaryIdenticElementOp, UnaryIdenticElementOp>;
    using AElementOp = ck::tensor_operation::element_wise::PassThrough;
    using BElementOp = ck::tensor_operation::element_wise::PassThrough;
    using CElementOp = ck::tensor_operation::element_wise::PassThrough;
    using D0ReduceOp = ck::reduce::Add;
    using D1ReduceOp = ck::reduce::Add;
    using UnaryIdenticElementOp = ck::tensor_operation::element_wise::PassThrough;
    using UnarySquareElementOp = ck::tensor_operation::element_wise::UnarySquare;
    using DxsInElementOps = ck::Tuple<UnaryIdenticElementOp, UnarySquareElementOp>;
    using DxsOutElementOps = ck::Tuple<UnaryIdenticElementOp, UnaryIdenticElementOp>;

    const auto a_element_op = AElementOp{};
    const auto b_element_op = BElementOp{};
@@ -170,8 +168,8 @@ bool profile_batched_gemm_reduce_impl(int do_verification,
    {
        for(int m = 0; m < M; ++m)
        {
            float d0_acc = d0_reduce_op.GetIdentityValue();
            float d1_acc = d1_reduce_op.GetIdentityValue();
            float d0_acc = d0_reduce_op.GetIdentityValue<float>();
            float d1_acc = d1_reduce_op.GetIdentityValue<float>();

            for(int n = 0; n < N; ++n)
            {

@@ -20,9 +20,9 @@ namespace device_gemm_instance {
using F32 = float;
using F16 = ck::half_t;
using DPtrsGlobal = ck::Tuple<F32*, F32*>;
using Div = ck::tensor_operation::element_wise::UnaryIdentic<F32, F32, true>;
using Identity = ck::tensor_operation::element_wise::UnaryIdentic<F32, F32, false>;
using Square = ck::tensor_operation::element_wise::UnarySquare<F32, F32, false>;
using Div = ck::tensor_operation::element_wise::UnaryDivide;
using Identity = ck::tensor_operation::element_wise::PassThrough;
using Square = ck::tensor_operation::element_wise::UnarySquare;
using DInElementOps = ck::Tuple<Identity, Square>;
using DOutElementOps = ck::Tuple<Div, Div>;

@@ -136,20 +136,18 @@ void profile_gemm_bias_add_reduce_impl(int do_verification,
        c1_m_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5}, num_thread);
    }

    using PassThrough = ck::tensor_operation::element_wise::PassThrough;
    using AElementOp = PassThrough;
    using BElementOp = PassThrough;
    using CElementOp = PassThrough;
    using C1ElementOp = PassThrough;
    using D0ReduceOp = ck::reduce::Add<float>;
    using D1ReduceOp = ck::reduce::Add<float>;
    using UnaryDivElementOp = ck::tensor_operation::element_wise::UnaryIdentic<float, float, true>;
    using UnaryIdenticElementOp =
        ck::tensor_operation::element_wise::UnaryIdentic<float, float, false>;
    using UnarySquareElementOp =
        ck::tensor_operation::element_wise::UnarySquare<float, float, false>;
    using DxsInElementOps = ck::Tuple<UnaryIdenticElementOp, UnarySquareElementOp>;
    using DxsOutElementOps = ck::Tuple<UnaryDivElementOp, UnaryDivElementOp>;
    using PassThrough = ck::tensor_operation::element_wise::PassThrough;
    using AElementOp = PassThrough;
    using BElementOp = PassThrough;
    using CElementOp = PassThrough;
    using C1ElementOp = PassThrough;
    using D0ReduceOp = ck::reduce::Add;
    using D1ReduceOp = ck::reduce::Add;
    using UnaryDivElementOp = ck::tensor_operation::element_wise::UnaryDivide;
    using UnaryIdenticElementOp = ck::tensor_operation::element_wise::PassThrough;
    using UnarySquareElementOp = ck::tensor_operation::element_wise::UnarySquare;
    using DxsInElementOps = ck::Tuple<UnaryIdenticElementOp, UnarySquareElementOp>;
    using DxsOutElementOps = ck::Tuple<UnaryDivElementOp, UnaryDivElementOp>;

    const auto a_element_op = AElementOp{};
    const auto b_element_op = BElementOp{};
@@ -196,15 +194,15 @@ void profile_gemm_bias_add_reduce_impl(int do_verification,

        for(int m = 0; m < M; ++m)
        {
            ReduceAccDataType d0_acc = d0_reduce_op.GetIdentityValue();
            ReduceAccDataType d1_acc = d1_reduce_op.GetIdentityValue();
            auto d0_acc = d0_reduce_op.GetIdentityValue<ReduceAccDataType>();
            auto d1_acc = d1_reduce_op.GetIdentityValue<ReduceAccDataType>();

            for(int n = 0; n < N; ++n)
            {
                ReduceAccDataType c_val =
                    ck::type_convert<ReduceAccDataType>(c_m_n_host_result(m, n));
                ReduceAccDataType d0_val = 0;
                ReduceAccDataType d1_val = 0;
                ReduceAccDataType d0_val;
                ReduceAccDataType d1_val;

                dxs_in_element_op(ck::Number<0>{})(d0_val, c_val);
                dxs_in_element_op(ck::Number<1>{})(d1_val, c_val);

@@ -20,9 +20,9 @@ namespace device_gemm_instance {
using F32 = float;
using F16 = ck::half_t;
using DPtrsGlobal = ck::Tuple<F32*, F32*>;
using Div = ck::tensor_operation::element_wise::UnaryIdentic<F32, F32, true>;
using Identity = ck::tensor_operation::element_wise::UnaryIdentic<F32, F32, false>;
using Square = ck::tensor_operation::element_wise::UnarySquare<F32, F32, false>;
using Div = ck::tensor_operation::element_wise::UnaryDivide;
using Identity = ck::tensor_operation::element_wise::PassThrough;
using Square = ck::tensor_operation::element_wise::UnarySquare;
using DInElementOps = ck::Tuple<Identity, Square>;
using DOutElementOps = ck::Tuple<Div, Div>;

@@ -123,18 +123,16 @@ bool profile_gemm_reduce_impl(int do_verification,
        b_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5}, num_thread);
    }

    using AElementOp = ck::tensor_operation::element_wise::PassThrough;
    using BElementOp = ck::tensor_operation::element_wise::PassThrough;
    using CElementOp = ck::tensor_operation::element_wise::PassThrough;
    using D0ReduceOp = ck::reduce::Add<float>;
    using D1ReduceOp = ck::reduce::Add<float>;
    using UnaryDivElementOp = ck::tensor_operation::element_wise::UnaryIdentic<float, float, true>;
    using UnaryIdenticElementOp =
        ck::tensor_operation::element_wise::UnaryIdentic<float, float, false>;
    using UnarySquareElementOp =
        ck::tensor_operation::element_wise::UnarySquare<float, float, false>;
    using DxsInElementOps = ck::Tuple<UnaryIdenticElementOp, UnarySquareElementOp>;
    using DxsOutElementOps = ck::Tuple<UnaryDivElementOp, UnaryDivElementOp>;
    using AElementOp = ck::tensor_operation::element_wise::PassThrough;
    using BElementOp = ck::tensor_operation::element_wise::PassThrough;
    using CElementOp = ck::tensor_operation::element_wise::PassThrough;
    using D0ReduceOp = ck::reduce::Add;
    using D1ReduceOp = ck::reduce::Add;
    using UnaryDivElementOp = ck::tensor_operation::element_wise::UnaryDivide;
    using UnaryIdenticElementOp = ck::tensor_operation::element_wise::PassThrough;
    using UnarySquareElementOp = ck::tensor_operation::element_wise::UnarySquare;
    using DxsInElementOps = ck::Tuple<UnaryIdenticElementOp, UnarySquareElementOp>;
    using DxsOutElementOps = ck::Tuple<UnaryDivElementOp, UnaryDivElementOp>;

    const auto a_element_op = AElementOp{};
    const auto b_element_op = BElementOp{};
@@ -167,15 +165,15 @@ bool profile_gemm_reduce_impl(int do_verification,

    for(int m = 0; m < M; ++m)
    {
        ReduceAccDataType d0_acc = d0_reduce_op.GetIdentityValue();
        ReduceAccDataType d1_acc = d1_reduce_op.GetIdentityValue();
        auto d0_acc = d0_reduce_op.GetIdentityValue<ReduceAccDataType>();
        auto d1_acc = d1_reduce_op.GetIdentityValue<ReduceAccDataType>();

        for(int n = 0; n < N; ++n)
        {
            ReduceAccDataType c_val =
                ck::type_convert<ReduceAccDataType>(c_m_n_host_result(m, n));
            ReduceAccDataType d0_val = 0;
            ReduceAccDataType d1_val = 0;
            ReduceAccDataType d0_val;
            ReduceAccDataType d1_val;

            dxs_in_element_op(ck::Number<0>{})(d0_val, c_val);
            dxs_in_element_op(ck::Number<1>{})(d1_val, c_val);

@@ -261,13 +261,18 @@ bool profile_reduce_impl_impl(bool do_verification,
    float best_gb_per_sec = 0;

    using InElementwiseOperation =
        typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::
            InElementwiseOperation;
        typename reduce_unary_operator<ReduceOpId, true, true>::InElementwiseOperation;
    using AccElementwiseOperation =
        typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::
            AccElementwiseOperation;
        typename reduce_unary_operator<ReduceOpId, true, true>::AccElementwiseOperation;

    using ReduceOperation = typename reduce_binary_operator<AccDataType, ReduceOpId>::opType;
    using ReduceOperation = typename reduce_binary_operator<ReduceOpId>::opType;

    InElementwiseOperation in_elementwise_op;
    AccElementwiseOperation acc_elementwise_op;

    std::tie(in_elementwise_op, acc_elementwise_op) =
        reduce_unary_operator<ReduceOpId, true, true>::GetElementwiseOperator(
            static_cast<int32_t>(reduce_total_length));

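Here the configured functors come from a static factory and are unpacked with std::tie, rather than each call site constructing them from the reduce length. A self-contained sketch of that factory pattern follows; AvgMappingSketch, PassSketch, and DivideSketch are stand-ins for the types reduce_unary_operator selects, not the ck API.

#include <cstdint>
#include <iostream>
#include <tuple>

struct PassSketch
{
    void operator()(float& y, const float& x) const { y = x; }
};

struct DivideSketch
{
    int32_t divider_ = 1;
    void operator()(float& y, const float& x) const { y = x / static_cast<float>(divider_); }
};

struct AvgMappingSketch
{
    // Builds both element-wise operations from the runtime reduce length and
    // returns them together, mirroring GetElementwiseOperator.
    static std::tuple<PassSketch, DivideSketch> GetElementwiseOperator(int32_t reduce_length)
    {
        // For AVG: identity before the reduction, divide-by-length after it.
        return std::make_tuple(PassSketch{}, DivideSketch{reduce_length});
    }
};

int main()
{
    PassSketch   in_op;
    DivideSketch acc_op;

    std::tie(in_op, acc_op) = AvgMappingSketch::GetElementwiseOperator(8);

    float y;
    acc_op(y, 16.0f);
    std::cout << y << '\n'; // prints 2
}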
    using DeviceReduceInstPtr0 =
        DeviceReducePtr<InElementwiseOperation, AccElementwiseOperation>;
@@ -323,8 +328,13 @@ bool profile_reduce_impl_impl(bool do_verification,
                      OutputIndex>
            hostReduce(in.mDesc, out_ref.mDesc, invariantDims, reduceDims);

        hostReduce.Run(
            alpha, in.mData.data(), beta, out_ref.mData.data(), out_indices_ref.mData.data());
        hostReduce.Run(alpha,
                       in.mData.data(),
                       beta,
                       out_ref.mData.data(),
                       out_indices_ref.mData.data(),
                       in_elementwise_op,
                       acc_elementwise_op);
    };

    std::vector<ck::index_t> i_inLengths;
@@ -339,10 +349,6 @@ bool profile_reduce_impl_impl(bool do_verification,

    for(auto& reduce_ptr : reduce0_ptrs)
    {
        InElementwiseOperation in_elementwise_op(static_cast<int32_t>(reduce_total_length));
        AccElementwiseOperation acc_elementwise_op(static_cast<int32_t>(reduce_total_length));

        auto argument_ptr = reduce_ptr->MakeArgumentPointer(i_inLengths,
                                                            i_inStrides,
                                                            i_outLengths,