Mirror of https://github.com/ROCm/composable_kernel.git (synced 2026-04-19 22:39:03 +00:00)
Layernorm and groupnorm: support saving mean and inverse std in the forward pass (#929)
* Save mean and inverse std in normalization
* Save mean and inverse std in splitK
* Vectorize the save of mean and inv std
* Modify instances for saving mean and std
* Simplify the layernorm example
* Save mean and std in groupnorm example
* Save mean and inv std in ckProfiler and test
* Remove compute data type from base class
* Save mean and inv std in client example
* Add changelog
* clang format
* Fix compile error
* Refine naming
* Avoid error in bf16
* Revert changelog
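The change threads per-sample statistics through the whole stack: the device ops now take two extra stride vectors and two extra output pointers, the host references compute and return the same statistics, and the examples verify them against the device results. As a quick orientation, here is a minimal sketch of the new MakeArgumentPointer call shape, distilled from the layernorm client example in the diff below (op_ptr and the buffer names are illustrative, taken from that example, not a fixed API):

    // Layernorm over an [M, N] tensor, reducing dim 1; mean and inv_std
    // come back as one value per row. Sketch only: assumes op_ptr is a
    // DeviceNormalization instance and the save buffers hold M floats each.
    auto argument_ptr = op_ptr->MakeArgumentPointer(
        {M, N},      // lengths
        {Stride, 1}, // xStrides
        {0, 1},      // gammaStrides (broadcast over M)
        {0, 1},      // betaStrides (broadcast over M)
        {Stride, 1}, // yStrides
        {1},         // saveMeanStrides   (new in this commit)
        {1},         // saveInvStdStrides (new in this commit)
        {1},         // reduceDims
        1e-4,        // epsilon
        x_device_buf.GetDeviceBuffer(),
        gamma_device_buf.GetDeviceBuffer(),
        beta_device_buf.GetDeviceBuffer(),
        y_device_buf.GetDeviceBuffer(),
        save_mean_device_buf.GetDeviceBuffer(),    // the examples pass nullptr here
        save_inv_std_device_buf.GetDeviceBuffer(), // when saving is compiled out
        PassThrough{});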
@@ -12,12 +12,14 @@
 #include "ck/library/tensor_operation_instance/gpu/normalization.hpp"
 
 using XDataType = ck::half_t;
 using GammaDataType = ck::half_t;
 using BetaDataType = ck::half_t;
 using YDataType = ck::half_t;
-using ComputeDataType = float;
+using SaveMeanInvStdDataType = float;
 using PassThrough = ck::tensor_operation::element_wise::PassThrough;
+
+#define SAVE_MEAN_INV_STD
 
 constexpr int Rank = 2;
 constexpr int NumReduceDim = 1;
@@ -50,12 +52,16 @@ int main(int argc, char* argv[])
     SimpleDeviceMem gamma_device_buf(sizeof(GammaDataType) * N);
     SimpleDeviceMem beta_device_buf(sizeof(BetaDataType) * N);
     SimpleDeviceMem y_device_buf(sizeof(YDataType) * xy_size);
+#ifdef SAVE_MEAN_INV_STD
+    SimpleDeviceMem save_mean_device_buf(sizeof(SaveMeanInvStdDataType) * M);
+    SimpleDeviceMem save_inv_std_device_buf(sizeof(SaveMeanInvStdDataType) * M);
+#endif
 
     using DeviceOp = ck::tensor_operation::device::DeviceNormalization<XDataType,
                                                                        GammaDataType,
                                                                        BetaDataType,
-                                                                       ComputeDataType,
                                                                        YDataType,
+                                                                       SaveMeanInvStdDataType,
                                                                        PassThrough,
                                                                        Rank,
                                                                        NumReduceDim>;
@@ -84,14 +90,21 @@ int main(int argc, char* argv[])
                                                     {0, 1},      // gammaStrides
                                                     {0, 1},      // betaStrides
                                                     {Stride, 1}, // yStrides
+                                                    {1},         // save_mean Strides
+                                                    {1},         // save_inv_std Strides
                                                     {1},         // reduceDims
                                                     1e-4,
                                                     x_device_buf.GetDeviceBuffer(),
                                                     gamma_device_buf.GetDeviceBuffer(),
                                                     beta_device_buf.GetDeviceBuffer(),
                                                     y_device_buf.GetDeviceBuffer(),
+#ifdef SAVE_MEAN_INV_STD
+                                                    save_mean_device_buf.GetDeviceBuffer(),
+                                                    save_inv_std_device_buf.GetDeviceBuffer(),
+#else
                                                     nullptr,
                                                     nullptr,
+#endif
                                                     PassThrough{});
 
     auto invoker_ptr = op_ptr->MakeInvokerPointer();
@@ -109,6 +122,10 @@ int main(int argc, char* argv[])
     std::size_t num_byte = sizeof(XDataType) * M * N + sizeof(GammaDataType) * N +
                            sizeof(BetaDataType) * N + sizeof(YDataType) * M * N;
 
+#ifdef SAVE_MEAN_INV_STD
+    num_byte += sizeof(SaveMeanInvStdDataType) * M * 2;
+#endif
+
     float gb_per_sec = num_byte / 1.E6 / ave_time;
 
     std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << gb_per_sec << " GB/s, "
@@ -140,17 +157,24 @@ int main(int argc, char* argv[])
     auto argument_ptr = op_ptr->MakeArgumentPointer({M, N},      // lengths
                                                     {Stride, 1}, // xStrides
-                                                    {1},         // gammaStrides
-                                                    {1},         // betaStrides
+                                                    {0, 1},      // gammaStrides
+                                                    {0, 1},      // betaStrides
                                                     {Stride, 1}, // yStrides
+                                                    {1},         // save_mean Strides
+                                                    {1},         // save_inv_std Strides
                                                     {1},         // reduceDims
                                                     1e-4,
                                                     x_device_buf.GetDeviceBuffer(),
                                                     gamma_device_buf.GetDeviceBuffer(),
                                                     beta_device_buf.GetDeviceBuffer(),
                                                     y_device_buf.GetDeviceBuffer(),
+#ifdef SAVE_MEAN_INV_STD
+                                                    save_mean_device_buf.GetDeviceBuffer(),
+                                                    save_inv_std_device_buf.GetDeviceBuffer(),
+#else
                                                     nullptr,
                                                     nullptr,
+#endif
                                                     PassThrough{});
 
     auto invoker_ptr = op_ptr->MakeInvokerPointer();
@@ -12,12 +12,14 @@
 #include "ck/library/tensor_operation_instance/gpu/normalization_swish.hpp"
 
 using XDataType = ck::half_t;
 using GammaDataType = float;
 using BetaDataType = float;
 using YDataType = ck::half_t;
-using ComputeDataType = float;
+using SaveMeanInvStdDataType = float;
 using Swish = ck::tensor_operation::element_wise::Swish;
+
+#define SAVE_MEAN_INV_STD
 
 constexpr int Rank = 5;
 constexpr int NumReduceDim = 3;
@@ -49,19 +51,24 @@ int main(int argc, char* argv[])
     std::size_t xy_size = N * H * W * G * C;
     std::size_t gamma_beta_size = G * C;
 
     std::vector<ck::index_t> xy_strides = {H * W * G * C, W * G * C, G * C, C, 1};
     std::vector<ck::index_t> gamma_beta_strides = {0, 0, 0, C, 1};
+    std::vector<ck::index_t> save_mean_inv_std_strides = {G, 1};
 
     SimpleDeviceMem x_device_buf(sizeof(XDataType) * xy_size);
     SimpleDeviceMem gamma_device_buf(sizeof(GammaDataType) * gamma_beta_size);
     SimpleDeviceMem beta_device_buf(sizeof(BetaDataType) * gamma_beta_size);
     SimpleDeviceMem y_device_buf(sizeof(YDataType) * xy_size);
+#ifdef SAVE_MEAN_INV_STD
+    SimpleDeviceMem save_mean_device_buf(sizeof(SaveMeanInvStdDataType) * N * G);
+    SimpleDeviceMem save_inv_std_device_buf(sizeof(SaveMeanInvStdDataType) * N * G);
+#endif
 
     using DeviceOp = ck::tensor_operation::device::DeviceNormalization<XDataType,
                                                                        GammaDataType,
                                                                        BetaDataType,
-                                                                       ComputeDataType,
                                                                        YDataType,
+                                                                       SaveMeanInvStdDataType,
                                                                        Swish,
                                                                        Rank,
                                                                        NumReduceDim>;
@@ -75,19 +82,26 @@ int main(int argc, char* argv[])
     const auto& generic_op_ptr = op_ptrs[0];
 
     auto generic_argument_ptr =
         generic_op_ptr->MakeArgumentPointer({N, H, W, G, C},           // lengths
                                             xy_strides,                // xStrides
                                             gamma_beta_strides,        // gammaStrides
                                             gamma_beta_strides,        // betaStrides
                                             xy_strides,                // yStrides
+                                            save_mean_inv_std_strides, // save_mean Strides
+                                            save_mean_inv_std_strides, // save_inv_std Strides
                                             {1, 2, 4},                 // reduceDims
                                             1e-6,
                                             x_device_buf.GetDeviceBuffer(),
                                             gamma_device_buf.GetDeviceBuffer(),
                                             beta_device_buf.GetDeviceBuffer(),
                                             y_device_buf.GetDeviceBuffer(),
+#ifdef SAVE_MEAN_INV_STD
+                                            save_mean_device_buf.GetDeviceBuffer(),
+                                            save_inv_std_device_buf.GetDeviceBuffer(),
+#else
                                             nullptr,
                                             nullptr,
+#endif
                                             Swish{});
 
     if(!generic_op_ptr->IsSupportedArgument(generic_argument_ptr.get()))
@@ -107,21 +121,29 @@ int main(int argc, char* argv[])
     for(int i = 0; i < op_ptrs.size(); ++i)
     {
         auto& op_ptr = op_ptrs[i];
-        auto argument_ptr = op_ptr->MakeArgumentPointer({N, H, W, G, C},    // lengths
-                                                        xy_strides,         // xStrides
-                                                        gamma_beta_strides, // gammaStrides
-                                                        gamma_beta_strides, // betaStrides
-                                                        xy_strides,         // yStrides
-                                                        {1, 2, 4},          // reduceDims
-                                                        1e-6,
-                                                        x_device_buf.GetDeviceBuffer(),
-                                                        gamma_device_buf.GetDeviceBuffer(),
-                                                        beta_device_buf.GetDeviceBuffer(),
-                                                        y_device_buf.GetDeviceBuffer(),
-                                                        nullptr,
-                                                        nullptr,
-                                                        Swish{});
+        auto argument_ptr =
+            op_ptr->MakeArgumentPointer({N, H, W, G, C},           // lengths
+                                        xy_strides,                // xStrides
+                                        gamma_beta_strides,        // gammaStrides
+                                        gamma_beta_strides,        // betaStrides
+                                        xy_strides,                // yStrides
+                                        save_mean_inv_std_strides, // save_mean Strides
+                                        save_mean_inv_std_strides, // save_inv_std Strides
+                                        {1, 2, 4},                 // reduceDims
+                                        1e-6,
+                                        x_device_buf.GetDeviceBuffer(),
+                                        gamma_device_buf.GetDeviceBuffer(),
+                                        beta_device_buf.GetDeviceBuffer(),
+                                        y_device_buf.GetDeviceBuffer(),
+#ifdef SAVE_MEAN_INV_STD
+                                        save_mean_device_buf.GetDeviceBuffer(),
+                                        save_inv_std_device_buf.GetDeviceBuffer(),
+#else
+                                        nullptr,
+                                        nullptr,
+#endif
+                                        Swish{});
 
         auto invoker_ptr = op_ptr->MakeInvokerPointer();
@@ -139,6 +161,10 @@ int main(int argc, char* argv[])
             sizeof(XDataType) * xy_size + sizeof(GammaDataType) * gamma_beta_size +
             sizeof(BetaDataType) * gamma_beta_size + sizeof(YDataType) * xy_size;
 
+#ifdef SAVE_MEAN_INV_STD
+        num_byte += sizeof(SaveMeanInvStdDataType) * N * G * 2;
+#endif
+
         float gb_per_sec = num_byte / 1.E6 / ave_time;
 
         std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << gb_per_sec << " GB/s, "
@@ -169,20 +195,28 @@ int main(int argc, char* argv[])
     std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString()
               << std::endl;
 
-    auto argument_ptr = op_ptr->MakeArgumentPointer({N, H, W, G, C},    // lengths
-                                                    xy_strides,         // xStrides
-                                                    gamma_beta_strides, // gammaStrides
-                                                    gamma_beta_strides, // betaStrides
-                                                    xy_strides,         // yStrides
-                                                    {1, 2, 4},          // reduceDims
-                                                    1e-6,
-                                                    x_device_buf.GetDeviceBuffer(),
-                                                    gamma_device_buf.GetDeviceBuffer(),
-                                                    beta_device_buf.GetDeviceBuffer(),
-                                                    y_device_buf.GetDeviceBuffer(),
-                                                    nullptr,
-                                                    nullptr,
-                                                    Swish{});
+    auto argument_ptr =
+        op_ptr->MakeArgumentPointer({N, H, W, G, C},           // lengths
+                                    xy_strides,                // xStrides
+                                    gamma_beta_strides,        // gammaStrides
+                                    gamma_beta_strides,        // betaStrides
+                                    xy_strides,                // yStrides
+                                    save_mean_inv_std_strides, // save_mean Strides
+                                    save_mean_inv_std_strides, // save_inv_std Strides
+                                    {1, 2, 4},                 // reduceDims
+                                    1e-6,
+                                    x_device_buf.GetDeviceBuffer(),
+                                    gamma_device_buf.GetDeviceBuffer(),
+                                    beta_device_buf.GetDeviceBuffer(),
+                                    y_device_buf.GetDeviceBuffer(),
+#ifdef SAVE_MEAN_INV_STD
+                                    save_mean_device_buf.GetDeviceBuffer(),
+                                    save_inv_std_device_buf.GetDeviceBuffer(),
+#else
+                                    nullptr,
+                                    nullptr,
+#endif
+                                    Swish{});
 
     auto invoker_ptr = op_ptr->MakeInvokerPointer();
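A note on the groupnorm layout above: with an NHWGC input reduced over {H, W, C}, the saved statistics have one value per (batch, group) pair, so the example sizes the save buffers as N * G elements and describes them with strides {G, 1}, i.e. a packed row-major (N, G) buffer. A small sketch of the host-side indexing this implies (stat_at is a hypothetical helper, not a CK API):

    // Read one per-(n, g) statistic from the packed (N, G) buffer copied
    // back from save_mean_device_buf / save_inv_std_device_buf.
    inline float stat_at(const float* stats, int n, int g, int G)
    {
        return stats[n * G + g]; // strides {G, 1}: row-major over (N, G)
    }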
@@ -114,12 +114,15 @@ void host_gemm_layernorm(Tensor<HDataType>& h_m_n,
                                                          BetaDataType,
                                                          HDataType,
+                                                         AccDataType,
                                                          AccDataType,
                                                          HElementOp,
                                                          2,
                                                          1>;
 
     Tensor<EMeanVarDataType> e_m_n(HostTensorDescriptor{M, N});
     Tensor<AccDataType> c_m_n(HostTensorDescriptor{M, N});
+    Tensor<AccDataType> save_mean({M});
+    Tensor<AccDataType> save_inv_std({M});
 
     auto ref_gemm = ReferenceGemm{};
     auto ref_gemm_invoker = ref_gemm.MakeInvoker();
@@ -145,7 +148,7 @@ void host_gemm_layernorm(Tensor<HDataType>& h_m_n,
     auto ref_layernorm_invoker = ref_layernorm.MakeInvoker();
 
     auto ref_layernorm_argument = ref_layernorm.MakeArgument(
-        e_m_n, gamma_n, beta_n, h_m_n, h_element_op, {M, N}, {1}, epsilon);
+        e_m_n, gamma_n, beta_n, h_m_n, save_mean, save_inv_std, h_element_op, {M, N}, {1}, epsilon);
     ref_layernorm_invoker.Run(ref_layernorm_argument);
 }
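The host references above now produce the same statistics the kernels write. For reference, the quantities being saved are the standard layernorm moments: for each invariant index m, mean_m = (1/N) * sum_n x[m][n], var_m = (1/N) * sum_n (x[m][n] - mean_m)^2, and the stored value is the inverse standard deviation inv_std_m = 1 / sqrt(var_m + epsilon). A minimal host-side sketch with plain buffers instead of CK's Tensor type (row_mean_inv_std is a hypothetical helper, shown only to pin down the math):

    #include <cmath>

    // Naive per-row mean / inverse-std over an M x N row-major buffer.
    // Worked example: x = {1, 2, 3, 4}, eps = 0 gives mean = 2.5,
    // var = 1.25, inv_std = 1 / sqrt(1.25) ~= 0.8944.
    void row_mean_inv_std(const float* x, int M, int N, double eps,
                          float* save_mean, float* save_inv_std)
    {
        for(int m = 0; m < M; ++m)
        {
            double sum = 0;
            for(int n = 0; n < N; ++n)
                sum += x[m * N + n];
            const double mean = sum / N;

            double sq_sum = 0;
            for(int n = 0; n < N; ++n)
                sq_sum += (x[m * N + n] - mean) * (x[m * N + n] - mean);
            const double var = sq_sum / N;

            save_mean[m]    = static_cast<float>(mean);
            save_inv_std[m] = static_cast<float>(1.0 / std::sqrt(var + eps));
        }
    }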
@@ -3,12 +3,15 @@
 
 #include "common.hpp"
 
 using XDataType = ck::half_t;
 using GammaDataType = ck::half_t;
 using BetaDataType = ck::half_t;
 using YDataType = ck::half_t;
+using SaveMeanInvStdDataType = float;
 using ComputeDataType = float;
 using PassThrough = ck::tensor_operation::element_wise::PassThrough;
+
+#define SAVE_MEAN_INV_STD
 
 constexpr int Rank = 2;
 constexpr int NumReduceDim = 1;
@@ -19,6 +22,7 @@ using DeviceInstance =
                                                           BetaDataType,
                                                           ComputeDataType,
                                                           YDataType,
+                                                          SaveMeanInvStdDataType,
                                                           PassThrough,
                                                           Rank,
                                                           NumReduceDim,
@@ -33,7 +37,8 @@ using DeviceInstance =
                                                           8, // GammaScalarPerVector
                                                           1, // BetaVecDim (0=M, 1=K)
                                                           8, // BetaScalarPerVector
-                                                          8>; // OutScalarPerVector
+                                                          8, // YScalarPerVector
+                                                          1>; // SaveMeanInvStdScalarPerVector
 
 #include "run_layernorm_example.inc"
 
 int main() { return run_groupnorm_example<DeviceInstance>(); }
@@ -3,12 +3,15 @@
 
 #include "common.hpp"
 
 using XDataType = ck::half_t;
 using GammaDataType = ck::half_t;
 using BetaDataType = ck::half_t;
 using YDataType = ck::half_t;
+using SaveMeanInvStdDataType = float;
 using ComputeDataType = float;
 using PassThrough = ck::tensor_operation::element_wise::PassThrough;
+
+#define SAVE_MEAN_INV_STD
 
 constexpr int Rank = 2;
 constexpr int NumReduceDim = 1;
@@ -19,6 +22,7 @@ using DeviceInstance =
                                                           BetaDataType,
                                                           ComputeDataType,
                                                           YDataType,
+                                                          SaveMeanInvStdDataType,
                                                           PassThrough,
                                                           Rank,
                                                           NumReduceDim,
@@ -33,7 +37,8 @@ using DeviceInstance =
                                                           8, // GammaScalarPerVector
                                                           1, // BetaVecDim (0=M, 1=K)
                                                           8, // BetaScalarPerVector
-                                                          8>; // YScalarPerVector
+                                                          8, // YScalarPerVector
+                                                          1>; // SaveMeanInvStdScalarPerVector
 
 #include "run_layernorm_example.inc"
@@ -10,22 +10,13 @@ int run_groupnorm_example()
     ck::index_t M      = 1024;
     ck::index_t N      = 1024;
     ck::index_t Stride = N;
 
-    auto f_host_tensor_descriptor1d = [](std::size_t len, std::size_t stride) {
-        return HostTensorDescriptor({len}, {stride});
-    };
-
-    auto f_host_tensor_descriptor2d = [](std::size_t row, std::size_t col, std::size_t stride) {
-        using namespace ck::literals;
-
-        return HostTensorDescriptor({row, col}, {stride, 1_uz});
-    };
-
-    Tensor<XDataType> x(f_host_tensor_descriptor2d(M, N, Stride));
-    Tensor<GammaDataType> gamma(f_host_tensor_descriptor1d(N, 1));
-    Tensor<BetaDataType> beta(f_host_tensor_descriptor1d(N, 1));
-    Tensor<YDataType> y(f_host_tensor_descriptor2d(M, N, Stride));
+    Tensor<XDataType> x({M, N});
+    Tensor<GammaDataType> gamma({N});
+    Tensor<BetaDataType> beta({N});
+    Tensor<YDataType> y({M, N});
+    Tensor<SaveMeanInvStdDataType> save_mean({M});
+    Tensor<SaveMeanInvStdDataType> save_inv_std({M});
 
     x.GenerateTensorValue(GeneratorTensor_3<XDataType>{0.0, 1.0});
     gamma.GenerateTensorValue(GeneratorTensor_3<GammaDataType>{0.0, 1.0});
@@ -35,6 +26,11 @@ int run_groupnorm_example()
     DeviceMem gamma_dev(sizeof(GammaDataType) * gamma.mDesc.GetElementSpaceSize());
     DeviceMem beta_dev(sizeof(BetaDataType) * beta.mDesc.GetElementSpaceSize());
     DeviceMem y_dev(sizeof(YDataType) * y.mDesc.GetElementSpaceSize());
+#ifdef SAVE_MEAN_INV_STD
+    DeviceMem save_mean_dev(sizeof(SaveMeanInvStdDataType) * save_mean.mDesc.GetElementSpaceSize());
+    DeviceMem save_inv_std_dev(sizeof(SaveMeanInvStdDataType) *
+                               save_inv_std.mDesc.GetElementSpaceSize());
+#endif
 
     x_dev.ToDevice(x.mData.data());
     gamma_dev.ToDevice(gamma.mData.data());
@@ -47,14 +43,23 @@ int run_groupnorm_example()
         {0, 1},
         {0, 1},
         std::vector<ck::index_t>{y.mDesc.GetStrides().begin(), y.mDesc.GetStrides().end()},
+        std::vector<ck::index_t>{save_mean.mDesc.GetStrides().begin(),
+                                 save_mean.mDesc.GetStrides().end()},
+        std::vector<ck::index_t>{save_mean.mDesc.GetStrides().begin(),
+                                 save_mean.mDesc.GetStrides().end()},
        {1},
         1e-4,
         x_dev.GetDeviceBuffer(),
         gamma_dev.GetDeviceBuffer(),
         beta_dev.GetDeviceBuffer(),
         y_dev.GetDeviceBuffer(),
+#ifdef SAVE_MEAN_INV_STD
+        save_mean_dev.GetDeviceBuffer(),
+        save_inv_std_dev.GetDeviceBuffer(),
+#else
         nullptr,
         nullptr,
+#endif
         PassThrough{});
 
     if(!device_instance.IsSupportedArgument(argument_ptr.get()))
@@ -72,24 +77,45 @@ int run_groupnorm_example()
     bool pass = true;
     {
-        Tensor<YDataType> host_y(f_host_tensor_descriptor2d(M, N, Stride));
-        using ReferenceInstance = ck::tensor_operation::host::ReferenceLayernorm<XDataType,
-                                                                                 GammaDataType,
-                                                                                 BetaDataType,
-                                                                                 YDataType,
-                                                                                 ComputeDataType,
-                                                                                 PassThrough,
-                                                                                 Rank,
-                                                                                 NumReduceDim>;
+        Tensor<YDataType> host_y({M, N});
+        Tensor<SaveMeanInvStdDataType> host_save_mean({M});
+        Tensor<SaveMeanInvStdDataType> host_save_inv_std({M});
+
+        using ReferenceInstance =
+            ck::tensor_operation::host::ReferenceLayernorm<XDataType,
+                                                           GammaDataType,
+                                                           BetaDataType,
+                                                           YDataType,
+                                                           SaveMeanInvStdDataType,
+                                                           ComputeDataType,
+                                                           PassThrough,
+                                                           Rank,
+                                                           NumReduceDim>;
 
         ReferenceInstance ref;
-        auto ref_argument =
-            ref.MakeArgument(x, gamma, beta, host_y, PassThrough{}, {M, N}, {1}, 1e-4);
-        auto ref_invoker = ref.MakeInvoker();
+        auto ref_argument = ref.MakeArgument(x,
+                                             gamma,
+                                             beta,
+                                             host_y,
+                                             host_save_mean,
+                                             host_save_inv_std,
+                                             PassThrough{},
+                                             {M, N},
+                                             {1},
+                                             1e-4);
+        auto ref_invoker = ref.MakeInvoker();
         ref_invoker.Run(ref_argument);
 
         y_dev.FromDevice(y.mData.data());
-        pass &= ck::utils::check_err(y, host_y, "Error: Incorrect results", 1e-3, 1e-3);
+        pass &= ck::utils::check_err(y, host_y, "Error: Incorrect results (y)", 1e-3, 1e-3);
+#ifdef SAVE_MEAN_INV_STD
+        save_mean_dev.FromDevice(save_mean.mData.data());
+        save_inv_std_dev.FromDevice(save_inv_std.mData.data());
+
+        pass &= ck::utils::check_err(
+            save_mean, host_save_mean, "Error: Incorrect results (mean)", 1e-3, 1e-3);
+        pass &= ck::utils::check_err(
+            save_inv_std, host_save_inv_std, "Error: Incorrect results (inv_std)", 1e-3, 1e-3);
+#endif
     }
 
     return (pass ? 0 : 1);
@@ -6,11 +6,14 @@
 constexpr int Rank = 5;
 constexpr int NumReduceDim = 3;
 
 using XDataType = ck::half_t;
 using GammaDataType = ck::half_t;
 using BetaDataType = ck::half_t;
 using YDataType = ck::half_t;
+using SaveMeanInvStdDataType = float;
 using ComputeDataType = float;
+
+#define SAVE_MEAN_INV_STD
 
 struct YElementOp
 {
@@ -39,6 +42,7 @@ using DeviceInstance =
                                                       BetaDataType,
                                                       ComputeDataType,
                                                       YDataType,
+                                                      SaveMeanInvStdDataType,
                                                       YElementOp,
                                                       Rank,
                                                       NumReduceDim,
@@ -53,7 +57,8 @@ using DeviceInstance =
                                                       2, // GammaScalarPerVector
                                                       1, // BetaVecDim (0=M, 1=K)
                                                       2, // BetaScalarPerVector
-                                                      2>; // OutScalarPerVector
+                                                      2, // YScalarPerVector
+                                                      1>; // SaveMeanInvStdScalarPerVector
 
 #include "run_groupnorm_example.inc"
@@ -6,12 +6,15 @@
 constexpr int Rank = 5;
 constexpr int NumReduceDim = 3;
 
 using XDataType = ck::half_t;
 using GammaDataType = ck::half_t;
 using BetaDataType = ck::half_t;
 using YDataType = ck::half_t;
+using SaveMeanInvStdDataType = float;
 using ComputeDataType = float;
 using YElementOp = ck::tensor_operation::element_wise::Swish;
+
+#define SAVE_MEAN_INV_STD
 
 using DeviceInstance =
     ck::tensor_operation::device::DeviceNormalizationSplitKImpl<XDataType,
@@ -19,6 +22,7 @@ using DeviceInstance =
                                                                 BetaDataType,
                                                                 ComputeDataType,
                                                                 YDataType,
+                                                                SaveMeanInvStdDataType,
                                                                 YElementOp,
                                                                 Rank,
                                                                 NumReduceDim,
@@ -33,7 +37,8 @@ using DeviceInstance =
                                                                 2, // GammaScalarPerVector
                                                                 1, // BetaVecDim (0=M, 1=K)
                                                                 2, // BetaScalarPerVector
-                                                                2>; // OutScalarPerVector
+                                                                2, // YScalarPerVector
+                                                                1>; // SaveMeanInvStdScalarPerVector
 
 #include "run_groupnorm_example.inc"
@@ -6,12 +6,15 @@
 constexpr int Rank = 5;
 constexpr int NumReduceDim = 3;
 
 using XDataType = ck::half_t;
 using GammaDataType = ck::half_t;
 using BetaDataType = ck::half_t;
 using YDataType = ck::half_t;
+using SaveMeanInvStdDataType = float;
 using ComputeDataType = float;
 using YElementOp = ck::tensor_operation::element_wise::Swish;
+
+#define SAVE_MEAN_INV_STD
 
 using DeviceInstance =
     ck::tensor_operation::device::DeviceNormalizationImpl<XDataType,
@@ -19,6 +22,7 @@ using DeviceInstance =
                                                           BetaDataType,
                                                           ComputeDataType,
                                                           YDataType,
+                                                          SaveMeanInvStdDataType,
                                                           YElementOp,
                                                           Rank,
                                                           NumReduceDim,
@@ -33,7 +37,8 @@ using DeviceInstance =
                                                           2, // GammaScalarPerVector
                                                           1, // BetaVecDim (0=M, 1=K)
                                                           2, // BetaScalarPerVector
-                                                          2>; // OutScalarPerVector
+                                                          2, // YScalarPerVector
+                                                          1>; // SaveMeanInvStdScalarPerVector
 
 #include "run_groupnorm_example.inc"
@@ -34,6 +34,8 @@ int run_groupnorm_example(int argc, char* argv[])
     Tensor<YDataType> y({N, H, W, G, C});
     Tensor<GammaDataType> gamma({G, C});
     Tensor<BetaDataType> beta({G, C});
+    Tensor<SaveMeanInvStdDataType> save_mean({N, G});
+    Tensor<SaveMeanInvStdDataType> save_inv_std({N, G});
 
     ck::utils::FillUniformDistribution<XDataType>{0.f, 1.f}(x);
     ck::utils::FillUniformDistribution<GammaDataType>{0.f, 1.f}(gamma);
@@ -43,6 +45,11 @@ int run_groupnorm_example(int argc, char* argv[])
     DeviceMem gamma_dev(sizeof(GammaDataType) * gamma.mDesc.GetElementSpaceSize());
     DeviceMem beta_dev(sizeof(BetaDataType) * beta.mDesc.GetElementSpaceSize());
     DeviceMem y_dev(sizeof(YDataType) * y.mDesc.GetElementSpaceSize());
+#ifdef SAVE_MEAN_INV_STD
+    DeviceMem save_mean_dev(sizeof(SaveMeanInvStdDataType) * save_mean.mDesc.GetElementSpaceSize());
+    DeviceMem save_inv_std_dev(sizeof(SaveMeanInvStdDataType) *
+                               save_inv_std.mDesc.GetElementSpaceSize());
+#endif
 
     x_dev.ToDevice(x.mData.data());
     gamma_dev.ToDevice(gamma.mData.data());
@@ -57,14 +64,23 @@ int run_groupnorm_example(int argc, char* argv[])
         {0, 0, 0, C, 1},
         {0, 0, 0, C, 1},
         std::vector<ck::index_t>{y.mDesc.GetStrides().begin(), y.mDesc.GetStrides().end()},
+        std::vector<ck::index_t>{save_mean.mDesc.GetStrides().begin(),
+                                 save_mean.mDesc.GetStrides().end()},
+        std::vector<ck::index_t>{save_mean.mDesc.GetStrides().begin(),
+                                 save_mean.mDesc.GetStrides().end()},
         {1, 2, 4}, // reduction dimension: [H, W, C]
         1e-6,
         x_dev.GetDeviceBuffer(),
         gamma_dev.GetDeviceBuffer(),
         beta_dev.GetDeviceBuffer(),
         y_dev.GetDeviceBuffer(),
+#ifdef SAVE_MEAN_INV_STD
+        save_mean_dev.GetDeviceBuffer(),
+        save_inv_std_dev.GetDeviceBuffer(),
+#else
         nullptr,
         nullptr,
+#endif
         y_element_op);
 
     if(!device_instance.IsSupportedArgument(argument_ptr.get()))
@@ -92,21 +108,40 @@ int run_groupnorm_example(int argc, char* argv[])
     bool pass = true;
     {
         Tensor<YDataType> host_y({N, H, W, G, C});
-        using ReferenceInstance = ck::tensor_operation::host::ReferenceGroupnorm<XDataType,
-                                                                                 GammaDataType,
-                                                                                 BetaDataType,
-                                                                                 YDataType,
-                                                                                 ComputeDataType,
-                                                                                 YElementOp>;
+        Tensor<SaveMeanInvStdDataType> host_save_mean(HostTensorDescriptor{N, G});
+        Tensor<SaveMeanInvStdDataType> host_save_inv_std(HostTensorDescriptor{N, G});
+
+        using ReferenceInstance =
+            ck::tensor_operation::host::ReferenceGroupnorm<XDataType,
+                                                           GammaDataType,
+                                                           BetaDataType,
+                                                           YDataType,
+                                                           SaveMeanInvStdDataType,
+                                                           ComputeDataType,
+                                                           YElementOp>;
 
         ReferenceInstance ref;
-        auto ref_argument =
-            ref.MakeArgument(x, gamma, beta, host_y, y_element_op, {N, H, W, G, C}, 1e-6);
-        auto ref_invoker = ref.MakeInvoker();
+        auto ref_argument = ref.MakeArgument(x,
+                                             gamma,
+                                             beta,
+                                             host_y,
+                                             host_save_mean,
+                                             host_save_inv_std,
+                                             y_element_op,
+                                             {N, H, W, G, C},
+                                             1e-6);
+        auto ref_invoker = ref.MakeInvoker();
         ref_invoker.Run(ref_argument);
 
         y_dev.FromDevice(y.mData.data());
         pass &= ck::utils::check_err(y, host_y, "Error: Incorrect results", 1e-3, 1e-3);
+#ifdef SAVE_MEAN_INV_STD
+        save_mean_dev.FromDevice(save_mean.mData.data());
+        save_inv_std_dev.FromDevice(save_inv_std.mData.data());
+
+        pass &= ck::utils::check_err(
+            save_mean, host_save_mean, "Error: Incorrect results (mean)", 1e-3, 1e-3);
+        pass &= ck::utils::check_err(
+            save_inv_std, host_save_inv_std, "Error: Incorrect results (inv_std)", 1e-3, 1e-3);
+#endif
     }
 
     return (pass ? 0 : 1);
@@ -167,20 +167,31 @@ int main()
                                  XElementwiseOperation>(x, a, b, mn, XElementwiseOperation{});
 
     Tensor<YDataType> host_y(f_host_tensor_descriptor2d(M, N, Stride));
+    Tensor<AccDataType> host_save_mean({M});
+    Tensor<AccDataType> host_save_inv_std({M});
     using ReferenceInstance =
         ck::tensor_operation::host::ReferenceLayernorm<XDataType,
                                                        GammaDataType,
                                                        BetaDataType,
                                                        YDataType,
+                                                       AccDataType,
                                                        AccDataType,
                                                        YElementwiseOperation,
                                                        Rank,
                                                        NumReduceDim>;
 
     ReferenceInstance ref;
-    auto ref_argument =
-        ref.MakeArgument(x, gamma, beta, host_y, YElementwiseOperation{}, {M, N}, {1}, 1e-4);
-    auto ref_invoker = ref.MakeInvoker();
+    auto ref_argument = ref.MakeArgument(x,
+                                         gamma,
+                                         beta,
+                                         host_y,
+                                         host_save_mean,
+                                         host_save_inv_std,
+                                         YElementwiseOperation{},
+                                         {M, N},
+                                         {1},
+                                         1e-4);
+    auto ref_invoker = ref.MakeInvoker();
     ref_invoker.Run(ref_argument);
 
     y_dev.FromDevice(y.mData.data());
@@ -14,8 +14,8 @@ namespace device {
 template <typename XDataType,
           typename GammaDataType,
           typename BetaDataType,
-          typename ComputeDataType,
           typename YDataType,
+          typename SaveMeanInvStdDataType,
           typename YElementwiseOperation,
           index_t Rank,
           index_t NumReduceDim>
@@ -27,6 +27,8 @@ struct DeviceNormalization : public BaseOperator
                         const std::vector<index_t> gammaStrides,
                         const std::vector<index_t> betaStrides,
                         const std::vector<index_t> yStrides,
+                        const std::vector<index_t> saveMeanStrides,
+                        const std::vector<index_t> saveInvStdStrides,
                         const std::vector<index_t> reduceDims,
                         double epsilon,
                         const void* p_x,
@@ -43,16 +45,16 @@ struct DeviceNormalization : public BaseOperator
 template <typename XDataType,
           typename GammaDataType,
           typename BetaDataType,
-          typename ComputeDataType,
           typename YDataType,
+          typename SaveMeanInvStdDataType,
          typename YElementwiseOperation,
          index_t Rank,
          index_t NumReduceDim>
 using DeviceNormalizationPtr = std::unique_ptr<DeviceNormalization<XDataType,
                                                                    GammaDataType,
                                                                    BetaDataType,
-                                                                   ComputeDataType,
                                                                    YDataType,
+                                                                   SaveMeanInvStdDataType,
                                                                    YElementwiseOperation,
                                                                    Rank,
                                                                    NumReduceDim>>;
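The net effect of the interface change: ComputeDataType is no longer part of the abstract DeviceNormalization (it remains a template parameter of the concrete implementations), and SaveMeanInvStdDataType takes a slot after YDataType. A sketch of how a client now names a concrete interface type, mirroring the type choices in the layernorm client example earlier on this page:

    // Instantiating the updated abstract interface (types from the example).
    using DeviceOp = ck::tensor_operation::device::DeviceNormalization<
        ck::half_t, // XDataType
        ck::half_t, // GammaDataType
        ck::half_t, // BetaDataType
        ck::half_t, // YDataType
        float,      // SaveMeanInvStdDataType (new in this commit)
        ck::tensor_operation::element_wise::PassThrough,
        2,          // Rank
        1>;         // NumReduceDim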
@@ -28,6 +28,7 @@ template <typename XDataType,
           typename BetaDataType,
           typename ComputeDataType,
           typename YDataType,
+          typename SaveMeanInvStdDataType,
           typename YElementwiseOperation,
           index_t Rank,
           index_t NumReduceDim,
@@ -43,12 +44,13 @@ template <typename XDataType,
           index_t BetaSrcVectorDim,
           index_t BetaSrcVectorSize,
           index_t YDstVectorSize,
+          index_t SaveMeanInvStdDstVectorSize,
           bool UseWelford = true>
 struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,
                                                             GammaDataType,
                                                             BetaDataType,
-                                                            ComputeDataType,
                                                             YDataType,
+                                                            SaveMeanInvStdDataType,
                                                             YElementwiseOperation,
                                                             Rank,
                                                             NumReduceDim>
@@ -64,18 +66,24 @@ struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,
                       (BetaSrcVectorDim == 1 && KThreadSliceSize % BetaSrcVectorSize == 0)),
                   "Invalid thread slice sizes and/or beta vector sizes configuration, please check!");
 
+    static_assert(MThreadSliceSize % SaveMeanInvStdDstVectorSize == 0,
+                  "Invalid thread slice sizes and/or save mean and inverse std vector sizes "
+                  "configuration, please check!");
+
     using PassThrough = tensor_operation::element_wise::PassThrough;
 
+    static constexpr index_t NumInvariantDim = Rank - NumReduceDim;
     static constexpr index_t M_BlockTileSize = MThreadClusterSize * MThreadSliceSize;
     static constexpr index_t K_BlockTileSize = KThreadClusterSize * KThreadSliceSize;
 
+    static constexpr bool reduceAllDim = (NumInvariantDim == 0);
+    static_assert(!reduceAllDim); // TODO
+
     static auto MakeSrc2dDescriptor(const std::vector<index_t>& inLengths,
                                     const std::vector<index_t>& inStrides,
                                     int numBlockTileIteration)
     {
-        constexpr index_t NumInvariantDim  = Rank - NumReduceDim;
         static constexpr index_t numSrcDim = Rank;
-        static constexpr bool reduceAllDim = (NumInvariantDim == 0);
 
         const auto tupleSrcLengths = make_tuple_from_array(inLengths, Number<numSrcDim>{});
         const auto tupleSrcStrides = make_tuple_from_array(inStrides, Number<numSrcDim>{});
@@ -133,7 +141,37 @@ struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,
         return (in_grid_desc_m_k_padded);
     };
 
+    static auto MakeSaveMeanInvStdDescriptor_M(const std::vector<index_t>& lengths,
+                                               const std::vector<index_t>& strides)
+    {
+        using InvariantDims = typename arithmetic_sequence_gen<0, NumInvariantDim, 1>::type;
+
+        const auto tupleSrcLengths = make_tuple_from_array_and_index_seq(lengths, InvariantDims{});
+        const auto tupleSrcStrides = make_tuple_from_array_and_index_seq(strides, InvariantDims{});
+
+        const auto desc = make_naive_tensor_descriptor(tupleSrcLengths, tupleSrcStrides);
+
+        const auto grid_desc_m =
+            transform_tensor_descriptor(desc,
+                                        make_tuple(make_merge_transform(tupleSrcLengths)),
+                                        make_tuple(InvariantDims{}),
+                                        make_tuple(Sequence<0>{}));
+
+        const auto invariantLength = grid_desc_m.GetLength(Number<0>{});
+        const auto pad_M =
+            math::integer_least_multiple(invariantLength, M_BlockTileSize) - invariantLength;
+
+        auto grid_desc_m_padded = transform_tensor_descriptor(
+            grid_desc_m,
+            make_tuple(make_right_pad_transform(invariantLength, pad_M)),
+            make_tuple(Sequence<0>{}),
+            make_tuple(Sequence<0>{}));
+
+        return grid_desc_m_padded;
+    }
+
     using GridDesc_M_K = decltype(MakeSrc2dDescriptor({1}, {1}, 1));
+    using GridDesc_M   = decltype(MakeSaveMeanInvStdDescriptor_M({1}, {1}));
 
     struct Argument : public BaseArgument
     {
@@ -142,17 +180,23 @@ struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,
                  const std::vector<index_t> gammaStrides,
                  const std::vector<index_t> betaStrides,
                  const std::vector<index_t> yStrides,
+                 const std::vector<index_t> saveMeanStrides,
+                 const std::vector<index_t> saveInvStdStrides,
                  const std::vector<index_t> reduceDims,
                  YElementwiseOperation y_elementwise_op,
                  double epsilon,
                  const XDataType* p_x,
                  const GammaDataType* p_gamma,
                  const BetaDataType* p_beta,
-                 YDataType* p_y)
+                 YDataType* p_y,
+                 SaveMeanInvStdDataType* p_saveMean,
+                 SaveMeanInvStdDataType* p_saveInvStd)
             : p_x_(p_x),
              p_gamma_(p_gamma),
              p_beta_(p_beta),
              p_y_(p_y),
+             p_saveMean_(p_saveMean),
+             p_saveInvStd_(p_saveInvStd),
              y_elementwise_op_(y_elementwise_op)
        {
            epsilon_ = static_cast<ComputeDataType>(epsilon);
@@ -162,16 +206,14 @@ struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,
            yStrides_     = shuffle_tensor_dimensions<Rank, NumReduceDim>(yStrides, reduceDims);
            gammaStrides_ = shuffle_tensor_dimensions<Rank, NumReduceDim>(gammaStrides, reduceDims);
            betaStrides_  = shuffle_tensor_dimensions<Rank, NumReduceDim>(betaStrides, reduceDims);
+            saveMeanStrides_   = saveMeanStrides;
+            saveInvStdStrides_ = saveInvStdStrides;
 
-            long_index_t invariant_length;
-            long_index_t reduce_length;
+            std::tie(MRaw_, KRaw_) = get_2d_lengths<Rank, NumReduceDim>(Lengths_);
 
-            std::tie(invariant_length, reduce_length) =
-                get_2d_lengths<Rank, NumReduceDim>(Lengths_);
-
-            numBlockTileIteration_ = math::integer_divide_ceil(reduce_length, K_BlockTileSize);
-            gridSize_ = math::integer_divide_ceil(invariant_length, M_BlockTileSize);
+            numBlockTileIteration_ = math::integer_divide_ceil(KRaw_, K_BlockTileSize);
+            gridSize_              = math::integer_divide_ceil(MRaw_, M_BlockTileSize);
 
            x_grid_desc_m_k_ = MakeSrc2dDescriptor(Lengths_, xStrides_, numBlockTileIteration_);
            gamma_grid_desc_m_k_ =
@@ -179,9 +221,16 @@ struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,
            beta_grid_desc_m_k_ =
                MakeSrc2dDescriptor(Lengths_, betaStrides_, numBlockTileIteration_);
            y_grid_desc_m_k_ = MakeSrc2dDescriptor(Lengths_, yStrides_, numBlockTileIteration_);
+            save_mean_grid_desc_m_    = MakeSaveMeanInvStdDescriptor_M(Lengths_, saveMeanStrides);
+            save_inv_std_grid_desc_m_ = MakeSaveMeanInvStdDescriptor_M(Lengths_, saveInvStdStrides);
 
            isSweeponce_ =
                x_grid_desc_m_k_.GetLength(Number<1>{}) <= KThreadClusterSize * KThreadSliceSize;
 
+            if constexpr(NumInvariantDim == 0)
+                invariant_lowest_length_ = 1;
+            else
+                invariant_lowest_length_ = Lengths_[NumInvariantDim - 1];
        }
 
        ComputeDataType epsilon_;
@@ -190,12 +239,16 @@ struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,
        const GammaDataType* p_gamma_;
        const BetaDataType* p_beta_;
        YDataType* p_y_;
+        SaveMeanInvStdDataType* p_saveMean_;
+        SaveMeanInvStdDataType* p_saveInvStd_;
 
        std::vector<index_t> Lengths_;
        std::vector<index_t> xStrides_;
        std::vector<index_t> gammaStrides_;
        std::vector<index_t> betaStrides_;
        std::vector<index_t> yStrides_;
+        std::vector<index_t> saveMeanStrides_;
+        std::vector<index_t> saveInvStdStrides_;
 
        YElementwiseOperation y_elementwise_op_;
@@ -206,7 +259,14 @@ struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,
        GridDesc_M_K gamma_grid_desc_m_k_;
        GridDesc_M_K beta_grid_desc_m_k_;
        GridDesc_M_K y_grid_desc_m_k_;
+        GridDesc_M save_mean_grid_desc_m_;
+        GridDesc_M save_inv_std_grid_desc_m_;
        bool isSweeponce_;
+
+        index_t MRaw_; // invarient length
+        index_t KRaw_; // reduce length
+
+        index_t invariant_lowest_length_;
    };
 
    struct Invoker : public BaseInvoker
@@ -217,9 +277,11 @@ struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,
                                                GammaDataType,
                                                BetaDataType,
                                                YDataType,
+                                               SaveMeanInvStdDataType,
                                                ComputeDataType,
                                                YElementwiseOperation,
                                                GridDesc_M_K,
+                                               GridDesc_M,
                                                BlockSize,
                                                MThreadClusterSize,
                                                KThreadClusterSize,
@@ -233,6 +295,7 @@ struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,
                                                BetaSrcVectorSize,
                                                XYSrcVectorDim,
                                                YDstVectorSize,
+                                               SaveMeanInvStdDstVectorSize,
                                                UseWelford>(arg.isSweeponce_);
 
            float avg_time = 0;
@@ -245,12 +308,16 @@ struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,
                                                arg.gamma_grid_desc_m_k_,
                                                arg.beta_grid_desc_m_k_,
                                                arg.y_grid_desc_m_k_,
+                                                arg.save_mean_grid_desc_m_,
+                                                arg.save_inv_std_grid_desc_m_,
                                                arg.numBlockTileIteration_,
                                                arg.epsilon_,
                                                arg.p_x_,
                                                arg.p_gamma_,
                                                arg.p_beta_,
                                                arg.p_y_,
+                                                arg.p_saveMean_,
+                                                arg.p_saveInvStd_,
                                                arg.y_elementwise_op_);
 
            return (avg_time);
@@ -267,8 +334,6 @@ struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,
    {
        const Argument* p_arg_ = dynamic_cast<const Argument*>(p_arg);
 
-        constexpr index_t NumInvariantDim = Rank - NumReduceDim;
-
        if constexpr(XYSrcVectorDim == 0)
        {
            if constexpr(NumInvariantDim == 0)
@@ -277,13 +342,15 @@ struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,
            }
            else
            {
+                printf("!!!! %d\n", p_arg_->invariant_lowest_length_);
+
                if(p_arg_->xStrides_[NumInvariantDim - 1] != 1)
                    return false;
 
-                if(p_arg_->invariant_lowest_length % XSrcVectorSize != 0)
+                if(p_arg_->invariant_lowest_length_ % XSrcVectorSize != 0)
                    return false;
 
-                if(p_arg_->invariant_lowest_length % YDstVectorSize != 0)
+                if(p_arg_->invariant_lowest_length_ % YDstVectorSize != 0)
                    return false;
            };
        }
@@ -325,7 +392,7 @@ struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,
            if(p_arg_->betaStrides_[NumInvariantDim - 1] != 1)
                return (false);
 
-            if(p_arg_->invariant_lowest_length % BetaSrcVectorSize != 0)
+            if(p_arg_->invariant_lowest_length_ % BetaSrcVectorSize != 0)
                return (false);
        }
        else // if fastest dim is reduced
@@ -337,6 +404,9 @@ struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,
                return (false);
        }
 
+        if(p_arg_->invariant_lowest_length_ % SaveMeanInvStdDstVectorSize != 0)
+            return false;
+
        return true;
    };
@@ -346,6 +416,8 @@ struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,
                        const std::vector<index_t> gammaStrides,
                        const std::vector<index_t> betaStrides,
                        const std::vector<index_t> yStrides,
+                        const std::vector<index_t> saveMeanStrides,
+                        const std::vector<index_t> saveInvStdStrides,
                        const std::vector<index_t> reduceDims,
                        double epsilon,
                        const void* p_x,
@@ -353,27 +425,30 @@ struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,
                        const void* p_beta,
                        void* p_y,
                        void* p_saveMean,
-                        void* p_saveInvVar,
+                        void* p_saveInvStd,
                        YElementwiseOperation y_elementwise_op) override
    {
-        // TODO
-        // Optional cache of the intermediate results (mean and InvVariance) during the
-        // forward pass could speedup in the backward
-        ignore = p_saveMean;
-        ignore = p_saveInvVar;
+        if(lengths.size() != Rank || xStrides.size() != Rank || gammaStrides.size() != Rank ||
+           betaStrides.size() != Rank || yStrides.size() != Rank ||
+           saveMeanStrides.size() != NumInvariantDim || saveInvStdStrides.size() != NumInvariantDim)
+            throw std::runtime_error("dimension is incorrect");
 
        return std::make_unique<Argument>(lengths,
                                          xStrides,
                                          gammaStrides,
                                          betaStrides,
                                          yStrides,
+                                          saveMeanStrides,
+                                          saveInvStdStrides,
                                          reduceDims,
                                          y_elementwise_op,
                                          epsilon,
                                          static_cast<const XDataType*>(p_x),
                                          static_cast<const GammaDataType*>(p_gamma),
                                          static_cast<const BetaDataType*>(p_beta),
-                                          static_cast<YDataType*>(p_y));
+                                          static_cast<YDataType*>(p_y),
+                                          static_cast<SaveMeanInvStdDataType*>(p_saveMean),
+                                          static_cast<SaveMeanInvStdDataType*>(p_saveInvStd));
    };
 
    std::unique_ptr<BaseInvoker> MakeInvokerPointer() override
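One constraint introduced alongside the new outputs is worth flagging: SaveMeanInvStdDstVectorSize must divide MThreadSliceSize (enforced by the static_assert above), and IsSupportedArgument additionally rejects arguments whose lowest invariant length is not a multiple of it. The updated example instances use a vector size of 1, which satisfies both checks trivially. A standalone toy illustration of the compile-time half of the rule (not CK code):

    // The divisibility rule the implementation enforces at compile time.
    template <int MThreadSliceSize, int SaveMeanInvStdDstVectorSize>
    struct SaveVectorCheck
    {
        static_assert(MThreadSliceSize % SaveMeanInvStdDstVectorSize == 0,
                      "save mean/inv_std vector size must divide the M thread slice");
    };

    SaveVectorCheck<8, 1> ok; // compiles: 8 % 1 == 0
    // SaveVectorCheck<8, 3> would fail to compile: 8 % 3 != 0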
@@ -19,7 +19,7 @@
|
||||
namespace ck {
|
||||
template <typename GridwiseWelford,
|
||||
typename XDataType,
|
||||
typename MeanVarDataType,
|
||||
typename WorkspaceMeanVarDataType,
|
||||
typename ComputeDataType,
|
||||
typename XGridDesc_M_K,
|
||||
typename MeanVarGridDesc_M_KBlock>
|
||||
@@ -28,8 +28,8 @@ kernel_normalizationSplitK1st(const XGridDesc_M_K x_grid_desc_m_k,
|
||||
const MeanVarGridDesc_M_KBlock mean_var_grid_desc_m_kblock,
|
||||
index_t num_k_block_tile_iteration,
|
||||
const XDataType* const __restrict__ p_x_global,
|
||||
MeanVarDataType* const __restrict__ p_welford_mean,
|
||||
MeanVarDataType* const __restrict__ p_welford_variance,
|
||||
WorkspaceMeanVarDataType* const __restrict__ p_welford_mean,
|
||||
WorkspaceMeanVarDataType* const __restrict__ p_welford_variance,
|
||||
int32_t* const __restrict__ p_welford_count)
|
||||
{
|
||||
GridwiseWelford::Run(x_grid_desc_m_k,
|
||||
@@ -42,16 +42,18 @@ kernel_normalizationSplitK1st(const XGridDesc_M_K x_grid_desc_m_k,
|
||||
};
|
||||
|
||||
template <typename GridwiseWelfordNormalization,
|
||||
typename MeanVarDataType,
|
||||
typename WorkspaceMeanVarDataType,
|
||||
typename XDataType,
|
||||
typename GammaDataType,
|
||||
typename BetaDataType,
|
||||
typename YDataType,
|
||||
typename SaveMeanInvStdDataType,
|
||||
typename ComputeDataType,
|
||||
typename YElementwiseOperation,
|
||||
typename MeanVarGridDesc_M_KBlock,
|
||||
typename CountGridDesc_M_KBlock,
|
||||
typename XYGammaBetaGridDesc_M_K>
|
||||
typename XYGammaBetaGridDesc_M_K,
|
||||
typename SaveMeanInvStdGridDesc_M>
|
||||
__global__ void
|
||||
kernel_normalizationSplitK2nd(const MeanVarGridDesc_M_KBlock mean_var_grid_desc_m_kblock,
|
||||
const CountGridDesc_M_KBlock count_grid_desc_m_kblock,
|
||||
@@ -59,17 +61,21 @@ kernel_normalizationSplitK2nd(const MeanVarGridDesc_M_KBlock mean_var_grid_desc_
|
||||
const XYGammaBetaGridDesc_M_K gamma_grid_desc_m_k,
|
||||
const XYGammaBetaGridDesc_M_K beta_grid_desc_m_k,
|
||||
const XYGammaBetaGridDesc_M_K y_grid_desc_m_k,
|
||||
const SaveMeanInvStdGridDesc_M save_mean_grid_desc_m,
|
||||
const SaveMeanInvStdGridDesc_M save_inv_std_grid_desc_m,
|
||||
index_t num_k_mean_var_count_iteration,
|
||||
index_t num_k_block_tile_iteration,
|
||||
index_t k_grid_size,
|
||||
ComputeDataType epsilon,
|
||||
const MeanVarDataType* const p_mean_global,
|
||||
const MeanVarDataType* const p_variance_global,
|
||||
const WorkspaceMeanVarDataType* const p_mean_global,
|
||||
const WorkspaceMeanVarDataType* const p_variance_global,
|
||||
const int32_t* const p_welford_count_global,
|
||||
const XDataType* const __restrict__ p_x_global,
|
||||
const GammaDataType* const __restrict__ p_gamma_global,
|
||||
const BetaDataType* const __restrict__ p_beta_global,
|
||||
YDataType* const __restrict__ p_y_global,
|
||||
SaveMeanInvStdDataType* const __restrict__ p_save_mean_global,
|
||||
SaveMeanInvStdDataType* const __restrict__ p_save_inv_std_global,
|
||||
const YElementwiseOperation y_elementwise_op)
|
||||
{
|
||||
GridwiseWelfordNormalization::Run(mean_var_grid_desc_m_kblock,
|
||||
@@ -78,6 +84,8 @@ kernel_normalizationSplitK2nd(const MeanVarGridDesc_M_KBlock mean_var_grid_desc_
|
||||
gamma_grid_desc_m_k,
|
||||
beta_grid_desc_m_k,
|
||||
y_grid_desc_m_k,
|
||||
save_mean_grid_desc_m,
|
||||
save_inv_std_grid_desc_m,
|
||||
num_k_mean_var_count_iteration,
|
||||
num_k_block_tile_iteration,
|
||||
k_grid_size,
|
||||
@@ -89,6 +97,8 @@ kernel_normalizationSplitK2nd(const MeanVarGridDesc_M_KBlock mean_var_grid_desc_
|
||||
p_gamma_global,
|
||||
p_beta_global,
|
||||
p_y_global,
|
||||
p_save_mean_global,
|
||||
p_save_inv_std_global,
|
||||
y_elementwise_op);
|
||||
};
|
||||
} // namespace ck
|
||||
@@ -107,6 +117,7 @@ template <typename XDataType,
|
||||
typename BetaDataType,
|
||||
typename ComputeDataType,
|
||||
typename YDataType,
|
||||
typename SaveMeanInvStdDataType,
|
||||
typename YElementwiseOperation,
|
||||
index_t Rank,
|
||||
index_t NumReduceDim,
|
||||
@@ -121,17 +132,18 @@ template <typename XDataType,
|
||||
index_t GammaSrcVectorSize,
|
||||
index_t BetaSrcVectorDim,
|
||||
index_t BetaSrcVectorSize,
|
||||
index_t YDstVectorSize>
|
||||
index_t YDstVectorSize,
|
||||
index_t SaveMeanInvStdDstVectorSize>
|
||||
struct DeviceNormalizationSplitKImpl : public DeviceNormalization<XDataType,
|
||||
GammaDataType,
|
||||
BetaDataType,
|
||||
ComputeDataType,
|
||||
YDataType,
|
||||
SaveMeanInvStdDataType,
|
||||
YElementwiseOperation,
|
||||
Rank,
|
||||
NumReduceDim>
|
||||
{
|
||||
using MeanVarDataType = ComputeDataType;
|
||||
using WorkspaceMeanVarDataType = SaveMeanInvStdDataType;
|
||||
|
||||
static_assert(BlockSize == MThreadClusterSize * KThreadClusterSize);
|
||||
static_assert(
|
||||
@@ -144,22 +156,28 @@ struct DeviceNormalizationSplitKImpl : public DeviceNormalization<XDataType,
|
||||
(BetaSrcVectorDim == 1 && KThreadSliceSize % BetaSrcVectorSize == 0)),
|
||||
"Invalid thread slice sizes and/or beta vector sizes configuration, please check!");
|
||||
|
||||
static_assert(MThreadSliceSize % SaveMeanInvStdDstVectorSize == 0,
|
||||
"Invalid thread slice sizes and/or save mean and inverse std vector sizes "
|
||||
"configuration, please check!");
|
||||
|
||||
using PassThrough = tensor_operation::element_wise::PassThrough;
|
||||
|
||||
static constexpr auto I0 = Number<0>{};
|
||||
static constexpr auto I1 = Number<1>{};
|
||||
|
||||
static constexpr index_t NumInvariantDim = Rank - NumReduceDim;
|
||||
static constexpr index_t M_BlockTileSize = MThreadClusterSize * MThreadSliceSize;
|
||||
static constexpr index_t K_BlockTileSize = KThreadClusterSize * KThreadSliceSize;
|
||||
|
||||
static constexpr bool reduceAllDim = (NumInvariantDim == 0);
|
||||
static_assert(!reduceAllDim); // TODO
|
||||
|
||||
static auto MakeSrc2dDescriptor(const std::vector<index_t>& inLengths,
|
||||
const std::vector<index_t>& inStrides,
|
||||
int kBlockSize,
|
||||
int numBlockTileIteration)
|
||||
{
|
||||
constexpr index_t NumInvariantDim = Rank - NumReduceDim;
|
||||
static constexpr index_t numSrcDim = Rank;
|
||||
static constexpr bool reduceAllDim = (NumInvariantDim == 0);
|
||||
|
||||
const auto tupleSrcLengths = make_tuple_from_array(inLengths, Number<numSrcDim>{});
|
||||
const auto tupleSrcStrides = make_tuple_from_array(inStrides, Number<numSrcDim>{});
|
||||
@@ -219,7 +237,7 @@ struct DeviceNormalizationSplitKImpl : public DeviceNormalization<XDataType,
|
||||
};
|
||||
|
||||
template <typename DoPads, index_t MPerTile, index_t KPerTile>
|
||||
static auto MakeMeanVarDescriptor_M_K(index_t M, index_t K)
|
||||
static auto MakeWorkspaceMeanVarDescriptor_M_K(index_t M, index_t K)
|
||||
{
|
||||
const auto grid_desc_m_k =
|
||||
make_naive_tensor_descriptor(make_tuple(M, K), make_tuple(K, I1));
|
||||
@@ -227,26 +245,57 @@ struct DeviceNormalizationSplitKImpl : public DeviceNormalization<XDataType,
|
||||
}
|
||||
|
||||
template <typename DoPads, index_t MPerTile, index_t KPerTile>
|
||||
static auto MakeCountDescriptor_M_K(index_t M, index_t K)
|
||||
static auto MakeWorkspaceCountDescriptor_M_K(index_t M, index_t K)
|
||||
{
|
||||
const auto grid_desc_m_k =
|
||||
make_naive_tensor_descriptor(make_tuple(M, K), make_tuple(I0, I1));
|
||||
return PadTensorDescriptor(grid_desc_m_k, make_tuple(MPerTile, KPerTile), DoPads{});
|
||||
}
|
||||
|
||||
static auto MakeSaveMeanInvStdDescriptor_M(const std::vector<index_t>& lengths,
|
||||
const std::vector<index_t>& strides)
|
||||
{
|
||||
using InvariantDims = typename arithmetic_sequence_gen<0, NumInvariantDim, 1>::type;
|
||||
|
||||
const auto tupleSrcLengths = make_tuple_from_array_and_index_seq(lengths, InvariantDims{});
|
||||
const auto tupleSrcStrides = make_tuple_from_array_and_index_seq(strides, InvariantDims{});
|
||||
|
||||
const auto desc = make_naive_tensor_descriptor(tupleSrcLengths, tupleSrcStrides);
|
||||
|
||||
const auto grid_desc_m =
|
||||
transform_tensor_descriptor(desc,
|
||||
make_tuple(make_merge_transform(tupleSrcLengths)),
|
||||
make_tuple(InvariantDims{}),
|
||||
make_tuple(Sequence<0>{}));
|
||||
|
||||
const auto invariantLength = grid_desc_m.GetLength(Number<0>{});
|
||||
const auto pad_M =
|
||||
math::integer_least_multiple(invariantLength, M_BlockTileSize) - invariantLength;
|
||||
|
||||
auto grid_desc_m_padded = transform_tensor_descriptor(
|
||||
grid_desc_m,
|
||||
make_tuple(make_right_pad_transform(invariantLength, pad_M)),
|
||||
make_tuple(Sequence<0>{}),
|
||||
make_tuple(Sequence<0>{}));
|
||||
|
||||
return grid_desc_m_padded;
|
||||
}
|
||||
|
||||
using SrcGridDesc_M_K = decltype(MakeSrc2dDescriptor({1}, {1}, 1, 1));
|
||||
using Kernel1MeanVarGridDesc_M_KBlock =
|
||||
decltype(MakeMeanVarDescriptor_M_K<Sequence<true, false>, 1, 1>(1, 1));
|
||||
decltype(MakeWorkspaceMeanVarDescriptor_M_K<Sequence<true, false>, 1, 1>(1, 1));
|
||||
|
||||
using Kernel2MeanVarGridDesc_M_KBlock =
|
||||
decltype(MakeMeanVarDescriptor_M_K<Sequence<true, true>, 1, 1>(1, 1));
|
||||
decltype(MakeWorkspaceMeanVarDescriptor_M_K<Sequence<true, true>, 1, 1>(1, 1));
|
||||
|
||||
using Kernel2CountGridDesc_M_KBlock =
|
||||
decltype(MakeCountDescriptor_M_K<Sequence<true, true>, 1, 1>(1, 1));
|
||||
decltype(MakeWorkspaceCountDescriptor_M_K<Sequence<true, true>, 1, 1>(1, 1));
|
||||
|
||||
using SaveMeanInvStdGridDesc_M = decltype(MakeSaveMeanInvStdDescriptor_M({1}, {1}));
|
||||
|
||||
using GridwiseWelford = GridwiseNormalizationSplitK1st<XDataType,
|
||||
ComputeDataType,
|
||||
MeanVarDataType,
|
||||
WorkspaceMeanVarDataType,
|
||||
SrcGridDesc_M_K,
|
||||
Kernel1MeanVarGridDesc_M_KBlock,
|
||||
BlockSize,
|
||||
@@ -258,16 +307,18 @@ struct DeviceNormalizationSplitKImpl : public DeviceNormalization<XDataType,
|
||||
XSrcVectorSize>;
|
||||
|
||||
using GridwiseWelfordNormalization =
|
||||
GridwiseNormalizationSplitK2nd<MeanVarDataType,
|
||||
GridwiseNormalizationSplitK2nd<WorkspaceMeanVarDataType,
|
||||
XDataType,
|
||||
GammaDataType,
|
||||
BetaDataType,
|
||||
YDataType,
|
||||
SaveMeanInvStdDataType,
|
||||
ComputeDataType,
|
||||
YElementwiseOperation,
|
||||
Kernel2MeanVarGridDesc_M_KBlock,
|
||||
Kernel2CountGridDesc_M_KBlock,
|
||||
SrcGridDesc_M_K,
|
||||
SaveMeanInvStdGridDesc_M,
|
||||
BlockSize,
|
||||
MThreadClusterSize,
|
||||
KThreadClusterSize,
|
||||
@@ -280,7 +331,8 @@ struct DeviceNormalizationSplitKImpl : public DeviceNormalization<XDataType,
|
||||
BetaSrcVectorDim,
|
||||
BetaSrcVectorSize,
|
||||
XYVectorDim,
|
||||
YDstVectorSize>;
|
||||
YDstVectorSize,
|
||||
SaveMeanInvStdDstVectorSize>;
|
||||
|
||||
struct Argument : public BaseArgument
|
||||
{
|
||||
@@ -289,17 +341,23 @@ struct DeviceNormalizationSplitKImpl : public DeviceNormalization<XDataType,
|
||||
const std::vector<index_t> gammaStrides,
|
||||
const std::vector<index_t> betaStrides,
|
||||
const std::vector<index_t> yStrides,
|
||||
const std::vector<index_t> saveMeanStrides,
|
||||
const std::vector<index_t> saveInvStdStrides,
|
||||
const std::vector<index_t> reduceDims,
|
||||
YElementwiseOperation y_elementwise_op,
|
||||
double epsilon,
|
                 const XDataType* p_x,
                 const GammaDataType* p_gamma,
                 const BetaDataType* p_beta,
                 YDataType* p_y)
                 YDataType* p_y,
                 SaveMeanInvStdDataType* p_saveMean,
                 SaveMeanInvStdDataType* p_saveInvStd)
            : p_x_(p_x),
              p_gamma_(p_gamma),
              p_beta_(p_beta),
              p_y_(p_y),
              p_saveMean_(p_saveMean),
              p_saveInvStd_(p_saveInvStd),
              p_workspace_mean_{nullptr},
              p_workspace_var_{nullptr},
              p_workspace_count_{nullptr},
@@ -312,6 +370,8 @@ struct DeviceNormalizationSplitKImpl : public DeviceNormalization<XDataType,
            yStrides_ = shuffle_tensor_dimensions<Rank, NumReduceDim>(yStrides, reduceDims);
            gammaStrides_ = shuffle_tensor_dimensions<Rank, NumReduceDim>(gammaStrides, reduceDims);
            betaStrides_ = shuffle_tensor_dimensions<Rank, NumReduceDim>(betaStrides, reduceDims);
            saveMeanStrides_ = saveMeanStrides;
            saveInvStdStrides_ = saveInvStdStrides;

            std::tie(MRaw_, KRaw_) = get_2d_lengths<Rank, NumReduceDim>(Lengths_);

@@ -346,20 +406,28 @@ struct DeviceNormalizationSplitKImpl : public DeviceNormalization<XDataType,
            y_grid_desc_m_k_ =
                MakeSrc2dDescriptor(Lengths_, yStrides_, kGridSize_, numBlockTileIteration_);

            save_mean_grid_desc_m_ = MakeSaveMeanInvStdDescriptor_M(Lengths_, saveMeanStrides);
            save_inv_std_grid_desc_m_ = MakeSaveMeanInvStdDescriptor_M(Lengths_, saveInvStdStrides);

            // We don't need to pad in K dimension for Welford1. Set KPerTile 1.
            kernel1_mean_var_grid_desc_m_kblock_ =
                MakeMeanVarDescriptor_M_K<Sequence<true, false>, M_BlockTileSize, 1>(MRaw_,
                                                                                     kGridSize_);
                MakeWorkspaceMeanVarDescriptor_M_K<Sequence<true, false>, M_BlockTileSize, 1>(
                    MRaw_, kGridSize_);

            kernel2_mean_var_grid_desc_m_kblock_ =
                MakeMeanVarDescriptor_M_K<Sequence<true, true>,
                                          M_BlockTileSize,
                                          K_MeanVarCountBlockTileSize>(MRaw_, kGridSize_);
                MakeWorkspaceMeanVarDescriptor_M_K<Sequence<true, true>,
                                                   M_BlockTileSize,
                                                   K_MeanVarCountBlockTileSize>(MRaw_, kGridSize_);

            kernel2_count_grid_desc_m_kblock_ =
                MakeCountDescriptor_M_K<Sequence<true, true>,
                                        M_BlockTileSize,
                                        K_MeanVarCountBlockTileSize>(MRaw_, kGridSize_);
                MakeWorkspaceCountDescriptor_M_K<Sequence<true, true>,
                                                 M_BlockTileSize,
                                                 K_MeanVarCountBlockTileSize>(MRaw_, kGridSize_);

            if constexpr(NumInvariantDim == 0)
                invariant_lowest_length_ = 1;
            else
                invariant_lowest_length_ = Lengths_[NumInvariantDim - 1];
        }

        ComputeDataType epsilon_;
@@ -368,6 +436,8 @@ struct DeviceNormalizationSplitKImpl : public DeviceNormalization<XDataType,
        const GammaDataType* p_gamma_;
        const BetaDataType* p_beta_;
        YDataType* p_y_;
        SaveMeanInvStdDataType* p_saveMean_;
        SaveMeanInvStdDataType* p_saveInvStd_;
        void* p_workspace_mean_;
        void* p_workspace_var_;
        void* p_workspace_count_;
@@ -377,6 +447,8 @@ struct DeviceNormalizationSplitKImpl : public DeviceNormalization<XDataType,
        std::vector<index_t> gammaStrides_;
        std::vector<index_t> betaStrides_;
        std::vector<index_t> yStrides_;
        std::vector<index_t> saveMeanStrides_;
        std::vector<index_t> saveInvStdStrides_;

        YElementwiseOperation y_elementwise_op_;

@@ -389,6 +461,8 @@ struct DeviceNormalizationSplitKImpl : public DeviceNormalization<XDataType,
        SrcGridDesc_M_K gamma_grid_desc_m_k_;
        SrcGridDesc_M_K beta_grid_desc_m_k_;
        SrcGridDesc_M_K y_grid_desc_m_k_;
        SaveMeanInvStdGridDesc_M save_mean_grid_desc_m_;
        SaveMeanInvStdGridDesc_M save_inv_std_grid_desc_m_;

        Kernel1MeanVarGridDesc_M_KBlock kernel1_mean_var_grid_desc_m_kblock_;
        Kernel2MeanVarGridDesc_M_KBlock kernel2_mean_var_grid_desc_m_kblock_;
@@ -396,6 +470,8 @@ struct DeviceNormalizationSplitKImpl : public DeviceNormalization<XDataType,

        index_t MRaw_; // invariant length
        index_t KRaw_; // reduce length

        index_t invariant_lowest_length_;
    };

    struct Invoker : public BaseInvoker
@@ -408,60 +484,68 @@ struct DeviceNormalizationSplitKImpl : public DeviceNormalization<XDataType,

            auto kernel1 = kernel_normalizationSplitK1st<GridwiseWelford,
                                                         XDataType,
                                                         MeanVarDataType,
                                                         WorkspaceMeanVarDataType,
                                                         ComputeDataType,
                                                         SrcGridDesc_M_K,
                                                         Kernel1MeanVarGridDesc_M_KBlock>;

            auto kernel2 = kernel_normalizationSplitK2nd<GridwiseWelfordNormalization,
                                                         MeanVarDataType,
                                                         WorkspaceMeanVarDataType,
                                                         XDataType,
                                                         GammaDataType,
                                                         BetaDataType,
                                                         YDataType,
                                                         SaveMeanInvStdDataType,
                                                         ComputeDataType,
                                                         YElementwiseOperation,
                                                         Kernel2MeanVarGridDesc_M_KBlock,
                                                         Kernel2CountGridDesc_M_KBlock,
                                                         SrcGridDesc_M_K>;
                                                         SrcGridDesc_M_K,
                                                         SaveMeanInvStdGridDesc_M>;

            float avg_time = 0;
            avg_time += launch_and_time_kernel(stream_config,
                                               kernel1,
                                               dim3(arg.gridSize_),
                                               dim3(BlockSize),
                                               0,
                                               arg.x_grid_desc_m_k_,
                                               arg.kernel1_mean_var_grid_desc_m_kblock_,
                                               arg.numBlockTileIteration_,
                                               arg.p_x_,
                                               static_cast<MeanVarDataType*>(arg.p_workspace_mean_),
                                               static_cast<MeanVarDataType*>(arg.p_workspace_var_),
                                               static_cast<int32_t*>(arg.p_workspace_count_));
            avg_time += launch_and_time_kernel(
                stream_config,
                kernel1,
                dim3(arg.gridSize_),
                dim3(BlockSize),
                0,
                arg.x_grid_desc_m_k_,
                arg.kernel1_mean_var_grid_desc_m_kblock_,
                arg.numBlockTileIteration_,
                arg.p_x_,
                static_cast<WorkspaceMeanVarDataType*>(arg.p_workspace_mean_),
                static_cast<WorkspaceMeanVarDataType*>(arg.p_workspace_var_),
                static_cast<int32_t*>(arg.p_workspace_count_));

            avg_time += launch_and_time_kernel(stream_config,
                                               kernel2,
                                               dim3(arg.gridSize_),
                                               dim3(BlockSize),
                                               0,
                                               arg.kernel2_mean_var_grid_desc_m_kblock_,
                                               arg.kernel2_count_grid_desc_m_kblock_,
                                               arg.x_grid_desc_m_k_,
                                               arg.gamma_grid_desc_m_k_,
                                               arg.beta_grid_desc_m_k_,
                                               arg.y_grid_desc_m_k_,
                                               arg.numMeanVarCountIteration_,
                                               arg.numBlockTileIteration_,
                                               arg.kGridSize_,
                                               arg.epsilon_,
                                               static_cast<MeanVarDataType*>(arg.p_workspace_mean_),
                                               static_cast<MeanVarDataType*>(arg.p_workspace_var_),
                                               static_cast<int32_t*>(arg.p_workspace_count_),
                                               arg.p_x_,
                                               arg.p_gamma_,
                                               arg.p_beta_,
                                               arg.p_y_,
                                               arg.y_elementwise_op_);
            avg_time += launch_and_time_kernel(
                stream_config,
                kernel2,
                dim3(arg.gridSize_),
                dim3(BlockSize),
                0,
                arg.kernel2_mean_var_grid_desc_m_kblock_,
                arg.kernel2_count_grid_desc_m_kblock_,
                arg.x_grid_desc_m_k_,
                arg.gamma_grid_desc_m_k_,
                arg.beta_grid_desc_m_k_,
                arg.y_grid_desc_m_k_,
                arg.save_mean_grid_desc_m_,
                arg.save_inv_std_grid_desc_m_,
                arg.numMeanVarCountIteration_,
                arg.numBlockTileIteration_,
                arg.kGridSize_,
                arg.epsilon_,
                static_cast<const WorkspaceMeanVarDataType*>(arg.p_workspace_mean_),
                static_cast<const WorkspaceMeanVarDataType*>(arg.p_workspace_var_),
                static_cast<const int32_t*>(arg.p_workspace_count_),
                arg.p_x_,
                arg.p_gamma_,
                arg.p_beta_,
                arg.p_y_,
                arg.p_saveMean_,
                arg.p_saveInvStd_,
                arg.y_elementwise_op_);

            return avg_time;
        };
@@ -482,10 +566,10 @@ struct DeviceNormalizationSplitKImpl : public DeviceNormalization<XDataType,
        int welford_size = pArg_->MRaw_ * pArg_->kGridSize_;

        // workspace for welford intermediate mean
        workspace_size += welford_size * sizeof(MeanVarDataType) + 64;
        workspace_size += welford_size * sizeof(WorkspaceMeanVarDataType) + 64;

        // workspace for welford intermediate variance
        workspace_size += welford_size * sizeof(MeanVarDataType) + 64;
        workspace_size += welford_size * sizeof(WorkspaceMeanVarDataType) + 64;

        // workspace for welford intermediate count
        workspace_size += pArg_->kGridSize_ * sizeof(int32_t) + 64;
@@ -504,13 +588,13 @@ struct DeviceNormalizationSplitKImpl : public DeviceNormalization<XDataType,
        // setup buffer used for intermediate welford mean
        pArg_->p_workspace_mean_ = static_cast<char*>(pArg_->p_workspace_);

        index_t mean_space_sz = welford_size * sizeof(MeanVarDataType);
        index_t mean_space_sz = welford_size * sizeof(WorkspaceMeanVarDataType);
        mean_space_sz = math::integer_least_multiple(mean_space_sz, 64);

        // setup buffer used for intermediate welford variance
        pArg_->p_workspace_var_ = reinterpret_cast<char*>(pArg_->p_workspace_mean_) + mean_space_sz;

        index_t variance_space_sz = welford_size * sizeof(MeanVarDataType);
        index_t variance_space_sz = welford_size * sizeof(WorkspaceMeanVarDataType);
        variance_space_sz = math::integer_least_multiple(variance_space_sz, 64);

        // setup buffer used for intermediate welford count
@@ -522,8 +606,6 @@ struct DeviceNormalizationSplitKImpl : public DeviceNormalization<XDataType,
    {
        const Argument* p_arg_ = dynamic_cast<const Argument*>(p_arg);

        constexpr index_t NumInvariantDim = Rank - NumReduceDim;

        if constexpr(XYVectorDim == 0)
        {
            if constexpr(NumInvariantDim == 0)
@@ -535,10 +617,10 @@ struct DeviceNormalizationSplitKImpl : public DeviceNormalization<XDataType,
                if(p_arg_->xStrides_[NumInvariantDim - 1] != 1)
                    return false;

                if(p_arg_->invariant_lowest_length % XSrcVectorSize != 0)
                if(p_arg_->invariant_lowest_length_ % XSrcVectorSize != 0)
                    return false;

                if(p_arg_->invariant_lowest_length % YDstVectorSize != 0)
                if(p_arg_->invariant_lowest_length_ % YDstVectorSize != 0)
                    return false;
            };
        }
@@ -578,7 +660,7 @@ struct DeviceNormalizationSplitKImpl : public DeviceNormalization<XDataType,
            if(p_arg_->betaStrides_[NumInvariantDim - 1] != 1)
                return false;

            if(p_arg_->invariant_lowest_length % BetaSrcVectorSize != 0)
            if(p_arg_->invariant_lowest_length_ % BetaSrcVectorSize != 0)
                return false;
        }
        else // if fastest dim is reduced
@@ -593,6 +675,9 @@ struct DeviceNormalizationSplitKImpl : public DeviceNormalization<XDataType,
        if(p_arg_->kGridSize_ <= 1)
            return false;

        if(p_arg_->invariant_lowest_length_ % SaveMeanInvStdDstVectorSize != 0)
            return false;

        return true;
    };

@@ -602,6 +687,8 @@ struct DeviceNormalizationSplitKImpl : public DeviceNormalization<XDataType,
        const std::vector<index_t> gammaStrides,
        const std::vector<index_t> betaStrides,
        const std::vector<index_t> yStrides,
        const std::vector<index_t> saveMeanStrides,
        const std::vector<index_t> saveInvStdStrides,
        const std::vector<index_t> reduceDims,
        double epsilon,
        const void* p_x,
@@ -609,27 +696,30 @@ struct DeviceNormalizationSplitKImpl : public DeviceNormalization<XDataType,
        const void* p_beta,
        void* p_y,
        void* p_saveMean,
        void* p_saveInvVar,
        void* p_saveInvStd,
        YElementwiseOperation y_elementwise_op) override
    {
        // TODO
        // Optional cache of the intermediate results (mean and InvVariance) during the
        // forward pass could speedup in the backward
        ignore = p_saveMean;
        ignore = p_saveInvVar;
        if(lengths.size() != Rank || xStrides.size() != Rank || gammaStrides.size() != Rank ||
           betaStrides.size() != Rank || yStrides.size() != Rank ||
           saveMeanStrides.size() != NumInvariantDim || saveInvStdStrides.size() != NumInvariantDim)
            throw std::runtime_error("dimension is incorrect");

        return std::make_unique<Argument>(lengths,
                                          xStrides,
                                          gammaStrides,
                                          betaStrides,
                                          yStrides,
                                          saveMeanStrides,
                                          saveInvStdStrides,
                                          reduceDims,
                                          y_elementwise_op,
                                          epsilon,
                                          static_cast<const XDataType*>(p_x),
                                          static_cast<const GammaDataType*>(p_gamma),
                                          static_cast<const BetaDataType*>(p_beta),
                                          static_cast<YDataType*>(p_y));
                                          static_cast<YDataType*>(p_y),
                                          static_cast<SaveMeanInvStdDataType*>(p_saveMean),
                                          static_cast<SaveMeanInvStdDataType*>(p_saveInvStd));
    };

    std::unique_ptr<BaseInvoker> MakeInvokerPointer() override

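Note on the interface change above: `MakeArgumentPointer` now accepts `p_saveMean`/`p_saveInvStd` instead of ignoring them, so the split-K path persists the per-row statistics it already computes. The payoff, per the removed TODO comment, is that a backward pass can rebuild the normalized input without repeating the K-reduction. A minimal host-side sketch of that reuse (illustrative only; the `build_x_hat` helper and the row-major [M, N] layout are assumptions, not part of this diff):

#include <vector>

// Hypothetical helper: recompute x_hat = (x - mean) * inv_std from the
// statistics the forward pass saved, so backward skips the K-reduction.
void build_x_hat(const std::vector<float>& x,            // [M * N], row-major
                 const std::vector<float>& save_mean,    // [M]
                 const std::vector<float>& save_inv_std, // [M]
                 std::vector<float>& x_hat,              // [M * N]
                 int M,
                 int N)
{
    for(int m = 0; m < M; ++m)
        for(int n = 0; n < N; ++n)
            x_hat[m * N + n] = (x[m * N + n] - save_mean[m]) * save_inv_std[m];
}
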
@@ -18,9 +18,11 @@ template <typename XDataType,
          typename GammaDataType,
          typename BetaDataType,
          typename YDataType,
          typename SaveMeanInvStdDataType,
          typename ComputeDataType,
          typename YElementwiseOperation,
          typename GridDesc_M_K,
          typename GridDesc_M,
          index_t BlockSize,
          index_t MThreadClusterSize,
          index_t KThreadClusterSize,
@@ -34,6 +36,7 @@ template <typename XDataType,
          index_t BetaSrcVectorSize,
          index_t YDstVectorDim,
          index_t YDstVectorSize,
          index_t SaveMeanInvStdDstVectorSize,
          bool SweepOnce>
struct GridwiseNormalizationNaiveVariance_mk_to_mk
{
@@ -45,6 +48,10 @@ struct GridwiseNormalizationNaiveVariance_mk_to_mk
                      (YDstVectorDim == 1 && KThreadSliceSize % YDstVectorSize == 0),
                  "Invalid thread slice sizes and/or vector sizes configuration, please check!");

    static_assert(MThreadSliceSize % SaveMeanInvStdDstVectorSize == 0,
                  "Invalid thread slice sizes and/or save mean and inverse std vector sizes "
                  "configuration, please check!");

    static_assert(XSrcVectorSize == YDstVectorSize);
    static_assert(XSrcVectorSize == GammaSrcVectorSize);
    static_assert(XSrcVectorSize == BetaSrcVectorSize);
@@ -66,6 +73,10 @@ struct GridwiseNormalizationNaiveVariance_mk_to_mk
    static constexpr auto thread_buffer_desc_m_k = make_naive_tensor_descriptor_packed(
        make_tuple(Number<MThreadSliceSize>{}, Number<XSrcVectorSize>{}));

    using ThreadBufferLengths_M = Sequence<MThreadSliceSize>;
    static constexpr auto thread_buffer_desc_m =
        make_naive_tensor_descriptor_packed(make_tuple(Number<MThreadSliceSize>{}));

    using ThreadReduceSrcDesc_M_K = decltype(make_naive_tensor_descriptor_packed(
        make_tuple(Number<MThreadSliceSize>{}, Number<XSrcVectorSize>{})));
    using ThreadReduceDstDesc_M =
@@ -84,6 +95,8 @@ struct GridwiseNormalizationNaiveVariance_mk_to_mk
                                   reduce::Add,
                                   true>;

    using PassThroughOp = tensor_operation::element_wise::PassThrough;

    static constexpr auto I0 = Number<0>{};
    static constexpr auto I1 = Number<1>{};
    static constexpr auto I2 = Number<2>{};
@@ -98,12 +111,16 @@ struct GridwiseNormalizationNaiveVariance_mk_to_mk
                               const GridDesc_M_K& gamma_grid_desc_m_k,
                               const GridDesc_M_K& beta_grid_desc_m_k,
                               const GridDesc_M_K& y_grid_desc_m_k,
                               const GridDesc_M& save_mean_grid_desc_m,
                               const GridDesc_M& save_inv_std_grid_desc_m,
                               index_t num_k_block_tile_iteration,
                               ComputeDataType epsilon,
                               const XDataType* const __restrict__ p_x_global,
                               const GammaDataType* const __restrict__ p_gamma_global,
                               const BetaDataType* const __restrict__ p_beta_global,
                               YDataType* const __restrict__ p_y_global,
                               SaveMeanInvStdDataType* const __restrict__ p_save_mean_global,
                               SaveMeanInvStdDataType* const __restrict__ p_save_inv_std_global,
                               const YElementwiseOperation y_elementwise_op)
    {
        // LDS
@@ -115,6 +132,12 @@ struct GridwiseNormalizationNaiveVariance_mk_to_mk
        auto y_global_val_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
            p_y_global, y_grid_desc_m_k.GetElementSpaceSize());

        auto save_mean_global_val_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
            p_save_mean_global, save_mean_grid_desc_m.GetElementSpaceSize());

        auto save_inv_std_global_val_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
            p_save_inv_std_global, save_inv_std_grid_desc_m.GetElementSpaceSize());

        auto x_thread_buf = generate_tuple(
            [&](auto) {
                return StaticBuffer<AddressSpaceEnum::Vgpr,
@@ -152,6 +175,8 @@ struct GridwiseNormalizationNaiveVariance_mk_to_mk
            mean_square_thread_buf;
        StaticBuffer<AddressSpaceEnum::Vgpr, ComputeDataType, MThreadSliceSize, true>&
            var_thread_buf = mean_square_thread_buf;
        StaticBuffer<AddressSpaceEnum::Vgpr, ComputeDataType, MThreadSliceSize, true>&
            inv_std_thread_buf = mean_square_thread_buf;

        const index_t thread_local_id = get_thread_local_1d_id();
        const index_t block_global_id = get_block_1d_id();
@@ -228,6 +253,42 @@ struct GridwiseNormalizationNaiveVariance_mk_to_mk
                thread_k_cluster_id * YDstVectorSize),
            y_elementwise_op);

        auto threadwise_mean_store =
            ThreadwiseTensorSliceTransfer_v1r3<ComputeDataType,
                                               SaveMeanInvStdDataType,
                                               decltype(thread_buffer_desc_m),
                                               GridDesc_M,
                                               PassThroughOp,
                                               ThreadBufferLengths_M,
                                               Sequence<0>,                 // DimAccessOrder
                                               0,                           // SrcVectorDim
                                               SaveMeanInvStdDstVectorSize, // ScalarPerVector
                                               InMemoryDataOperationEnum::Set,
                                               1,
                                               true>(
                save_mean_grid_desc_m,
                make_multi_index(block_global_id * M_BlockTileSize +
                                 thread_m_cluster_id * MThreadSliceSize),
                PassThroughOp{});

        auto threadwise_inv_std_store =
            ThreadwiseTensorSliceTransfer_v1r3<ComputeDataType,
                                               SaveMeanInvStdDataType,
                                               decltype(thread_buffer_desc_m),
                                               GridDesc_M,
                                               PassThroughOp,
                                               ThreadBufferLengths_M,
                                               Sequence<0>,                 // DimAccessOrder
                                               0,                           // SrcVectorDim
                                               SaveMeanInvStdDstVectorSize, // ScalarPerVector
                                               InMemoryDataOperationEnum::Set,
                                               1,
                                               true>(
                save_inv_std_grid_desc_m,
                make_multi_index(block_global_id * M_BlockTileSize +
                                 thread_m_cluster_id * MThreadSliceSize),
                PassThroughOp{});

        constexpr auto thread_copy_fwd_step_m_k = make_multi_index(0, K_BlockTileStepSize);
        constexpr auto thread_copy_bwd_step_m_k =
            make_multi_index(0, SweepOnce ? 0 : -K_BlockTileSize);
@@ -243,7 +304,8 @@ struct GridwiseNormalizationNaiveVariance_mk_to_mk

        // E(x), E[x^2], var(x)
        // FIXME: Should not hack the transform from deviceOP
        int reduce_length = x_grid_desc_m_k.GetTransforms()[I2].GetUpperLengths()[I0];
        ComputeDataType reduce_length = type_convert<ComputeDataType>(
            x_grid_desc_m_k.GetTransforms()[I2].GetUpperLengths()[I0]);

        static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
            mean_thread_buf(I) = reduce::Add::template GetIdentityValue<ComputeDataType>();
@@ -302,10 +364,34 @@ struct GridwiseNormalizationNaiveVariance_mk_to_mk
            // var(x) = E[x^2] - E[x]^2
            var_thread_buf(I) =
                mean_square_thread_buf(I) - (mean_thread_buf(I) * mean_thread_buf(I));

            inv_std_thread_buf(I) = type_convert<ComputeDataType>(1.0f) /
                                    ck::math::sqrt(var_thread_buf(I) + epsilon);
        });

        // save mean and inverse std for backward (optional)
        if(thread_k_cluster_id == 0)
        {
            if(p_save_mean_global != nullptr)
            {
                threadwise_mean_store.Run(thread_buffer_desc_m,
                                          make_tuple(I0),
                                          mean_thread_buf,
                                          save_mean_grid_desc_m,
                                          save_mean_global_val_buf);
            }
            if(p_save_inv_std_global != nullptr)
            {
                threadwise_inv_std_store.Run(thread_buffer_desc_m,
                                             make_tuple(I0),
                                             inv_std_thread_buf,
                                             save_inv_std_grid_desc_m,
                                             save_inv_std_global_val_buf);
            }
        }

        // normalization
        static_for<0, MThreadSliceSize, 1>{}([&](auto iM) {
            auto divisor = 1 / ck::math::sqrt(var_thread_buf(iM) + epsilon);
            static_for<0, ThreadBufferNumber, 1>{}([&](auto iK0) {
                static_for<0, XSrcVectorSize, 1>{}([&](auto iK1) {
                    constexpr auto offset_m_k =
@@ -314,7 +400,7 @@ struct GridwiseNormalizationNaiveVariance_mk_to_mk
                    // normalize
                    y_thread_buf(iK0)(Number<offset_m_k>{}) =
                        (x_thread_buf(iK0)(Number<offset_m_k>{}) - mean_thread_buf(iM)) *
                        divisor;
                        inv_std_thread_buf(iM);

                    // gamma & beta
                    y_thread_buf(iK0)(Number<offset_m_k>{}) =
@@ -404,8 +490,30 @@ struct GridwiseNormalizationNaiveVariance_mk_to_mk
            // var(x) = E[x^2] - E[x]^2
            var_thread_buf(I) =
                mean_square_thread_buf(I) - (mean_thread_buf(I) * mean_thread_buf(I));

            inv_std_thread_buf(I) = 1 / ck::math::sqrt(var_thread_buf(I) + epsilon);
        });

        if(thread_k_cluster_id == 0)
        {
            if(p_save_mean_global != nullptr)
            {
                threadwise_mean_store.Run(thread_buffer_desc_m,
                                          make_tuple(I0),
                                          mean_thread_buf,
                                          save_mean_grid_desc_m,
                                          save_mean_global_val_buf);
            }
            if(p_save_inv_std_global != nullptr)
            {
                threadwise_inv_std_store.Run(thread_buffer_desc_m,
                                             make_tuple(I0),
                                             inv_std_thread_buf,
                                             save_inv_std_grid_desc_m,
                                             save_inv_std_global_val_buf);
            }
        }

        auto thread_copy_tail_m_k =
            (num_k_block_tile_iteration - 1) * ThreadBufferNumber * thread_copy_fwd_step_m_k;

@@ -437,7 +545,6 @@ struct GridwiseNormalizationNaiveVariance_mk_to_mk
        });

        static_for<0, MThreadSliceSize, 1>{}([&](auto iM) {
            auto divisor = 1 / ck::math::sqrt(var_thread_buf(iM) + epsilon);
            static_for<0, ThreadBufferNumber, 1>{}([&](auto iK0) {
                static_for<0, XSrcVectorSize, 1>{}([&](auto iK1) {
                    constexpr auto offset_m_k =
@@ -446,7 +553,7 @@ struct GridwiseNormalizationNaiveVariance_mk_to_mk
                    // normalize
                    y_thread_buf(iK0)(Number<offset_m_k>{}) =
                        (x_thread_buf(iK0)(Number<offset_m_k>{}) - mean_thread_buf(iM)) *
                        divisor;
                        inv_std_thread_buf(iM);

                    // gamma
                    y_thread_buf(iK0)(Number<offset_m_k>{}) =

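The naive-variance kernel above accumulates E[x] and E[x^2] in a single sweep and then applies the identity var(x) = E[x^2] - E[x]^2, with inv_std = 1 / sqrt(var + epsilon). A self-contained scalar sketch of the same computation (an illustration only, not CK code):

#include <cmath>
#include <vector>

// Naive (two-moment) variance: accumulate sum(x) and sum(x^2) in one pass,
// then var = E[x^2] - E[x]^2 and inv_std = 1 / sqrt(var + eps).
float naive_inv_std(const std::vector<float>& x, float eps, float& mean_out)
{
    double sum = 0.0, sum_sq = 0.0;
    for(float v : x)
    {
        sum += v;
        sum_sq += static_cast<double>(v) * v;
    }
    const double mean = sum / x.size();
    const double var  = sum_sq / x.size() - mean * mean;
    mean_out = static_cast<float>(mean);
    return static_cast<float>(1.0 / std::sqrt(var + eps));
}

This formulation is cheap but can lose precision when E[x]^2 and E[x^2] are close in magnitude, which is why the diff also carries a Welford variant.
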
@@ -12,31 +12,42 @@ template <typename GridwiseReduction,
          typename GammaDataType,
          typename BetaDataType,
          typename YDataType,
          typename SaveMeanInvStdDataType,
          typename ComputeDataType,
          typename YElementwiseOperation,
          typename GridDesc_M_K>
__global__ void kernel_normalization(const GridDesc_M_K x_grid_desc_m_k,
                                     const GridDesc_M_K gamma_grid_desc_m_k,
                                     const GridDesc_M_K beta_grid_desc_m_k,
                                     const GridDesc_M_K y_grid_desc_m_k,
                                     index_t num_k_block_tile_iteration,
                                     ComputeDataType epsilon,
                                     const XDataType* const __restrict__ p_x_global,
                                     const GammaDataType* const __restrict__ p_gamma_global,
                                     const BetaDataType* const __restrict__ p_beta_global,
                                     YDataType* const __restrict__ p_y_global,
                                     const YElementwiseOperation y_elementwise_op)
          typename GridDesc_M_K,
          typename GridDesc_M>
__global__ void
kernel_normalization(const GridDesc_M_K x_grid_desc_m_k,
                     const GridDesc_M_K gamma_grid_desc_m_k,
                     const GridDesc_M_K beta_grid_desc_m_k,
                     const GridDesc_M_K y_grid_desc_m_k,
                     const GridDesc_M save_mean_grid_desc_m,
                     const GridDesc_M save_inv_std_grid_desc_m,
                     index_t num_k_block_tile_iteration,
                     ComputeDataType epsilon,
                     const XDataType* const __restrict__ p_x_global,
                     const GammaDataType* const __restrict__ p_gamma_global,
                     const BetaDataType* const __restrict__ p_beta_global,
                     YDataType* const __restrict__ p_y_global,
                     SaveMeanInvStdDataType* const __restrict__ p_save_mean_global,
                     SaveMeanInvStdDataType* const __restrict__ p_save_inv_std_global,
                     const YElementwiseOperation y_elementwise_op)
{
    GridwiseReduction::Run(x_grid_desc_m_k,
                           gamma_grid_desc_m_k,
                           beta_grid_desc_m_k,
                           y_grid_desc_m_k,
                           save_mean_grid_desc_m,
                           save_inv_std_grid_desc_m,
                           num_k_block_tile_iteration,
                           epsilon,
                           p_x_global,
                           p_gamma_global,
                           p_beta_global,
                           p_y_global,
                           p_save_mean_global,
                           p_save_inv_std_global,
                           y_elementwise_op);
};

@@ -44,9 +55,11 @@ template <typename XDataType,
          typename GammaDataType,
          typename BetaDataType,
          typename YDataType,
          typename SaveMeanInvStdDataType,
          typename ComputeDataType,
          typename YElementwiseOperation,
          typename GridDesc_M_K,
          typename GridDesc_M,
          index_t BlockSize,
          index_t MThreadClusterSize,
          index_t KThreadClusterSize,
@@ -60,6 +73,7 @@ template <typename XDataType,
          index_t BetaSrcVectorSize,
          index_t YDstVectorDim,
          index_t YDstVectorSize,
          index_t SaveMeanInvStdDstVectorSize,
          bool UseWelford>
auto NormalizationKernelSelector(bool isSweepOnce)
{
@@ -68,9 +82,11 @@ auto NormalizationKernelSelector(bool isSweepOnce)
                                                    GammaDataType,
                                                    BetaDataType,
                                                    YDataType,
                                                    SaveMeanInvStdDataType,
                                                    ComputeDataType,
                                                    YElementwiseOperation,
                                                    GridDesc_M_K,
                                                    GridDesc_M,
                                                    BlockSize,
                                                    MThreadClusterSize,
                                                    KThreadClusterSize,
@@ -84,15 +100,18 @@ auto NormalizationKernelSelector(bool isSweepOnce)
                                                    BetaSrcVectorSize,
                                                    YDstVectorDim,
                                                    YDstVectorSize,
                                                    SaveMeanInvStdDstVectorSize,
                                                    false>;
    using GridwiseNormalizationSweepOnceNaive =
        GridwiseNormalizationNaiveVariance_mk_to_mk<XDataType,
                                                    GammaDataType,
                                                    BetaDataType,
                                                    YDataType,
                                                    SaveMeanInvStdDataType,
                                                    ComputeDataType,
                                                    YElementwiseOperation,
                                                    GridDesc_M_K,
                                                    GridDesc_M,
                                                    BlockSize,
                                                    MThreadClusterSize,
                                                    KThreadClusterSize,
@@ -106,15 +125,18 @@ auto NormalizationKernelSelector(bool isSweepOnce)
                                                    BetaSrcVectorSize,
                                                    YDstVectorDim,
                                                    YDstVectorSize,
                                                    SaveMeanInvStdDstVectorSize,
                                                    true>;
    using GridwiseNormalizationGenericWelford =
        GridwiseNormalizationWelfordVariance_mk_to_mk<XDataType,
                                                      GammaDataType,
                                                      BetaDataType,
                                                      YDataType,
                                                      SaveMeanInvStdDataType,
                                                      ComputeDataType,
                                                      YElementwiseOperation,
                                                      GridDesc_M_K,
                                                      GridDesc_M,
                                                      BlockSize,
                                                      MThreadClusterSize,
                                                      KThreadClusterSize,
@@ -128,15 +150,18 @@ auto NormalizationKernelSelector(bool isSweepOnce)
                                                      BetaSrcVectorSize,
                                                      YDstVectorDim,
                                                      YDstVectorSize,
                                                      SaveMeanInvStdDstVectorSize,
                                                      false>;
    using GridwiseNormalizationSweepOnceWelford =
        GridwiseNormalizationWelfordVariance_mk_to_mk<XDataType,
                                                      GammaDataType,
                                                      BetaDataType,
                                                      YDataType,
                                                      SaveMeanInvStdDataType,
                                                      ComputeDataType,
                                                      YElementwiseOperation,
                                                      GridDesc_M_K,
                                                      GridDesc_M,
                                                      BlockSize,
                                                      MThreadClusterSize,
                                                      KThreadClusterSize,
@@ -150,6 +175,7 @@ auto NormalizationKernelSelector(bool isSweepOnce)
                                                      BetaSrcVectorSize,
                                                      YDstVectorDim,
                                                      YDstVectorSize,
                                                      SaveMeanInvStdDstVectorSize,
                                                      true>;

    if constexpr(UseWelford)
@@ -159,17 +185,21 @@ auto NormalizationKernelSelector(bool isSweepOnce)
                                              GammaDataType,
                                              BetaDataType,
                                              YDataType,
                                              SaveMeanInvStdDataType,
                                              ComputeDataType,
                                              YElementwiseOperation,
                                              GridDesc_M_K>
                                              GridDesc_M_K,
                                              GridDesc_M>
                 : kernel_normalization<GridwiseNormalizationGenericWelford,
                                        XDataType,
                                        GammaDataType,
                                        BetaDataType,
                                        YDataType,
                                        SaveMeanInvStdDataType,
                                        ComputeDataType,
                                        YElementwiseOperation,
                                        GridDesc_M_K>;
                                        GridDesc_M_K,
                                        GridDesc_M>;
    }
    else
    {
@@ -178,17 +208,21 @@ auto NormalizationKernelSelector(bool isSweepOnce)
                                              GammaDataType,
                                              BetaDataType,
                                              YDataType,
                                              SaveMeanInvStdDataType,
                                              ComputeDataType,
                                              YElementwiseOperation,
                                              GridDesc_M_K>
                                              GridDesc_M_K,
                                              GridDesc_M>
                 : kernel_normalization<GridwiseNormalizationGenericNaive,
                                        XDataType,
                                        GammaDataType,
                                        BetaDataType,
                                        YDataType,
                                        SaveMeanInvStdDataType,
                                        ComputeDataType,
                                        YElementwiseOperation,
                                        GridDesc_M_K>;
                                        GridDesc_M_K,
                                        GridDesc_M>;
    }
}

@@ -17,11 +17,13 @@ template <typename MeanVarDataType,
          typename GammaDataType,
          typename BetaDataType,
          typename YDataType,
          typename SaveMeanInvStdDataType,
          typename ComputeDataType,
          typename YElementwiseOperation,
          typename MeanVarGridDesc_M_KBlock,
          typename CountGridDesc_M_KBlock,
          typename XYGammaBetaGridDesc_M_K,
          typename SaveMeanInvStdGridDesc_M,
          index_t BlockSize,
          index_t MThreadClusterSize,
          index_t KThreadClusterSize,
@@ -34,7 +36,8 @@ template <typename MeanVarDataType,
          index_t BetaSrcVectorDim,
          index_t BetaSrcVectorSize,
          index_t YDstVectorDim,
          index_t YDstVectorSize>
          index_t YDstVectorSize,
          index_t SaveMeanInvStdDstVectorSize>
struct GridwiseNormalizationSplitK2nd
{
    static_assert((XSrcVectorDim == 0 && MThreadSliceSize % XSrcVectorSize == 0) ||
@@ -45,6 +48,10 @@ struct GridwiseNormalizationSplitK2nd
                      (YDstVectorDim == 1 && KThreadSliceSize % YDstVectorSize == 0),
                  "Invalid thread slice sizes and/or vector sizes configuration, please check!");

    static_assert(MThreadSliceSize % SaveMeanInvStdDstVectorSize == 0,
                  "Invalid thread slice sizes and/or save mean and inverse std vector sizes "
                  "configuration, please check!");

    static_assert(XSrcVectorSize == YDstVectorSize);
    static_assert(XSrcVectorSize == GammaSrcVectorSize);
    static_assert(XSrcVectorSize == BetaSrcVectorSize);
@@ -69,6 +76,10 @@ struct GridwiseNormalizationSplitK2nd
    static constexpr auto thread_buffer_desc_m_k = make_naive_tensor_descriptor_packed(
        make_tuple(Number<MThreadSliceSize>{}, Number<XSrcVectorSize>{}));

    using ThreadBufferLengths_M = Sequence<MThreadSliceSize>;
    static constexpr auto thread_buffer_desc_m =
        make_naive_tensor_descriptor_packed(make_tuple(Number<MThreadSliceSize>{}));

    using ThreadBufferLengths_M_1 = Sequence<MThreadSliceSize, 1>;
    static constexpr auto thread_buffer_desc_m_1 =
        make_naive_tensor_descriptor_packed(make_tuple(Number<MThreadSliceSize>{}, I1));
@@ -99,6 +110,8 @@ struct GridwiseNormalizationSplitK2nd
                               const XYGammaBetaGridDesc_M_K& gamma_grid_desc_m_k,
                               const XYGammaBetaGridDesc_M_K& beta_grid_desc_m_k,
                               const XYGammaBetaGridDesc_M_K& y_grid_desc_m_k,
                               const SaveMeanInvStdGridDesc_M& save_mean_grid_desc_m,
                               const SaveMeanInvStdGridDesc_M& save_inv_std_grid_desc_m,
                               index_t num_k_mean_var_count_iteration,
                               index_t num_k_block_tile_iteration,
                               index_t k_grid_size,
@@ -110,6 +123,8 @@ struct GridwiseNormalizationSplitK2nd
                               const GammaDataType* const __restrict__ p_gamma_global,
                               const BetaDataType* const __restrict__ p_beta_global,
                               YDataType* const __restrict__ p_y_global,
                               SaveMeanInvStdDataType* const __restrict__ p_save_mean_global,
                               SaveMeanInvStdDataType* const __restrict__ p_save_inv_std_global,
                               const YElementwiseOperation y_elementwise_op)
    {
        // Thread/Block id
@@ -145,6 +160,12 @@ struct GridwiseNormalizationSplitK2nd
        auto y_global_val_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
            p_y_global, y_grid_desc_m_k.GetElementSpaceSize());

        auto save_mean_global_val_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
            p_save_mean_global, save_mean_grid_desc_m.GetElementSpaceSize());

        auto save_inv_std_global_val_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
            p_save_inv_std_global, save_inv_std_grid_desc_m.GetElementSpaceSize());

        // VGPR
        StaticBuffer<AddressSpaceEnum::Vgpr, ComputeDataType, MThreadSliceSize, true>
            in_mean_thread_buf;
@@ -158,6 +179,7 @@ struct GridwiseNormalizationSplitK2nd
            var_thread_buf;
        StaticBuffer<AddressSpaceEnum::Vgpr, int32_t, MThreadSliceSize, true>
            welford_count_thread_buf;
        auto& inv_std_thread_buf = var_thread_buf;

        auto x_thread_buf = generate_tuple(
            [&](auto) {
@@ -283,6 +305,42 @@ struct GridwiseNormalizationSplitK2nd
                thread_k_cluster_id * YDstVectorSize),
            y_elementwise_op);

        auto threadwise_mean_store =
            ThreadwiseTensorSliceTransfer_v1r3<ComputeDataType,
                                               SaveMeanInvStdDataType,
                                               decltype(thread_buffer_desc_m),
                                               SaveMeanInvStdGridDesc_M,
                                               PassThroughOp,
                                               ThreadBufferLengths_M,
                                               Sequence<0>,                 // DimAccessOrder
                                               0,                           // SrcVectorDim
                                               SaveMeanInvStdDstVectorSize, // ScalarPerVector
                                               InMemoryDataOperationEnum::Set,
                                               1,
                                               true>(
                save_mean_grid_desc_m,
                make_multi_index(block_m_cluster_id * M_BlockTileSize +
                                 thread_m_cluster_id * MThreadSliceSize),
                PassThroughOp{});

        auto threadwise_inv_std_store =
            ThreadwiseTensorSliceTransfer_v1r3<ComputeDataType,
                                               SaveMeanInvStdDataType,
                                               decltype(thread_buffer_desc_m),
                                               SaveMeanInvStdGridDesc_M,
                                               PassThroughOp,
                                               ThreadBufferLengths_M,
                                               Sequence<0>,                 // DimAccessOrder
                                               0,                           // SrcVectorDim
                                               SaveMeanInvStdDstVectorSize, // ScalarPerVector
                                               InMemoryDataOperationEnum::Set,
                                               1,
                                               true>(
                save_inv_std_grid_desc_m,
                make_multi_index(block_m_cluster_id * M_BlockTileSize +
                                 thread_m_cluster_id * MThreadSliceSize),
                PassThroughOp{});

        // step1: Merge mean and variance
        constexpr auto mean_var_count_thread_copy_step_I0_k =
            make_multi_index(I0, KThreadClusterSize);
@@ -332,9 +390,33 @@ struct GridwiseNormalizationSplitK2nd

            BlockwiseWelford::Run(
                mean_thread_buf(I), var_thread_buf(I), welford_count_thread_buf(I));

            inv_std_thread_buf(I) =
                type_convert<ComputeDataType>(1.0f) / ck::math::sqrt(var_thread_buf(I) + epsilon);
        });

        // step2: normalization
        // step2: save mean and inverse std for backward (optional)
        if(block_k_cluster_id == 0 && thread_k_cluster_id == 0)
        {
            if(p_save_mean_global != nullptr)
            {
                threadwise_mean_store.Run(thread_buffer_desc_m,
                                          make_tuple(I0),
                                          mean_thread_buf,
                                          save_mean_grid_desc_m,
                                          save_mean_global_val_buf);
            }
            if(p_save_inv_std_global != nullptr)
            {
                threadwise_inv_std_store.Run(thread_buffer_desc_m,
                                             make_tuple(I0),
                                             inv_std_thread_buf,
                                             save_inv_std_grid_desc_m,
                                             save_inv_std_global_val_buf);
            }
        }

        // step3: normalization
        constexpr auto thread_copy_fwd_step_m_k = make_multi_index(0, K_BlockTileStepSize);

        for(index_t k = 0; k < num_k_block_tile_iteration; ++k)
@@ -360,7 +442,6 @@ struct GridwiseNormalizationSplitK2nd
            });

            static_for<0, MThreadSliceSize, 1>{}([&](auto iM) {
                auto divisor = 1 / ck::math::sqrt(var_thread_buf(iM) + epsilon);
                static_for<0, ThreadBufferNumber, 1>{}([&](auto iK0) {
                    static_for<0, XSrcVectorSize, 1>{}([&](auto iK1) {
                        constexpr auto offset_m_k =
@@ -369,7 +450,7 @@ struct GridwiseNormalizationSplitK2nd
                        // normalize
                        y_thread_buf(iK0)(Number<offset_m_k>{}) =
                            (x_thread_buf(iK0)(Number<offset_m_k>{}) - mean_thread_buf(iM)) *
                            divisor;
                            inv_std_thread_buf(iM);

                        // gamma
                        y_thread_buf(iK0)(Number<offset_m_k>{}) =

@@ -16,9 +16,11 @@ template <typename XDataType,
          typename GammaDataType,
          typename BetaDataType,
          typename YDataType,
          typename SaveMeanInvStdDataType,
          typename ComputeDataType,
          typename YElementwiseOperation,
          typename GridDesc_M_K,
          typename GridDesc_M,
          index_t BlockSize,
          index_t MThreadClusterSize,
          index_t KThreadClusterSize,
@@ -32,6 +34,7 @@ template <typename XDataType,
          index_t BetaSrcVectorSize,
          index_t YDstVectorDim,
          index_t YDstVectorSize,
          index_t SaveMeanInvStdDstVectorSize,
          bool SweepOnce>
struct GridwiseNormalizationWelfordVariance_mk_to_mk
{
@@ -43,6 +46,10 @@ struct GridwiseNormalizationWelfordVariance_mk_to_mk
                      (YDstVectorDim == 1 && KThreadSliceSize % YDstVectorSize == 0),
                  "Invalid thread slice sizes and/or vector sizes configuration, please check!");

    static_assert(MThreadSliceSize % SaveMeanInvStdDstVectorSize == 0,
                  "Invalid thread slice sizes and/or save mean and inverse std vector sizes "
                  "configuration, please check!");

    static_assert(XSrcVectorSize == YDstVectorSize);
    static_assert(XSrcVectorSize == GammaSrcVectorSize);
    static_assert(XSrcVectorSize == BetaSrcVectorSize);
@@ -64,6 +71,10 @@ struct GridwiseNormalizationWelfordVariance_mk_to_mk
    static constexpr auto thread_buffer_desc_m_k = make_naive_tensor_descriptor_packed(
        make_tuple(Number<MThreadSliceSize>{}, Number<XSrcVectorSize>{}));

    using ThreadBufferLengths_M = Sequence<MThreadSliceSize>;
    static constexpr auto thread_buffer_desc_m =
        make_naive_tensor_descriptor_packed(make_tuple(Number<MThreadSliceSize>{}));

    using ThreadReduceSrcDesc_M_K = decltype(make_naive_tensor_descriptor_packed(
        make_tuple(Number<MThreadSliceSize>{}, Number<XSrcVectorSize>{})));
    using ThreadReduceDstDesc_M =
@@ -77,6 +88,8 @@ struct GridwiseNormalizationWelfordVariance_mk_to_mk
                                        ThreadClusterLengths_M_K,
                                        ThreadClusterArrangeOrder>;

    using PassThroughOp = tensor_operation::element_wise::PassThrough;

    static constexpr auto I0 = Number<0>{};
    static constexpr auto I1 = Number<1>{};
    static constexpr auto I2 = Number<2>{};
@@ -114,17 +127,18 @@ struct GridwiseNormalizationWelfordVariance_mk_to_mk
                               const GridDesc_M_K& gamma_grid_desc_m_k,
                               const GridDesc_M_K& beta_grid_desc_m_k,
                               const GridDesc_M_K& y_grid_desc_m_k,
                               const GridDesc_M& save_mean_grid_desc_m,
                               const GridDesc_M& save_inv_std_grid_desc_m,
                               index_t num_k_block_tile_iteration,
                               ComputeDataType epsilon,
                               const XDataType* const __restrict__ p_x_global,
                               const GammaDataType* const __restrict__ p_gamma_global,
                               const BetaDataType* const __restrict__ p_beta_global,
                               YDataType* const __restrict__ p_y_global,
                               SaveMeanInvStdDataType* const __restrict__ p_save_mean_global,
                               SaveMeanInvStdDataType* const __restrict__ p_save_inv_std_global,
                               const YElementwiseOperation y_elementwise_op)
    {
        auto y_global_val_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
            p_y_global, y_grid_desc_m_k.GetElementSpaceSize());

        auto x_thread_buf = generate_tuple(
            [&](auto) {
                return StaticBuffer<AddressSpaceEnum::Vgpr,
@@ -150,6 +164,7 @@ struct GridwiseNormalizationWelfordVariance_mk_to_mk
            mean_thread_buf;
        StaticBuffer<AddressSpaceEnum::Vgpr, ComputeDataType, MThreadSliceSize, true>
            var_thread_buf;
        auto& inv_std_thread_buf = var_thread_buf;

        const index_t thread_local_id = get_thread_local_1d_id();
        const index_t block_global_id = get_block_1d_id();
@@ -226,6 +241,42 @@ struct GridwiseNormalizationWelfordVariance_mk_to_mk
                thread_k_cluster_id * YDstVectorSize),
            y_elementwise_op);

        auto threadwise_mean_store =
            ThreadwiseTensorSliceTransfer_v1r3<ComputeDataType,
                                               SaveMeanInvStdDataType,
                                               decltype(thread_buffer_desc_m),
                                               GridDesc_M,
                                               PassThroughOp,
                                               ThreadBufferLengths_M,
                                               Sequence<0>,                 // DimAccessOrder
                                               0,                           // SrcVectorDim
                                               SaveMeanInvStdDstVectorSize, // ScalarPerVector
                                               InMemoryDataOperationEnum::Set,
                                               1,
                                               true>(
                save_mean_grid_desc_m,
                make_multi_index(block_global_id * M_BlockTileSize +
                                 thread_m_cluster_id * MThreadSliceSize),
                PassThroughOp{});

        auto threadwise_inv_std_store =
            ThreadwiseTensorSliceTransfer_v1r3<ComputeDataType,
                                               SaveMeanInvStdDataType,
                                               decltype(thread_buffer_desc_m),
                                               GridDesc_M,
                                               PassThroughOp,
                                               ThreadBufferLengths_M,
                                               Sequence<0>,                 // DimAccessOrder
                                               0,                           // SrcVectorDim
                                               SaveMeanInvStdDstVectorSize, // ScalarPerVector
                                               InMemoryDataOperationEnum::Set,
                                               1,
                                               true>(
                save_inv_std_grid_desc_m,
                make_multi_index(block_global_id * M_BlockTileSize +
                                 thread_m_cluster_id * MThreadSliceSize),
                PassThroughOp{});

        constexpr auto thread_copy_fwd_step_m_k = make_multi_index(0, K_BlockTileStepSize);
        constexpr auto thread_copy_bwd_step_m_k =
            make_multi_index(0, SweepOnce ? 0 : -K_BlockTileSize);
@@ -239,6 +290,15 @@ struct GridwiseNormalizationWelfordVariance_mk_to_mk
        const auto beta_global_val_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
            p_beta_global, beta_grid_desc_m_k.GetElementSpaceSize());

        auto y_global_val_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
            p_y_global, y_grid_desc_m_k.GetElementSpaceSize());

        auto save_mean_global_val_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
            p_save_mean_global, save_mean_grid_desc_m.GetElementSpaceSize());

        auto save_inv_std_global_val_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
            p_save_inv_std_global, save_inv_std_grid_desc_m.GetElementSpaceSize());

        auto threadwise_welford = ThreadwiseWelford();
        threadwise_welford.max_count_ = GetKPerThread(x_grid_desc_m_k, thread_k_cluster_id);

@@ -279,10 +339,33 @@ struct GridwiseNormalizationWelfordVariance_mk_to_mk

            int count = threadwise_welford.cur_count_;
            BlockwiseWelford::Run(mean_thread_buf(I), var_thread_buf(I), count);
            inv_std_thread_buf(I) = type_convert<ComputeDataType>(1.0f) /
                                    ck::math::sqrt(var_thread_buf(I) + epsilon);
        });

        // save mean and inverse std for backward (optional)
        if(thread_k_cluster_id == 0)
        {
            if(p_save_mean_global != nullptr)
            {
                threadwise_mean_store.Run(thread_buffer_desc_m,
                                          make_tuple(I0),
                                          mean_thread_buf,
                                          save_mean_grid_desc_m,
                                          save_mean_global_val_buf);
            }
            if(p_save_inv_std_global != nullptr)
            {
                threadwise_inv_std_store.Run(thread_buffer_desc_m,
                                             make_tuple(I0),
                                             inv_std_thread_buf,
                                             save_inv_std_grid_desc_m,
                                             save_inv_std_global_val_buf);
            }
        }

        // normalization
        static_for<0, MThreadSliceSize, 1>{}([&](auto iM) {
            auto divisor = 1 / ck::math::sqrt(var_thread_buf(iM) + epsilon);
            static_for<0, ThreadBufferNumber, 1>{}([&](auto iK0) {
                static_for<0, XSrcVectorSize, 1>{}([&](auto iK1) {
                    constexpr auto offset_m_k =
@@ -291,7 +374,7 @@ struct GridwiseNormalizationWelfordVariance_mk_to_mk
                    // normalize
                    y_thread_buf(iK0)(Number<offset_m_k>{}) =
                        (x_thread_buf(iK0)(Number<offset_m_k>{}) - mean_thread_buf(iM)) *
                        divisor;
                        inv_std_thread_buf(iM);

                    // gamma & beta
                    y_thread_buf(iK0)(Number<offset_m_k>{}) =
@@ -360,8 +443,29 @@ struct GridwiseNormalizationWelfordVariance_mk_to_mk

            int count = threadwise_welford.cur_count_;
            BlockwiseWelford::Run(mean_thread_buf(I), var_thread_buf(I), count);
            inv_std_thread_buf(I) = 1 / ck::math::sqrt(var_thread_buf(I) + epsilon);
        });

        if(thread_k_cluster_id == 0)
        {
            if(p_save_mean_global != nullptr)
            {
                threadwise_mean_store.Run(thread_buffer_desc_m,
                                          make_tuple(I0),
                                          mean_thread_buf,
                                          save_mean_grid_desc_m,
                                          save_mean_global_val_buf);
            }
            if(p_save_inv_std_global != nullptr)
            {
                threadwise_inv_std_store.Run(thread_buffer_desc_m,
                                             make_tuple(I0),
                                             inv_std_thread_buf,
                                             save_inv_std_grid_desc_m,
                                             save_inv_std_global_val_buf);
            }
        }

        auto thread_copy_tail_m_k =
            (num_k_block_tile_iteration - 1) * ThreadBufferNumber * thread_copy_fwd_step_m_k;

@@ -393,7 +497,6 @@ struct GridwiseNormalizationWelfordVariance_mk_to_mk
        });

        static_for<0, MThreadSliceSize, 1>{}([&](auto iM) {
            auto divisor = 1 / ck::math::sqrt(var_thread_buf(iM) + epsilon);
            static_for<0, ThreadBufferNumber, 1>{}([&](auto iK0) {
                static_for<0, XSrcVectorSize, 1>{}([&](auto iK1) {
                    constexpr auto offset_m_k =
@@ -402,7 +505,7 @@ struct GridwiseNormalizationWelfordVariance_mk_to_mk
                    // normalize
                    y_thread_buf(iK0)(Number<offset_m_k>{}) =
                        (x_thread_buf(iK0)(Number<offset_m_k>{}) - mean_thread_buf(iM)) *
                        divisor;
                        inv_std_thread_buf(iM);

                    // gamma
                    y_thread_buf(iK0)(Number<offset_m_k>{}) =

@@ -20,8 +20,9 @@ template <typename XDataType,
          typename GammaDataType,
          typename BetaDataType,
          typename YDataType,
          typename AccDataType,
          typename AccElementwiseOperation>
          typename SaveMeanInvStdDataType,
          typename ComputeDataType,
          typename YElementwiseOperation>
struct ReferenceGroupnorm : public device::BaseOperator
{
    // x = [N, H, W, G, C]
@@ -35,14 +36,18 @@ struct ReferenceGroupnorm : public device::BaseOperator
                 const Tensor<GammaDataType>& gamma,
                 const Tensor<BetaDataType>& beta,
                 Tensor<YDataType>& y,
                 AccElementwiseOperation acc_elementwise_op,
                 Tensor<SaveMeanInvStdDataType>& save_mean,
                 Tensor<SaveMeanInvStdDataType>& save_inv_std,
                 YElementwiseOperation y_elementwise_op,
                 const std::vector<index_t> lengths,
                 AccDataType epsilon)
                 ComputeDataType epsilon)
            : x_(x),
              gamma_(gamma),
              beta_(beta),
              y_(y),
              acc_elementwise_op_(acc_elementwise_op),
              save_mean_(save_mean),
              save_inv_std_(save_inv_std),
              y_elementwise_op_(y_elementwise_op),
              lengths_(lengths),
              epsilon_(epsilon)
        {
@@ -52,9 +57,11 @@ struct ReferenceGroupnorm : public device::BaseOperator
        const Tensor<XDataType> gamma_;
        const Tensor<XDataType> beta_;
        Tensor<YDataType>& y_;
        AccElementwiseOperation acc_elementwise_op_;
        Tensor<SaveMeanInvStdDataType>& save_mean_;
        Tensor<SaveMeanInvStdDataType>& save_inv_std_;
        YElementwiseOperation y_elementwise_op_;
        std::vector<index_t> lengths_;
        AccDataType epsilon_;
        ComputeDataType epsilon_;
    };

    // Invoker
@@ -68,8 +75,8 @@ struct ReferenceGroupnorm : public device::BaseOperator
            int G = arg.lengths_[3];
            int C = arg.lengths_[4];

            Tensor<AccDataType> mean({N, G});
            Tensor<AccDataType> var({N, G});
            Tensor<ComputeDataType> mean({N, G});
            Tensor<ComputeDataType> var({N, G});

            // Compute mean & var in [H, W, C] by Welford Algorithm
            // TODO - parallel for each HWC
@@ -78,9 +85,9 @@ struct ReferenceGroupnorm : public device::BaseOperator
            {
                for(int g = 0; g < G; ++g)
                {
                    AccDataType mean_val = type_convert<AccDataType>(0.0f);
                    AccDataType var_val = type_convert<AccDataType>(0.0f);
                    int32_t curr_count = 0;
                    ComputeDataType mean_val = type_convert<ComputeDataType>(0.0f);
                    ComputeDataType var_val = type_convert<ComputeDataType>(0.0f);
                    int32_t curr_count = 0;

                    for(int h = 0; h < H; ++h)
                    {
@@ -89,10 +96,11 @@ struct ReferenceGroupnorm : public device::BaseOperator
                            for(int c = 0; c < C; ++c)
                            {
                                curr_count++;
                                AccDataType x = type_convert<AccDataType>(arg.x_(n, h, w, g, c));
                                AccDataType delta = x - mean_val;
                                ComputeDataType x =
                                    type_convert<ComputeDataType>(arg.x_(n, h, w, g, c));
                                ComputeDataType delta = x - mean_val;
                                mean_val += delta / curr_count;
                                AccDataType delta2 = x - mean_val;
                                ComputeDataType delta2 = x - mean_val;
                                var_val += delta * delta2;
                            }
                        }
@@ -100,6 +108,12 @@ struct ReferenceGroupnorm : public device::BaseOperator

                    mean(n, g) = mean_val;
                    var(n, g) = var_val / curr_count;

                    arg.save_mean_(n, g) = ck::type_convert<SaveMeanInvStdDataType>(mean(n, g));

                    ComputeDataType divisor =
                        static_cast<ComputeDataType>(1) / ck::math::sqrt(var(n, g) + arg.epsilon_);
                    arg.save_inv_std_(n, g) = ck::type_convert<SaveMeanInvStdDataType>(divisor);
                }
            }

@@ -114,15 +128,19 @@ struct ReferenceGroupnorm : public device::BaseOperator
                        {
                            for(int c = 0; c < C; ++c)
                            {
                                AccDataType x = type_convert<AccDataType>(arg.x_(n, h, w, g, c));
                                AccDataType gamma = type_convert<AccDataType>(arg.gamma_(g, c));
                                AccDataType beta = type_convert<AccDataType>(arg.beta_(g, c));
                                AccDataType mean_val = type_convert<AccDataType>(mean(n, g));
                                AccDataType var_val = type_convert<AccDataType>(var(n, g));
                                AccDataType y = gamma * (x - mean_val) /
                                                    ck::math::sqrt(arg.epsilon_ + var_val) +
                                                beta;
                                arg.acc_elementwise_op_(y, y);
                                ComputeDataType x =
                                    type_convert<ComputeDataType>(arg.x_(n, h, w, g, c));
                                ComputeDataType gamma =
                                    type_convert<ComputeDataType>(arg.gamma_(g, c));
                                ComputeDataType beta =
                                    type_convert<ComputeDataType>(arg.beta_(g, c));
                                ComputeDataType mean_val =
                                    type_convert<ComputeDataType>(mean(n, g));
                                ComputeDataType var_val = type_convert<ComputeDataType>(var(n, g));
                                ComputeDataType y = gamma * (x - mean_val) /
                                                        ck::math::sqrt(arg.epsilon_ + var_val) +
                                                    beta;
                                arg.y_elementwise_op_(y, y);
                                arg.y_(n, h, w, g, c) = type_convert<YDataType>(y);
                            }
                        }
@@ -159,11 +177,14 @@ struct ReferenceGroupnorm : public device::BaseOperator
                             const Tensor<GammaDataType>& gamma,
                             const Tensor<BetaDataType>& beta,
                             Tensor<YDataType>& y,
                             AccElementwiseOperation acc_elementwise_op,
                             Tensor<SaveMeanInvStdDataType>& save_mean,
                             Tensor<SaveMeanInvStdDataType>& save_inv_std,
                             YElementwiseOperation y_elementwise_op,
                             const std::vector<index_t> lengths,
                             AccDataType epsilon)
                             ComputeDataType epsilon)
    {
        return Argument{x, gamma, beta, y, acc_elementwise_op, lengths, epsilon};
        return Argument{
            x, gamma, beta, y, save_mean, save_inv_std, y_elementwise_op, lengths, epsilon};
    }

    static auto MakeInvoker() { return Invoker{}; }

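The reference groupnorm above computes the per-(n, g) statistics with Welford's online update (the `delta`/`delta2` pair in the inner loop). The recurrence in isolation, as a standalone sketch (illustrative; the `welford` helper name is an assumption):

#include <vector>

// Welford's single-pass, numerically stable mean/variance update.
// After the loop, m2 / count is the biased variance used for normalization.
void welford(const std::vector<float>& x, float& mean, float& variance)
{
    mean      = 0.0f;
    float m2  = 0.0f;
    int count = 0;
    for(float v : x)
    {
        ++count;
        const float delta = v - mean; // uses the old mean
        mean += delta / count;
        const float delta2 = v - mean; // uses the updated mean
        m2 += delta * delta2;
    }
    variance = (count > 0) ? m2 / count : 0.0f;
}

Because `mean` is already final when the loop ends, saving it and `1 / sqrt(variance + epsilon)` costs nothing extra, which is exactly what the reference now writes into `save_mean_` and `save_inv_std_`.
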
@@ -20,8 +20,9 @@ template <typename XDataType,
          typename GammaDataType,
          typename BetaDataType,
          typename YDataType,
          typename AccDataType,
          typename AccElementwiseOperation,
          typename SaveMeanInvStdDataType,
          typename ComputeDataType,
          typename YElementwiseOperation,
          index_t Rank,
          index_t NumReduceDim>
struct ReferenceLayernorm : public device::BaseOperator
@@ -36,15 +37,19 @@ struct ReferenceLayernorm : public device::BaseOperator
                 const Tensor<GammaDataType>& gamma_n,
                 const Tensor<BetaDataType>& beta_n,
                 Tensor<YDataType>& y_m_n,
                 AccElementwiseOperation acc_elementwise_op,
                 Tensor<SaveMeanInvStdDataType>& save_mean_m,
                 Tensor<SaveMeanInvStdDataType>& save_inv_std_m,
                 YElementwiseOperation y_elementwise_op,
                 const std::vector<index_t> lengths,
                 const std::vector<index_t> reduceDims,
                 AccDataType epsilon)
                 ComputeDataType epsilon)
            : x_m_n_(x_m_n),
              gamma_n_(gamma_n),
              beta_n_(beta_n),
              y_m_n_(y_m_n),
              acc_elementwise_op_(acc_elementwise_op),
              save_mean_m_(save_mean_m),
              save_inv_std_m_(save_inv_std_m),
              y_elementwise_op_(y_elementwise_op),
              lengths_(lengths),
              reduceDims_(reduceDims),
              epsilon_(epsilon)
@@ -55,10 +60,12 @@ struct ReferenceLayernorm : public device::BaseOperator
        const Tensor<XDataType> gamma_n_;
        const Tensor<XDataType> beta_n_;
        Tensor<YDataType>& y_m_n_;
        AccElementwiseOperation acc_elementwise_op_;
        Tensor<SaveMeanInvStdDataType>& save_mean_m_;
        Tensor<SaveMeanInvStdDataType>& save_inv_std_m_;
        YElementwiseOperation y_elementwise_op_;
        std::vector<index_t> lengths_;
        std::vector<index_t> reduceDims_;
        AccDataType epsilon_;
        ComputeDataType epsilon_;
    };

    // Invoker
@@ -69,8 +76,8 @@ struct ReferenceLayernorm : public device::BaseOperator
            int M = arg.lengths_[0];
            int N = arg.lengths_[1];

            Tensor<AccDataType> mean({M});
            Tensor<AccDataType> var({M});
            Tensor<ComputeDataType> mean({M});
            Tensor<ComputeDataType> var({M});

            for(int m = 0; m < M; ++m)
            {
@@ -79,7 +86,7 @@ struct ReferenceLayernorm : public device::BaseOperator

                for(int n = 0; n < N; ++n)
                {
                    auto x_val = ck::type_convert<AccDataType>(arg.x_m_n_(m, n));
                    auto x_val = ck::type_convert<ComputeDataType>(arg.x_m_n_(m, n));
                    mean(m) += x_val;
                    var(m) += x_val * x_val;
                }
@@ -90,17 +97,21 @@ struct ReferenceLayernorm : public device::BaseOperator

            for(int m = 0; m < M; ++m)
            {
                AccDataType divisor =
                    static_cast<AccDataType>(1) / ck::math::sqrt(var(m) + arg.epsilon_);
                ComputeDataType divisor =
                    static_cast<ComputeDataType>(1) / ck::math::sqrt(var(m) + arg.epsilon_);

                for(int n = 0; n < N; ++n)
                {
                    auto x_val = ck::type_convert<AccDataType>(arg.x_m_n_(m, n));
                    auto y_val = (x_val - mean(m)) * divisor;
                    y_val = (y_val * arg.gamma_n_(n)) + arg.beta_n_(n);
                    arg.acc_elementwise_op_(y_val, y_val);
                    auto x_val = ck::type_convert<ComputeDataType>(arg.x_m_n_(m, n));
                    auto gamma_val = ck::type_convert<ComputeDataType>(arg.gamma_n_(n));
                    auto beta_val = ck::type_convert<ComputeDataType>(arg.beta_n_(n));
                    auto y_val = (x_val - mean(m)) * divisor;
                    y_val = (y_val * gamma_val) + beta_val;
                    arg.y_elementwise_op_(y_val, y_val);
                    arg.y_m_n_(m, n) = ck::type_convert<YDataType>(y_val);
                }
                arg.save_mean_m_(m) = ck::type_convert<SaveMeanInvStdDataType>(mean(m));
                arg.save_inv_std_m_(m) = ck::type_convert<SaveMeanInvStdDataType>(divisor);
            }

            return 0;
@@ -140,13 +151,23 @@ struct ReferenceLayernorm : public device::BaseOperator
                             const Tensor<GammaDataType>& gamma_n,
                             const Tensor<BetaDataType>& beta_n,
                             Tensor<YDataType>& y_m_n,
                             AccElementwiseOperation acc_elementwise_op,
                             Tensor<SaveMeanInvStdDataType>& save_mean_m,
                             Tensor<SaveMeanInvStdDataType>& save_inv_std_m,
                             YElementwiseOperation y_elementwise_op,
                             const std::vector<index_t> lengths,
                             const std::vector<index_t> reduceDims,
                             AccDataType epsilon)
                             ComputeDataType epsilon)
    {
        return Argument{
            x_m_n, gamma_n, beta_n, y_m_n, acc_elementwise_op, lengths, reduceDims, epsilon};
        return Argument{x_m_n,
                        gamma_n,
                        beta_n,
                        y_m_n,
                        save_mean_m,
                        save_inv_std_m,
                        y_elementwise_op,
                        lengths,
                        reduceDims,
                        epsilon};
    }

    static auto MakeInvoker() { return Invoker{}; }

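A reading aid for the instance declarations and factories below: the diff reorders the `DeviceNormalization` template arguments. The old base class carried `ComputeDataType` in the fourth slot; the new one drops it (per the commit's "Remove compute data type from base class") and appends `SaveMeanInvStdDataType` after `YDataType`. Side by side for the rank-2/1 FP16 layernorm case (comments plus a hypothetical alias; `F16`/`F32` are the typedefs used in these files):

// Old:  DeviceNormalization<X, Gamma, Beta, Compute, Y, Op, Rank, NumReduceDim>
//       DeviceNormalization<F16, F16, F16, F32, F16, PassThrough, 2, 1>
// New:  DeviceNormalization<X, Gamma, Beta, Y, SaveMeanInvStd, Op, Rank, NumReduceDim>
//       DeviceNormalization<F16, F16, F16, F16, F32, PassThrough, 2, 1>
using LayernormF16Instance = ck::tensor_operation::device::
    DeviceNormalization<F16, F16, F16, F16, F32, ck::tensor_operation::element_wise::PassThrough, 2, 1>;
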
@@ -19,13 +19,13 @@ namespace instance {
|
||||
#ifdef CK_ENABLE_FP16
|
||||
// FP16
|
||||
void add_device_normalization_rank_2_1_f16_instances(
|
||||
std::vector<std::unique_ptr<DeviceNormalization<F16, F16, F16, F32, F16, PassThrough, 2, 1>>>&);
|
||||
std::vector<std::unique_ptr<DeviceNormalization<F16, F16, F16, F16, F32, PassThrough, 2, 1>>>&);
|
||||
|
||||
void add_device_normalization_rank_4_3_f16_instances(
|
||||
std::vector<std::unique_ptr<DeviceNormalization<F16, F16, F16, F32, F16, PassThrough, 4, 3>>>&);
|
||||
std::vector<std::unique_ptr<DeviceNormalization<F16, F16, F16, F16, F32, PassThrough, 4, 3>>>&);
|
||||
|
||||
void add_device_normalization_rank_5_3_f16_instances(
|
||||
std::vector<std::unique_ptr<DeviceNormalization<F16, F16, F16, F32, F16, PassThrough, 5, 3>>>&);
|
||||
std::vector<std::unique_ptr<DeviceNormalization<F16, F16, F16, F16, F32, PassThrough, 5, 3>>>&);
|
||||
#endif
|
||||
#ifdef CK_ENABLE_FP32
|
||||
// FP32
|
||||
@@ -42,14 +42,15 @@ template <typename XDataType,
|
||||
typename GammaDataType,
|
||||
typename BetaDataType,
|
||||
typename YDataType,
|
||||
typename SaveMeanInvStdDataType,
|
||||
index_t Rank,
|
||||
index_t NumReduceDim>
|
||||
struct DeviceOperationInstanceFactory<ck::tensor_operation::device::DeviceNormalization<
|
||||
XDataType,
|
||||
GammaDataType,
|
||||
BetaDataType,
|
||||
F32,
|
||||
YDataType,
|
||||
SaveMeanInvStdDataType,
|
||||
ck::tensor_operation::element_wise::PassThrough,
|
||||
Rank,
|
||||
NumReduceDim>>
|
||||
@@ -57,8 +58,8 @@ struct DeviceOperationInstanceFactory<ck::tensor_operation::device::DeviceNormal
|
||||
using DeviceOp = DeviceNormalization<XDataType,
|
||||
GammaDataType,
|
||||
BetaDataType,
|
||||
F32,
|
||||
YDataType,
|
||||
SaveMeanInvStdDataType,
|
||||
ck::tensor_operation::element_wise::PassThrough,
|
||||
Rank,
|
||||
NumReduceDim>;
|
||||
@@ -68,7 +69,8 @@ struct DeviceOperationInstanceFactory<ck::tensor_operation::device::DeviceNormal
|
||||
std::vector<std::unique_ptr<DeviceOp>> op_ptrs;
|
||||
#ifdef CK_ENABLE_FP16
|
||||
if constexpr(is_same_v<XDataType, F16> && is_same_v<GammaDataType, F16> &&
|
||||
is_same_v<BetaDataType, F16> && is_same_v<YDataType, F16>)
|
||||
is_same_v<BetaDataType, F16> && is_same_v<YDataType, F16> &&
|
||||
is_same_v<SaveMeanInvStdDataType, F32>)
|
||||
{
|
||||
if constexpr(Rank == 2 && NumReduceDim == 1)
|
||||
{
|
||||
@@ -86,7 +88,8 @@ struct DeviceOperationInstanceFactory<ck::tensor_operation::device::DeviceNormal
|
||||
#endif
|
||||
#ifdef CK_ENABLE_FP32
|
||||
if constexpr(is_same_v<XDataType, F32> && is_same_v<GammaDataType, F32> &&
|
||||
is_same_v<BetaDataType, F32> && is_same_v<YDataType, F32>)
|
||||
is_same_v<BetaDataType, F32> && is_same_v<YDataType, F32> &&
|
||||
is_same_v<SaveMeanInvStdDataType, F32>)
|
||||
{
|
||||
if constexpr(Rank == 2 && NumReduceDim == 1)
|
||||
{
|
||||
|
||||
@@ -19,7 +19,7 @@ namespace instance {

// FP16
void add_device_normalization_rank_5_3_swish_f16_instances(
    std::vector<std::unique_ptr<DeviceNormalization<F16, F16, F16, F32, F16, Swish, 5, 3>>>&);
    std::vector<std::unique_ptr<DeviceNormalization<F16, F16, F16, F16, F32, Swish, 5, 3>>>&);

// FP32
void add_device_normalization_rank_5_3_swish_f32_instances(
@@ -27,20 +27,21 @@ void add_device_normalization_rank_5_3_swish_f32_instances(

// [x, gamma, beta, y] = [f16, f32, f32, f16]
void add_device_normalization_rank_5_3_swish_f16_f32_f32_f16_instances(
    std::vector<std::unique_ptr<DeviceNormalization<F16, F32, F32, F32, F16, Swish, 5, 3>>>&);
    std::vector<std::unique_ptr<DeviceNormalization<F16, F32, F32, F16, F32, Swish, 5, 3>>>&);

template <typename XDataType,
          typename GammaDataType,
          typename BetaDataType,
          typename YDataType,
          typename SaveMeanInvStdDataType,
          index_t Rank,
          index_t NumReduceDim>
struct DeviceOperationInstanceFactory<
    ck::tensor_operation::device::DeviceNormalization<XDataType,
                                                      GammaDataType,
                                                      BetaDataType,
                                                      F32,
                                                      YDataType,
                                                      SaveMeanInvStdDataType,
                                                      ck::tensor_operation::element_wise::Swish,
                                                      Rank,
                                                      NumReduceDim>>
@@ -48,8 +49,8 @@ struct DeviceOperationInstanceFactory<
    using DeviceOp = DeviceNormalization<XDataType,
                                         GammaDataType,
                                         BetaDataType,
                                         F32,
                                         YDataType,
                                         SaveMeanInvStdDataType,
                                         ck::tensor_operation::element_wise::Swish,
                                         Rank,
                                         NumReduceDim>;
@@ -59,7 +60,8 @@ struct DeviceOperationInstanceFactory<
    std::vector<std::unique_ptr<DeviceOp>> op_ptrs;

    if constexpr(is_same_v<XDataType, F16> && is_same_v<GammaDataType, F16> &&
                 is_same_v<BetaDataType, F16> && is_same_v<YDataType, F16>)
                 is_same_v<BetaDataType, F16> && is_same_v<YDataType, F16> &&
                 is_same_v<SaveMeanInvStdDataType, F32>)
    {
        if constexpr(Rank == 5 && NumReduceDim == 3)
        {
@@ -67,7 +69,8 @@ struct DeviceOperationInstanceFactory<
        }
    }
    else if constexpr(is_same_v<XDataType, F32> && is_same_v<GammaDataType, F32> &&
                      is_same_v<BetaDataType, F32> && is_same_v<YDataType, F32>)
                      is_same_v<BetaDataType, F32> && is_same_v<YDataType, F32> &&
                      is_same_v<SaveMeanInvStdDataType, F32>)
    {
        if constexpr(Rank == 5 && NumReduceDim == 3)
        {
@@ -75,7 +78,8 @@ struct DeviceOperationInstanceFactory<
        }
    }
    else if constexpr(is_same_v<XDataType, F16> && is_same_v<GammaDataType, F32> &&
                      is_same_v<BetaDataType, F32> && is_same_v<YDataType, F16>)
                      is_same_v<BetaDataType, F32> && is_same_v<YDataType, F16> &&
                      is_same_v<SaveMeanInvStdDataType, F32>)
    {
        if constexpr(Rank == 5 && NumReduceDim == 3)
        {

@@ -11,7 +11,7 @@ namespace instance {
using Pass = ck::tensor_operation::element_wise::PassThrough;

void add_device_normalization_rank_5_3_f16_instances(
    std::vector<std::unique_ptr<DeviceNormalization<F16, F16, F16, F32, F16, Pass, 5, 3>>>&
    std::vector<std::unique_ptr<DeviceNormalization<F16, F16, F16, F16, F32, Pass, 5, 3>>>&
        instances)
{
    add_device_operation_instances(instances,

@@ -11,7 +11,7 @@ namespace instance {
using Swish = ck::tensor_operation::element_wise::Swish;

void add_device_normalization_rank_5_3_swish_f16_f32_f32_f16_instances(
    std::vector<std::unique_ptr<DeviceNormalization<F16, F32, F32, F32, F16, Swish, 5, 3>>>&
    std::vector<std::unique_ptr<DeviceNormalization<F16, F32, F32, F16, F32, Swish, 5, 3>>>&
        instances)
{
    add_device_operation_instances(

@@ -11,7 +11,7 @@ namespace instance {
using Swish = ck::tensor_operation::element_wise::Swish;

void add_device_normalization_rank_5_3_swish_f16_instances(
    std::vector<std::unique_ptr<DeviceNormalization<F16, F16, F16, F32, F16, Swish, 5, 3>>>&
    std::vector<std::unique_ptr<DeviceNormalization<F16, F16, F16, F16, F32, Swish, 5, 3>>>&
        instances)
{
    add_device_operation_instances(instances,

@@ -11,7 +11,7 @@ namespace instance {
using Pass = ck::tensor_operation::element_wise::PassThrough;

void add_device_normalization_rank_2_1_f16_instances(
    std::vector<std::unique_ptr<DeviceNormalization<F16, F16, F16, F32, F16, Pass, 2, 1>>>&
    std::vector<std::unique_ptr<DeviceNormalization<F16, F16, F16, F16, F32, Pass, 2, 1>>>&
        instances)
{
    add_device_operation_instances(instances,

@@ -11,7 +11,7 @@ namespace instance {
using Pass = ck::tensor_operation::element_wise::PassThrough;

void add_device_normalization_rank_4_3_f16_instances(
    std::vector<std::unique_ptr<DeviceNormalization<F16, F16, F16, F32, F16, Pass, 4, 3>>>&
    std::vector<std::unique_ptr<DeviceNormalization<F16, F16, F16, F16, F32, Pass, 4, 3>>>&
        instances)
{
    add_device_operation_instances(instances,

@@ -22,25 +22,25 @@ template <typename OutElementwise, index_t Rank, index_t Reduce>
using device_normalization_f16_instances =
    // clang-format off
    std::tuple <
        // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType, Rank, NumReduceDim, BlockSize, MThreadClusterSize, KThreadClusterSize, MThreadSliceSize, KThreadSliceSize, XYSrcVectorDim, XSrcVectorSize, GammaSrcVectorDim, GammaSrcVectorSize, BetaSrcVectorDim, BetaSrcVectorSize, YDstVectorSize>
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 2, 1, 2, 1, 2, 1, 2, 2>, // irregular size
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 4, 1, 4, 1, 4, 1, 4, 4>, // irregular size
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 64, 1, 64, 1, 8, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 8, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 16, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 32, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 8, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 16, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 2, 16, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 32, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 8, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 16, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 8, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 16, 1, 8, 1, 8, 1, 8, 8>
        // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType, SaveMeanInvStdDataType, Rank, NumReduceDim, BlockSize, MThreadClusterSize, KThreadClusterSize, MThreadSliceSize, KThreadSliceSize, XYSrcVectorDim, XSrcVectorSize, GammaSrcVectorDim, GammaSrcVectorSize, BetaSrcVectorDim, BetaSrcVectorSize, YDstVectorSize, SaveMeanInvStdScalarPerVector>
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 2, 1, 2, 1, 2, 1, 2, 2, 1>, // irregular size
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 4, 1, 4, 1, 4, 1, 4, 4, 1>, // irregular size
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 64, 1, 64, 1, 8, 1, 8, 1, 8, 1, 8, 8, 1>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 8, 1, 8, 1, 8, 1, 8, 8, 1>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 16, 1, 8, 1, 8, 1, 8, 8, 1>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 32, 1, 8, 1, 8, 1, 8, 8, 1>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 8, 1, 8, 1, 8, 1, 8, 8, 1>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 16, 1, 8, 1, 8, 1, 8, 8, 1>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 2, 16, 1, 8, 1, 8, 1, 8, 8, 2>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 32, 1, 8, 1, 8, 1, 8, 8, 1>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 8, 1, 8, 1, 8, 1, 8, 8, 1>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 16, 1, 8, 1, 8, 1, 8, 8, 1>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 8, 1, 8, 1, 8, 1, 8, 8, 1>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 16, 1, 8, 1, 8, 1, 8, 8, 1>
    // clang-format on
    >;

@@ -48,150 +48,150 @@ template <typename OutElementwise, index_t Rank, index_t Reduce>
using device_normalization_splitk_f16_instances =
    // clang-format off
    std::tuple <
        // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType, Rank, NumReduceDim, BlockSize, MThreadClusterSize, KThreadClusterSize, MThreadSliceSize, KThreadSliceSize, XYSrcVectorDim, XSrcVectorSize, GammaSrcVectorDim, GammaSrcVectorSize, BetaSrcVectorDim, BetaSrcVectorSize, YDstVectorSize>
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 2, 1, 2, 1, 2, 1, 2, 2>, // irregular size
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 4, 1, 4, 1, 4, 1, 4, 4>, // irregular size
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 64, 1, 64, 1, 8, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 8, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 16, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 32, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 8, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 16, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 2, 16, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 32, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 8, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 16, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 8, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 16, 1, 8, 1, 8, 1, 8, 8>
        // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType, SaveMeanInvStdDataType, Rank, NumReduceDim, BlockSize, MThreadClusterSize, KThreadClusterSize, MThreadSliceSize, KThreadSliceSize, XYSrcVectorDim, XSrcVectorSize, GammaSrcVectorDim, GammaSrcVectorSize, BetaSrcVectorDim, BetaSrcVectorSize, YDstVectorSize, SaveMeanInvStdScalarPerVector>
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 2, 1, 2, 1, 2, 1, 2, 2, 1>, // irregular size
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 4, 1, 4, 1, 4, 1, 4, 4, 1>, // irregular size
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 64, 1, 64, 1, 8, 1, 8, 1, 8, 1, 8, 8, 1>,
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 8, 1, 8, 1, 8, 1, 8, 8, 1>,
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 16, 1, 8, 1, 8, 1, 8, 8, 1>,
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 32, 1, 8, 1, 8, 1, 8, 8, 1>,
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 8, 1, 8, 1, 8, 1, 8, 8, 1>,
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 16, 1, 8, 1, 8, 1, 8, 8, 1>,
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 2, 16, 1, 8, 1, 8, 1, 8, 8, 2>,
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 32, 1, 8, 1, 8, 1, 8, 8, 1>,
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 8, 1, 8, 1, 8, 1, 8, 8, 1>,
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 16, 1, 8, 1, 8, 1, 8, 8, 1>,
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 8, 1, 8, 1, 8, 1, 8, 8, 1>,
        DeviceNormalizationSplitKImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 16, 1, 8, 1, 8, 1, 8, 8, 1>
    // clang-format on
    >;

template <typename OutElementwise, index_t Rank, index_t Reduce>
using device_normalization_f16_generic_instance = std::tuple<
    // clang-format off
    DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 64, 1, 64, 1, 1, 1, 1, 1, 1, 1, 1, 1>
    DeviceNormalizationImpl<F16, F16, F16, F32, F16, F32, OutElementwise, Rank, Reduce, 64, 1, 64, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>
    // clang-format on
    >;

template <typename OutElementwise, index_t Rank, index_t Reduce>
using device_normalization_f32_instances = std::tuple<
    // clang-format off
    // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType, Rank, NumReduceDim, BlockSize, MThreadClusterSize, KThreadClusterSize, MThreadSliceSize, KThreadSliceSize, XYSrcVectorDim, XSrcVectorSize, GammaSrcVectorSize, BetaSrcVectorSize, YDstVectorSize>
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 2, 1, 2, 1, 2, 1, 2, 2>, // irregular size
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 4, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 8, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 16, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 32, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 4, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 8, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 16, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 2, 16, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 32, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 4, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 8, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 2, 8, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 4, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 8, 1, 4, 1, 4, 1, 4, 4>
    // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType, SaveMeanInvStdDataType, Rank, NumReduceDim, BlockSize, MThreadClusterSize, KThreadClusterSize, MThreadSliceSize, KThreadSliceSize, XYSrcVectorDim, XSrcVectorSize, GammaSrcVectorDim, GammaSrcVectorSize, BetaSrcVectorDim, BetaSrcVectorSize, YDstVectorSize, SaveMeanInvStdScalarPerVector>
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 2, 1, 2, 1, 2, 1, 2, 2, 1>, // irregular size
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 4, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 8, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 16, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 32, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 4, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 8, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 16, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 2, 16, 1, 4, 1, 4, 1, 4, 4, 2>,
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 32, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 4, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 8, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 2, 8, 1, 4, 1, 4, 1, 4, 4, 2>,
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 4, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 8, 1, 4, 1, 4, 1, 4, 4, 1>
    // clang-format on
    >;

template <typename OutElementwise, index_t Rank, index_t Reduce>
using device_normalization_splitk_f32_instances = std::tuple<
    // clang-format off
    // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType, Rank, NumReduceDim, BlockSize, MThreadClusterSize, KThreadClusterSize, MThreadSliceSize, KThreadSliceSize, XYSrcVectorDim, XSrcVectorSize, GammaSrcVectorSize, BetaSrcVectorSize, YDstVectorSize>
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 2, 1, 2, 1, 2, 1, 2, 2>, // irregular size
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 4, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 8, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 16, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 32, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 4, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 8, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 16, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 2, 16, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 32, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 4, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 8, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 2, 8, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 4, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 8, 1, 4, 1, 4, 1, 4, 4>
    // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType, SaveMeanInvStdDataType, Rank, NumReduceDim, BlockSize, MThreadClusterSize, KThreadClusterSize, MThreadSliceSize, KThreadSliceSize, XYSrcVectorDim, XSrcVectorSize, GammaSrcVectorDim, GammaSrcVectorSize, BetaSrcVectorDim, BetaSrcVectorSize, YDstVectorSize, SaveMeanInvStdScalarPerVector>
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 2, 1, 2, 1, 2, 1, 2, 2, 1>, // irregular size
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 4, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 8, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 16, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 32, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 4, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 8, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 16, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 2, 16, 1, 4, 1, 4, 1, 4, 4, 2>,
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 32, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 4, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 8, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 2, 8, 1, 4, 1, 4, 1, 4, 4, 2>,
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 4, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationSplitKImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 8, 1, 4, 1, 4, 1, 4, 4, 1>
    // clang-format on
    >;

template <typename OutElementwise, index_t Rank, index_t Reduce>
using device_normalization_f32_generic_instance = std::tuple<
    // clang-format off
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 64, 1, 64, 1, 1, 1, 1, 1, 1, 1, 1, 1>
    DeviceNormalizationImpl<F32, F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 64, 1, 64, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>
    // clang-format on
    >;

template <typename OutElementwise, index_t Rank, index_t Reduce>
using device_normalization_f16_f32_f32_f16_instances = std::tuple<
    // clang-format off
    // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType, Rank, NumReduceDim, BlockSize, MThreadClusterSize, KThreadClusterSize, MThreadSliceSize, KThreadSliceSize, XYSrcVectorDim, XSrcVectorSize, GammaSrcVectorSize, BetaSrcVectorSize, YDstVectorSize>
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 2, 1, 2, 1, 2, 1, 2, 2>, // irregular size
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 4, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 8, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 16, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 32, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 4, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 8, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 16, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 2, 16, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 32, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 4, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 8, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 512, 1, 512, 2, 8, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 4, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 8, 1, 4, 1, 4, 1, 4, 4>
    // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType, SaveMeanInvStdDataType, Rank, NumReduceDim, BlockSize, MThreadClusterSize, KThreadClusterSize, MThreadSliceSize, KThreadSliceSize, XYSrcVectorDim, XSrcVectorSize, GammaSrcVectorDim, GammaSrcVectorSize, BetaSrcVectorDim, BetaSrcVectorSize, YDstVectorSize, SaveMeanInvStdScalarPerVector>
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 2, 1, 2, 1, 2, 1, 2, 2, 1>, // irregular size
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 4, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 8, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 16, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 32, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 4, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 8, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 16, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 2, 16, 1, 4, 1, 4, 1, 4, 4, 2>,
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 32, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 4, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 8, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 2, 8, 1, 4, 1, 4, 1, 4, 4, 2>,
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 4, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 8, 1, 4, 1, 4, 1, 4, 4, 1>
    // clang-format on
    >;

template <typename OutElementwise, index_t Rank, index_t Reduce>
using device_normalization_splitk_f16_f32_f32_f16_instances = std::tuple<
    // clang-format off
    // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType, Rank, NumReduceDim, BlockSize, MThreadClusterSize, KThreadClusterSize, MThreadSliceSize, KThreadSliceSize, XYSrcVectorDim, XSrcVectorSize, GammaSrcVectorSize, BetaSrcVectorSize, YDstVectorSize>
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 2, 1, 2, 1, 2, 1, 2, 2>, // irregular size
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 4, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 8, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 16, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 32, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 4, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 8, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 16, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 2, 16, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 32, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 4, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 8, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 512, 1, 512, 2, 8, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 4, 1, 4, 1, 4, 1, 4, 4>,
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 8, 1, 4, 1, 4, 1, 4, 4>
    // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType, SaveMeanInvStdDataType, Rank, NumReduceDim, BlockSize, MThreadClusterSize, KThreadClusterSize, MThreadSliceSize, KThreadSliceSize, XYSrcVectorDim, XSrcVectorSize, GammaSrcVectorDim, GammaSrcVectorSize, BetaSrcVectorDim, BetaSrcVectorSize, YDstVectorSize, SaveMeanInvStdScalarPerVector>
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 2, 1, 2, 1, 2, 1, 2, 2, 1>, // irregular size
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 4, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 8, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 16, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 32, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 4, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 8, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 16, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 2, 16, 1, 4, 1, 4, 1, 4, 4, 2>,
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 32, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 4, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 8, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 2, 8, 1, 4, 1, 4, 1, 4, 4, 2>,
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 4, 1, 4, 1, 4, 1, 4, 4, 1>,
    DeviceNormalizationSplitKImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 8, 1, 4, 1, 4, 1, 4, 4, 1>
    // clang-format on
    >;

template <typename OutElementwise, index_t Rank, index_t Reduce>
using device_normalization_f16_f32_f32_f16_generic_instance = std::tuple<
    // clang-format off
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, OutElementwise, Rank, Reduce, 64, 1, 64, 1, 1, 1, 1, 1, 1, 1, 1, 1>
    DeviceNormalizationImpl<F16, F32, F32, F32, F16, F32, OutElementwise, Rank, Reduce, 64, 1, 64, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>
    // clang-format on
    >;

@@ -80,6 +80,8 @@ bool profile_elementwise_layernorm_impl(int do_verification,
    Tensor<BetaDataType> beta(gammaBetaLength);
    Tensor<YDataType> y(length);
    Tensor<YDataType> host_y(length);
    Tensor<AccDataType> host_save_mean({M});
    Tensor<AccDataType> host_save_inv_std({M});

    switch(init_method)
    {
@@ -152,14 +154,23 @@ bool profile_elementwise_layernorm_impl(int do_verification,
                                        BetaDataType,
                                        YDataType,
                                        AccDataType,
                                        AccDataType,
                                        PassThrough,
                                        Rank,
                                        NumReduceDim>;

        ReferenceInstance ref;
        auto ref_argument =
            ref.MakeArgument(x, gamma, beta, host_y, PassThrough{}, {M, N}, {1}, 1e-4);
        auto ref_invoker = ref.MakeInvoker();
        auto ref_argument = ref.MakeArgument(x,
                                             gamma,
                                             beta,
                                             host_y,
                                             host_save_mean,
                                             host_save_inv_std,
                                             PassThrough{},
                                             {M, N},
                                             {1},
                                             1e-4);
        auto ref_invoker = ref.MakeInvoker();
        ref_invoker.Run(ref_argument);
    }

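// [Assumed illustration, not from the patch] The quantities the reference
// implementations above verify are, per reduced row of length N:
//
//     mean    = (1/N) * sum_i x_i
//     inv_std = 1 / sqrt(var + epsilon),  var = (1/N) * sum_i (x_i - mean)^2
//     y_i     = gamma_i * (x_i - mean) * inv_std + beta_i
//
// Saving mean and inv_std in the forward pass lets a backward pass reuse them
// instead of recomputing the reduction.
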
@@ -66,12 +66,15 @@ void host_gemm_layernorm(Tensor<HDataType>& h_m_n,
                                     BetaDataType,
                                     HDataType,
                                     AccDataType,
                                     AccDataType,
                                     HElementOp,
                                     2,
                                     1>;

    Tensor<EMeanVarDataType> e_m_n(HostTensorDescriptor{M, N});
    Tensor<AccDataType> c_m_n(HostTensorDescriptor{M, N});
    Tensor<AccDataType> save_mean({M});
    Tensor<AccDataType> save_inv_std({M});

    auto ref_gemm = ReferenceGemm{};
    auto ref_gemm_invoker = ref_gemm.MakeInvoker();
@@ -97,7 +100,7 @@ void host_gemm_layernorm(Tensor<HDataType>& h_m_n,
    auto ref_layernorm_invoker = ref_layernorm.MakeInvoker();

    auto ref_layernorm_argument = ref_layernorm.MakeArgument(
        e_m_n, gamma_n, beta_n, h_m_n, h_element_op, {M, N}, {1}, epsilon);
        e_m_n, gamma_n, beta_n, h_m_n, save_mean, save_inv_std, h_element_op, {M, N}, {1}, epsilon);
    ref_layernorm_invoker.Run(ref_layernorm_argument);
}

@@ -21,8 +21,10 @@ namespace profiler {
template <typename XDataType,
          typename GammaDataType,
          typename BetaDataType,
          typename AccDataType,
          typename YDataType>
          typename ComputeDataType,
          typename YDataType,
          typename SaveMeanInvStdDataType,
          bool SaveMeanInvStd>
bool profile_groupnorm_impl(int do_verification,
                            int init_method,
                            bool do_log,
@@ -34,6 +36,7 @@ bool profile_groupnorm_impl(int do_verification,
    if(length.size() != 5)
        return false;

    index_t N = length[0];
    index_t G = length[3];
    index_t C = length[4];

@@ -45,7 +48,14 @@ bool profile_groupnorm_impl(int do_verification,
    Tensor<GammaDataType> gamma(gammaBetaLength);
    Tensor<BetaDataType> beta(gammaBetaLength);
    Tensor<YDataType> y(length);
    Tensor<SaveMeanInvStdDataType> save_mean({N, G});
    Tensor<SaveMeanInvStdDataType> save_inv_std({N, G});

    Tensor<YDataType> host_y(length);
    Tensor<SaveMeanInvStdDataType> host_save_mean({N, G});
    Tensor<SaveMeanInvStdDataType> host_save_inv_std({N, G});

    std::vector<index_t> strideSaveMeanInvStd = {1};

    switch(init_method)
    {
@@ -69,6 +79,9 @@ bool profile_groupnorm_impl(int do_verification,
    DeviceMem gamma_dev(sizeof(GammaDataType) * gamma.mDesc.GetElementSpaceSize());
    DeviceMem beta_dev(sizeof(BetaDataType) * beta.mDesc.GetElementSpaceSize());
    DeviceMem y_dev(sizeof(YDataType) * y.mDesc.GetElementSpaceSize());
    DeviceMem save_mean_dev(sizeof(SaveMeanInvStdDataType) * save_mean.mDesc.GetElementSpaceSize());
    DeviceMem save_inv_std_dev(sizeof(SaveMeanInvStdDataType) *
                               save_inv_std.mDesc.GetElementSpaceSize());

    x_dev.ToDevice(x.mData.data());
    gamma_dev.ToDevice(gamma.mData.data());
@@ -78,8 +91,8 @@ bool profile_groupnorm_impl(int do_verification,
    using DeviceOp = ck::tensor_operation::device::DeviceNormalization<XDataType,
                                                                       GammaDataType,
                                                                       BetaDataType,
                                                                       AccDataType,
                                                                       YDataType,
                                                                       SaveMeanInvStdDataType,
                                                                       PassThrough,
                                                                       5,
                                                                       3>;
@@ -97,38 +110,70 @@ bool profile_groupnorm_impl(int do_verification,

    if(do_verification)
    {
        using ReferenceInstance = ck::tensor_operation::host::ReferenceGroupnorm<XDataType,
                                                                                 GammaDataType,
                                                                                 BetaDataType,
                                                                                 YDataType,
                                                                                 AccDataType,
                                                                                 PassThrough>;
        using ReferenceInstance =
            ck::tensor_operation::host::ReferenceGroupnorm<XDataType,
                                                           GammaDataType,
                                                           BetaDataType,
                                                           YDataType,
                                                           SaveMeanInvStdDataType,
                                                           ComputeDataType,
                                                           PassThrough>;

        ReferenceInstance ref;
        auto ref_argument = ref.MakeArgument(x, gamma, beta, host_y, PassThrough{}, length, 1e-6);
        auto ref_invoker = ref.MakeInvoker();
        auto ref_argument = ref.MakeArgument(
            x, gamma, beta, host_y, host_save_mean, host_save_inv_std, PassThrough{}, length, 1e-6);
        auto ref_invoker = ref.MakeInvoker();
        ref_invoker.Run(ref_argument);
    }

    int num_kernel = 0;

    auto f_get_argument = [&](auto& inst_ptr) {
        if constexpr(SaveMeanInvStd)
            return inst_ptr->MakeArgumentPointer(
                length,
                std::vector<ck::index_t>{x.mDesc.GetStrides().begin(), x.mDesc.GetStrides().end()},
                gammaBetaStride,
                gammaBetaStride,
                std::vector<ck::index_t>{y.mDesc.GetStrides().begin(), y.mDesc.GetStrides().end()},
                std::vector<ck::index_t>{save_mean.mDesc.GetStrides().begin(),
                                         save_mean.mDesc.GetStrides().end()},
                std::vector<ck::index_t>{save_inv_std.mDesc.GetStrides().begin(),
                                         save_inv_std.mDesc.GetStrides().end()},
                reduce_dim,
                1e-6,
                x_dev.GetDeviceBuffer(),
                gamma_dev.GetDeviceBuffer(),
                beta_dev.GetDeviceBuffer(),
                y_dev.GetDeviceBuffer(),
                save_mean_dev.GetDeviceBuffer(),
                save_inv_std_dev.GetDeviceBuffer(),
                PassThrough{});
        else
            return inst_ptr->MakeArgumentPointer(
                length,
                std::vector<ck::index_t>{x.mDesc.GetStrides().begin(), x.mDesc.GetStrides().end()},
                gammaBetaStride,
                gammaBetaStride,
                std::vector<ck::index_t>{y.mDesc.GetStrides().begin(), y.mDesc.GetStrides().end()},
                std::vector<ck::index_t>{save_mean.mDesc.GetStrides().begin(),
                                         save_mean.mDesc.GetStrides().end()},
                std::vector<ck::index_t>{save_inv_std.mDesc.GetStrides().begin(),
                                         save_inv_std.mDesc.GetStrides().end()},
                reduce_dim,
                1e-6,
                x_dev.GetDeviceBuffer(),
                gamma_dev.GetDeviceBuffer(),
                beta_dev.GetDeviceBuffer(),
                y_dev.GetDeviceBuffer(),
                nullptr,
                nullptr,
                PassThrough{});
    };

    for(auto& inst_ptr : instance_ptrs)
    {
        auto argument_ptr = inst_ptr->MakeArgumentPointer(
            length,
            std::vector<ck::index_t>{x.mDesc.GetStrides().begin(), x.mDesc.GetStrides().end()},
            gammaBetaStride,
            gammaBetaStride,
            std::vector<ck::index_t>{y.mDesc.GetStrides().begin(), y.mDesc.GetStrides().end()},
            reduce_dim,
            1e-6,
            x_dev.GetDeviceBuffer(),
            gamma_dev.GetDeviceBuffer(),
            beta_dev.GetDeviceBuffer(),
            y_dev.GetDeviceBuffer(),
            nullptr,
            nullptr,
            PassThrough{});
        auto argument_ptr = f_get_argument(inst_ptr);

        if(inst_ptr->IsSupportedArgument(argument_ptr.get()))
        {
@@ -152,6 +197,10 @@ bool profile_groupnorm_impl(int do_verification,
                     beta.mDesc.GetElementSize() * sizeof(BetaDataType) +
                     y.mDesc.GetElementSize() * sizeof(YDataType);

    if constexpr(SaveMeanInvStd)
        num_bytes += save_mean.mDesc.GetElementSpaceSize() * sizeof(SaveMeanInvStdDataType) +
                     save_inv_std.mDesc.GetElementSpaceSize() * sizeof(SaveMeanInvStdDataType);

    float gb_per_sec = num_bytes / 1.E6 / avg_time;

    if(time_kernel)
@@ -168,9 +217,22 @@ bool profile_groupnorm_impl(int do_verification,
    if(do_verification)
    {
        y_dev.FromDevice(y.mData.data());

        bool pass = ck::utils::check_err(y, host_y, "Error: Incorrect results", 1e-3, 1e-3);

        if constexpr(SaveMeanInvStd)
        {
            save_mean_dev.FromDevice(save_mean.mData.data());
            pass &= ck::utils::check_err(
                save_mean.mData, host_save_mean.mData, "Error: Incorrect results", 1e-3, 1e-3);

            save_inv_std_dev.FromDevice(save_inv_std.mData.data());
            pass &= ck::utils::check_err(save_inv_std.mData,
                                         host_save_inv_std.mData,
                                         "Error: Incorrect results",
                                         1e-3,
                                         1e-3);
        }

        if(do_log)
        {
            LogRangeAsType<float>(std::cout << "x : ", x.mData, ",") << std::endl;

@@ -21,6 +21,8 @@ template <typename XDataType,
          typename BetaDataType,
          typename ComputeDataType,
          typename YDataType,
          typename SaveMeanInvStdDataType,
          bool SaveMeanInvStd,
          index_t Rank>
bool profile_layernorm_impl(int do_verification,
                            int init_method,
@@ -43,13 +45,19 @@ bool profile_layernorm_impl(int do_verification,
    Tensor<GammaDataType> gamma(reduce_length);
    Tensor<BetaDataType> beta(reduce_length);
    Tensor<YDataType> y(length);
    Tensor<SaveMeanInvStdDataType> save_mean({length[0]});
    Tensor<SaveMeanInvStdDataType> save_inv_std({length[0]});
    Tensor<YDataType> host_y(length);
    Tensor<SaveMeanInvStdDataType> host_save_mean({length[0]});
    Tensor<SaveMeanInvStdDataType> host_save_inv_std({length[0]});

    std::vector<index_t> strideXY =
        std::vector<ck::index_t>{x.mDesc.GetStrides().begin(), x.mDesc.GetStrides().end()};
    std::vector<index_t> strideGammaBeta = strideXY;
    strideGammaBeta[0] = 0;

    std::vector<index_t> strideSaveMeanInvStd = {1};

    switch(init_method)
    {
    case 0:
@@ -75,6 +83,9 @@ bool profile_layernorm_impl(int do_verification,
    DeviceMem gamma_dev(sizeof(GammaDataType) * gamma.mDesc.GetElementSpaceSize());
    DeviceMem beta_dev(sizeof(BetaDataType) * beta.mDesc.GetElementSpaceSize());
    DeviceMem y_dev(sizeof(YDataType) * y.mDesc.GetElementSpaceSize());
    DeviceMem save_mean_dev(sizeof(SaveMeanInvStdDataType) * save_mean.mDesc.GetElementSpaceSize());
    DeviceMem save_inv_std_dev(sizeof(SaveMeanInvStdDataType) *
                               save_inv_std.mDesc.GetElementSpaceSize());

    x_dev.ToDevice(x.mData.data());
    gamma_dev.ToDevice(gamma.mData.data());
@@ -86,8 +97,8 @@ bool profile_layernorm_impl(int do_verification,
    using DeviceOp = ck::tensor_operation::device::DeviceNormalization<XDataType,
                                                                       GammaDataType,
                                                                       BetaDataType,
                                                                       ComputeDataType,
                                                                       YDataType,
                                                                       SaveMeanInvStdDataType,
                                                                       PassThrough,
                                                                       Rank,
                                                                       NumReduceDim>;
@@ -105,40 +116,74 @@ bool profile_layernorm_impl(int do_verification,

    if(do_verification)
    {
        using ReferenceInstance = ck::tensor_operation::host::ReferenceLayernorm<XDataType,
                                                                                 GammaDataType,
                                                                                 BetaDataType,
                                                                                 YDataType,
                                                                                 ComputeDataType,
                                                                                 PassThrough,
                                                                                 Rank,
                                                                                 NumReduceDim>;
        using ReferenceInstance =
            ck::tensor_operation::host::ReferenceLayernorm<XDataType,
                                                           GammaDataType,
                                                           BetaDataType,
                                                           YDataType,
                                                           SaveMeanInvStdDataType,
                                                           ComputeDataType,
                                                           PassThrough,
                                                           Rank,
                                                           NumReduceDim>;

        ReferenceInstance ref;
        auto ref_argument =
            ref.MakeArgument(x, gamma, beta, host_y, PassThrough{}, length, reduce_dim, 1e-4);
        auto ref_invoker = ref.MakeInvoker();
        auto ref_argument = ref.MakeArgument(x,
                                             gamma,
                                             beta,
                                             host_y,
                                             host_save_mean,
                                             host_save_inv_std,
                                             PassThrough{},
                                             length,
                                             reduce_dim,
                                             1e-4);
        auto ref_invoker = ref.MakeInvoker();
        ref_invoker.Run(ref_argument);
    }

    int num_kernel = 0;

    auto f_get_argument = [&](auto& inst_ptr) {
        if constexpr(SaveMeanInvStd)
            return inst_ptr->MakeArgumentPointer(length,
                                                 strideXY,
                                                 strideGammaBeta,
                                                 strideGammaBeta,
                                                 strideXY,
                                                 strideSaveMeanInvStd,
                                                 strideSaveMeanInvStd,
                                                 reduce_dim,
                                                 1e-4,
                                                 x_dev.GetDeviceBuffer(),
                                                 gamma_dev.GetDeviceBuffer(),
                                                 beta_dev.GetDeviceBuffer(),
                                                 y_dev.GetDeviceBuffer(),
                                                 save_mean_dev.GetDeviceBuffer(),
                                                 save_inv_std_dev.GetDeviceBuffer(),
                                                 PassThrough{});
        else
            return inst_ptr->MakeArgumentPointer(length,
                                                 strideXY,
                                                 strideGammaBeta,
                                                 strideGammaBeta,
                                                 strideXY,
                                                 strideSaveMeanInvStd,
                                                 strideSaveMeanInvStd,
                                                 reduce_dim,
                                                 1e-4,
                                                 x_dev.GetDeviceBuffer(),
                                                 gamma_dev.GetDeviceBuffer(),
                                                 beta_dev.GetDeviceBuffer(),
                                                 y_dev.GetDeviceBuffer(),
                                                 nullptr,
                                                 nullptr,
                                                 PassThrough{});
    };

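// [Assumed note, not from the patch] Both MakeArgumentPointer call shapes
// above pass the save_mean / save_inv_std strides unconditionally; supplying
// nullptr for the two output buffers (the `else` branch) is what disables the
// extra stores, so the same instance list serves both profiling modes.
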
for(auto& inst_ptr : instance_ptrs)
|
||||
{
|
||||
auto argument_ptr = inst_ptr->MakeArgumentPointer(length,
|
||||
strideXY,
|
||||
strideGammaBeta,
|
||||
strideGammaBeta,
|
||||
strideXY,
|
||||
reduce_dim,
|
||||
1e-4,
|
||||
x_dev.GetDeviceBuffer(),
|
||||
gamma_dev.GetDeviceBuffer(),
|
||||
beta_dev.GetDeviceBuffer(),
|
||||
y_dev.GetDeviceBuffer(),
|
||||
nullptr,
|
||||
nullptr,
|
||||
PassThrough{});
|
||||
auto argument_ptr = f_get_argument(inst_ptr);
|
||||
|
||||
if(inst_ptr->IsSupportedArgument(argument_ptr.get()))
|
||||
{
|
||||
@@ -168,6 +213,10 @@ bool profile_layernorm_impl(int do_verification,
|
||||
beta.mDesc.GetElementSize() * sizeof(BetaDataType) +
|
||||
y.mDesc.GetElementSize() * sizeof(YDataType);
|
||||
|
||||
if constexpr(SaveMeanInvStd)
|
||||
num_bytes += save_mean.mDesc.GetElementSpaceSize() * sizeof(SaveMeanInvStdDataType) +
|
||||
save_inv_std.mDesc.GetElementSpaceSize() * sizeof(SaveMeanInvStdDataType);
|
||||
|
||||
float gb_per_sec = num_bytes / 1.E6 / avg_time;

if(time_kernel)
@@ -184,10 +233,23 @@ bool profile_layernorm_impl(int do_verification,
if(do_verification)
{
y_dev.FromDevice(y.mData.data());

bool pass =
ck::utils::check_err(y.mData, host_y.mData, "Error: Incorrect results", 1e-3, 1e-3);

if constexpr(SaveMeanInvStd)
{
save_mean_dev.FromDevice(save_mean.mData.data());
pass &= ck::utils::check_err(
save_mean.mData, host_save_mean.mData, "Error: Incorrect results", 1e-3, 1e-3);

save_inv_std_dev.FromDevice(save_inv_std.mData.data());
pass &= ck::utils::check_err(save_inv_std.mData,
host_save_inv_std.mData,
"Error: Incorrect results",
1e-3,
1e-3);
}

if(do_log)
{
LogRangeAsType<float>(std::cout << "x : ", x.mData, ",") << std::endl;

@@ -93,12 +93,12 @@ int profile_groupnorm(int argc, char* argv[])

if(data_type == ck::DataTypeEnum::Float)
{
ck::profiler::profile_groupnorm_impl<F32, F32, F32, F32, F32>(
ck::profiler::profile_groupnorm_impl<F32, F32, F32, F32, F32, F32, false>(
do_verification, init_method, do_log, time_kernel, length);
}
else if(data_type == ck::DataTypeEnum::Half)
{
ck::profiler::profile_groupnorm_impl<F16, F16, F16, F32, F16>(
ck::profiler::profile_groupnorm_impl<F16, F16, F16, F32, F16, F32, false>(
do_verification, init_method, do_log, time_kernel, length);
}
else

@@ -82,12 +82,12 @@ int profile_layernorm(int argc, char* argv[])

if(data_type == ck::DataTypeEnum::Half)
{
ck::profiler::profile_layernorm_impl<F16, F16, F16, F32, F16, rank>(
ck::profiler::profile_layernorm_impl<F16, F16, F16, F32, F16, F32, false, rank>(
do_verification, init_method, do_log, time_kernel, length);
}
else if(data_type == ck::DataTypeEnum::Float)
{
ck::profiler::profile_layernorm_impl<F32, F32, F32, F32, F32, rank>(
ck::profiler::profile_layernorm_impl<F32, F32, F32, F32, F32, F32, false, rank>(
do_verification, init_method, do_log, time_kernel, length);
}
else

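Both dispatchers above hard-code the new SaveMeanInvStd flag to false, so the profiler CLI keeps benchmarking the plain forward path; the typed tests below are what exercise true. A standalone sketch of the compile-time shape of this dispatch, with profile_impl standing in for the real profiler entry points:

#include <cstdio>

// Hypothetical stand-in for profile_layernorm_impl's trailing non-type
// template parameters; the data-type parameters are omitted for brevity.
template <bool SaveMeanInvStd, int Rank>
bool profile_impl()
{
    std::printf("save stats: %s, rank: %d\n", SaveMeanInvStd ? "yes" : "no", Rank);
    return true;
}

int main()
{
    profile_impl<false, 2>(); // what the CLI paths above instantiate
    profile_impl<true, 2>();  // what the unit tests below instantiate
    return 0;
}
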
@@ -12,11 +12,12 @@ template <typename Tuple>
class TestGroupnorm : public ::testing::Test
{
protected:
using XDataType = std::tuple_element_t<0, Tuple>;
using GammaDataType = std::tuple_element_t<1, Tuple>;
using BetaDataType = std::tuple_element_t<2, Tuple>;
using ComputeDataType = std::tuple_element_t<3, Tuple>;
using YDataType = std::tuple_element_t<4, Tuple>;
using XDataType = std::tuple_element_t<0, Tuple>;
using GammaDataType = std::tuple_element_t<1, Tuple>;
using BetaDataType = std::tuple_element_t<2, Tuple>;
using ComputeDataType = std::tuple_element_t<3, Tuple>;
using YDataType = std::tuple_element_t<4, Tuple>;
using SaveMeanInvStdDataType = std::tuple_element_t<5, Tuple>;

void Run()
{
@@ -37,7 +38,9 @@ class TestGroupnorm : public ::testing::Test
GammaDataType,
BetaDataType,
ComputeDataType,
YDataType>(true, 2, false, false, length);
YDataType,
SaveMeanInvStdDataType,
true>(true, 2, false, false, length);
EXPECT_TRUE(success);
}
}
@@ -45,7 +48,7 @@ class TestGroupnorm : public ::testing::Test

using KernelTypes = ::testing::Types<
// XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType>
std::tuple<F16, F16, F16, F32, F16>>;
std::tuple<F16, F16, F16, F32, F16, F32>>;

TYPED_TEST_SUITE(TestGroupnorm, KernelTypes);
TYPED_TEST(TestGroupnorm, Test_FP16) { this->Run(); }

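The four test files in this change all use the same GoogleTest typed-test pattern; the only functional change is the sixth tuple element. A compilable distillation of the pattern (assuming GoogleTest is available; F16 here is a placeholder alias, not ck::half_t):

#include <gtest/gtest.h>
#include <tuple>
#include <type_traits>

// Each std::tuple in KernelTypes is one data-type configuration, unpacked
// positionally with std::tuple_element_t. Element 5 is the new
// SaveMeanInvStdDataType.
using F16 = unsigned short; // placeholder for ck::half_t in this sketch
using F32 = float;

template <typename Tuple>
class TestNormTypes : public ::testing::Test
{
protected:
    using XDataType              = std::tuple_element_t<0, Tuple>;
    using SaveMeanInvStdDataType = std::tuple_element_t<5, Tuple>;
};

using KernelTypes = ::testing::Types<std::tuple<F16, F16, F16, F32, F16, F32>,
                                     std::tuple<F32, F32, F32, F32, F32, F32>>;

TYPED_TEST_SUITE(TestNormTypes, KernelTypes);

TYPED_TEST(TestNormTypes, StatsStayFp32)
{
    // Both configurations keep mean / inverse std in fp32.
    constexpr bool stats_are_f32 =
        std::is_same_v<typename TestFixture::SaveMeanInvStdDataType, F32>;
    EXPECT_TRUE(stats_are_f32);
}
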
@@ -12,11 +12,12 @@ template <typename Tuple>
class TestGroupnorm : public ::testing::Test
{
protected:
using XDataType = std::tuple_element_t<0, Tuple>;
using GammaDataType = std::tuple_element_t<1, Tuple>;
using BetaDataType = std::tuple_element_t<2, Tuple>;
using ComputeDataType = std::tuple_element_t<3, Tuple>;
using YDataType = std::tuple_element_t<4, Tuple>;
using XDataType = std::tuple_element_t<0, Tuple>;
using GammaDataType = std::tuple_element_t<1, Tuple>;
using BetaDataType = std::tuple_element_t<2, Tuple>;
using ComputeDataType = std::tuple_element_t<3, Tuple>;
using YDataType = std::tuple_element_t<4, Tuple>;
using SaveMeanInvStdDataType = std::tuple_element_t<5, Tuple>;

void Run()
{
@@ -35,7 +36,9 @@ class TestGroupnorm : public ::testing::Test
GammaDataType,
BetaDataType,
ComputeDataType,
YDataType>(true, 2, false, false, length);
YDataType,
SaveMeanInvStdDataType,
true>(true, 2, false, false, length);
EXPECT_TRUE(success);
}
}
@@ -43,7 +46,7 @@ class TestGroupnorm : public ::testing::Test

using KernelTypes = ::testing::Types<
// XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType>
std::tuple<F32, F32, F32, F32, F32>>;
std::tuple<F32, F32, F32, F32, F32, F32>>;

TYPED_TEST_SUITE(TestGroupnorm, KernelTypes);
TYPED_TEST(TestGroupnorm, Test_FP32) { this->Run(); }

@@ -12,11 +12,12 @@ template <typename Tuple>
class TestLayernorm2d : public ::testing::Test
{
protected:
using XDataType = std::tuple_element_t<0, Tuple>;
using GammaDataType = std::tuple_element_t<1, Tuple>;
using BetaDataType = std::tuple_element_t<2, Tuple>;
using ComputeDataType = std::tuple_element_t<3, Tuple>;
using YDataType = std::tuple_element_t<4, Tuple>;
using XDataType = std::tuple_element_t<0, Tuple>;
using GammaDataType = std::tuple_element_t<1, Tuple>;
using BetaDataType = std::tuple_element_t<2, Tuple>;
using ComputeDataType = std::tuple_element_t<3, Tuple>;
using YDataType = std::tuple_element_t<4, Tuple>;
using SaveMeanInvStdDataType = std::tuple_element_t<5, Tuple>;

void Run()
{
@@ -31,6 +32,8 @@ class TestLayernorm2d : public ::testing::Test
BetaDataType,
ComputeDataType,
YDataType,
SaveMeanInvStdDataType,
true,
2>(true, 2, false, false, length);
EXPECT_TRUE(success);
}
@@ -39,7 +42,7 @@ class TestLayernorm2d : public ::testing::Test

using KernelTypes = ::testing::Types<
// XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType>
std::tuple<F16, F16, F16, F32, F16>>;
std::tuple<F16, F16, F16, F32, F16, F32>>;

TYPED_TEST_SUITE(TestLayernorm2d, KernelTypes);
TYPED_TEST(TestLayernorm2d, Test_FP16) { this->Run(); }

@@ -12,11 +12,12 @@ template <typename Tuple>
class TestLayernorm2d : public ::testing::Test
{
protected:
using XDataType = std::tuple_element_t<0, Tuple>;
using GammaDataType = std::tuple_element_t<1, Tuple>;
using BetaDataType = std::tuple_element_t<2, Tuple>;
using ComputeDataType = std::tuple_element_t<3, Tuple>;
using YDataType = std::tuple_element_t<4, Tuple>;
using XDataType = std::tuple_element_t<0, Tuple>;
using GammaDataType = std::tuple_element_t<1, Tuple>;
using BetaDataType = std::tuple_element_t<2, Tuple>;
using ComputeDataType = std::tuple_element_t<3, Tuple>;
using YDataType = std::tuple_element_t<4, Tuple>;
using SaveMeanInvStdDataType = std::tuple_element_t<5, Tuple>;

void Run()
{
@@ -31,6 +32,8 @@ class TestLayernorm2d : public ::testing::Test
BetaDataType,
ComputeDataType,
YDataType,
SaveMeanInvStdDataType,
true,
2>(true, 2, false, false, length);
EXPECT_TRUE(success);
}
@@ -39,7 +42,7 @@ class TestLayernorm2d : public ::testing::Test

using KernelTypes = ::testing::Types<
// XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType>
std::tuple<F32, F32, F32, F32, F32>>;
std::tuple<F32, F32, F32, F32, F32, F32>>;

TYPED_TEST_SUITE(TestLayernorm2d, KernelTypes);
TYPED_TEST(TestLayernorm2d, Test_FP32) { this->Run(); }