Mirror of https://github.com/ROCm/composable_kernel.git
Synced 2026-05-14 02:02:46 +00:00
Improve normalization (#580)
* Sync the order of the type string with the template parameters
* Add more instances
* Check the vector sizes and remove a redundant variable
* Extract a variable to a static, in preparation for splitting out the sweep-once kernel
* Separate the sweep-once flow and optimize it
* 1. Rename AccDataType in normalization to ComputeDataType
2. Rename AccElementwiseOperation to YElementwiseOperation in normalization
* Remove unused code
* Update the naive-variance kernel (see the variance sketch below)
* Refine the instance name string
* Fix typo
* Support naive variance for device_normalization
* Check the block size
* Share the VGPRs of x and y
* Share the VGPRs of gamma and beta
* Add more instances
* Support fp16 sqrt, as an experiment
* Add a CHANGELOG entry
* Fix typo
* clang-format
[ROCm/composable_kernel commit: 6a6163a3d1]
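A note on the variance bullets above: the kernels in this change implement two estimators, the naive two-moment variance and Welford's running variance, selected by the new UseWelford template parameter that appears in the diff below. In outline (notation mine, not from the commit):

    Var(x) = E[x^2] - (E[x])^2                         (naive: two running sums)

    delta_k = x_k - mu_(k-1)
    mu_k    = mu_(k-1) + delta_k / k
    M2_k    = M2_(k-1) + delta_k * (x_k - mu_k)
    Var(x)  = M2_n / n                                 (Welford: running update)

The naive form needs only two accumulators per row but can lose precision to cancellation when (E[x])^2 is close to E[x^2]; Welford is numerically steadier at a slightly higher per-element cost, which is why both remain available behind the switch.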
@@ -9,7 +9,7 @@ Full documentation for Composable Kernel is not yet available.
- Fixed grouped ConvBwdWeight test case failure (#524).

### Optimizations
- Optimized ...
- Improved performance of the normalization kernel

### Added
- Added user tutorial (#563).
@@ -12,12 +12,12 @@

#include "ck/library/tensor_operation_instance/gpu/normalization.hpp"

using XDataType = ck::half_t;
using GammaDataType = ck::half_t;
using BetaDataType = ck::half_t;
using YDataType = ck::half_t;
using AccDataType = float;
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using XDataType = ck::half_t;
using GammaDataType = ck::half_t;
using BetaDataType = ck::half_t;
using YDataType = ck::half_t;
using ComputeDataType = float;
using PassThrough = ck::tensor_operation::element_wise::PassThrough;

constexpr int Rank = 2;
constexpr int NumReduceDim = 1;
@@ -54,7 +54,7 @@ int main(int argc, char* argv[])
using DeviceOp = ck::tensor_operation::device::DeviceNormalization<XDataType,
GammaDataType,
BetaDataType,
AccDataType,
ComputeDataType,
YDataType,
PassThrough,
Rank,
@@ -20,12 +20,12 @@
#include "ck/library/utility/literals.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_layernorm.hpp"

using XDataType = ck::half_t;
using GammaDataType = ck::half_t;
using BetaDataType = ck::half_t;
using YDataType = ck::half_t;
using AccDataType = float;
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using XDataType = ck::half_t;
using GammaDataType = ck::half_t;
using BetaDataType = ck::half_t;
using YDataType = ck::half_t;
using ComputeDataType = float;
using PassThrough = ck::tensor_operation::element_wise::PassThrough;

constexpr int Rank = 2;
constexpr int NumReduceDim = 1;
@@ -34,7 +34,7 @@ using DeviceInstance =
ck::tensor_operation::device::DeviceNormalizationImpl<XDataType,
GammaDataType,
BetaDataType,
AccDataType,
ComputeDataType,
YDataType,
PassThrough,
Rank,
@@ -121,7 +121,7 @@ int main()
GammaDataType,
BetaDataType,
YDataType,
AccDataType,
ComputeDataType,
PassThrough,
Rank,
NumReduceDim>;
@@ -23,11 +23,11 @@
constexpr int Rank = 5;
constexpr int NumReduceDim = 3;

using XDataType = ck::half_t;
using GammaDataType = ck::half_t;
using BetaDataType = ck::half_t;
using YDataType = ck::half_t;
using AccDataType = float;
using XDataType = ck::half_t;
using GammaDataType = ck::half_t;
using BetaDataType = ck::half_t;
using YDataType = ck::half_t;
using ComputeDataType = float;

struct YElementOp
{
@@ -50,7 +50,7 @@ using DeviceInstance =
ck::tensor_operation::device::DeviceNormalizationImpl<XDataType,
GammaDataType,
BetaDataType,
AccDataType,
ComputeDataType,
YDataType,
YElementOp,
Rank,
@@ -157,7 +157,7 @@ int main(int argc, char* argv[])
GammaDataType,
BetaDataType,
YDataType,
AccDataType,
ComputeDataType,
YElementOp>;

ReferenceInstance ref;
@@ -14,9 +14,9 @@ namespace device {
template <typename XDataType,
typename GammaDataType,
typename BetaDataType,
typename AccDataType,
typename ComputeDataType,
typename YDataType,
typename AccElementwiseOperation,
typename YElementwiseOperation,
index_t Rank,
index_t NumReduceDim>
struct DeviceNormalization : public BaseOperator
@@ -35,7 +35,7 @@ struct DeviceNormalization : public BaseOperator
void* p_y,
void* p_savedMean,
void* p_savedInvVar,
AccElementwiseOperation acc_elementwise_op) = 0;
YElementwiseOperation y_elementwise_op) = 0;

virtual std::unique_ptr<BaseInvoker> MakeInvokerPointer() = 0;
};
@@ -43,17 +43,17 @@ struct DeviceNormalization : public BaseOperator
template <typename XDataType,
typename GammaDataType,
typename BetaDataType,
typename AccDataType,
typename ComputeDataType,
typename YDataType,
typename AccElementwiseOperation,
typename YElementwiseOperation,
index_t Rank,
index_t NumReduceDim>
using DeviceNormalizationPtr = std::unique_ptr<DeviceNormalization<XDataType,
GammaDataType,
BetaDataType,
AccDataType,
ComputeDataType,
YDataType,
AccElementwiseOperation,
YElementwiseOperation,
Rank,
NumReduceDim>>;

@@ -10,46 +10,11 @@
#include "ck/tensor_operation/gpu/device/device_normalization.hpp"
#include "ck/tensor_operation/gpu/device/device_reduce.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_reduce_common.hpp"
#include "ck/tensor_operation/gpu/grid/gridwise_normalization_welford_variance.hpp"
#include "ck/tensor_operation/gpu/grid/gridwise_normalization_selector.hpp"
#include "ck/tensor_operation/gpu/grid/gridwise_set_buffer_value.hpp"
#include "ck/host_utility/device_prop.hpp"
#include "ck/host_utility/kernel_launch.hpp"

namespace ck {
template <typename GridwiseReduction,
typename XDataType,
typename GammaDataType,
typename BetaDataType,
typename YDataType,
typename AccDataType,
typename AccElementwiseOperation,
typename GridDesc_M_K>
__global__ void kernel_normalization(const GridDesc_M_K x_grid_desc_m_k,
const GridDesc_M_K gamma_grid_desc_m_k,
const GridDesc_M_K beta_grid_desc_m_k,
const GridDesc_M_K y_grid_desc_m_k,
index_t num_k_block_tile_iteration,
AccDataType epsilon,
const XDataType* const __restrict__ p_x_global,
const GammaDataType* const __restrict__ p_gamma_global,
const BetaDataType* const __restrict__ p_beta_global,
YDataType* const __restrict__ p_y_global,
const AccElementwiseOperation acc_elementwise_op)
{
GridwiseReduction::Run(x_grid_desc_m_k,
gamma_grid_desc_m_k,
beta_grid_desc_m_k,
y_grid_desc_m_k,
num_k_block_tile_iteration,
epsilon,
p_x_global,
p_gamma_global,
p_beta_global,
p_y_global,
acc_elementwise_op);
};
} // namespace ck

namespace ck {
namespace tensor_operation {
namespace device {
@@ -58,9 +23,9 @@ namespace device {
template <typename XDataType,
typename GammaDataType,
typename BetaDataType,
typename AccDataType,
typename ComputeDataType,
typename YDataType,
typename AccElementwiseOperation,
typename YElementwiseOperation,
index_t Rank,
index_t NumReduceDim,
index_t BlockSize,
@@ -74,16 +39,18 @@ template <typename XDataType,
index_t GammaSrcVectorSize,
index_t BetaSrcVectorDim,
index_t BetaSrcVectorSize,
index_t YDstVectorSize>
index_t YDstVectorSize,
bool UseWelford = true>
struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,
GammaDataType,
BetaDataType,
AccDataType,
ComputeDataType,
YDataType,
AccElementwiseOperation,
YElementwiseOperation,
Rank,
NumReduceDim>
{
static_assert(BlockSize == MThreadClusterSize * KThreadClusterSize);
static_assert(
((GammaSrcVectorDim == 0 && MThreadSliceSize % GammaSrcVectorSize == 0) ||
(GammaSrcVectorDim == 1 && KThreadSliceSize % GammaSrcVectorSize == 0)),
@@ -167,51 +134,6 @@ struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,

using GridDesc_M_K = decltype(MakeSrc2dDescriptor({1}, {1}, 1, 1));

using GridwiseReduceLayernormGeneric =
GridwiseNormalizationWelfordVariance_mk_to_mk<XDataType,
GammaDataType,
BetaDataType,
YDataType,
AccDataType,
AccElementwiseOperation,
GridDesc_M_K,
BlockSize,
MThreadClusterSize,
KThreadClusterSize,
MThreadSliceSize,
KThreadSliceSize,
XYSrcVectorDim,
XSrcVectorSize,
GammaSrcVectorDim,
GammaSrcVectorSize,
BetaSrcVectorDim,
BetaSrcVectorSize,
XYSrcVectorDim,
YDstVectorSize,
false>;
using GridwiseNormalizationSweepOnce =
GridwiseNormalizationWelfordVariance_mk_to_mk<XDataType,
GammaDataType,
BetaDataType,
YDataType,
AccDataType,
AccElementwiseOperation,
GridDesc_M_K,
BlockSize,
MThreadClusterSize,
KThreadClusterSize,
MThreadSliceSize,
KThreadSliceSize,
XYSrcVectorDim,
XSrcVectorSize,
GammaSrcVectorDim,
GammaSrcVectorSize,
BetaSrcVectorDim,
BetaSrcVectorSize,
XYSrcVectorDim,
YDstVectorSize,
true>;

struct Argument : public BaseArgument
{
Argument(const std::vector<index_t> lengths,
@@ -220,7 +142,7 @@ struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,
const std::vector<index_t> betaStrides,
const std::vector<index_t> yStrides,
const std::vector<index_t> reduceDims,
AccElementwiseOperation acc_elementwise_op,
YElementwiseOperation y_elementwise_op,
double epsilon,
const XDataType* p_x,
const GammaDataType* p_gamma,
@@ -230,9 +152,9 @@ struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,
p_gamma_(p_gamma),
p_beta_(p_beta),
p_y_(p_y),
acc_elementwise_op_(acc_elementwise_op)
y_elementwise_op_(y_elementwise_op)
{
epsilon_ = static_cast<AccDataType>(epsilon);
epsilon_ = static_cast<ComputeDataType>(epsilon);

Lengths_ = shuffle_tensor_dimensions<Rank, NumReduceDim>(lengths, reduceDims);
xStrides_ = shuffle_tensor_dimensions<Rank, NumReduceDim>(xStrides, reduceDims);
@@ -265,7 +187,7 @@ struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,
x_grid_desc_m_k_.GetLength(Number<1>{}) <= KThreadClusterSize * KThreadSliceSize;
}

AccDataType epsilon_;
ComputeDataType epsilon_;

const XDataType* p_x_;
const GammaDataType* p_gamma_;
@@ -278,7 +200,7 @@ struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,
std::vector<index_t> betaStrides_;
std::vector<index_t> yStrides_;

AccElementwiseOperation acc_elementwise_op_;
YElementwiseOperation y_elementwise_op_;

int blkGroupSize_;
int numBlockTileIteration_;
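The isSweeponce_ flag assigned just above comes down to a single comparison: one block tile spans KThreadClusterSize * KThreadSliceSize elements of the reduced axis, and if the whole reduction length fits in one tile the kernel can keep x in registers and skip the second read from global memory. A standalone sketch of that predicate, with my own names rather than the CK members:

    #include <cstdint>

    // One block tile covers k_cluster * k_slice elements of the reduced axis.
    // If the full reduction length fits in a single tile, the kernel can
    // "sweep once": load x one time and normalize straight from registers.
    constexpr bool is_sweep_once(std::int64_t reduce_length,
                                 std::int64_t k_cluster,
                                 std::int64_t k_slice)
    {
        return reduce_length <= k_cluster * k_slice;
    }

    static_assert(is_sweep_once(1024, 64, 16));  // 64 * 16 = 1024: one sweep
    static_assert(!is_sweep_once(4096, 64, 16)); // needs several tile iterations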
@@ -295,23 +217,27 @@ struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,
{
float Run(const Argument& arg, const StreamConfig& stream_config = StreamConfig{})
{
const auto kernel_main = arg.isSweeponce_
? kernel_normalization<GridwiseNormalizationSweepOnce,
XDataType,
GammaDataType,
BetaDataType,
YDataType,
AccDataType,
AccElementwiseOperation,
GridDesc_M_K>
: kernel_normalization<GridwiseReduceLayernormGeneric,
XDataType,
GammaDataType,
BetaDataType,
YDataType,
AccDataType,
AccElementwiseOperation,
GridDesc_M_K>;
auto kernel_main = NormalizationKernelSelector<XDataType,
GammaDataType,
BetaDataType,
YDataType,
ComputeDataType,
YElementwiseOperation,
GridDesc_M_K,
BlockSize,
MThreadClusterSize,
KThreadClusterSize,
MThreadSliceSize,
KThreadSliceSize,
XYSrcVectorDim,
XSrcVectorSize,
GammaSrcVectorDim,
GammaSrcVectorSize,
BetaSrcVectorDim,
BetaSrcVectorSize,
XYSrcVectorDim,
YDstVectorSize,
UseWelford>(arg.isSweeponce_);

float avg_time = 0;
avg_time += launch_and_time_kernel(stream_config,
@@ -329,7 +255,7 @@ struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,
arg.p_gamma_,
arg.p_beta_,
arg.p_y_,
arg.acc_elementwise_op_);
arg.y_elementwise_op_);

return (avg_time);
};
@@ -429,7 +355,7 @@ struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,
void* p_y,
void* p_saveMean,
void* p_saveInvVar,
AccElementwiseOperation acc_elementwise_op) override
YElementwiseOperation y_elementwise_op) override
{
// TODO
// Optional cache of the intermediate results (mean and InvVariance) during the
@@ -443,7 +369,7 @@ struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,
betaStrides,
yStrides,
reduceDims,
acc_elementwise_op,
y_elementwise_op,
epsilon,
static_cast<const XDataType*>(p_x),
static_cast<const GammaDataType*>(p_gamma),
@@ -462,8 +388,8 @@ struct DeviceNormalizationImpl : public DeviceNormalization<XDataType,

// clang-format off
str << "DeviceNormalizationImpl<" << BlockSize << ",";
str << "M_C" << MThreadClusterSize << "_S" << MThreadSliceSize << ",";
str << "K_C" << KThreadClusterSize << "_S" << KThreadSliceSize << ",";
str << "Cluster_MK_" << MThreadClusterSize << "_" << KThreadClusterSize << ",";
str << "Slice_MK_" << MThreadSliceSize << "_" << KThreadSliceSize << ",";
str << "XYSrcVectorDim_" << XYSrcVectorDim << ",";
str << "VectorSize_X" << XSrcVectorSize << "_Gamma" << GammaSrcVectorSize << "_Beta" << BetaSrcVectorSize << "_Y" << YDstVectorSize << ">";
// clang-format on
@@ -4,9 +4,8 @@
#pragma once

#include "ck/utility/data_type.hpp"
#include "ck/utility/reduction_common.hpp"

#include "ck/utility/reduction_operator.hpp"
#include "ck/utility/reduction_functions_accumulate.hpp"
#include "ck/tensor_operation/gpu/block/reduction_functions_blockwise.hpp"
#include "ck/tensor_operation/gpu/thread/reduction_functions_threadwise.hpp"
#include "ck/tensor_operation/gpu/thread/threadwise_tensor_slice_transfer.hpp"
@@ -19,8 +18,8 @@ template <typename XDataType,
typename GammaDataType,
typename BetaDataType,
typename YDataType,
typename AccDataType,
typename AccElementwiseOperation,
typename ComputeDataType,
typename YElementwiseOperation,
typename GridDesc_M_K,
index_t BlockSize,
index_t MThreadClusterSize,
@@ -46,6 +45,10 @@ struct GridwiseNormalizationNaiveVariance_mk_to_mk
(YDstVectorDim == 1 && KThreadSliceSize % YDstVectorSize == 0),
"Invalid thread slice sizes and/or vector sizes configuration, please check!");

static_assert(XSrcVectorSize == YDstVectorSize);
static_assert(XSrcVectorSize == GammaSrcVectorSize);
static_assert(XSrcVectorSize == BetaSrcVectorSize);

static constexpr bool reorder_thread_cluster = (XSrcVectorDim == 0);

using ThreadClusterLengths_M_K = Sequence<MThreadClusterSize, KThreadClusterSize>;
@@ -59,19 +62,23 @@ struct GridwiseNormalizationNaiveVariance_mk_to_mk
static constexpr auto thread_cluster_desc =
make_cluster_descriptor(ThreadClusterLengths_M_K{}, ThreadClusterArrangeOrder{});

using ThreadBufferLengths_M_K = Sequence<MThreadSliceSize, XSrcVectorSize>;
static constexpr auto thread_buffer_desc_m_k = make_naive_tensor_descriptor_packed(
make_tuple(Number<MThreadSliceSize>{}, Number<XSrcVectorSize>{}));

using ThreadReduceSrcDesc_M_K = decltype(make_naive_tensor_descriptor_packed(
make_tuple(Number<MThreadSliceSize>{}, Number<KThreadSliceSize>{})));
make_tuple(Number<MThreadSliceSize>{}, Number<XSrcVectorSize>{})));
using ThreadReduceDstDesc_M =
decltype(make_naive_tensor_descriptor_packed(make_tuple(Number<MThreadSliceSize>{})));

using BlockwiseSumReduce = PartitionedBlockwiseReduction<AccDataType,
using BlockwiseSumReduce = PartitionedBlockwiseReduction<ComputeDataType,
BlockSize,
ThreadClusterLengths_M_K,
ThreadClusterArrangeOrder,
reduce::Add,
true>;

using ThreadwiseSumReduce = ThreadwiseReduction<AccDataType,
using ThreadwiseSumReduce = ThreadwiseReduction<ComputeDataType,
ThreadReduceSrcDesc_M_K,
ThreadReduceDstDesc_M,
reduce::Add,
@@ -81,64 +88,70 @@ struct GridwiseNormalizationNaiveVariance_mk_to_mk
static constexpr auto I1 = Number<1>{};
static constexpr auto I2 = Number<2>{};

static constexpr index_t M_BlockTileSize = MThreadClusterSize * MThreadSliceSize;
static constexpr index_t K_BlockTileSize = KThreadClusterSize * KThreadSliceSize;
static constexpr index_t M_BlockTileSize = MThreadClusterSize * MThreadSliceSize;
static constexpr index_t K_BlockTileSize = KThreadClusterSize * KThreadSliceSize;
static constexpr index_t K_BlockTileStepSize = KThreadClusterSize * XSrcVectorSize;

static constexpr auto ThreadBufferNumber = Number<KThreadSliceSize / XSrcVectorSize>{};

__device__ static void Run(const GridDesc_M_K& x_grid_desc_m_k,
const GridDesc_M_K& gamma_grid_desc_m_k,
const GridDesc_M_K& beta_grid_desc_m_k,
const GridDesc_M_K& y_grid_desc_m_k,
index_t num_k_block_tile_iteration,
AccDataType epsilon,
ComputeDataType epsilon,
const XDataType* const __restrict__ p_x_global,
const GammaDataType* const __restrict__ p_gamma_global,
const BetaDataType* const __restrict__ p_beta_global,
YDataType* const __restrict__ p_y_global,
const AccElementwiseOperation acc_elementwise_op)
const YElementwiseOperation y_elementwise_op)
{
if constexpr(SweepOnce)
{
num_k_block_tile_iteration = 1;
}

// LDS
__shared__ AccDataType p_reduce_work_buffer[BlockSize];

auto y_global_val_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
p_y_global, y_grid_desc_m_k.GetElementSpaceSize());
__shared__ ComputeDataType p_reduce_work_buffer[BlockSize];

auto reduce_work_buf =
make_dynamic_buffer<AddressSpaceEnum::Lds>(p_reduce_work_buffer, BlockSize);

StaticBuffer<AddressSpaceEnum::Vgpr, AccDataType, MThreadSliceSize * KThreadSliceSize, true>
x_thread_buf;
auto y_global_val_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
p_y_global, y_grid_desc_m_k.GetElementSpaceSize());

StaticBuffer<AddressSpaceEnum::Vgpr, AccDataType, MThreadSliceSize * KThreadSliceSize, true>
gamma_thread_buf;
auto x_thread_buf = generate_tuple(
[&](auto) {
return StaticBuffer<AddressSpaceEnum::Vgpr,
ComputeDataType,
MThreadSliceSize * XSrcVectorSize,
true>{};
},
Number<ThreadBufferNumber>{});

StaticBuffer<AddressSpaceEnum::Vgpr,
AccDataType,
MThreadSliceSize * KThreadSliceSize,
true>& beta_thread_buf = gamma_thread_buf;
auto gamma_thread_buf = generate_tuple(
[&](auto) {
return StaticBuffer<AddressSpaceEnum::Vgpr,
ComputeDataType,
MThreadSliceSize * GammaSrcVectorSize,
true>{};
},
Number<ThreadBufferNumber>{});

StaticBuffer<AddressSpaceEnum::Vgpr, AccDataType, MThreadSliceSize * KThreadSliceSize, true>
y_thread_buf;
auto& beta_thread_buf = gamma_thread_buf;

StaticBuffer<AddressSpaceEnum::Vgpr,
AccDataType,
MThreadSliceSize * KThreadSliceSize,
true>& x_square_thread_buf = y_thread_buf;
auto y_thread_buf = generate_tuple(
[&](auto) {
return StaticBuffer<AddressSpaceEnum::Vgpr,
ComputeDataType,
MThreadSliceSize * YDstVectorSize,
true>{};
},
Number<ThreadBufferNumber>{});

StaticBuffer<AddressSpaceEnum::Vgpr, AccDataType, MThreadSliceSize, true> mean_thread_buf;
StaticBuffer<AddressSpaceEnum::Vgpr, AccDataType, MThreadSliceSize, true>
auto& x_square_thread_buf = y_thread_buf;

StaticBuffer<AddressSpaceEnum::Vgpr, ComputeDataType, MThreadSliceSize, true>
mean_thread_buf;
StaticBuffer<AddressSpaceEnum::Vgpr, ComputeDataType, MThreadSliceSize, true>
mean_square_thread_buf;
StaticBuffer<AddressSpaceEnum::Vgpr, AccDataType, MThreadSliceSize, true>& var_thread_buf =
mean_square_thread_buf;

static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
mean_thread_buf(I) = reduce::Add::template GetIdentityValue<AccDataType>();
mean_square_thread_buf(I) = reduce::Add::template GetIdentityValue<AccDataType>();
});
StaticBuffer<AddressSpaceEnum::Vgpr, ComputeDataType, MThreadSliceSize, true>&
var_thread_buf = mean_square_thread_buf;

const index_t thread_local_id = get_thread_local_1d_id();
const index_t block_global_id = get_block_1d_id();
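The "share the VGPR" bullets land here: beta_thread_buf is now a reference to gamma_thread_buf and x_square_thread_buf aliases y_thread_buf, which the equal-vector-size static_asserts earlier make safe. A self-contained illustration of the idiom (plain C++, illustrative names, not the CK buffer types):

    #include <array>
    #include <cstdio>

    int main()
    {
        // Two logical buffers, one physical allocation: beta_buf aliases
        // gamma_buf. This is safe because the gamma phase is finished before
        // the beta phase starts, and the sizes are guaranteed equal.
        std::array<float, 8> gamma_buf{};
        std::array<float, 8>& beta_buf = gamma_buf;

        gamma_buf[0] = 2.0f;               // gamma phase: y *= gamma
        beta_buf[0]  = 0.5f;               // beta phase reuses the storage: y += beta
        std::printf("%f\n", gamma_buf[0]); // prints 0.500000: same storage
        return 0;
    }

In the kernel the same trick halves the VGPR footprint of the thread-local buffers, which is what the commit message means by sharing the VGPRs of x/y and gamma/beta.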
@@ -149,12 +162,8 @@ struct GridwiseNormalizationNaiveVariance_mk_to_mk
const auto thread_m_cluster_id = thread_cluster_idx[I0];
const auto thread_k_cluster_id = thread_cluster_idx[I1];

using ThreadBufferLengths_M_K = Sequence<MThreadSliceSize, KThreadSliceSize>;
constexpr auto thread_buffer_desc_m_k = make_naive_tensor_descriptor_packed(
make_tuple(Number<MThreadSliceSize>{}, Number<KThreadSliceSize>{}));

auto threadwise_x_load = ThreadwiseTensorSliceTransfer_v2<XDataType,
AccDataType,
ComputeDataType,
GridDesc_M_K,
decltype(thread_buffer_desc_m_k),
ThreadBufferLengths_M_K,
@@ -166,11 +175,11 @@ struct GridwiseNormalizationNaiveVariance_mk_to_mk
x_grid_desc_m_k,
make_multi_index(block_global_id * M_BlockTileSize +
thread_m_cluster_id * MThreadSliceSize,
thread_k_cluster_id * KThreadSliceSize));
thread_k_cluster_id * XSrcVectorSize));

auto threadwise_gamma_load =
ThreadwiseTensorSliceTransfer_v2<GammaDataType,
AccDataType,
ComputeDataType,
GridDesc_M_K,
decltype(thread_buffer_desc_m_k),
ThreadBufferLengths_M_K,
@@ -182,11 +191,11 @@ struct GridwiseNormalizationNaiveVariance_mk_to_mk
gamma_grid_desc_m_k,
make_multi_index(block_global_id * M_BlockTileSize +
thread_m_cluster_id * MThreadSliceSize,
thread_k_cluster_id * KThreadSliceSize));
thread_k_cluster_id * GammaSrcVectorSize));

auto threadwise_beta_load =
ThreadwiseTensorSliceTransfer_v2<BetaDataType,
AccDataType,
ComputeDataType,
GridDesc_M_K,
decltype(thread_buffer_desc_m_k),
ThreadBufferLengths_M_K,
@@ -198,14 +207,14 @@ struct GridwiseNormalizationNaiveVariance_mk_to_mk
beta_grid_desc_m_k,
make_multi_index(block_global_id * M_BlockTileSize +
thread_m_cluster_id * MThreadSliceSize,
thread_k_cluster_id * KThreadSliceSize));
thread_k_cluster_id * BetaSrcVectorSize));

auto threadwise_y_store =
ThreadwiseTensorSliceTransfer_v1r3<AccDataType,
ThreadwiseTensorSliceTransfer_v1r3<ComputeDataType,
YDataType,
decltype(thread_buffer_desc_m_k),
GridDesc_M_K,
AccElementwiseOperation,
YElementwiseOperation,
ThreadBufferLengths_M_K,
ThreadBufferDimAccessOrder,
YDstVectorDim,
@@ -216,13 +225,10 @@ struct GridwiseNormalizationNaiveVariance_mk_to_mk
y_grid_desc_m_k,
make_multi_index(block_global_id * M_BlockTileSize +
thread_m_cluster_id * MThreadSliceSize,
thread_k_cluster_id * KThreadSliceSize),
acc_elementwise_op);
thread_k_cluster_id * YDstVectorSize),
y_elementwise_op);

// Copy x from Cache
// one pass: fwd, second pass: bwd
constexpr auto thread_copy_fwd_step_m_k =
make_multi_index(0, SweepOnce ? 0 : K_BlockTileSize);
constexpr auto thread_copy_fwd_step_m_k = make_multi_index(0, K_BlockTileStepSize);
constexpr auto thread_copy_bwd_step_m_k =
make_multi_index(0, SweepOnce ? 0 : -K_BlockTileSize);
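With the K slice now split into XSrcVectorSize-wide vector loads, each thread owns ThreadBufferNumber = KThreadSliceSize / XSrcVectorSize small buffers, and the slice window advances by K_BlockTileStepSize = KThreadClusterSize * XSrcVectorSize per load instead of a full K_BlockTileSize tile. A worked example with made-up tile sizes (these numbers are not from any shipped instance):

    #include <cstdio>

    int main()
    {
        constexpr int KThreadClusterSize = 64; // threads along K
        constexpr int KThreadSliceSize   = 8;  // elements per thread along K
        constexpr int XSrcVectorSize     = 2;  // elements per vector load

        // Each thread's K slice is covered by this many vector-sized buffers,
        constexpr int ThreadBufferNumber  = KThreadSliceSize / XSrcVectorSize;   // 4
        // and one load step advances the block this far along K,
        constexpr int K_BlockTileStepSize = KThreadClusterSize * XSrcVectorSize; // 128
        // versus a full tile of KThreadClusterSize * KThreadSliceSize = 512.

        std::printf("buffers per thread: %d, step: %d\n",
                    ThreadBufferNumber, K_BlockTileStepSize);
        return 0;
    }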
@@ -239,121 +245,260 @@ struct GridwiseNormalizationNaiveVariance_mk_to_mk
// FIXME: Should not hack the transform from deviceOP
int reduce_length = x_grid_desc_m_k.GetTransforms()[I2].GetUpperLengths()[I0];

index_t reducedTiles = 0;
do
{
threadwise_x_load.Run(x_grid_desc_m_k,
x_global_val_buf,
thread_buffer_desc_m_k,
make_tuple(I0, I0),
x_thread_buf);

static_for<0, MThreadSliceSize, 1>{}([&](auto iM) {
static_for<0, KThreadSliceSize, 1>{}([&](auto iK) {
constexpr auto offset_m_k =
thread_buffer_desc_m_k.CalculateOffset(make_tuple(iM, iK));
x_square_thread_buf(Number<offset_m_k>{}) =
x_thread_buf(Number<offset_m_k>{}) * x_thread_buf(Number<offset_m_k>{});
});
});

ThreadwiseSumReduce::Reduce(x_thread_buf, mean_thread_buf);
ThreadwiseSumReduce::Reduce(x_square_thread_buf, mean_square_thread_buf);

threadwise_x_load.MoveSrcSliceWindow(x_grid_desc_m_k, thread_copy_fwd_step_m_k);

++reducedTiles;
} while(reducedTiles < num_k_block_tile_iteration);

static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
if constexpr(I > 0)
block_sync_lds();

BlockwiseSumReduce::Reduce(reduce_work_buf, mean_thread_buf(I));
mean_thread_buf(I) = mean_thread_buf(I) / reduce_length;

block_sync_lds();

BlockwiseSumReduce::Reduce(reduce_work_buf, mean_square_thread_buf(I));
mean_square_thread_buf(I) = mean_square_thread_buf(I) / reduce_length;

// var(x) = E[x^2] - E[x]^2
var_thread_buf(I) =
mean_square_thread_buf(I) - (mean_thread_buf(I) * mean_thread_buf(I));
mean_thread_buf(I) = reduce::Add::template GetIdentityValue<ComputeDataType>();
mean_square_thread_buf(I) = reduce::Add::template GetIdentityValue<ComputeDataType>();
});

// y = (x - E[x]) / sqrt(var[x] + epsilon)
auto thread_copy_tail_m_k = (num_k_block_tile_iteration - 1) * thread_copy_fwd_step_m_k;

threadwise_x_load.MoveSrcSliceWindow(x_grid_desc_m_k, thread_copy_bwd_step_m_k);
threadwise_gamma_load.MoveSrcSliceWindow(gamma_grid_desc_m_k, thread_copy_tail_m_k);
threadwise_beta_load.MoveSrcSliceWindow(beta_grid_desc_m_k, thread_copy_tail_m_k);
threadwise_y_store.MoveDstSliceWindow(y_grid_desc_m_k, thread_copy_tail_m_k);

reducedTiles = 0;
do
// Separate sweep once and sweep twice pipeline
if constexpr(SweepOnce)
{
if constexpr(!SweepOnce)
{
static_for<0, ThreadBufferNumber, 1>{}([&](auto i) {
threadwise_x_load.Run(x_grid_desc_m_k,
x_global_val_buf,
thread_buffer_desc_m_k,
make_tuple(I0, I0),
x_thread_buf);
x_thread_buf(i));

threadwise_gamma_load.Run(gamma_grid_desc_m_k,
gamma_global_val_buf,
thread_buffer_desc_m_k,
make_tuple(I0, I0),
gamma_thread_buf(i));

static_for<0, MThreadSliceSize, 1>{}([&](auto iM) {
static_for<0, XSrcVectorSize, 1>{}([&](auto iK) {
constexpr auto offset_m_k =
thread_buffer_desc_m_k.CalculateOffset(make_tuple(iM, iK));
x_square_thread_buf(i)(Number<offset_m_k>{}) =
x_thread_buf(i)(Number<offset_m_k>{}) *
x_thread_buf(i)(Number<offset_m_k>{});
});
});

ThreadwiseSumReduce::Reduce(x_thread_buf[i], mean_thread_buf);
ThreadwiseSumReduce::Reduce(x_square_thread_buf[i], mean_square_thread_buf);

if constexpr(i != ThreadBufferNumber - 1)
{
threadwise_x_load.MoveSrcSliceWindow(x_grid_desc_m_k, thread_copy_fwd_step_m_k);
threadwise_gamma_load.MoveSrcSliceWindow(gamma_grid_desc_m_k,
thread_copy_fwd_step_m_k);
}
});

static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
if constexpr(I > 0)
block_sync_lds();

BlockwiseSumReduce::Reduce(reduce_work_buf, mean_thread_buf(I));
mean_thread_buf(I) = mean_thread_buf(I) / reduce_length;

block_sync_lds();

BlockwiseSumReduce::Reduce(reduce_work_buf, mean_square_thread_buf(I));
mean_square_thread_buf(I) = mean_square_thread_buf(I) / reduce_length;

// var(x) = E[x^2] - E[x]^2
var_thread_buf(I) =
mean_square_thread_buf(I) - (mean_thread_buf(I) * mean_thread_buf(I));
});

static_for<0, MThreadSliceSize, 1>{}([&](auto iM) {
auto divisor = 1 / ck::math::sqrt(var_thread_buf(iM) + epsilon);
static_for<0, ThreadBufferNumber, 1>{}([&](auto iK0) {
static_for<0, XSrcVectorSize, 1>{}([&](auto iK1) {
constexpr auto offset_m_k =
thread_buffer_desc_m_k.CalculateOffset(make_tuple(iM, iK1));

// normalize
y_thread_buf(iK0)(Number<offset_m_k>{}) =
(x_thread_buf(iK0)(Number<offset_m_k>{}) - mean_thread_buf(iM)) *
divisor;

// gamma & beta
y_thread_buf(iK0)(Number<offset_m_k>{}) =
y_thread_buf(iK0)(Number<offset_m_k>{}) *
gamma_thread_buf(iK0)(Number<offset_m_k>{});
});
});
});

static_for<0, ThreadBufferNumber, 1>{}([&](auto i) {
threadwise_beta_load.Run(beta_grid_desc_m_k,
beta_global_val_buf,
thread_buffer_desc_m_k,
make_tuple(I0, I0),
beta_thread_buf(i));

if constexpr(i != ThreadBufferNumber - 1)
threadwise_beta_load.MoveSrcSliceWindow(beta_grid_desc_m_k,
thread_copy_fwd_step_m_k);
});

static_for<0, MThreadSliceSize, 1>{}([&](auto iM) {
static_for<0, ThreadBufferNumber, 1>{}([&](auto iK0) {
static_for<0, XSrcVectorSize, 1>{}([&](auto iK1) {
constexpr auto offset_m_k =
thread_buffer_desc_m_k.CalculateOffset(make_tuple(iM, iK1));

// beta
y_thread_buf(iK0)(Number<offset_m_k>{}) =
y_thread_buf(iK0)(Number<offset_m_k>{}) +
beta_thread_buf(iK0)(Number<offset_m_k>{});
});
});
});

static_for<0, ThreadBufferNumber, 1>{}([&](auto i) {
threadwise_y_store.Run(thread_buffer_desc_m_k,
make_tuple(I0, I0),
y_thread_buf(i),
y_grid_desc_m_k,
y_global_val_buf);

if constexpr(i != ThreadBufferNumber - 1)
threadwise_y_store.MoveDstSliceWindow(y_grid_desc_m_k,
thread_copy_fwd_step_m_k);
});
} // end of sweep once
else
{
for(index_t reducedTiles = 0; reducedTiles < num_k_block_tile_iteration; ++reducedTiles)
{
static_for<0, ThreadBufferNumber, 1>{}([&](auto i) {
threadwise_x_load.Run(x_grid_desc_m_k,
x_global_val_buf,
thread_buffer_desc_m_k,
make_tuple(I0, I0),
x_thread_buf(i));
threadwise_x_load.MoveSrcSliceWindow(x_grid_desc_m_k, thread_copy_fwd_step_m_k);

static_for<0, MThreadSliceSize, 1>{}([&](auto iM) {
static_for<0, XSrcVectorSize, 1>{}([&](auto iK) {
constexpr auto offset_m_k =
thread_buffer_desc_m_k.CalculateOffset(make_tuple(iM, iK));
x_square_thread_buf(i)(Number<offset_m_k>{}) =
x_thread_buf(i)(Number<offset_m_k>{}) *
x_thread_buf(i)(Number<offset_m_k>{});
});
});

ThreadwiseSumReduce::Reduce(x_thread_buf[i], mean_thread_buf);
ThreadwiseSumReduce::Reduce(x_square_thread_buf[i], mean_square_thread_buf);
});
}

threadwise_gamma_load.Run(gamma_grid_desc_m_k,
gamma_global_val_buf,
thread_buffer_desc_m_k,
make_tuple(I0, I0),
gamma_thread_buf);
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
if constexpr(I > 0)
block_sync_lds();

static_for<0, MThreadSliceSize, 1>{}([&](auto iM) {
static_for<0, KThreadSliceSize, 1>{}([&](auto iK) {
constexpr auto offset_m_k =
thread_buffer_desc_m_k.CalculateOffset(make_tuple(iM, iK));
BlockwiseSumReduce::Reduce(reduce_work_buf, mean_thread_buf(I));
mean_thread_buf(I) = mean_thread_buf(I) / reduce_length;

// normalize
y_thread_buf(Number<offset_m_k>{}) =
(x_thread_buf(Number<offset_m_k>{}) - mean_thread_buf(iM)) /
sqrt(var_thread_buf(iM) + epsilon);
block_sync_lds();

// gamma
y_thread_buf(Number<offset_m_k>{}) =
y_thread_buf(Number<offset_m_k>{}) * gamma_thread_buf(Number<offset_m_k>{});
});
BlockwiseSumReduce::Reduce(reduce_work_buf, mean_square_thread_buf(I));
mean_square_thread_buf(I) = mean_square_thread_buf(I) / reduce_length;

// var(x) = E[x^2] - E[x]^2
var_thread_buf(I) =
mean_square_thread_buf(I) - (mean_thread_buf(I) * mean_thread_buf(I));
});

threadwise_beta_load.Run(beta_grid_desc_m_k,
beta_global_val_buf,
thread_buffer_desc_m_k,
make_tuple(I0, I0),
beta_thread_buf);

static_for<0, MThreadSliceSize, 1>{}([&](auto iM) {
static_for<0, KThreadSliceSize, 1>{}([&](auto iK) {
constexpr auto offset_m_k =
thread_buffer_desc_m_k.CalculateOffset(make_tuple(iM, iK));

// beta
y_thread_buf(Number<offset_m_k>{}) =
y_thread_buf(Number<offset_m_k>{}) + beta_thread_buf(Number<offset_m_k>{});
});
});

threadwise_y_store.Run(thread_buffer_desc_m_k,
make_tuple(I0, I0),
y_thread_buf,
y_grid_desc_m_k,
y_global_val_buf);
auto thread_copy_tail_m_k =
(num_k_block_tile_iteration - 1) * ThreadBufferNumber * thread_copy_fwd_step_m_k;

threadwise_x_load.MoveSrcSliceWindow(x_grid_desc_m_k, thread_copy_bwd_step_m_k);
threadwise_gamma_load.MoveSrcSliceWindow(gamma_grid_desc_m_k, thread_copy_bwd_step_m_k);
threadwise_beta_load.MoveSrcSliceWindow(beta_grid_desc_m_k, thread_copy_bwd_step_m_k);
threadwise_y_store.MoveDstSliceWindow(y_grid_desc_m_k, thread_copy_bwd_step_m_k);
threadwise_gamma_load.MoveSrcSliceWindow(gamma_grid_desc_m_k, thread_copy_tail_m_k);
threadwise_beta_load.MoveSrcSliceWindow(beta_grid_desc_m_k, thread_copy_tail_m_k);
threadwise_y_store.MoveDstSliceWindow(y_grid_desc_m_k, thread_copy_tail_m_k);

++reducedTiles;
} while(reducedTiles < num_k_block_tile_iteration);
for(index_t reducedTiles = 0; reducedTiles < num_k_block_tile_iteration; ++reducedTiles)
{
static_for<0, ThreadBufferNumber, 1>{}([&](auto i) {
threadwise_x_load.Run(x_grid_desc_m_k,
x_global_val_buf,
thread_buffer_desc_m_k,
make_tuple(I0, I0),
x_thread_buf(i));
threadwise_x_load.MoveSrcSliceWindow(x_grid_desc_m_k, thread_copy_fwd_step_m_k);
});

static_for<0, ThreadBufferNumber, 1>{}([&](auto i) {
threadwise_gamma_load.Run(gamma_grid_desc_m_k,
gamma_global_val_buf,
thread_buffer_desc_m_k,
make_tuple(I0, I0),
gamma_thread_buf(i));

threadwise_gamma_load.MoveSrcSliceWindow(gamma_grid_desc_m_k,
thread_copy_fwd_step_m_k);
});

static_for<0, MThreadSliceSize, 1>{}([&](auto iM) {
auto divisor = 1 / ck::math::sqrt(var_thread_buf(iM) + epsilon);
static_for<0, ThreadBufferNumber, 1>{}([&](auto iK0) {
static_for<0, XSrcVectorSize, 1>{}([&](auto iK1) {
constexpr auto offset_m_k =
thread_buffer_desc_m_k.CalculateOffset(make_tuple(iM, iK1));

// normalize
y_thread_buf(iK0)(Number<offset_m_k>{}) =
(x_thread_buf(iK0)(Number<offset_m_k>{}) - mean_thread_buf(iM)) *
divisor;

// gamma
y_thread_buf(iK0)(Number<offset_m_k>{}) =
y_thread_buf(iK0)(Number<offset_m_k>{}) *
gamma_thread_buf(iK0)(Number<offset_m_k>{});
});
});
});

static_for<0, ThreadBufferNumber, 1>{}([&](auto i) {
threadwise_beta_load.Run(beta_grid_desc_m_k,
beta_global_val_buf,
thread_buffer_desc_m_k,
make_tuple(I0, I0),
beta_thread_buf(i));
threadwise_beta_load.MoveSrcSliceWindow(beta_grid_desc_m_k,
thread_copy_fwd_step_m_k);
});

static_for<0, MThreadSliceSize, 1>{}([&](auto iM) {
static_for<0, ThreadBufferNumber, 1>{}([&](auto iK0) {
static_for<0, XSrcVectorSize, 1>{}([&](auto iK1) {
constexpr auto offset_m_k =
thread_buffer_desc_m_k.CalculateOffset(make_tuple(iM, iK1));

// beta
y_thread_buf(iK0)(Number<offset_m_k>{}) =
y_thread_buf(iK0)(Number<offset_m_k>{}) +
beta_thread_buf(iK0)(Number<offset_m_k>{});
});
});
});

static_for<0, ThreadBufferNumber, 1>{}([&](auto i) {
threadwise_y_store.Run(thread_buffer_desc_m_k,
make_tuple(I0, I0),
y_thread_buf(i),
y_grid_desc_m_k,
y_global_val_buf);
threadwise_y_store.MoveDstSliceWindow(y_grid_desc_m_k,
thread_copy_fwd_step_m_k);
});

threadwise_x_load.MoveSrcSliceWindow(x_grid_desc_m_k, 2 * thread_copy_bwd_step_m_k);
threadwise_gamma_load.MoveSrcSliceWindow(gamma_grid_desc_m_k,
2 * thread_copy_bwd_step_m_k);
threadwise_beta_load.MoveSrcSliceWindow(beta_grid_desc_m_k,
2 * thread_copy_bwd_step_m_k);
threadwise_y_store.MoveDstSliceWindow(y_grid_desc_m_k,
2 * thread_copy_bwd_step_m_k);
}
} // end of sweep twice
}
};
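Taken together, the naive-variance pipeline above computes, per normalized row: the sums of x and x*x in one reduction pass, the variance via the E[x^2] - E[x]^2 identity, and then a normalize/scale/shift pass. A scalar CPU reference of the same math (my sketch, not CK code):

    #include <cmath>
    #include <vector>

    // Row-wise layernorm with naive variance: one pass accumulates sum(x) and
    // sum(x*x); a second pass writes y = (x - mean) / sqrt(var + eps) * gamma + beta.
    void layernorm_naive_row(const std::vector<float>& x,
                             const std::vector<float>& gamma,
                             const std::vector<float>& beta,
                             std::vector<float>& y,
                             float epsilon)
    {
        const std::size_t n = x.size();
        float sum = 0.f, sum_sq = 0.f;
        for(std::size_t k = 0; k < n; ++k) // reduction pass
        {
            sum    += x[k];
            sum_sq += x[k] * x[k];
        }
        const float mean    = sum / n;
        const float var     = sum_sq / n - mean * mean; // E[x^2] - E[x]^2
        const float divisor = 1.f / std::sqrt(var + epsilon);
        for(std::size_t k = 0; k < n; ++k) // normalize / gamma / beta pass
            y[k] = (x[k] - mean) * divisor * gamma[k] + beta[k];
    }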
@@ -0,0 +1,195 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck/tensor_operation/gpu/grid/gridwise_normalization_naive_variance.hpp"
#include "ck/tensor_operation/gpu/grid/gridwise_normalization_welford_variance.hpp"

namespace ck {
template <typename GridwiseReduction,
typename XDataType,
typename GammaDataType,
typename BetaDataType,
typename YDataType,
typename ComputeDataType,
typename YElementwiseOperation,
typename GridDesc_M_K>
__global__ void kernel_normalization(const GridDesc_M_K x_grid_desc_m_k,
const GridDesc_M_K gamma_grid_desc_m_k,
const GridDesc_M_K beta_grid_desc_m_k,
const GridDesc_M_K y_grid_desc_m_k,
index_t num_k_block_tile_iteration,
ComputeDataType epsilon,
const XDataType* const __restrict__ p_x_global,
const GammaDataType* const __restrict__ p_gamma_global,
const BetaDataType* const __restrict__ p_beta_global,
YDataType* const __restrict__ p_y_global,
const YElementwiseOperation y_elementwise_op)
{
GridwiseReduction::Run(x_grid_desc_m_k,
gamma_grid_desc_m_k,
beta_grid_desc_m_k,
y_grid_desc_m_k,
num_k_block_tile_iteration,
epsilon,
p_x_global,
p_gamma_global,
p_beta_global,
p_y_global,
y_elementwise_op);
};

template <typename XDataType,
typename GammaDataType,
typename BetaDataType,
typename YDataType,
typename ComputeDataType,
typename YElementwiseOperation,
typename GridDesc_M_K,
index_t BlockSize,
index_t MThreadClusterSize,
index_t KThreadClusterSize,
index_t MThreadSliceSize,
index_t KThreadSliceSize,
index_t XSrcVectorDim,
index_t XSrcVectorSize,
index_t GammaSrcVectorDim,
index_t GammaSrcVectorSize,
index_t BetaSrcVectorDim,
index_t BetaSrcVectorSize,
index_t YDstVectorDim,
index_t YDstVectorSize,
bool UseWelford>
auto NormalizationKernelSelector(bool isSweepOnce)
{
using GridwiseNormalizationGenericNaive =
GridwiseNormalizationNaiveVariance_mk_to_mk<XDataType,
GammaDataType,
BetaDataType,
YDataType,
ComputeDataType,
YElementwiseOperation,
GridDesc_M_K,
BlockSize,
MThreadClusterSize,
KThreadClusterSize,
MThreadSliceSize,
KThreadSliceSize,
XSrcVectorDim,
XSrcVectorSize,
GammaSrcVectorDim,
GammaSrcVectorSize,
BetaSrcVectorDim,
BetaSrcVectorSize,
YDstVectorDim,
YDstVectorSize,
false>;
using GridwiseNormalizationSweepOnceNaive =
GridwiseNormalizationNaiveVariance_mk_to_mk<XDataType,
GammaDataType,
BetaDataType,
YDataType,
ComputeDataType,
YElementwiseOperation,
GridDesc_M_K,
BlockSize,
MThreadClusterSize,
KThreadClusterSize,
MThreadSliceSize,
KThreadSliceSize,
XSrcVectorDim,
XSrcVectorSize,
GammaSrcVectorDim,
GammaSrcVectorSize,
BetaSrcVectorDim,
BetaSrcVectorSize,
YDstVectorDim,
YDstVectorSize,
true>;
using GridwiseNormalizationGenericWelford =
GridwiseNormalizationWelfordVariance_mk_to_mk<XDataType,
GammaDataType,
BetaDataType,
YDataType,
ComputeDataType,
YElementwiseOperation,
GridDesc_M_K,
BlockSize,
MThreadClusterSize,
KThreadClusterSize,
MThreadSliceSize,
KThreadSliceSize,
XSrcVectorDim,
XSrcVectorSize,
GammaSrcVectorDim,
GammaSrcVectorSize,
BetaSrcVectorDim,
BetaSrcVectorSize,
YDstVectorDim,
YDstVectorSize,
false>;
using GridwiseNormalizationSweepOnceWelford =
GridwiseNormalizationWelfordVariance_mk_to_mk<XDataType,
GammaDataType,
BetaDataType,
YDataType,
ComputeDataType,
YElementwiseOperation,
GridDesc_M_K,
BlockSize,
MThreadClusterSize,
KThreadClusterSize,
MThreadSliceSize,
KThreadSliceSize,
XSrcVectorDim,
XSrcVectorSize,
GammaSrcVectorDim,
GammaSrcVectorSize,
BetaSrcVectorDim,
BetaSrcVectorSize,
YDstVectorDim,
YDstVectorSize,
true>;

if constexpr(UseWelford)
{
return isSweepOnce ? kernel_normalization<GridwiseNormalizationSweepOnceWelford,
XDataType,
GammaDataType,
BetaDataType,
YDataType,
ComputeDataType,
YElementwiseOperation,
GridDesc_M_K>
: kernel_normalization<GridwiseNormalizationGenericWelford,
XDataType,
GammaDataType,
BetaDataType,
YDataType,
ComputeDataType,
YElementwiseOperation,
GridDesc_M_K>;
}
else
{
return isSweepOnce ? kernel_normalization<GridwiseNormalizationSweepOnceNaive,
XDataType,
GammaDataType,
BetaDataType,
YDataType,
ComputeDataType,
YElementwiseOperation,
GridDesc_M_K>
: kernel_normalization<GridwiseNormalizationGenericNaive,
XDataType,
GammaDataType,
BetaDataType,
YDataType,
ComputeDataType,
YElementwiseOperation,
GridDesc_M_K>;
}
}

} // namespace ck
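The new selector resolves the variance algorithm at compile time (UseWelford) and the sweep count at run time (isSweepOnce), returning one of four kernel instantiations as a plain function pointer; both arms of each ternary have the same function type, so the deduced return type is consistent. A minimal standalone analogue of the pattern (host-side C++, stand-in functions, not the CK API):

    #include <cstdio>

    void kernel_sweep_once_welford() { std::puts("sweep-once Welford"); }
    void kernel_generic_welford()    { std::puts("generic Welford"); }
    void kernel_sweep_once_naive()   { std::puts("sweep-once naive"); }
    void kernel_generic_naive()      { std::puts("generic naive"); }

    // Compile-time algorithm choice, run-time pipeline choice, returned as a
    // function pointer, mirroring how the CK selector returns a __global__ kernel.
    template <bool UseWelford>
    auto normalization_kernel_selector(bool is_sweep_once)
    {
        if constexpr(UseWelford)
            return is_sweep_once ? kernel_sweep_once_welford : kernel_generic_welford;
        else
            return is_sweep_once ? kernel_sweep_once_naive : kernel_generic_naive;
    }

    int main()
    {
        auto kernel = normalization_kernel_selector<true>(false);
        kernel(); // the real code launches the selected kernel via launch_and_time_kernel
        return 0;
    }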
@@ -16,8 +16,8 @@ template <typename XDataType,
typename GammaDataType,
typename BetaDataType,
typename YDataType,
typename AccDataType,
typename AccElementwiseOperation,
typename ComputeDataType,
typename YElementwiseOperation,
typename GridDesc_M_K,
index_t BlockSize,
index_t MThreadClusterSize,
@@ -43,6 +43,10 @@ struct GridwiseNormalizationWelfordVariance_mk_to_mk
(YDstVectorDim == 1 && KThreadSliceSize % YDstVectorSize == 0),
"Invalid thread slice sizes and/or vector sizes configuration, please check!");

static_assert(XSrcVectorSize == YDstVectorSize);
static_assert(XSrcVectorSize == GammaSrcVectorSize);
static_assert(XSrcVectorSize == BetaSrcVectorSize);

static constexpr bool reorder_thread_cluster = (XSrcVectorDim == 0);

using ThreadClusterLengths_M_K = Sequence<MThreadClusterSize, KThreadClusterSize>;
@@ -56,15 +60,19 @@ struct GridwiseNormalizationWelfordVariance_mk_to_mk
static constexpr auto thread_cluster_desc =
make_cluster_descriptor(ThreadClusterLengths_M_K{}, ThreadClusterArrangeOrder{});

using ThreadBufferLengths_M_K = Sequence<MThreadSliceSize, XSrcVectorSize>;
static constexpr auto thread_buffer_desc_m_k = make_naive_tensor_descriptor_packed(
make_tuple(Number<MThreadSliceSize>{}, Number<XSrcVectorSize>{}));

using ThreadReduceSrcDesc_M_K = decltype(make_naive_tensor_descriptor_packed(
make_tuple(Number<MThreadSliceSize>{}, Number<XSrcVectorSize>{})));
using ThreadReduceDstDesc_M =
decltype(make_naive_tensor_descriptor_packed(make_tuple(Number<MThreadSliceSize>{})));

using ThreadwiseWelford =
ThreadwiseWelford<AccDataType, ThreadReduceSrcDesc_M_K, ThreadReduceDstDesc_M>;
ThreadwiseWelford<ComputeDataType, ThreadReduceSrcDesc_M_K, ThreadReduceDstDesc_M>;

using BlockwiseWelford = BlockwiseWelford<AccDataType,
using BlockwiseWelford = BlockwiseWelford<ComputeDataType,
BlockSize,
ThreadClusterLengths_M_K,
ThreadClusterArrangeOrder>;
@@ -77,10 +85,7 @@ struct GridwiseNormalizationWelfordVariance_mk_to_mk
static constexpr index_t K_BlockTileSize = KThreadClusterSize * KThreadSliceSize;
static constexpr index_t K_BlockTileStepSize = KThreadClusterSize * XSrcVectorSize;

static constexpr auto XThreadBufferNumber = Number<KThreadSliceSize / XSrcVectorSize>{};
static constexpr auto GammaThreadBufferNumber = Number<KThreadSliceSize / XSrcVectorSize>{};
static constexpr auto BetaThreadBufferNumber = Number<KThreadSliceSize / XSrcVectorSize>{};
static constexpr auto YThreadBufferNumber = Number<KThreadSliceSize / XSrcVectorSize>{};
static constexpr auto ThreadBufferNumber = Number<KThreadSliceSize / XSrcVectorSize>{};

__device__ static int GetKPerThread(const GridDesc_M_K& x_grid_desc_m_k,
int thread_k_cluster_id)
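ThreadwiseWelford and BlockwiseWelford above carry the running-moment form of the variance math sketched near the top of the page: each thread pushes samples into a (count, mean, M2) triple, and the blockwise step merges partial triples across the cluster. A scalar version of both operations (my sketch; the CK versions operate on the tile descriptors shown here):

    #include <cstdio>

    struct Welford
    {
        int   count = 0;
        float mean  = 0.f;
        float m2    = 0.f; // sum of squared deviations from the running mean

        void push(float x) // per-sample update, the threadwise step
        {
            ++count;
            const float delta = x - mean;
            mean += delta / count;
            m2   += delta * (x - mean);
        }

        void merge(const Welford& o) // combine two partials, the blockwise step
        {
            if(o.count == 0)
                return;
            const int   n     = count + o.count;
            const float delta = o.mean - mean;
            mean  = mean + delta * o.count / n;
            m2    = m2 + o.m2 + delta * delta * (float(count) * o.count / n);
            count = n;
        }

        float variance() const { return count > 0 ? m2 / count : 0.f; }
    };

    int main()
    {
        Welford a, b;
        for(float x : {1.f, 2.f, 3.f}) a.push(x);
        for(float x : {4.f, 5.f, 6.f}) b.push(x);
        a.merge(b);
        std::printf("mean %f var %f\n", a.mean, a.variance()); // mean 3.5, var ~2.9167
        return 0;
    }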
@@ -93,7 +98,7 @@ struct GridwiseNormalizationWelfordVariance_mk_to_mk

if(kPerBlockTail > 0)
{
static_for<0, XThreadBufferNumber, 1>{}([&](auto i) {
static_for<0, ThreadBufferNumber, 1>{}([&](auto i) {
int thread_max_len =
(thread_k_cluster_id + 1) * XSrcVectorSize + K_BlockTileStepSize * i;
int delta = thread_max_len - kPerBlockTail;
@@ -110,59 +115,41 @@ struct GridwiseNormalizationWelfordVariance_mk_to_mk
const GridDesc_M_K& beta_grid_desc_m_k,
const GridDesc_M_K& y_grid_desc_m_k,
index_t num_k_block_tile_iteration,
AccDataType epsilon,
ComputeDataType epsilon,
const XDataType* const __restrict__ p_x_global,
const GammaDataType* const __restrict__ p_gamma_global,
const BetaDataType* const __restrict__ p_beta_global,
YDataType* const __restrict__ p_y_global,
const AccElementwiseOperation acc_elementwise_op)
const YElementwiseOperation y_elementwise_op)
{
if constexpr(SweepOnce)
{
num_k_block_tile_iteration = 1;
}

auto y_global_val_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
p_y_global, y_grid_desc_m_k.GetElementSpaceSize());

auto x_thread_buf = generate_tuple(
[&](auto) {
return StaticBuffer<AddressSpaceEnum::Vgpr,
AccDataType,
ComputeDataType,
MThreadSliceSize * XSrcVectorSize,
true>{};
},
Number<XThreadBufferNumber>{});
Number<ThreadBufferNumber>{});

auto gamma_thread_buf = generate_tuple(
[&](auto) {
return StaticBuffer<AddressSpaceEnum::Vgpr,
AccDataType,
ComputeDataType,
MThreadSliceSize * GammaSrcVectorSize,
true>{};
},
Number<GammaThreadBufferNumber>{});
Number<ThreadBufferNumber>{});

auto beta_thread_buf = generate_tuple(
[&](auto) {
return StaticBuffer<AddressSpaceEnum::Vgpr,
AccDataType,
MThreadSliceSize * BetaSrcVectorSize,
true>{};
},
Number<BetaThreadBufferNumber>{});
auto& beta_thread_buf = gamma_thread_buf;
auto& y_thread_buf = x_thread_buf;

auto y_thread_buf = generate_tuple(
[&](auto) {
return StaticBuffer<AddressSpaceEnum::Vgpr,
AccDataType,
MThreadSliceSize * YDstVectorSize,
true>{};
},
Number<YThreadBufferNumber>{});

StaticBuffer<AddressSpaceEnum::Vgpr, AccDataType, MThreadSliceSize, true> mean_thread_buf;
StaticBuffer<AddressSpaceEnum::Vgpr, AccDataType, MThreadSliceSize, true> var_thread_buf;
StaticBuffer<AddressSpaceEnum::Vgpr, ComputeDataType, MThreadSliceSize, true>
mean_thread_buf;
StaticBuffer<AddressSpaceEnum::Vgpr, ComputeDataType, MThreadSliceSize, true>
var_thread_buf;

const index_t thread_local_id = get_thread_local_1d_id();
const index_t block_global_id = get_block_1d_id();
@@ -173,12 +160,8 @@ struct GridwiseNormalizationWelfordVariance_mk_to_mk
const auto thread_m_cluster_id = thread_cluster_idx[I0];
const auto thread_k_cluster_id = thread_cluster_idx[I1];

using ThreadBufferLengths_M_K = Sequence<MThreadSliceSize, XSrcVectorSize>;
constexpr auto thread_buffer_desc_m_k = make_naive_tensor_descriptor_packed(
make_tuple(Number<MThreadSliceSize>{}, Number<XSrcVectorSize>{}));

auto threadwise_x_load = ThreadwiseTensorSliceTransfer_v2<XDataType,
AccDataType,
ComputeDataType,
GridDesc_M_K,
decltype(thread_buffer_desc_m_k),
ThreadBufferLengths_M_K,
@@ -194,7 +177,7 @@ struct GridwiseNormalizationWelfordVariance_mk_to_mk

auto threadwise_gamma_load =
ThreadwiseTensorSliceTransfer_v2<GammaDataType,
AccDataType,
ComputeDataType,
GridDesc_M_K,
decltype(thread_buffer_desc_m_k),
ThreadBufferLengths_M_K,
@@ -210,7 +193,7 @@ struct GridwiseNormalizationWelfordVariance_mk_to_mk

auto threadwise_beta_load =
ThreadwiseTensorSliceTransfer_v2<BetaDataType,
AccDataType,
ComputeDataType,
GridDesc_M_K,
decltype(thread_buffer_desc_m_k),
ThreadBufferLengths_M_K,
@@ -225,11 +208,11 @@ struct GridwiseNormalizationWelfordVariance_mk_to_mk
thread_k_cluster_id * BetaSrcVectorSize));

auto threadwise_y_store =
ThreadwiseTensorSliceTransfer_v1r3<AccDataType,
ThreadwiseTensorSliceTransfer_v1r3<ComputeDataType,
YDataType,
decltype(thread_buffer_desc_m_k),
GridDesc_M_K,
AccElementwiseOperation,
YElementwiseOperation,
ThreadBufferLengths_M_K,
ThreadBufferDimAccessOrder,
YDstVectorDim,
@@ -241,7 +224,7 @@ struct GridwiseNormalizationWelfordVariance_mk_to_mk
make_multi_index(block_global_id * M_BlockTileSize +
thread_m_cluster_id * MThreadSliceSize,
thread_k_cluster_id * YDstVectorSize),
acc_elementwise_op);
y_elementwise_op);

constexpr auto thread_copy_fwd_step_m_k = make_multi_index(0, K_BlockTileStepSize);
constexpr auto thread_copy_bwd_step_m_k =
@@ -260,67 +243,47 @@ struct GridwiseNormalizationWelfordVariance_mk_to_mk
|
||||
threadwise_welford.max_count_ = GetKPerThread(x_grid_desc_m_k, thread_k_cluster_id);
|
||||
|
||||
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
|
||||
mean_thread_buf(I) = type_convert<AccDataType>(0.0f);
|
||||
var_thread_buf(I) = type_convert<AccDataType>(0.0f);
|
||||
mean_thread_buf(I) = type_convert<ComputeDataType>(0.0f);
|
||||
var_thread_buf(I) = type_convert<ComputeDataType>(0.0f);
|
||||
});
|
||||
|
||||
for(index_t reducedTiles = 0; reducedTiles < num_k_block_tile_iteration; ++reducedTiles)
|
||||
// Separate sweep once and sweep twice pipeline
|
||||
if constexpr(SweepOnce)
|
||||
{
|
||||
static_for<0, XThreadBufferNumber, 1>{}([&](auto i) {
|
||||
static_for<0, ThreadBufferNumber, 1>{}([&](auto i) {
|
||||
threadwise_x_load.Run(x_grid_desc_m_k,
|
||||
x_global_val_buf,
|
||||
thread_buffer_desc_m_k,
|
||||
make_tuple(I0, I0),
|
||||
x_thread_buf(i));
|
||||
threadwise_x_load.MoveSrcSliceWindow(x_grid_desc_m_k, thread_copy_fwd_step_m_k);
|
||||
threadwise_welford.Run(x_thread_buf[i], mean_thread_buf, var_thread_buf);
|
||||
});
|
||||
}
|
||||
|
||||
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
|
||||
if constexpr(I > 0)
|
||||
block_sync_lds();
|
||||
|
||||
int count = threadwise_welford.cur_count_;
|
||||
BlockwiseWelford::Run(mean_thread_buf(I), var_thread_buf(I), count);
|
||||
});
|
||||
        auto thread_copy_tail_m_k =
            (num_k_block_tile_iteration - 1) * XThreadBufferNumber * thread_copy_fwd_step_m_k;

        threadwise_x_load.MoveSrcSliceWindow(x_grid_desc_m_k, thread_copy_bwd_step_m_k);
        threadwise_gamma_load.MoveSrcSliceWindow(gamma_grid_desc_m_k, thread_copy_tail_m_k);
        threadwise_beta_load.MoveSrcSliceWindow(beta_grid_desc_m_k, thread_copy_tail_m_k);
        threadwise_y_store.MoveDstSliceWindow(y_grid_desc_m_k, thread_copy_tail_m_k);

        for(index_t reducedTiles = 0; reducedTiles < num_k_block_tile_iteration; ++reducedTiles)
        {
            if constexpr(!SweepOnce)
            {
                static_for<0, XThreadBufferNumber, 1>{}([&](auto i) {
                    threadwise_x_load.Run(x_grid_desc_m_k,
                                          x_global_val_buf,
                                          thread_buffer_desc_m_k,
                                          make_tuple(I0, I0),
                                          x_thread_buf(i));
                    threadwise_x_load.MoveSrcSliceWindow(x_grid_desc_m_k, thread_copy_fwd_step_m_k);
                });
            }

            static_for<0, GammaThreadBufferNumber, 1>{}([&](auto i) {
                threadwise_gamma_load.Run(gamma_grid_desc_m_k,
                                          gamma_global_val_buf,
                                          thread_buffer_desc_m_k,
                                          make_tuple(I0, I0),
                                          gamma_thread_buf(i));

                threadwise_gamma_load.MoveSrcSliceWindow(gamma_grid_desc_m_k,
                                                         thread_copy_fwd_step_m_k);
                threadwise_welford.Run(x_thread_buf[i], mean_thread_buf, var_thread_buf);

                if constexpr(i != ThreadBufferNumber - 1)
                {
                    threadwise_x_load.MoveSrcSliceWindow(x_grid_desc_m_k, thread_copy_fwd_step_m_k);
                    threadwise_gamma_load.MoveSrcSliceWindow(gamma_grid_desc_m_k,
                                                             thread_copy_fwd_step_m_k);
                }
            });

            static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
                if constexpr(I > 0)
                    block_sync_lds();

                int count = threadwise_welford.cur_count_;
                BlockwiseWelford::Run(mean_thread_buf(I), var_thread_buf(I), count);
            });

            static_for<0, MThreadSliceSize, 1>{}([&](auto iM) {
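                // divisor = 1 / sqrt(var + eps): multiply each element by the
                // reciprocal instead of dividing; epsilon guards against zero
                // variance.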
                auto divisor = 1 / ck::math::sqrt(var_thread_buf(iM) + epsilon);
                static_for<0, XThreadBufferNumber, 1>{}([&](auto iK0) {
                static_for<0, ThreadBufferNumber, 1>{}([&](auto iK0) {
                    static_for<0, XSrcVectorSize, 1>{}([&](auto iK1) {
                        constexpr auto offset_m_k =
                            thread_buffer_desc_m_k.CalculateOffset(make_tuple(iM, iK1));

@@ -330,7 +293,7 @@ struct GridwiseNormalizationWelfordVariance_mk_to_mk

                            (x_thread_buf(iK0)(Number<offset_m_k>{}) - mean_thread_buf(iM)) *
                            divisor;

                        // gamma
                        // gamma & beta
                        y_thread_buf(iK0)(Number<offset_m_k>{}) =
                            y_thread_buf(iK0)(Number<offset_m_k>{}) *
                            gamma_thread_buf(iK0)(Number<offset_m_k>{});

@@ -338,18 +301,20 @@ struct GridwiseNormalizationWelfordVariance_mk_to_mk

                    });
                });

            static_for<0, BetaThreadBufferNumber, 1>{}([&](auto i) {
            static_for<0, ThreadBufferNumber, 1>{}([&](auto i) {
                threadwise_beta_load.Run(beta_grid_desc_m_k,
                                         beta_global_val_buf,
                                         thread_buffer_desc_m_k,
                                         make_tuple(I0, I0),
                                         beta_thread_buf(i));
                threadwise_beta_load.MoveSrcSliceWindow(beta_grid_desc_m_k,
                                                        thread_copy_fwd_step_m_k);

                if constexpr(i != ThreadBufferNumber - 1)
                    threadwise_beta_load.MoveSrcSliceWindow(beta_grid_desc_m_k,
                                                            thread_copy_fwd_step_m_k);
            });

            static_for<0, MThreadSliceSize, 1>{}([&](auto iM) {
                static_for<0, XThreadBufferNumber, 1>{}([&](auto iK0) {
                static_for<0, ThreadBufferNumber, 1>{}([&](auto iK0) {
                    static_for<0, XSrcVectorSize, 1>{}([&](auto iK1) {
                        constexpr auto offset_m_k =
                            thread_buffer_desc_m_k.CalculateOffset(make_tuple(iM, iK1));

@@ -362,22 +327,134 @@ struct GridwiseNormalizationWelfordVariance_mk_to_mk

                    });
                });

            static_for<0, YThreadBufferNumber, 1>{}([&](auto i) {
            static_for<0, ThreadBufferNumber, 1>{}([&](auto i) {
                threadwise_y_store.Run(thread_buffer_desc_m_k,
                                       make_tuple(I0, I0),
                                       y_thread_buf(i),
                                       y_grid_desc_m_k,
                                       y_global_val_buf);
                threadwise_y_store.MoveDstSliceWindow(y_grid_desc_m_k, thread_copy_fwd_step_m_k);

                if constexpr(i != ThreadBufferNumber - 1)
                    threadwise_y_store.MoveDstSliceWindow(y_grid_desc_m_k,
                                                          thread_copy_fwd_step_m_k);
            });
        } // end of sweep once
        else
        {
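            // Sweep twice: the first pass below streams the x tiles through
            // the Welford reduction; the slice windows are then rewound so the
            // second pass can re-read x and apply gamma and beta.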
            for(index_t reducedTiles = 0; reducedTiles < num_k_block_tile_iteration; ++reducedTiles)
            {
                static_for<0, ThreadBufferNumber, 1>{}([&](auto i) {
                    threadwise_x_load.Run(x_grid_desc_m_k,
                                          x_global_val_buf,
                                          thread_buffer_desc_m_k,
                                          make_tuple(I0, I0),
                                          x_thread_buf(i));
                    threadwise_x_load.MoveSrcSliceWindow(x_grid_desc_m_k, thread_copy_fwd_step_m_k);
                    threadwise_welford.Run(x_thread_buf[i], mean_thread_buf, var_thread_buf);
                });
            }

            static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
                if constexpr(I > 0)
                    block_sync_lds();

                int count = threadwise_welford.cur_count_;
                BlockwiseWelford::Run(mean_thread_buf(I), var_thread_buf(I), count);
            });

            threadwise_x_load.MoveSrcSliceWindow(x_grid_desc_m_k, 2 * thread_copy_bwd_step_m_k);
            threadwise_gamma_load.MoveSrcSliceWindow(gamma_grid_desc_m_k,
                                                     2 * thread_copy_bwd_step_m_k);
            threadwise_beta_load.MoveSrcSliceWindow(beta_grid_desc_m_k,
                                                    2 * thread_copy_bwd_step_m_k);
            threadwise_y_store.MoveDstSliceWindow(y_grid_desc_m_k, 2 * thread_copy_bwd_step_m_k);
        }
        auto thread_copy_tail_m_k =
            (num_k_block_tile_iteration - 1) * ThreadBufferNumber * thread_copy_fwd_step_m_k;

        threadwise_x_load.MoveSrcSliceWindow(x_grid_desc_m_k, thread_copy_bwd_step_m_k);
        threadwise_gamma_load.MoveSrcSliceWindow(gamma_grid_desc_m_k, thread_copy_tail_m_k);
        threadwise_beta_load.MoveSrcSliceWindow(beta_grid_desc_m_k, thread_copy_tail_m_k);
        threadwise_y_store.MoveDstSliceWindow(y_grid_desc_m_k, thread_copy_tail_m_k);

        for(index_t reducedTiles = 0; reducedTiles < num_k_block_tile_iteration; ++reducedTiles)
        {
            static_for<0, ThreadBufferNumber, 1>{}([&](auto i) {
                threadwise_x_load.Run(x_grid_desc_m_k,
                                      x_global_val_buf,
                                      thread_buffer_desc_m_k,
                                      make_tuple(I0, I0),
                                      x_thread_buf(i));
                threadwise_x_load.MoveSrcSliceWindow(x_grid_desc_m_k, thread_copy_fwd_step_m_k);
            });

            static_for<0, ThreadBufferNumber, 1>{}([&](auto i) {
                threadwise_gamma_load.Run(gamma_grid_desc_m_k,
                                          gamma_global_val_buf,
                                          thread_buffer_desc_m_k,
                                          make_tuple(I0, I0),
                                          gamma_thread_buf(i));

                threadwise_gamma_load.MoveSrcSliceWindow(gamma_grid_desc_m_k,
                                                         thread_copy_fwd_step_m_k);
            });

            static_for<0, MThreadSliceSize, 1>{}([&](auto iM) {
                auto divisor = 1 / ck::math::sqrt(var_thread_buf(iM) + epsilon);
                static_for<0, ThreadBufferNumber, 1>{}([&](auto iK0) {
                    static_for<0, XSrcVectorSize, 1>{}([&](auto iK1) {
                        constexpr auto offset_m_k =
                            thread_buffer_desc_m_k.CalculateOffset(make_tuple(iM, iK1));

                        // normalize
                        y_thread_buf(iK0)(Number<offset_m_k>{}) =
                            (x_thread_buf(iK0)(Number<offset_m_k>{}) - mean_thread_buf(iM)) *
                            divisor;

                        // gamma
                        y_thread_buf(iK0)(Number<offset_m_k>{}) =
                            y_thread_buf(iK0)(Number<offset_m_k>{}) *
                            gamma_thread_buf(iK0)(Number<offset_m_k>{});
                    });
                });
            });

            static_for<0, ThreadBufferNumber, 1>{}([&](auto i) {
                threadwise_beta_load.Run(beta_grid_desc_m_k,
                                         beta_global_val_buf,
                                         thread_buffer_desc_m_k,
                                         make_tuple(I0, I0),
                                         beta_thread_buf(i));
                threadwise_beta_load.MoveSrcSliceWindow(beta_grid_desc_m_k,
                                                        thread_copy_fwd_step_m_k);
            });

            static_for<0, MThreadSliceSize, 1>{}([&](auto iM) {
                static_for<0, ThreadBufferNumber, 1>{}([&](auto iK0) {
                    static_for<0, XSrcVectorSize, 1>{}([&](auto iK1) {
                        constexpr auto offset_m_k =
                            thread_buffer_desc_m_k.CalculateOffset(make_tuple(iM, iK1));

                        // beta
                        y_thread_buf(iK0)(Number<offset_m_k>{}) =
                            y_thread_buf(iK0)(Number<offset_m_k>{}) +
                            beta_thread_buf(iK0)(Number<offset_m_k>{});
                    });
                });
            });

            static_for<0, ThreadBufferNumber, 1>{}([&](auto i) {
                threadwise_y_store.Run(thread_buffer_desc_m_k,
                                       make_tuple(I0, I0),
                                       y_thread_buf(i),
                                       y_grid_desc_m_k,
                                       y_global_val_buf);
                threadwise_y_store.MoveDstSliceWindow(y_grid_desc_m_k,
                                                      thread_copy_fwd_step_m_k);
            });

            threadwise_x_load.MoveSrcSliceWindow(x_grid_desc_m_k, 2 * thread_copy_bwd_step_m_k);
            threadwise_gamma_load.MoveSrcSliceWindow(gamma_grid_desc_m_k,
                                                     2 * thread_copy_bwd_step_m_k);
            threadwise_beta_load.MoveSrcSliceWindow(beta_grid_desc_m_k,
                                                    2 * thread_copy_bwd_step_m_k);
            threadwise_y_store.MoveDstSliceWindow(y_grid_desc_m_k,
                                                  2 * thread_copy_bwd_step_m_k);
        }
    } // end of sweep twice
    }
};
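For readers unfamiliar with the reduction above, the following is a minimal sketch of the one-pass Welford update that ThreadwiseWelford and BlockwiseWelford specialize for tiles. The struct and member names are illustrative assumptions, not CK's actual API; the biased (divide-by-N) variance is assumed, as is conventional for layernorm.

#include <cstddef>

// Illustrative Welford accumulator (hypothetical names, not CK's API).
struct WelfordAccumulator
{
    std::size_t count = 0;
    float mean        = 0.f;
    float m2          = 0.f; // running sum of squared deviations

    void Update(float x)
    {
        ++count;
        const float delta = x - mean;
        mean += delta / count;
        m2 += delta * (x - mean); // second factor uses the updated mean
    }

    float Variance() const { return count > 0 ? m2 / count : 0.f; }
};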
@@ -83,6 +83,11 @@ static inline __host__ bool isnan(int4_t x)
};
#endif

static inline __host__ half_t sqrt(half_t x)
{
    return static_cast<half_t>(std::sqrt(static_cast<float>(x)));
};

static inline __host__ float sqrt(float x) { return std::sqrt(x); };

static inline __host__ double sqrt(double x) { return std::sqrt(x); };

@@ -158,6 +163,11 @@ static inline __device__ bool isnan(half_t x)

    return (xx & 0x7FFF) > 0x7C00;
};

static inline __device__ half_t sqrt(half_t x)
{
    return static_cast<half_t>(__builtin_amdgcn_sqrtf(static_cast<float>(x)));
};

static inline __device__ float sqrt(float x) { return __builtin_amdgcn_sqrtf(x); };

static inline __device__ double sqrt(double x) { return __builtin_amdgcn_sqrt(x); };
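A hedged usage sketch of the new half_t overload: the fp16 result is the fp32 square root converted back down. The ck::math namespace and ck::type_convert helper are assumed from their use elsewhere in this diff.

// Illustrative only: half-precision sqrt goes through a float computation.
ck::half_t v = ck::type_convert<ck::half_t>(0.25f);
ck::half_t r = ck::math::sqrt(v); // 0.5 in fp16; on device via __builtin_amdgcn_sqrtf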
@@ -21,20 +21,25 @@ template <typename OutElementwise, index_t Rank, index_t Reduce>
// clang-format off
using device_normalization_f16_instances =
    std::tuple <
        // XDataType, GammaDataType, BetaDataType, AccDataType, YDataType, Rank, NumReduceDim, BlockSize, MThreadClusterSize, KThreadClusterSize, MThreadSliceSize, KThreadSliceSize, XYSrcVectorDim, XSrcVectorSize, GammaSrcVectorDim, GammaSrcVectorSize, BetaSrcVectorDim, BetaSrcVectorSize, YDstVectorSize>
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 8, 32, 1, 8, 1, 1, 1, 1, 1, 1, 1>, // fallback kernel
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 8, 32, 1, 8, 1, 2, 1, 2, 1, 2, 2>, // fallback kernel
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 8, 32, 1, 8, 1, 4, 1, 4, 1, 4, 4>, // fallback kernel
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 8, 32, 1, 8, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 4, 64, 1, 8, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 2, 128, 1, 8, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 2, 128, 1, 16, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 2, 128, 1, 32, 1, 8, 1, 8, 1, 8, 8>,
        // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType, Rank, NumReduceDim, BlockSize, MThreadClusterSize, KThreadClusterSize, MThreadSliceSize, KThreadSliceSize, XYSrcVectorDim, XSrcVectorSize, GammaSrcVectorDim, GammaSrcVectorSize, BetaSrcVectorDim, BetaSrcVectorSize, YDstVectorSize>
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 2, 1, 2, 1, 2, 1, 2, 2>, // irregular size
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 4, 1, 4, 1, 4, 1, 4, 4>, // irregular size
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 64, 1, 64, 1, 8, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 8, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 16, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 32, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 8, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 16, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 2, 16, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 32, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 32, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 8, 1, 2, 1, 2, 1, 2, 2>
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 8, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 16, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 8, 1, 8, 1, 8, 1, 8, 8>,
        DeviceNormalizationImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 16, 1, 8, 1, 8, 1, 8, 8>
    >;
// clang-format on
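The vector-size columns constrain which problems an instance can serve; the entries with vector size 1 act as fallbacks. A hedged sketch of the divisibility requirement (illustrative only, not the exact IsSupportedArgument logic):

// Illustrative: vectorized loads/stores need the contiguous (reduced)
// length to be a multiple of the instance's vector size; otherwise only
// the vector-size-1 fallback instances apply.
bool CanUseVectorSize(ck::index_t k_length, ck::index_t vector_size)
{
    return k_length % vector_size == 0;
}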
@@ -19,17 +19,26 @@ using Pass = ck::tensor_operation::element_wise::PassThrough;
template <typename OutElementwise, index_t Rank, index_t Reduce>
using device_layernorm_f32_instances = std::tuple<
    // clang-format off
        // XDataType, GammaDataType, BetaDataType, AccDataType, YDataType, Rank, NumReduceDim, BlockSize, MThreadClusterSize, KThreadClusterSize, MThreadSliceSize, KThreadSliceSize, XYSrcVectorDim, XSrcVectorSize, GammaSrcVectorSize, BetaSrcVectorSize, YDstVectorSize>
        DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 8, 32, 1, 8, 1, 1, 1, 1, 1, 1, 1>, // fallback kernel
        DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 8, 32, 1, 8, 1, 2, 1, 2, 1, 2, 2>, // fallback kernel
        DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 8, 32, 1, 8, 1, 4, 1, 4, 1, 4, 4>,
        DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 4, 64, 1, 8, 1, 4, 1, 4, 1, 4, 4>,
        DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 2, 128, 1, 8, 1, 4, 1, 4, 1, 4, 4>,
        DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 2, 128, 1, 16, 1, 4, 1, 4, 1, 4, 4>,
        DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 2, 128, 1, 32, 1, 4, 1, 4, 1, 4, 4>,
        // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType, Rank, NumReduceDim, BlockSize, MThreadClusterSize, KThreadClusterSize, MThreadSliceSize, KThreadSliceSize, XYSrcVectorDim, XSrcVectorSize, GammaSrcVectorSize, BetaSrcVectorSize, YDstVectorSize>
        DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
        DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
        DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
        DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 1, 1, 1, 1, 1, 1, 1, 1>, // irregular size
        DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 2, 1, 2, 1, 2, 1, 2, 2>, // irregular size
        DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 4, 1, 4, 1, 4, 1, 4, 4>,
        DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 8, 1, 4, 1, 4, 1, 4, 4>,
        DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 16, 1, 4, 1, 4, 1, 4, 4>,
        DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 128, 1, 128, 1, 32, 1, 4, 1, 4, 1, 4, 4>,
        DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 4, 1, 4, 1, 4, 1, 4, 4>,
        DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 8, 1, 4, 1, 4, 1, 4, 4>,
        DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 16, 1, 4, 1, 4, 1, 4, 4>,
        DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 32, 1, 4, 1, 4, 1, 4, 4>
        DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 2, 16, 1, 4, 1, 4, 1, 4, 4>,
        DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 32, 1, 4, 1, 4, 1, 4, 4>,
        DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 4, 1, 4, 1, 4, 1, 4, 4>,
        DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 1, 8, 1, 4, 1, 4, 1, 4, 4>,
        DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 512, 1, 512, 2, 8, 1, 4, 1, 4, 1, 4, 4>,
        DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 4, 1, 4, 1, 4, 1, 4, 4>,
        DeviceNormalizationImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 1024, 1, 1024, 1, 8, 1, 4, 1, 4, 1, 4, 4>
    // clang-format on
    >;
@@ -19,7 +19,7 @@ namespace profiler {
template <typename XDataType,
          typename GammaDataType,
          typename BetaDataType,
          typename AccDataType,
          typename ComputeDataType,
          typename YDataType,
          index_t Rank>
bool profile_layernorm_impl(int do_verification,

@@ -86,7 +86,7 @@ bool profile_layernorm_impl(int do_verification,
    using DeviceOp = ck::tensor_operation::device::DeviceNormalization<XDataType,
                                                                       GammaDataType,
                                                                       BetaDataType,
                                                                       AccDataType,
                                                                       ComputeDataType,
                                                                       YDataType,
                                                                       PassThrough,
                                                                       Rank,

@@ -109,7 +109,7 @@ bool profile_layernorm_impl(int do_verification,
                                                                       GammaDataType,
                                                                       BetaDataType,
                                                                       YDataType,
                                                                       AccDataType,
                                                                       ComputeDataType,
                                                                       PassThrough,
                                                                       Rank,
                                                                       NumReduceDim>;

@@ -181,8 +181,8 @@ bool profile_layernorm_impl(int do_verification,
    {
        y_dev.FromDevice(y.mData.data());

        bool pass = ck::utils::check_err(
            y.mData, host_y.mData, "Error: Incorrect results d1", 1e-3, 1e-3);
        bool pass =
            ck::utils::check_err(y.mData, host_y.mData, "Error: Incorrect results", 1e-3, 1e-3);

        if(do_log)
        {
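A hedged usage sketch mirroring the test invocations below; the meanings of the positional arguments (verification on, init method 2, no logging, no kernel timing) and the type of length are inferred, not confirmed by this diff.

// Illustrative call; argument meanings and the length type are assumptions.
std::vector<ck::index_t> length{1024, 1024};
bool pass = ck::profiler::profile_layernorm_impl<ck::half_t, // XDataType
                                                 ck::half_t, // GammaDataType
                                                 ck::half_t, // BetaDataType
                                                 float,      // ComputeDataType
                                                 ck::half_t, // YDataType
                                                 2>          // Rank
    (true, 2, false, false, length);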
@@ -12,11 +12,11 @@ template <typename Tuple>
class TestGroupnorm : public ::testing::Test
{
    protected:
    using XDataType     = std::tuple_element_t<0, Tuple>;
    using GammaDataType = std::tuple_element_t<1, Tuple>;
    using BetaDataType  = std::tuple_element_t<2, Tuple>;
    using AccDataType   = std::tuple_element_t<3, Tuple>;
    using YDataType     = std::tuple_element_t<4, Tuple>;
    using XDataType       = std::tuple_element_t<0, Tuple>;
    using GammaDataType   = std::tuple_element_t<1, Tuple>;
    using BetaDataType    = std::tuple_element_t<2, Tuple>;
    using ComputeDataType = std::tuple_element_t<3, Tuple>;
    using YDataType       = std::tuple_element_t<4, Tuple>;

    void Run()
    {
@@ -36,7 +36,7 @@ class TestGroupnorm : public ::testing::Test
        ck::profiler::profile_groupnorm_impl<XDataType,
                                             GammaDataType,
                                             BetaDataType,
                                             AccDataType,
                                             ComputeDataType,
                                             YDataType>(true, 2, false, false, length);
        EXPECT_TRUE(success);
    }
@@ -44,7 +44,7 @@ class TestGroupnorm : public ::testing::Test
};

using KernelTypes = ::testing::Types<
    // XDataType, GammaDataType, BetaDataType, AccDataType, YDataType>
    // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType>
    std::tuple<F16, F16, F16, F32, F16>>;

TYPED_TEST_SUITE(TestGroupnorm, KernelTypes);

@@ -12,11 +12,11 @@ template <typename Tuple>
class TestGroupnorm : public ::testing::Test
{
    protected:
    using XDataType     = std::tuple_element_t<0, Tuple>;
    using GammaDataType = std::tuple_element_t<1, Tuple>;
    using BetaDataType  = std::tuple_element_t<2, Tuple>;
    using AccDataType   = std::tuple_element_t<3, Tuple>;
    using YDataType     = std::tuple_element_t<4, Tuple>;
    using XDataType       = std::tuple_element_t<0, Tuple>;
    using GammaDataType   = std::tuple_element_t<1, Tuple>;
    using BetaDataType    = std::tuple_element_t<2, Tuple>;
    using ComputeDataType = std::tuple_element_t<3, Tuple>;
    using YDataType       = std::tuple_element_t<4, Tuple>;

    void Run()
    {
@@ -34,7 +34,7 @@ class TestGroupnorm : public ::testing::Test
        ck::profiler::profile_groupnorm_impl<XDataType,
                                             GammaDataType,
                                             BetaDataType,
                                             AccDataType,
                                             ComputeDataType,
                                             YDataType>(true, 2, false, false, length);
        EXPECT_TRUE(success);
    }
@@ -42,7 +42,7 @@ class TestGroupnorm : public ::testing::Test
};

using KernelTypes = ::testing::Types<
    // XDataType, GammaDataType, BetaDataType, AccDataType, YDataType>
    // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType>
    std::tuple<F32, F32, F32, F32, F32>>;

TYPED_TEST_SUITE(TestGroupnorm, KernelTypes);

@@ -12,11 +12,11 @@ template <typename Tuple>
class TestLayernorm2d : public ::testing::Test
{
    protected:
    using XDataType     = std::tuple_element_t<0, Tuple>;
    using GammaDataType = std::tuple_element_t<1, Tuple>;
    using BetaDataType  = std::tuple_element_t<2, Tuple>;
    using AccDataType   = std::tuple_element_t<3, Tuple>;
    using YDataType     = std::tuple_element_t<4, Tuple>;
    using XDataType       = std::tuple_element_t<0, Tuple>;
    using GammaDataType   = std::tuple_element_t<1, Tuple>;
    using BetaDataType    = std::tuple_element_t<2, Tuple>;
    using ComputeDataType = std::tuple_element_t<3, Tuple>;
    using YDataType       = std::tuple_element_t<4, Tuple>;

    void Run()
    {
@@ -29,7 +29,7 @@ class TestLayernorm2d : public ::testing::Test
        bool success = ck::profiler::profile_layernorm_impl<XDataType,
                                                            GammaDataType,
                                                            BetaDataType,
                                                            AccDataType,
                                                            ComputeDataType,
                                                            YDataType,
                                                            2>(true, 2, false, false, length);
        EXPECT_TRUE(success);
@@ -38,7 +38,7 @@ class TestLayernorm2d : public ::testing::Test
};

using KernelTypes = ::testing::Types<
    // XDataType, GammaDataType, BetaDataType, AccDataType, YDataType>
    // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType>
    std::tuple<F16, F16, F16, F32, F16>>;

TYPED_TEST_SUITE(TestLayernorm2d, KernelTypes);

@@ -12,11 +12,11 @@ template <typename Tuple>
class TestLayernorm2d : public ::testing::Test
{
    protected:
    using XDataType     = std::tuple_element_t<0, Tuple>;
    using GammaDataType = std::tuple_element_t<1, Tuple>;
    using BetaDataType  = std::tuple_element_t<2, Tuple>;
    using AccDataType   = std::tuple_element_t<3, Tuple>;
    using YDataType     = std::tuple_element_t<4, Tuple>;
    using XDataType       = std::tuple_element_t<0, Tuple>;
    using GammaDataType   = std::tuple_element_t<1, Tuple>;
    using BetaDataType    = std::tuple_element_t<2, Tuple>;
    using ComputeDataType = std::tuple_element_t<3, Tuple>;
    using YDataType       = std::tuple_element_t<4, Tuple>;

    void Run()
    {
@@ -29,7 +29,7 @@ class TestLayernorm2d : public ::testing::Test
        bool success = ck::profiler::profile_layernorm_impl<XDataType,
                                                            GammaDataType,
                                                            BetaDataType,
                                                            AccDataType,
                                                            ComputeDataType,
                                                            YDataType,
                                                            2>(true, 2, false, false, length);
        EXPECT_TRUE(success);
@@ -38,7 +38,7 @@ class TestLayernorm2d : public ::testing::Test
};

using KernelTypes = ::testing::Types<
    // XDataType, GammaDataType, BetaDataType, AccDataType, YDataType>
    // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType>
    std::tuple<F32, F32, F32, F32, F32>>;

TYPED_TEST_SUITE(TestLayernorm2d, KernelTypes);