Group norm (#417)
* Add groupnorm example based on layernorm
  1. Reference is not ready
  2. Shape of gamma and beta needs to be fixed
* Allow the shape of gamma and beta to be the same as x
* Modify test, instance, and client example
* [What] Fix layernorm bug for more than 2 dimensions.
  [Why] We need to get the upper length from the merge transform instead of the embed transform.
* Add reference for groupnorm
* Fuse sigmoid after groupnorm
* [What] Rename original layernorm into layernorm2d.
  [Why] Prepare to add groupnorm using layernorm5d.
* clang-format
* Add groupnorm test
* Refine error message
* Add groupnorm ckProfiler
* Test groupnorm kernel from device_instance
* Update example
* Update profiler
* Fix test naming
* Fix argc number
* Move descriptor and sweeponce to argument for quick debugging
Co-authored-by: Chao Liu <chao.liu2@amd.com>
[ROCm/composable_kernel commit: 4eba345f6e]
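Background for the diff below: group normalization of an [N, H, W, G, C] input normalizes each (n, g) slice over H, W and C, so it can be expressed as a rank-5 layernorm that reduces dimensions {1, 2, 4}, with the [G, C] gamma/beta tensors broadcast over N, H and W through zero strides. A minimal host-side sketch of the computation (illustrative only; the x/y/gamma/beta accessors and eps are assumptions, not CK API):

    // Groupnorm as a rank-5 reduction over (H, W, C) per (n, g) slice.
    for(int n = 0; n < N; ++n)
        for(int g = 0; g < G; ++g)
        {
            double sum = 0, sq = 0;
            for(int h = 0; h < H; ++h)
                for(int w = 0; w < W; ++w)
                    for(int c = 0; c < C; ++c)
                    {
                        const double v = x(n, h, w, g, c);
                        sum += v;
                        sq += v * v;
                    }
            const double count = double(H) * W * C;
            const double mean  = sum / count;
            const double var   = sq / count - mean * mean; // E[x^2] - E[x]^2
            for(int h = 0; h < H; ++h)
                for(int w = 0; w < W; ++w)
                    for(int c = 0; c < C; ++c)
                        y(n, h, w, g, c) =
                            gamma(g, c) * (x(n, h, w, g, c) - mean) / std::sqrt(var + eps) +
                            beta(g, c);
        }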
@@ -81,8 +81,8 @@ int main(int argc, char* argv[])
     auto argument_ptr = op_ptr->MakeArgumentPointer({M, N},      // lengths
                                                     {Stride, 1}, // xStrides
-                                                    {1},         // gammaStrides
-                                                    {1},         // betaStrides
+                                                    {0, 1},      // gammaStrides
+                                                    {0, 1},      // betaStrides
                                                     {Stride, 1}, // yStrides
                                                     {1},         // reduceDims
                                                     1e-4,
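The stride change above is what lets gamma and beta keep their 1-D shape while being addressed through the same rank-2 view as x: with strides {0, 1}, the offset of element (m, n) is m * 0 + n * 1 = n, so every row m reads the same gamma[n] and beta[n] (a worked example of the stride arithmetic, not library code).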
@@ -29,24 +29,27 @@ using PassThrough = ck::tensor_operation::element_wise::PassThrough;
 constexpr int Rank         = 2;
 constexpr int NumReduceDim = 1;
 
-using DeviceInstance = ck::tensor_operation::device::DeviceLayernormImpl<XDataType,
-                                                                         GammaDataType,
-                                                                         BetaDataType,
-                                                                         AccDataType,
-                                                                         YDataType,
-                                                                         PassThrough,
-                                                                         Rank,
-                                                                         NumReduceDim,
-                                                                         256, // BlockSize
-                                                                         8,   // ClusterM
-                                                                         32,  // ClusterK
-                                                                         1,   // SliceM
-                                                                         8,   // SliceK
-                                                                         1,   // SrcVecDim (0=M, 1=K)
-                                                                         8,   // SrcScalarPerVector
-                                                                         8,   // GammaScalarPerVector
-                                                                         8,   // BetaScalarPerVector
-                                                                         8>;  // OutScalarPerVector
+using DeviceInstance =
+    ck::tensor_operation::device::DeviceLayernormImpl<XDataType,
+                                                      GammaDataType,
+                                                      BetaDataType,
+                                                      AccDataType,
+                                                      YDataType,
+                                                      PassThrough,
+                                                      Rank,
+                                                      NumReduceDim,
+                                                      256, // BlockSize
+                                                      8,   // ClusterM
+                                                      32,  // ClusterK
+                                                      1,   // SliceM
+                                                      8,   // SliceK
+                                                      1,   // SrcVecDim (0=M, 1=K)
+                                                      8,   // SrcScalarPerVector
+                                                      1,   // GammaVecDim (0=M, 1=K)
+                                                      8,   // GammaScalarPerVector
+                                                      1,   // BetaVecDim (0=M, 1=K)
+                                                      8,   // BetaScalarPerVector
+                                                      8>;  // OutScalarPerVector
 
 int main()
 {
@@ -88,8 +91,8 @@ int main()
     auto argument_ptr = device_instance.MakeArgumentPointer(
         {M, N},
         std::vector<ck::index_t>{x.mDesc.GetStrides().begin(), x.mDesc.GetStrides().end()},
-        std::vector<ck::index_t>{gamma.mDesc.GetStrides().begin(), gamma.mDesc.GetStrides().end()},
-        std::vector<ck::index_t>{beta.mDesc.GetStrides().begin(), beta.mDesc.GetStrides().end()},
+        {0, 1},
+        {0, 1},
        std::vector<ck::index_t>{y.mDesc.GetStrides().begin(), y.mDesc.GetStrides().end()},
        {1},
        1e-4,
example/42_groupnorm/CMakeLists.txt (new file, 1 line)
@@ -0,0 +1 @@
+add_example_executable(example_groupnorm_sigmoid_fp16 groupnorm_sigmoid_fp16.cpp)
example/42_groupnorm/groupnorm_sigmoid_fp16.cpp (new file, 172 lines)
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
+
+#include <iostream>
+#include <numeric>
+#include <initializer_list>
+#include <cstdlib>
+#include <getopt.h>
+
+#include "ck/ck.hpp"
+#include "ck/utility/reduction_enums.hpp"
+#include "ck/tensor_operation/gpu/device/device_layernorm_impl.hpp"
+#include "ck/tensor_operation/gpu/device/reduction_operator_mapping.hpp"
+
+#include "ck/library/utility/fill.hpp"
+#include "ck/library/utility/check_err.hpp"
+#include "ck/library/utility/device_memory.hpp"
+#include "ck/library/utility/host_common_util.hpp"
+#include "ck/library/utility/host_tensor.hpp"
+#include "ck/library/utility/host_tensor_generator.hpp"
+#include "ck/library/reference_tensor_operation/cpu/reference_groupnorm.hpp"
+
+constexpr int Rank         = 5;
+constexpr int NumReduceDim = 3;
+
+using XDataType     = ck::half_t;
+using GammaDataType = ck::half_t;
+using BetaDataType  = ck::half_t;
+using YDataType     = ck::half_t;
+using AccDataType   = float;
+
+struct YElementOp
+{
+    template <typename T>
+    __host__ __device__ void operator()(T& y, const T& x) const
+    {
+        static_assert(ck::is_same<T, float>::value || ck::is_same<T, double>::value ||
+                          ck::is_same<T, ck::half_t>::value,
+                      "Data type is not supported by this operation!");
+
+        T a;
+
+        ck::tensor_operation::element_wise::Sigmoid{}(a, x);
+
+        y = x * a;
+    };
+};
+
+using DeviceInstance =
+    ck::tensor_operation::device::DeviceLayernormImpl<XDataType,
+                                                      GammaDataType,
+                                                      BetaDataType,
+                                                      AccDataType,
+                                                      YDataType,
+                                                      YElementOp,
+                                                      Rank,
+                                                      NumReduceDim,
+                                                      256, // BlockSize
+                                                      8,   // ClusterM
+                                                      32,  // ClusterK
+                                                      1,   // SliceM
+                                                      8,   // SliceK
+                                                      1,   // SrcVecDim (0=M, 1=K)
+                                                      8,   // SrcScalarPerVector
+                                                      1,   // GammaVecDim (0=M, 1=K)
+                                                      8,   // GammaScalarPerVector
+                                                      1,   // BetaVecDim (0=M, 1=K)
+                                                      8,   // BetaScalarPerVector
+                                                      8>;  // OutScalarPerVector
+
+int main(int argc, char* argv[])
+{
+    ck::index_t N = 128;
+    ck::index_t H = 16;
+    ck::index_t W = 16;
+    ck::index_t G = 32;
+    ck::index_t C = 40;
+
+    if(argc == 1)
+    {
+        // use default case
+    }
+    else if(argc == 6)
+    {
+        N = std::stoi(argv[1]);
+        H = std::stoi(argv[2]);
+        W = std::stoi(argv[3]);
+        G = std::stoi(argv[4]);
+        C = std::stoi(argv[5]);
+    }
+    else
+    {
+        std::cerr << "arg1 to 5: N, H, W, G, C" << std::endl;
+        return 1;
+    }
+
+    Tensor<XDataType> x({N, H, W, G, C});
+    Tensor<YDataType> y({N, H, W, G, C});
+    Tensor<GammaDataType> gamma({G, C});
+    Tensor<BetaDataType> beta({G, C});
+
+    ck::utils::FillUniformDistribution<XDataType>{0.f, 1.f}(x.begin(), x.end());
+    ck::utils::FillUniformDistribution<GammaDataType>{0.f, 1.f}(gamma.begin(), gamma.end());
+    ck::utils::FillUniformDistribution<BetaDataType>{0.f, 1.f}(beta.begin(), beta.end());
+
+    DeviceMem x_dev(sizeof(XDataType) * x.mDesc.GetElementSpaceSize());
+    DeviceMem gamma_dev(sizeof(GammaDataType) * gamma.mDesc.GetElementSpaceSize());
+    DeviceMem beta_dev(sizeof(BetaDataType) * beta.mDesc.GetElementSpaceSize());
+    DeviceMem y_dev(sizeof(YDataType) * y.mDesc.GetElementSpaceSize());
+
+    x_dev.ToDevice(x.mData.data());
+    gamma_dev.ToDevice(gamma.mData.data());
+    beta_dev.ToDevice(beta.mData.data());
+
+    const auto y_element_op = YElementOp{};
+
+    auto device_instance = DeviceInstance{};
+    auto argument_ptr    = device_instance.MakeArgumentPointer(
+        {N, H, W, G, C},
+        std::vector<ck::index_t>{x.mDesc.GetStrides().begin(), x.mDesc.GetStrides().end()},
+        {0, 0, 0, C, 1},
+        {0, 0, 0, C, 1},
+        std::vector<ck::index_t>{y.mDesc.GetStrides().begin(), y.mDesc.GetStrides().end()},
+        {1, 2, 4}, // reduction dimension: [H, W, C]
+        1e-6,
+        x_dev.GetDeviceBuffer(),
+        gamma_dev.GetDeviceBuffer(),
+        beta_dev.GetDeviceBuffer(),
+        y_dev.GetDeviceBuffer(),
+        y_element_op);
+
+    if(!device_instance.IsSupportedArgument(argument_ptr.get()))
+    {
+        std::cout << "The runtime parameters are not supported" << std::endl;
+        return 1;
+    };
+
+    auto invoker_ptr = device_instance.MakeInvokerPointer();
+    float ave_time   = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, true, true});
+
+    std::size_t num_btype = sizeof(XDataType) * N * H * W * G * C +
+                            sizeof(YDataType) * N * H * W * G * C + sizeof(GammaDataType) * G * C +
+                            sizeof(BetaDataType) * G * C;
+
+    float gb_per_sec = num_btype / 1.E6 / ave_time;
+
+    std::cout << "Perf: " << ave_time << " ms, " << gb_per_sec << " GB/s, "
+              << device_instance.GetTypeString() << std::endl;
+
+    bool pass = true;
+    {
+        Tensor<YDataType> host_y({N, H, W, G, C});
+        using ReferenceInstance = ck::tensor_operation::host::ReferenceGroupnorm<XDataType,
+                                                                                 GammaDataType,
+                                                                                 BetaDataType,
+                                                                                 YDataType,
+                                                                                 AccDataType,
+                                                                                 YElementOp>;
+
+        ReferenceInstance ref;
+        auto ref_argument =
+            ref.MakeArgument(x, gamma, beta, host_y, y_element_op, {N, H, W, G, C}, 1e-6);
+        auto ref_invoker = ref.MakeInvoker();
+        ref_invoker.Run(ref_argument);
+
+        y_dev.FromDevice(y.mData.data());
+        pass &= ck::utils::check_err(y.mData, host_y.mData, "Error: Incorrect results", 1e-3, 1e-3);
+    }
+
+    return (pass ? 0 : 1);
+}
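Note on the gamma/beta strides used above: with gammaStrides = {0, 0, 0, C, 1}, the linear offset of element (n, h, w, g, c) is 0*n + 0*h + 0*w + C*g + 1*c = g*C + c, so the [G, C] tensors are broadcast over N, H and W without being materialized at rank 5. For the default sizes (G = 32, C = 40), gamma holds 1280 elements while x holds 128 * 16 * 16 * 32 * 40 = 41,943,040.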
@@ -23,11 +23,10 @@ template <typename GridwiseReduction,
           typename YDataType,
           typename AccDataType,
           typename AccElementwiseOperation,
-          typename GridDesc_M_K,
-          typename GridDesc_K>
+          typename GridDesc_M_K>
 __global__ void kernel_layernorm(const GridDesc_M_K x_grid_desc_m_k,
-                                 const GridDesc_K gamma_grid_desc_k,
-                                 const GridDesc_K beta_grid_desc_k,
+                                 const GridDesc_M_K gamma_grid_desc_m_k,
+                                 const GridDesc_M_K beta_grid_desc_m_k,
                                  const GridDesc_M_K y_grid_desc_m_k,
                                  index_t num_k_block_tile_iteration,
                                  AccDataType epsilon,
@@ -38,8 +37,8 @@ __global__ void kernel_layernorm(const GridDesc_M_K x_grid_desc_m_k,
                                  const AccElementwiseOperation acc_elementwise_op)
 {
     GridwiseReduction::Run(x_grid_desc_m_k,
-                           gamma_grid_desc_k,
-                           beta_grid_desc_k,
+                           gamma_grid_desc_m_k,
+                           beta_grid_desc_m_k,
                            y_grid_desc_m_k,
                            num_k_block_tile_iteration,
                            epsilon,
@@ -71,7 +70,9 @@ template <typename XDataType,
           index_t KThreadSliceSize,
           index_t XYSrcVectorDim,
           index_t XSrcVectorSize,
+          index_t GammaSrcVectorDim,
           index_t GammaSrcVectorSize,
+          index_t BetaSrcVectorDim,
           index_t BetaSrcVectorSize,
           index_t YDstVectorSize>
 struct DeviceLayernormImpl : public DeviceLayernorm<XDataType,
@@ -84,11 +85,13 @@ struct DeviceLayernormImpl : public DeviceLayernorm<XDataType,
                                                     NumReduceDim>
 {
     static_assert(
-        (KThreadSliceSize % GammaSrcVectorSize == 0),
+        ((GammaSrcVectorDim == 0 && MThreadSliceSize % GammaSrcVectorSize == 0) ||
+         (GammaSrcVectorDim == 1 && KThreadSliceSize % GammaSrcVectorSize == 0)),
         "Invalid thread slice sizes and/or gamma vector sizes configuration, please check!");
 
     static_assert(
-        (KThreadSliceSize % BetaSrcVectorSize == 0),
+        ((BetaSrcVectorDim == 0 && MThreadSliceSize % BetaSrcVectorSize == 0) ||
+         (BetaSrcVectorDim == 1 && KThreadSliceSize % BetaSrcVectorSize == 0)),
         "Invalid thread slice sizes and/or beta vector sizes configuration, please check!");
 
     using PassThrough = tensor_operation::element_wise::PassThrough;
@@ -162,38 +165,7 @@ struct DeviceLayernormImpl : public DeviceLayernorm<XDataType,
         return (in_grid_desc_m_k_padded);
     };
 
-    static auto MakeAffine1dDescriptor(const std::vector<index_t>& Lengths,
-                                       const std::vector<index_t>& Strides,
-                                       int blkGroupSize,
-                                       int numBlockTileIteration)
-    {
-        const auto tupleLengths = make_tuple_from_array(Lengths, Number<NumReduceDim>{});
-        const auto tupleStrides = make_tuple_from_array(Strides, Number<NumReduceDim>{});
-
-        auto desc = make_naive_tensor_descriptor(tupleLengths, tupleStrides);
-
-        auto grid_desc_k = transform_tensor_descriptor(
-            desc,
-            make_tuple(make_merge_transform(tupleLengths)),
-            make_tuple(typename arithmetic_sequence_gen<0, NumReduceDim, 1>::type{}),
-            make_tuple(Sequence<0>{}));
-
-        const auto reduceTotalLength = grid_desc_k.GetLength(Number<0>{});
-        const int reduceSizePerBlock = K_BlockTileSize * numBlockTileIteration;
-
-        const auto Pad_K = reduceSizePerBlock * blkGroupSize - reduceTotalLength;
-
-        auto grid_desc_k_padded = transform_tensor_descriptor(
-            grid_desc_k,
-            make_tuple(make_right_pad_transform(reduceTotalLength, Pad_K)),
-            make_tuple(Sequence<0>{}),
-            make_tuple(Sequence<0>{}));
-
-        return (grid_desc_k_padded);
-    };
-
     using GridDesc_M_K = decltype(MakeSrc2dDescriptor({1}, {1}, 1, 1));
-    using GridDesc_K   = decltype(MakeAffine1dDescriptor({1}, {1}, 1, 1));
 
     using GridwiseReduceLayernormGeneric =
         GridwiseLayernormWelfordVariance_mk_to_mk<XDataType,
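Why the 1-D affine descriptor can go away: gamma and beta are now described by the same padded M x K descriptor as x, built from full Rank-length stride vectors in which broadcast dimensions carry stride 0. The underlying idea, in a simplified sketch (not the CK descriptor API):

    #include <array>

    // Conceptual sketch: a naive descriptor maps an index tuple to a linear
    // offset by a dot product with its strides, so a stride of 0 broadcasts
    // the data along that dimension.
    int offset(const std::array<int, 5>& idx, const std::array<int, 5>& strides)
    {
        int off = 0;
        for(int d = 0; d < 5; ++d)
            off += idx[d] * strides[d];
        return off;
    }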
@@ -203,7 +175,6 @@ struct DeviceLayernormImpl : public DeviceLayernorm<XDataType,
                                                   AccDataType,
                                                   AccElementwiseOperation,
                                                   GridDesc_M_K,
-                                                  GridDesc_K,
                                                   BlockSize,
                                                   MThreadClusterSize,
                                                   KThreadClusterSize,
@@ -211,12 +182,13 @@ struct DeviceLayernormImpl : public DeviceLayernorm<XDataType,
                                                   KThreadSliceSize,
                                                   XYSrcVectorDim,
                                                   XSrcVectorSize,
+                                                  GammaSrcVectorDim,
                                                   GammaSrcVectorSize,
+                                                  BetaSrcVectorDim,
                                                   BetaSrcVectorSize,
                                                   XYSrcVectorDim,
                                                   YDstVectorSize,
                                                   false>;
 
     using GridwiseReduceLayernormSweepOnce =
         GridwiseLayernormWelfordVariance_mk_to_mk<XDataType,
                                                   GammaDataType,
@@ -225,7 +197,6 @@ struct DeviceLayernormImpl : public DeviceLayernorm<XDataType,
                                                   AccDataType,
                                                   AccElementwiseOperation,
                                                   GridDesc_M_K,
-                                                  GridDesc_K,
                                                   BlockSize,
                                                   MThreadClusterSize,
                                                   KThreadClusterSize,
@@ -233,7 +204,9 @@ struct DeviceLayernormImpl : public DeviceLayernorm<XDataType,
                                                   KThreadSliceSize,
                                                   XYSrcVectorDim,
                                                   XSrcVectorSize,
+                                                  GammaSrcVectorDim,
                                                   GammaSrcVectorSize,
+                                                  BetaSrcVectorDim,
                                                   BetaSrcVectorSize,
                                                   XYSrcVectorDim,
                                                   YDstVectorSize,
@@ -258,13 +231,13 @@ struct DeviceLayernormImpl : public DeviceLayernorm<XDataType,
               p_gamma_(p_gamma),
               p_beta_(p_beta),
               p_y_(p_y),
-              gammaStrides_(gammaStrides),
-              betaStrides_(betaStrides),
               acc_elementwise_op_(acc_elementwise_op)
         {
-            Lengths_  = shuffle_tensor_dimensions<Rank, NumReduceDim>(lengths, reduceDims);
-            xStrides_ = shuffle_tensor_dimensions<Rank, NumReduceDim>(xStrides, reduceDims);
-            yStrides_ = shuffle_tensor_dimensions<Rank, NumReduceDim>(yStrides, reduceDims);
+            Lengths_      = shuffle_tensor_dimensions<Rank, NumReduceDim>(lengths, reduceDims);
+            xStrides_     = shuffle_tensor_dimensions<Rank, NumReduceDim>(xStrides, reduceDims);
+            yStrides_     = shuffle_tensor_dimensions<Rank, NumReduceDim>(yStrides, reduceDims);
+            gammaStrides_ = shuffle_tensor_dimensions<Rank, NumReduceDim>(gammaStrides, reduceDims);
+            betaStrides_  = shuffle_tensor_dimensions<Rank, NumReduceDim>(betaStrides, reduceDims);
 
             long_index_t invariant_total_length;
             long_index_t reduce_total_length;
@@ -278,12 +251,17 @@ struct DeviceLayernormImpl : public DeviceLayernorm<XDataType,
             gridSize_ = math::integer_least_multiple(invariant_total_length, M_BlockTileSize) /
                         M_BlockTileSize * blkGroupSize_;
 
-            reduceLengths_.resize(NumReduceDim);
+            x_grid_desc_m_k_ =
+                MakeSrc2dDescriptor(Lengths_, xStrides_, blkGroupSize_, numBlockTileIteration_);
+            gamma_grid_desc_m_k_ =
+                MakeSrc2dDescriptor(Lengths_, gammaStrides_, blkGroupSize_, numBlockTileIteration_);
+            beta_grid_desc_m_k_ =
+                MakeSrc2dDescriptor(Lengths_, betaStrides_, blkGroupSize_, numBlockTileIteration_);
+            y_grid_desc_m_k_ =
+                MakeSrc2dDescriptor(Lengths_, yStrides_, blkGroupSize_, numBlockTileIteration_);
 
-            for(int i = 0; i < NumReduceDim; ++i)
-            {
-                reduceLengths_[i] = lengths[reduceDims[i]];
-            }
+            isSweeponce_ =
+                x_grid_desc_m_k_.GetLength(Number<1>{}) <= KThreadClusterSize * KThreadSliceSize;
         }
 
         AccDataType epsilon_;
@@ -295,7 +273,6 @@ struct DeviceLayernormImpl : public DeviceLayernorm<XDataType,
 
         std::vector<index_t> Lengths_;
         std::vector<index_t> xStrides_;
-        std::vector<index_t> reduceLengths_;
         std::vector<index_t> gammaStrides_;
         std::vector<index_t> betaStrides_;
         std::vector<index_t> yStrides_;
@@ -305,46 +282,35 @@ struct DeviceLayernormImpl : public DeviceLayernorm<XDataType,
         int blkGroupSize_;
         int numBlockTileIteration_;
         size_t gridSize_;
+
+        GridDesc_M_K x_grid_desc_m_k_;
+        GridDesc_M_K gamma_grid_desc_m_k_;
+        GridDesc_M_K beta_grid_desc_m_k_;
+        GridDesc_M_K y_grid_desc_m_k_;
+        bool isSweeponce_;
     };
 
     struct Invoker : public BaseInvoker
     {
         float Run(const Argument& arg, const StreamConfig& stream_config = StreamConfig{})
         {
-            const auto x_grid_desc_m_k = MakeSrc2dDescriptor(
-                arg.Lengths_, arg.xStrides_, arg.blkGroupSize_, arg.numBlockTileIteration_);
-            const auto gamma_grid_desc_k = MakeAffine1dDescriptor(arg.reduceLengths_,
-                                                                  arg.gammaStrides_,
-                                                                  arg.blkGroupSize_,
-                                                                  arg.numBlockTileIteration_);
-            const auto beta_grid_desc_k  = MakeAffine1dDescriptor(arg.reduceLengths_,
-                                                                  arg.betaStrides_,
-                                                                  arg.blkGroupSize_,
-                                                                  arg.numBlockTileIteration_);
-            const auto y_grid_desc_m_k = MakeSrc2dDescriptor(
-                arg.Lengths_, arg.yStrides_, arg.blkGroupSize_, arg.numBlockTileIteration_);
-
-            bool sweep_once =
-                x_grid_desc_m_k.GetLength(Number<1>{}) <= KThreadClusterSize * KThreadSliceSize;
-
-            const auto kernel_main = sweep_once ? kernel_layernorm<GridwiseReduceLayernormSweepOnce,
-                                                                   XDataType,
-                                                                   GammaDataType,
-                                                                   BetaDataType,
-                                                                   YDataType,
-                                                                   AccDataType,
-                                                                   AccElementwiseOperation,
-                                                                   GridDesc_M_K,
-                                                                   GridDesc_K>
-                                                : kernel_layernorm<GridwiseReduceLayernormGeneric,
-                                                                   XDataType,
-                                                                   GammaDataType,
-                                                                   BetaDataType,
-                                                                   YDataType,
-                                                                   AccDataType,
-                                                                   AccElementwiseOperation,
-                                                                   GridDesc_M_K,
-                                                                   GridDesc_K>;
+            const auto kernel_main = arg.isSweeponce_
+                                         ? kernel_layernorm<GridwiseReduceLayernormSweepOnce,
+                                                            XDataType,
+                                                            GammaDataType,
+                                                            BetaDataType,
+                                                            YDataType,
+                                                            AccDataType,
+                                                            AccElementwiseOperation,
+                                                            GridDesc_M_K>
+                                         : kernel_layernorm<GridwiseReduceLayernormGeneric,
+                                                            XDataType,
+                                                            GammaDataType,
+                                                            BetaDataType,
+                                                            YDataType,
+                                                            AccDataType,
+                                                            AccElementwiseOperation,
+                                                            GridDesc_M_K>;
 
             float avg_time = 0;
             avg_time += launch_and_time_kernel(stream_config,
@@ -352,10 +318,10 @@ struct DeviceLayernormImpl : public DeviceLayernorm<XDataType,
                                                dim3(arg.gridSize_),
                                                dim3(BlockSize),
                                                0,
-                                               x_grid_desc_m_k,
-                                               gamma_grid_desc_k,
-                                               beta_grid_desc_k,
-                                               y_grid_desc_m_k,
+                                               arg.x_grid_desc_m_k_,
+                                               arg.gamma_grid_desc_m_k_,
+                                               arg.beta_grid_desc_m_k_,
+                                               arg.y_grid_desc_m_k_,
                                                arg.numBlockTileIteration_,
                                                arg.epsilon_,
                                                arg.p_x_,
@@ -409,26 +375,41 @@ struct DeviceLayernormImpl : public DeviceLayernorm<XDataType,
             return false;
         }
 
-        if(p_arg_->gammaStrides_.size() != NumReduceDim ||
-           p_arg_->betaStrides_.size() != NumReduceDim)
-            return false;
+        // if fastest dim is not reduced
+        if constexpr(GammaSrcVectorDim == 0)
+        {
+            if(p_arg_->gammaStrides_[NumInvariantDim - 1] != 1)
+                return (false);
 
-        auto IsScalarPerVectorValid = [](bool isLastDimensionCoalesced, int scalarPerVector) {
-            bool ret = true;
+            if(p_arg_->Lengths_[Rank - 1] % GammaSrcVectorSize != 0)
+                return (false);
+        }
+        else // if fastest dim is reduced
+        {
+            if(p_arg_->gammaStrides_[Rank - 1] != 1)
+                return (false);
 
-            if(!isLastDimensionCoalesced)
-                ret = scalarPerVector == 1;
-            else
-                ret = KThreadSliceSize % scalarPerVector == 0;
+            if(p_arg_->Lengths_[Rank - 1] % GammaSrcVectorSize != 0)
+                return (false);
+        }
 
-            return ret;
-        };
+        // if fastest dim is not reduced
+        if constexpr(BetaSrcVectorDim == 0)
+        {
+            if(p_arg_->betaStrides_[NumInvariantDim - 1] != 1)
+                return (false);
 
-        if(!IsScalarPerVectorValid(p_arg_->gammaStrides_.back() == 1, GammaSrcVectorSize))
-            return false;
+            if(p_arg_->invariant_lowest_length % BetaSrcVectorSize != 0)
+                return (false);
+        }
+        else // if fastest dim is reduced
+        {
+            if(p_arg_->betaStrides_[Rank - 1] != 1)
+                return (false);
 
-        if(!IsScalarPerVectorValid(p_arg_->betaStrides_.back() == 1, BetaSrcVectorSize))
-            return false;
+            if(p_arg_->Lengths_[Rank - 1] % BetaSrcVectorSize != 0)
+                return (false);
+        }
 
         return true;
     };
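Worked check for the groupnorm example above: with GammaSrcVectorDim = 1 the fastest gamma dimension must be contiguous (gammaStrides_[Rank - 1] == 1, satisfied by strides {0, 0, 0, C, 1}) and the fastest length must divide evenly into vectors; C = 40 with GammaSrcVectorSize = 8 gives 40 % 8 == 0, so the 8-wide instance is accepted.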
@@ -232,6 +232,21 @@ struct Gelu
     }
 };
 
+struct Sigmoid
+{
+    template <typename T>
+    __host__ __device__ void operator()(T& y, const T& x) const
+    {
+        static_assert(is_same<T, float>::value || is_same<T, double>::value ||
+                          is_same<T, ck::half_t>::value,
+                      "Data type is not supported by this operation!");
+
+        y = 1 / (ck::type_convert<T>(1) + exp(-x));
+    };
+
+    int32_t divider_ = 1;
+};
+
 } // namespace element_wise
 } // namespace tensor_operation
 } // namespace ck
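Combined with the multiply in the example's YElementOp, the fused epilogue computes y = x * sigmoid(x) = x / (1 + exp(-x)), i.e. the SiLU/Swish activation applied after groupnorm.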
@@ -22,7 +22,6 @@ template <typename XDataType,
           typename AccDataType,
           typename AccElementwiseOperation,
           typename GridDesc_M_K,
-          typename GridDesc_K,
           index_t BlockSize,
           index_t MThreadClusterSize,
           index_t KThreadClusterSize,
@@ -30,7 +29,9 @@ template <typename XDataType,
           index_t KThreadSliceSize,
           index_t XSrcVectorDim,
           index_t XSrcVectorSize,
+          index_t GammaSrcVectorDim,
           index_t GammaSrcVectorSize,
+          index_t BetaSrcVectorDim,
           index_t BetaSrcVectorSize,
           index_t YDstVectorDim,
           index_t YDstVectorSize,
@@ -78,13 +79,14 @@ struct GridwiseLayernormNaiveVariance_mk_to_mk
 
     static constexpr auto I0 = Number<0>{};
     static constexpr auto I1 = Number<1>{};
+    static constexpr auto I2 = Number<2>{};
 
     static constexpr index_t M_BlockTileSize = MThreadClusterSize * MThreadSliceSize;
     static constexpr index_t K_BlockTileSize = KThreadClusterSize * KThreadSliceSize;
 
     __device__ static void Run(const GridDesc_M_K& x_grid_desc_m_k,
-                               const GridDesc_K& gamma_grid_desc_k,
-                               const GridDesc_K& beta_grid_desc_k,
+                               const GridDesc_M_K& gamma_grid_desc_m_k,
+                               const GridDesc_M_K& beta_grid_desc_m_k,
                                const GridDesc_M_K& y_grid_desc_m_k,
                                index_t num_k_block_tile_iteration,
                                AccDataType epsilon,
@@ -111,11 +113,14 @@ struct GridwiseLayernormNaiveVariance_mk_to_mk
         StaticBuffer<AddressSpaceEnum::Vgpr, AccDataType, MThreadSliceSize * KThreadSliceSize, true>
             x_thread_buf;
 
-        StaticBuffer<AddressSpaceEnum::Vgpr, AccDataType, KThreadSliceSize, true> gamma_thread_buf;
-
-        StaticBuffer<AddressSpaceEnum::Vgpr, AccDataType, KThreadSliceSize, true>& beta_thread_buf =
+        StaticBuffer<AddressSpaceEnum::Vgpr, AccDataType, MThreadSliceSize * KThreadSliceSize, true>
             gamma_thread_buf;
 
+        StaticBuffer<AddressSpaceEnum::Vgpr,
+                     AccDataType,
+                     MThreadSliceSize * KThreadSliceSize,
+                     true>& beta_thread_buf = gamma_thread_buf;
+
         StaticBuffer<AddressSpaceEnum::Vgpr, AccDataType, MThreadSliceSize * KThreadSliceSize, true>
             y_thread_buf;
@@ -127,7 +132,7 @@ struct GridwiseLayernormNaiveVariance_mk_to_mk
         StaticBuffer<AddressSpaceEnum::Vgpr, AccDataType, MThreadSliceSize, true> mean_thread_buf;
         StaticBuffer<AddressSpaceEnum::Vgpr, AccDataType, MThreadSliceSize, true>
             mean_square_thread_buf;
-        StaticBuffer<AddressSpaceEnum::Vgpr, AccDataType, MThreadSliceSize, true>& var_value_buf =
+        StaticBuffer<AddressSpaceEnum::Vgpr, AccDataType, MThreadSliceSize, true>& var_thread_buf =
             mean_square_thread_buf;
 
         static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
@@ -145,11 +150,8 @@ struct GridwiseLayernormNaiveVariance_mk_to_mk
         const auto thread_k_cluster_id = thread_cluster_idx[I1];
 
         using ThreadBufferLengths_M_K = Sequence<MThreadSliceSize, KThreadSliceSize>;
-        using ThreadBufferLengths_K   = Sequence<KThreadSliceSize>;
         constexpr auto thread_buffer_desc_m_k = make_naive_tensor_descriptor_packed(
             make_tuple(Number<MThreadSliceSize>{}, Number<KThreadSliceSize>{}));
-        constexpr auto thread_buffer_desc_k =
-            make_naive_tensor_descriptor_packed(make_tuple(Number<KThreadSliceSize>{}));
 
         auto threadwise_x_load = ThreadwiseTensorSliceTransfer_v2<XDataType,
                                                                   AccDataType,
@@ -169,27 +171,34 @@ struct GridwiseLayernormNaiveVariance_mk_to_mk
         auto threadwise_gamma_load =
             ThreadwiseTensorSliceTransfer_v2<GammaDataType,
                                              AccDataType,
-                                             GridDesc_K,
-                                             decltype(thread_buffer_desc_k),
-                                             ThreadBufferLengths_K,
-                                             Sequence<0>,
-                                             0,
+                                             GridDesc_M_K,
+                                             decltype(thread_buffer_desc_m_k),
+                                             ThreadBufferLengths_M_K,
+                                             ThreadBufferDimAccessOrder,
+                                             GammaSrcVectorDim,
                                              GammaSrcVectorSize,
                                              1,
                                              true>(
-                gamma_grid_desc_k, make_multi_index(thread_k_cluster_id * KThreadSliceSize));
+                gamma_grid_desc_m_k,
+                make_multi_index(block_global_id * M_BlockTileSize +
+                                     thread_m_cluster_id * MThreadSliceSize,
+                                 thread_k_cluster_id * KThreadSliceSize));
 
-        auto threadwise_beta_load = ThreadwiseTensorSliceTransfer_v2<BetaDataType,
-                                                                     AccDataType,
-                                                                     GridDesc_K,
-                                                                     decltype(thread_buffer_desc_k),
-                                                                     ThreadBufferLengths_K,
-                                                                     Sequence<0>,
-                                                                     0,
-                                                                     BetaSrcVectorSize,
-                                                                     1,
-                                                                     true>(
-            beta_grid_desc_k, make_multi_index(thread_k_cluster_id * KThreadSliceSize));
+        auto threadwise_beta_load =
+            ThreadwiseTensorSliceTransfer_v2<BetaDataType,
+                                             AccDataType,
+                                             GridDesc_M_K,
+                                             decltype(thread_buffer_desc_m_k),
+                                             ThreadBufferLengths_M_K,
+                                             ThreadBufferDimAccessOrder,
+                                             BetaSrcVectorDim,
+                                             BetaSrcVectorSize,
+                                             1,
+                                             true>(
+                beta_grid_desc_m_k,
+                make_multi_index(block_global_id * M_BlockTileSize +
+                                     thread_m_cluster_id * MThreadSliceSize,
+                                 thread_k_cluster_id * KThreadSliceSize));
 
         auto threadwise_y_store =
             ThreadwiseTensorSliceTransfer_v1r3<AccDataType,
@@ -212,9 +221,6 @@ struct GridwiseLayernormNaiveVariance_mk_to_mk
 
         // Copy x from Cache
         // one pass: fwd, second pass: bwd
-        constexpr auto thread_copy_fwd_step_k = make_multi_index(SweepOnce ? 0 : K_BlockTileSize);
-        constexpr auto thread_copy_bwd_step_k = make_multi_index(SweepOnce ? 0 : -K_BlockTileSize);
-
         constexpr auto thread_copy_fwd_step_m_k =
             make_multi_index(0, SweepOnce ? 0 : K_BlockTileSize);
         constexpr auto thread_copy_bwd_step_m_k =
@@ -224,13 +230,14 @@ struct GridwiseLayernormNaiveVariance_mk_to_mk
             p_x_global, x_grid_desc_m_k.GetElementSpaceSize());
 
         const auto gamma_global_val_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
-            p_gamma_global, gamma_grid_desc_k.GetElementSpaceSize());
+            p_gamma_global, gamma_grid_desc_m_k.GetElementSpaceSize());
 
         const auto beta_global_val_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
-            p_beta_global, beta_grid_desc_k.GetElementSpaceSize());
+            p_beta_global, beta_grid_desc_m_k.GetElementSpaceSize());
 
         // E(x), E[x^2], var(x)
-        int reduce_length = x_grid_desc_m_k.GetTransforms()[I0].GetUpperLengths()[I1];
+        // FIXME: Should not hack the transform from deviceOP
+        int reduce_length = x_grid_desc_m_k.GetTransforms()[I2].GetUpperLengths()[I0];
 
         index_t reducedTiles = 0;
         do
@@ -271,17 +278,16 @@ struct GridwiseLayernormNaiveVariance_mk_to_mk
                 mean_square_thread_buf(I) = mean_square_thread_buf(I) / reduce_length;
 
                 // var(x) = E[x^2] - E[x]^2
-                var_value_buf(I) =
+                var_thread_buf(I) =
                     mean_square_thread_buf(I) - (mean_thread_buf(I) * mean_thread_buf(I));
             });
 
         // y = (x - E[x]) / sqrt(var[x] + epsilon)
         auto thread_copy_tail_m_k = (num_k_block_tile_iteration - 1) * thread_copy_fwd_step_m_k;
-        auto thread_copy_tail_k   = (num_k_block_tile_iteration - 1) * thread_copy_fwd_step_k;
 
         threadwise_x_load.MoveSrcSliceWindow(x_grid_desc_m_k, thread_copy_bwd_step_m_k);
-        threadwise_gamma_load.MoveSrcSliceWindow(gamma_grid_desc_k, thread_copy_tail_k);
-        threadwise_beta_load.MoveSrcSliceWindow(beta_grid_desc_k, thread_copy_tail_k);
+        threadwise_gamma_load.MoveSrcSliceWindow(gamma_grid_desc_m_k, thread_copy_tail_m_k);
+        threadwise_beta_load.MoveSrcSliceWindow(beta_grid_desc_m_k, thread_copy_tail_m_k);
         threadwise_y_store.MoveDstSliceWindow(y_grid_desc_m_k, thread_copy_tail_m_k);
 
         reducedTiles = 0;
@@ -296,10 +302,10 @@ struct GridwiseLayernormNaiveVariance_mk_to_mk
                     x_thread_buf);
             }
 
-            threadwise_gamma_load.Run(gamma_grid_desc_k,
+            threadwise_gamma_load.Run(gamma_grid_desc_m_k,
                                       gamma_global_val_buf,
-                                      thread_buffer_desc_k,
-                                      make_tuple(I0),
+                                      thread_buffer_desc_m_k,
+                                      make_tuple(I0, I0),
                                       gamma_thread_buf);
 
             static_for<0, MThreadSliceSize, 1>{}([&](auto iM) {
@@ -307,23 +313,21 @@ struct GridwiseLayernormNaiveVariance_mk_to_mk
                     constexpr auto offset_m_k =
                         thread_buffer_desc_m_k.CalculateOffset(make_tuple(iM, iK));
 
-                    constexpr auto offset_k = thread_buffer_desc_k.CalculateOffset(make_tuple(iK));
-
                     // normalize
                     y_thread_buf(Number<offset_m_k>{}) =
                         (x_thread_buf(Number<offset_m_k>{}) - mean_thread_buf(iM)) /
-                        sqrt(var_value_buf(iM) + epsilon);
+                        sqrt(var_thread_buf(iM) + epsilon);
 
                     // gamma
                     y_thread_buf(Number<offset_m_k>{}) =
-                        y_thread_buf(Number<offset_m_k>{}) * gamma_thread_buf(Number<offset_k>{});
+                        y_thread_buf(Number<offset_m_k>{}) * gamma_thread_buf(Number<offset_m_k>{});
                 });
             });
 
-            threadwise_beta_load.Run(beta_grid_desc_k,
+            threadwise_beta_load.Run(beta_grid_desc_m_k,
                                      beta_global_val_buf,
-                                     thread_buffer_desc_k,
-                                     make_tuple(I0),
+                                     thread_buffer_desc_m_k,
+                                     make_tuple(I0, I0),
                                      beta_thread_buf);
 
             static_for<0, MThreadSliceSize, 1>{}([&](auto iM) {
@@ -331,11 +335,9 @@ struct GridwiseLayernormNaiveVariance_mk_to_mk
                     constexpr auto offset_m_k =
                         thread_buffer_desc_m_k.CalculateOffset(make_tuple(iM, iK));
 
-                    constexpr auto offset_k = thread_buffer_desc_k.CalculateOffset(make_tuple(iK));
-
                     // beta
                     y_thread_buf(Number<offset_m_k>{}) =
-                        y_thread_buf(Number<offset_m_k>{}) + beta_thread_buf(Number<offset_k>{});
+                        y_thread_buf(Number<offset_m_k>{}) + beta_thread_buf(Number<offset_m_k>{});
                 });
            });
 
@@ -346,8 +348,8 @@ struct GridwiseLayernormNaiveVariance_mk_to_mk
                 y_global_val_buf);
 
             threadwise_x_load.MoveSrcSliceWindow(x_grid_desc_m_k, thread_copy_bwd_step_m_k);
-            threadwise_gamma_load.MoveSrcSliceWindow(gamma_grid_desc_k, thread_copy_bwd_step_k);
-            threadwise_beta_load.MoveSrcSliceWindow(beta_grid_desc_k, thread_copy_bwd_step_k);
+            threadwise_gamma_load.MoveSrcSliceWindow(gamma_grid_desc_m_k, thread_copy_bwd_step_m_k);
+            threadwise_beta_load.MoveSrcSliceWindow(beta_grid_desc_m_k, thread_copy_bwd_step_m_k);
             threadwise_y_store.MoveDstSliceWindow(y_grid_desc_m_k, thread_copy_bwd_step_m_k);
 
             ++reducedTiles;
@@ -19,7 +19,6 @@ template <typename XDataType,
           typename AccDataType,
           typename AccElementwiseOperation,
           typename GridDesc_M_K,
-          typename GridDesc_K,
           index_t BlockSize,
           index_t MThreadClusterSize,
           index_t KThreadClusterSize,
@@ -27,7 +26,9 @@ template <typename XDataType,
           index_t KThreadSliceSize,
           index_t XSrcVectorDim,
           index_t XSrcVectorSize,
+          index_t GammaSrcVectorDim,
           index_t GammaSrcVectorSize,
+          index_t BetaSrcVectorDim,
           index_t BetaSrcVectorSize,
           index_t YDstVectorDim,
           index_t YDstVectorSize,
@@ -70,6 +71,7 @@ struct GridwiseLayernormWelfordVariance_mk_to_mk
 
     static constexpr auto I0 = Number<0>{};
     static constexpr auto I1 = Number<1>{};
+    static constexpr auto I2 = Number<2>{};
 
     static constexpr index_t M_BlockTileSize = MThreadClusterSize * MThreadSliceSize;
     static constexpr index_t K_BlockTileSize = KThreadClusterSize * KThreadSliceSize;
@@ -77,7 +79,8 @@ struct GridwiseLayernormWelfordVariance_mk_to_mk
     __device__ static int GetKPerThread(const GridDesc_M_K& x_grid_desc_m_k,
                                         int thread_k_cluster_id)
     {
-        int kPerBlock = x_grid_desc_m_k.GetTransforms()[I0].GetUpperLengths()[I1];
+        // FIXME: Should not hack the transform from deviceOP
+        int kPerBlock = x_grid_desc_m_k.GetTransforms()[I2].GetUpperLengths()[I0];
         int kPerThread =
             kPerBlock < K_BlockTileSize ? 0 : KThreadSliceSize * (kPerBlock / K_BlockTileSize);
         int kPerBlockTail = kPerBlock - kPerThread * KThreadClusterSize;
@@ -94,8 +97,8 @@ struct GridwiseLayernormWelfordVariance_mk_to_mk
     }
 
     __device__ static void Run(const GridDesc_M_K& x_grid_desc_m_k,
-                               const GridDesc_K& gamma_grid_desc_k,
-                               const GridDesc_K& beta_grid_desc_k,
+                               const GridDesc_M_K& gamma_grid_desc_m_k,
+                               const GridDesc_M_K& beta_grid_desc_m_k,
                                const GridDesc_M_K& y_grid_desc_m_k,
                                index_t num_k_block_tile_iteration,
                                AccDataType epsilon,
@@ -116,11 +119,14 @@ struct GridwiseLayernormWelfordVariance_mk_to_mk
         StaticBuffer<AddressSpaceEnum::Vgpr, AccDataType, MThreadSliceSize * KThreadSliceSize, true>
             x_thread_buf;
 
-        StaticBuffer<AddressSpaceEnum::Vgpr, AccDataType, KThreadSliceSize, true> gamma_thread_buf;
-
-        StaticBuffer<AddressSpaceEnum::Vgpr, AccDataType, KThreadSliceSize, true>& beta_thread_buf =
+        StaticBuffer<AddressSpaceEnum::Vgpr, AccDataType, MThreadSliceSize * KThreadSliceSize, true>
             gamma_thread_buf;
 
+        StaticBuffer<AddressSpaceEnum::Vgpr,
+                     AccDataType,
+                     MThreadSliceSize * KThreadSliceSize,
+                     true>& beta_thread_buf = gamma_thread_buf;
+
         StaticBuffer<AddressSpaceEnum::Vgpr, AccDataType, MThreadSliceSize * KThreadSliceSize, true>
             y_thread_buf;
@@ -137,11 +143,8 @@ struct GridwiseLayernormWelfordVariance_mk_to_mk
         const auto thread_k_cluster_id = thread_cluster_idx[I1];
 
         using ThreadBufferLengths_M_K = Sequence<MThreadSliceSize, KThreadSliceSize>;
-        using ThreadBufferLengths_K   = Sequence<KThreadSliceSize>;
         constexpr auto thread_buffer_desc_m_k = make_naive_tensor_descriptor_packed(
             make_tuple(Number<MThreadSliceSize>{}, Number<KThreadSliceSize>{}));
-        constexpr auto thread_buffer_desc_k =
-            make_naive_tensor_descriptor_packed(make_tuple(Number<KThreadSliceSize>{}));
 
         auto threadwise_x_load = ThreadwiseTensorSliceTransfer_v2<XDataType,
                                                                   AccDataType,
@@ -161,27 +164,34 @@ struct GridwiseLayernormWelfordVariance_mk_to_mk
         auto threadwise_gamma_load =
             ThreadwiseTensorSliceTransfer_v2<GammaDataType,
                                              AccDataType,
-                                             GridDesc_K,
-                                             decltype(thread_buffer_desc_k),
-                                             ThreadBufferLengths_K,
-                                             Sequence<0>,
-                                             0,
+                                             GridDesc_M_K,
+                                             decltype(thread_buffer_desc_m_k),
+                                             ThreadBufferLengths_M_K,
+                                             ThreadBufferDimAccessOrder,
+                                             GammaSrcVectorDim,
                                              GammaSrcVectorSize,
                                              1,
                                              true>(
-                gamma_grid_desc_k, make_multi_index(thread_k_cluster_id * KThreadSliceSize));
+                gamma_grid_desc_m_k,
+                make_multi_index(block_global_id * M_BlockTileSize +
+                                     thread_m_cluster_id * MThreadSliceSize,
+                                 thread_k_cluster_id * KThreadSliceSize));
 
-        auto threadwise_beta_load = ThreadwiseTensorSliceTransfer_v2<BetaDataType,
-                                                                     AccDataType,
-                                                                     GridDesc_K,
-                                                                     decltype(thread_buffer_desc_k),
-                                                                     ThreadBufferLengths_K,
-                                                                     Sequence<0>,
-                                                                     0,
-                                                                     BetaSrcVectorSize,
-                                                                     1,
-                                                                     true>(
-            beta_grid_desc_k, make_multi_index(thread_k_cluster_id * KThreadSliceSize));
+        auto threadwise_beta_load =
+            ThreadwiseTensorSliceTransfer_v2<BetaDataType,
+                                             AccDataType,
+                                             GridDesc_M_K,
+                                             decltype(thread_buffer_desc_m_k),
+                                             ThreadBufferLengths_M_K,
+                                             ThreadBufferDimAccessOrder,
+                                             BetaSrcVectorDim,
+                                             BetaSrcVectorSize,
+                                             1,
+                                             true>(
+                beta_grid_desc_m_k,
+                make_multi_index(block_global_id * M_BlockTileSize +
+                                     thread_m_cluster_id * MThreadSliceSize,
+                                 thread_k_cluster_id * KThreadSliceSize));
 
         auto threadwise_y_store =
             ThreadwiseTensorSliceTransfer_v1r3<AccDataType,
@@ -204,9 +214,6 @@ struct GridwiseLayernormWelfordVariance_mk_to_mk
 
         // Copy x from Cache
         // one pass: fwd, second pass: bwd
-        constexpr auto thread_copy_fwd_step_k = make_multi_index(SweepOnce ? 0 : K_BlockTileSize);
-        constexpr auto thread_copy_bwd_step_k = make_multi_index(SweepOnce ? 0 : -K_BlockTileSize);
-
         constexpr auto thread_copy_fwd_step_m_k =
             make_multi_index(0, SweepOnce ? 0 : K_BlockTileSize);
         constexpr auto thread_copy_bwd_step_m_k =
@@ -216,10 +223,10 @@ struct GridwiseLayernormWelfordVariance_mk_to_mk
             p_x_global, x_grid_desc_m_k.GetElementSpaceSize());
 
         const auto gamma_global_val_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
-            p_gamma_global, gamma_grid_desc_k.GetElementSpaceSize());
+            p_gamma_global, gamma_grid_desc_m_k.GetElementSpaceSize());
 
         const auto beta_global_val_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
-            p_beta_global, beta_grid_desc_k.GetElementSpaceSize());
+            p_beta_global, beta_grid_desc_m_k.GetElementSpaceSize());
 
         auto threadwise_welford       = ThreadwiseWelford();
         threadwise_welford.max_count_ = GetKPerThread(x_grid_desc_m_k, thread_k_cluster_id);
@@ -250,11 +257,10 @@ struct GridwiseLayernormWelfordVariance_mk_to_mk
         });
 
         auto thread_copy_tail_m_k = (num_k_block_tile_iteration - 1) * thread_copy_fwd_step_m_k;
-        auto thread_copy_tail_k   = (num_k_block_tile_iteration - 1) * thread_copy_fwd_step_k;
 
         threadwise_x_load.MoveSrcSliceWindow(x_grid_desc_m_k, thread_copy_bwd_step_m_k);
-        threadwise_gamma_load.MoveSrcSliceWindow(gamma_grid_desc_k, thread_copy_tail_k);
-        threadwise_beta_load.MoveSrcSliceWindow(beta_grid_desc_k, thread_copy_tail_k);
+        threadwise_gamma_load.MoveSrcSliceWindow(gamma_grid_desc_m_k, thread_copy_tail_m_k);
+        threadwise_beta_load.MoveSrcSliceWindow(beta_grid_desc_m_k, thread_copy_tail_m_k);
         threadwise_y_store.MoveDstSliceWindow(y_grid_desc_m_k, thread_copy_tail_m_k);
 
         for(index_t reducedTiles = 0; reducedTiles < num_k_block_tile_iteration; ++reducedTiles)
@@ -268,10 +274,10 @@ struct GridwiseLayernormWelfordVariance_mk_to_mk
                     x_thread_buf);
             }
 
-            threadwise_gamma_load.Run(gamma_grid_desc_k,
+            threadwise_gamma_load.Run(gamma_grid_desc_m_k,
                                       gamma_global_val_buf,
-                                      thread_buffer_desc_k,
-                                      make_tuple(I0),
+                                      thread_buffer_desc_m_k,
+                                      make_tuple(I0, I0),
                                       gamma_thread_buf);
 
         static_for<0, MThreadSliceSize, 1>{}([&](auto iM) {
@@ -279,8 +285,6 @@ struct GridwiseLayernormWelfordVariance_mk_to_mk
                 constexpr auto offset_m_k =
                     thread_buffer_desc_m_k.CalculateOffset(make_tuple(iM, iK));
 
-                constexpr auto offset_k = thread_buffer_desc_k.CalculateOffset(make_tuple(iK));
-
                 // normalize
                 y_thread_buf(Number<offset_m_k>{}) =
                     (x_thread_buf(Number<offset_m_k>{}) - mean_thread_buf(iM)) /
@@ -288,14 +292,14 @@ struct GridwiseLayernormWelfordVariance_mk_to_mk
 
                 // gamma
                 y_thread_buf(Number<offset_m_k>{}) =
-                    y_thread_buf(Number<offset_m_k>{}) * gamma_thread_buf(Number<offset_k>{});
+                    y_thread_buf(Number<offset_m_k>{}) * gamma_thread_buf(Number<offset_m_k>{});
             });
         });
 
-        threadwise_beta_load.Run(beta_grid_desc_k,
+        threadwise_beta_load.Run(beta_grid_desc_m_k,
                                  beta_global_val_buf,
-                                 thread_buffer_desc_k,
-                                 make_tuple(I0),
+                                 thread_buffer_desc_m_k,
+                                 make_tuple(I0, I0),
                                  beta_thread_buf);
 
         static_for<0, MThreadSliceSize, 1>{}([&](auto iM) {
@@ -303,11 +307,9 @@ struct GridwiseLayernormWelfordVariance_mk_to_mk
                 constexpr auto offset_m_k =
                     thread_buffer_desc_m_k.CalculateOffset(make_tuple(iM, iK));
 
-                constexpr auto offset_k = thread_buffer_desc_k.CalculateOffset(make_tuple(iK));
-
                 // beta
                 y_thread_buf(Number<offset_m_k>{}) =
-                    y_thread_buf(Number<offset_m_k>{}) + beta_thread_buf(Number<offset_k>{});
+                    y_thread_buf(Number<offset_m_k>{}) + beta_thread_buf(Number<offset_m_k>{});
             });
         });
 
@@ -318,8 +320,8 @@ struct GridwiseLayernormWelfordVariance_mk_to_mk
             y_global_val_buf);
 
         threadwise_x_load.MoveSrcSliceWindow(x_grid_desc_m_k, thread_copy_bwd_step_m_k);
-        threadwise_gamma_load.MoveSrcSliceWindow(gamma_grid_desc_k, thread_copy_bwd_step_k);
-        threadwise_beta_load.MoveSrcSliceWindow(beta_grid_desc_k, thread_copy_bwd_step_k);
+        threadwise_gamma_load.MoveSrcSliceWindow(gamma_grid_desc_m_k, thread_copy_bwd_step_m_k);
+        threadwise_beta_load.MoveSrcSliceWindow(beta_grid_desc_m_k, thread_copy_bwd_step_m_k);
         threadwise_y_store.MoveDstSliceWindow(y_grid_desc_m_k, thread_copy_bwd_step_m_k);
         }
     }
ck/library/reference_tensor_operation/cpu/reference_groupnorm.hpp (new file, 191 lines; path taken from the include in the example above)
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
+
+#pragma once
+
+#include <iostream>
+#include <sstream>
+#include <vector>
+#include <algorithm>
+
+#include "ck/tensor_operation/gpu/device/device_base.hpp"
+#include "ck/library/utility/host_tensor.hpp"
+#include "ck/library/utility/host_tensor_generator.hpp"
+
+namespace ck {
+namespace tensor_operation {
+namespace host {
+
+template <typename XDataType,
+          typename GammaDataType,
+          typename BetaDataType,
+          typename YDataType,
+          typename AccDataType,
+          typename AccElementwiseOperation>
+struct ReferenceGroupnorm : public device::BaseOperator
+{
+    // x = [N, H, W, G, C]
+    // y = [N, H, W, G, C]
+    // reduce dim [H, W, C], mean, var = [N, G]
+    // gamma, beta = [G, C]
+    struct Argument : public device::BaseArgument
+    {
+        Argument(const Tensor<XDataType>& x,
+                 const Tensor<GammaDataType>& gamma,
+                 const Tensor<BetaDataType>& beta,
+                 Tensor<YDataType>& y,
+                 AccElementwiseOperation acc_elementwise_op,
+                 const std::vector<index_t> lengths,
+                 AccDataType epsilon)
+            : x_(x),
+              gamma_(gamma),
+              beta_(beta),
+              y_(y),
+              acc_elementwise_op_(acc_elementwise_op),
+              lengths_(lengths),
+              epsilon_(epsilon)
+        {
+        }
+
+        const Tensor<XDataType> x_;
+        const Tensor<XDataType> gamma_;
+        const Tensor<XDataType> beta_;
+        Tensor<YDataType>& y_;
+        AccElementwiseOperation acc_elementwise_op_;
+        std::vector<index_t> lengths_;
+        AccDataType epsilon_;
+    };
+
+    // Invoker
+    struct Invoker : public device::BaseInvoker
+    {
+        float Run(const Argument& arg)
+        {
+            int N = arg.lengths_[0];
+            int H = arg.lengths_[1];
+            int W = arg.lengths_[2];
+            int G = arg.lengths_[3];
+            int C = arg.lengths_[4];
+
+            Tensor<AccDataType> mean({N, G});
+            Tensor<AccDataType> var({N, G});
+
+            // Compute mean & var in [H, W, C] by Welford Algorithm
+            // TODO - parallel for each HWC
+            // TODO - address calculation
+            for(int n = 0; n < N; ++n)
+            {
+                for(int g = 0; g < G; ++g)
+                {
+                    AccDataType mean_val = type_convert<AccDataType>(0.0f);
+                    AccDataType var_val  = type_convert<AccDataType>(0.0f);
+                    int32_t curr_count   = 0;
+
+                    for(int h = 0; h < H; ++h)
+                    {
+                        for(int w = 0; w < W; ++w)
+                        {
+                            for(int c = 0; c < C; ++c)
+                            {
+                                curr_count++;
+                                AccDataType x = type_convert<AccDataType>(arg.x_(n, h, w, g, c));
+                                AccDataType delta = x - mean_val;
+                                mean_val += delta / curr_count;
+                                AccDataType delta2 = x - mean_val;
+                                var_val += delta * delta2;
+                            }
+                        }
+                    }
+
+                    mean(n, g) = mean_val;
+                    var(n, g)  = var_val / curr_count;
+                }
+            }
+
+            // Normalization
+            for(int n = 0; n < N; ++n)
+            {
+                for(int h = 0; h < H; ++h)
+                {
+                    for(int w = 0; w < W; ++w)
+                    {
+                        for(int g = 0; g < G; ++g)
+                        {
+                            for(int c = 0; c < C; ++c)
+                            {
+                                AccDataType x = type_convert<AccDataType>(arg.x_(n, h, w, g, c));
+                                AccDataType gamma = type_convert<AccDataType>(arg.gamma_(g, c));
+                                AccDataType beta  = type_convert<AccDataType>(arg.beta_(g, c));
+                                AccDataType mean_val = type_convert<AccDataType>(mean(n, g));
+                                AccDataType var_val  = type_convert<AccDataType>(var(n, g));
+                                AccDataType y = gamma * (x - mean_val) /
+                                                    ck::math::sqrt(arg.epsilon_ + var_val) +
+                                                beta;
+                                arg.acc_elementwise_op_(y, y);
+                                arg.y_(n, h, w, g, c) = type_convert<YDataType>(y);
+                            }
+                        }
+                    }
+                }
+            }
+
+            return 0;
+        }
+
+        float Run(const device::BaseArgument* p_arg,
+                  const StreamConfig& /* stream_config */ = StreamConfig{}) override
+        {
+            return Run(*dynamic_cast<const Argument*>(p_arg));
+        }
+    };
+
+    static constexpr bool IsValidCompilationParameter()
+    {
+        // TODO: properly implement this check
+        return true;
+    }
+
+    bool IsSupportedArgument(const device::BaseArgument* p_arg) override
+    {
+        const Argument* p_arg_ = dynamic_cast<const Argument*>(p_arg);
+        if(p_arg_->lengths_.size() != 5)
+            return false;
+
+        return true;
+    }
+
+    static auto MakeArgument(const Tensor<XDataType>& x,
+                             const Tensor<GammaDataType>& gamma,
+                             const Tensor<BetaDataType>& beta,
+                             Tensor<YDataType>& y,
+                             AccElementwiseOperation acc_elementwise_op,
+                             const std::vector<index_t> lengths,
+                             AccDataType epsilon)
+    {
+        return Argument{x, gamma, beta, y, acc_elementwise_op, lengths, epsilon};
+    }
+
+    static auto MakeInvoker() { return Invoker{}; }
+
+    virtual std::unique_ptr<device::BaseInvoker> MakeInvokerPointer()
+    {
+        return std::make_unique<Invoker>(Invoker{});
+    }
+
+    std::string GetTypeString() const override
+    {
+        auto str = std::stringstream();
+
+        // clang-format off
+        str << "ReferenceLayernorm"
+            << std::endl;
+        // clang-format on
+
+        return str.str();
+    }
+};
+
+} // namespace host
+} // namespace tensor_operation
+} // namespace ck
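The inner loop above is Welford's single-pass mean/variance update: each sample contributes delta = x - mean to the running mean and delta * delta2 (old and new deltas) to the running sum of squared deviations, with var = M2 / count at the end. A self-contained sketch of the same recurrence:

    // Minimal Welford accumulator (illustrative; population variance).
    struct Welford
    {
        double mean = 0.0;
        double m2   = 0.0;
        long long count = 0;

        void update(double x)
        {
            ++count;
            const double delta = x - mean; // against the old mean
            mean += delta / count;
            const double delta2 = x - mean; // against the new mean
            m2 += delta * delta2;
        }

        double variance() const { return count > 0 ? m2 / count : 0.0; }
    };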
@@ -17,17 +17,25 @@ namespace tensor_operation {
 namespace device {
 namespace instance {
 
-void add_device_layernorm_f16_rank2_instances(
-    std::vector<DeviceLayernormPtr<F16, F16, F16, F32, F16, PassThrough, 2, 1>>&);
+// FP16
+void add_device_layernorm_rank_2_1_f16_instances(
+    std::vector<std::unique_ptr<DeviceLayernorm<F16, F16, F16, F32, F16, PassThrough, 2, 1>>>&);
 
-void add_device_layernorm_f16_rank4_instances(
-    std::vector<DeviceLayernormPtr<F16, F16, F16, F32, F16, PassThrough, 4, 3>>&);
+void add_device_layernorm_rank_4_3_f16_instances(
+    std::vector<std::unique_ptr<DeviceLayernorm<F16, F16, F16, F32, F16, PassThrough, 4, 3>>>&);
 
-void add_device_layernorm_f32_rank2_instances(
-    std::vector<DeviceLayernormPtr<F32, F32, F32, F32, F32, PassThrough, 2, 1>>&);
+void add_device_layernorm_rank_5_3_f16_instances(
+    std::vector<std::unique_ptr<DeviceLayernorm<F16, F16, F16, F32, F16, PassThrough, 5, 3>>>&);
 
-void add_device_layernorm_f32_rank4_instances(
-    std::vector<DeviceLayernormPtr<F32, F32, F32, F32, F32, PassThrough, 4, 3>>&);
+// FP32
+void add_device_layernorm_rank_2_1_f32_instances(
+    std::vector<std::unique_ptr<DeviceLayernorm<F32, F32, F32, F32, F32, PassThrough, 2, 1>>>&);
+
+void add_device_layernorm_rank_4_3_f32_instances(
+    std::vector<std::unique_ptr<DeviceLayernorm<F32, F32, F32, F32, F32, PassThrough, 4, 3>>>&);
+
+void add_device_layernorm_rank_5_3_f32_instances(
+    std::vector<std::unique_ptr<DeviceLayernorm<F32, F32, F32, F32, F32, PassThrough, 5, 3>>>&);
 
 template <typename XDataType,
           typename GammaDataType,
@@ -62,17 +70,33 @@ struct DeviceOperationInstanceFactory<
                          is_same_v<BetaDataType, F16> && is_same_v<YDataType, F16>)
         {
             if constexpr(Rank == 2 && NumReduceDim == 1)
-                add_device_layernorm_f16_rank2_instances(op_ptrs);
+            {
+                add_device_layernorm_rank_2_1_f16_instances(op_ptrs);
+            }
             else if constexpr(Rank == 4 && NumReduceDim == 3)
-                add_device_layernorm_f16_rank4_instances(op_ptrs);
+            {
+                add_device_layernorm_rank_4_3_f16_instances(op_ptrs);
+            }
+            else if constexpr(Rank == 5 && NumReduceDim == 3)
+            {
+                add_device_layernorm_rank_5_3_f16_instances(op_ptrs);
+            }
         }
         else if constexpr(is_same_v<XDataType, F32> && is_same_v<GammaDataType, F32> &&
                           is_same_v<BetaDataType, F32> && is_same_v<YDataType, F32>)
         {
             if constexpr(Rank == 2 && NumReduceDim == 1)
-                add_device_layernorm_f32_rank2_instances(op_ptrs);
+            {
+                add_device_layernorm_rank_2_1_f32_instances(op_ptrs);
+            }
             else if constexpr(Rank == 4 && NumReduceDim == 3)
-                add_device_layernorm_f32_rank4_instances(op_ptrs);
+            {
+                add_device_layernorm_rank_4_3_f32_instances(op_ptrs);
+            }
+            else if constexpr(Rank == 5 && NumReduceDim == 3)
+            {
+                add_device_layernorm_rank_5_3_f32_instances(op_ptrs);
+            }
         }
 
         return op_ptrs;
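For context, a client would pull the new rank-5 instances through the factory along these lines (a sketch; surrounding includes and argument setup are omitted, and exact call sites may differ):

    // Hypothetical use of the instance factory for rank-5 f16 layernorm.
    using DeviceOp =
        ck::tensor_operation::device::DeviceLayernorm<F16, F16, F16, F32, F16, PassThrough, 5, 3>;

    auto op_ptrs = ck::tensor_operation::device::instance::
        DeviceOperationInstanceFactory<DeviceOp>::GetInstances();

    for(auto& op_ptr : op_ptrs)
    {
        // build an argument, check op_ptr->IsSupportedArgument(...), then run/profile
    }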
@@ -17,34 +17,40 @@ using F32 = float;
 
 using Pass = ck::tensor_operation::element_wise::PassThrough;
 
-template <index_t Rank, index_t Reduce>
+template <typename OutElementwise, index_t Rank, index_t Reduce>
 using device_layernorm_f16_instances = std::tuple<
     // clang-format off
-    // XDataType, GammaDataType, BetaDataType, AccDataType, YDataType, Rank, NumReduceDim, BlockSize, MThreadClusterSize, KThreadClusterSize, MThreadSliceSize, KThreadSliceSize, XYSrcVectorDim, XSrcVectorSize, GammaSrcVectorSize, BetaSrcVectorSize, YDstVectorSize>
-    DeviceLayernormImpl<F16, F16, F16, F32, F16, Pass, Rank, Reduce, 256, 8, 32, 1, 8, 1, 1, 1, 1, 1>, // fallback kernel
-    DeviceLayernormImpl<F16, F16, F16, F32, F16, Pass, Rank, Reduce, 256, 8, 32, 1, 8, 1, 2, 2, 2, 2>, // fallback kernel
-    DeviceLayernormImpl<F16, F16, F16, F32, F16, Pass, Rank, Reduce, 256, 8, 32, 1, 8, 1, 4, 4, 4, 4>, // fallback kernel
-    DeviceLayernormImpl<F16, F16, F16, F32, F16, Pass, Rank, Reduce, 256, 8, 32, 1, 8, 1, 8, 8, 8, 8>,
-    DeviceLayernormImpl<F16, F16, F16, F32, F16, Pass, Rank, Reduce, 256, 4, 64, 1, 8, 1, 8, 8, 8, 8>,
-    DeviceLayernormImpl<F16, F16, F16, F32, F16, Pass, Rank, Reduce, 256, 2, 128, 1, 8, 1, 8, 8, 8, 8>,
-    DeviceLayernormImpl<F16, F16, F16, F32, F16, Pass, Rank, Reduce, 256, 2, 128, 1, 16, 1, 8, 8, 8, 8>,
-    DeviceLayernormImpl<F16, F16, F16, F32, F16, Pass, Rank, Reduce, 256, 2, 128, 1, 32, 1, 8, 8, 8, 8>,
-    DeviceLayernormImpl<F16, F16, F16, F32, F16, Pass, Rank, Reduce, 256, 1, 256, 1, 8, 1, 8, 8, 8, 8>,
-    DeviceLayernormImpl<F16, F16, F16, F32, F16, Pass, Rank, Reduce, 256, 1, 256, 1, 16, 1, 8, 8, 8, 8>,
-    DeviceLayernormImpl<F16, F16, F16, F32, F16, Pass, Rank, Reduce, 256, 1, 256, 1, 32, 1, 8, 8, 8, 8>
+    // XDataType, GammaDataType, BetaDataType, AccDataType, YDataType, Rank, NumReduceDim, BlockSize, MThreadClusterSize, KThreadClusterSize, MThreadSliceSize, KThreadSliceSize, XYSrcVectorDim, XSrcVectorSize, GammaSrcVectorDim, GammaSrcVectorSize, BetaSrcVectorDim, BetaSrcVectorSize, YDstVectorSize>
+    DeviceLayernormImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 8, 32, 1, 8, 1, 1, 1, 1, 1, 1, 1>, // fallback kernel
+    DeviceLayernormImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 8, 32, 1, 8, 1, 2, 1, 2, 1, 2, 2>, // fallback kernel
+    DeviceLayernormImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 8, 32, 1, 8, 1, 4, 1, 4, 1, 4, 4>, // fallback kernel
+    DeviceLayernormImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 8, 32, 1, 8, 1, 8, 1, 8, 1, 8, 8>,
+    DeviceLayernormImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 4, 64, 1, 8, 1, 8, 1, 8, 1, 8, 8>,
+    DeviceLayernormImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 2, 128, 1, 8, 1, 8, 1, 8, 1, 8, 8>,
+    DeviceLayernormImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 2, 128, 1, 16, 1, 8, 1, 8, 1, 8, 8>,
+    DeviceLayernormImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 2, 128, 1, 32, 1, 8, 1, 8, 1, 8, 8>,
+    DeviceLayernormImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 8, 1, 8, 1, 8, 1, 8, 8>,
+    DeviceLayernormImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 16, 1, 8, 1, 8, 1, 8, 8>,
+    DeviceLayernormImpl<F16, F16, F16, F32, F16, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 32, 1, 8, 1, 8, 1, 8, 8>
     // clang-format on
     >;
 
-void add_device_layernorm_f16_rank2_instances(
-    std::vector<DeviceLayernormPtr<F16, F16, F16, F32, F16, Pass, 2, 1>>& instances)
+void add_device_layernorm_rank_2_1_f16_instances(
+    std::vector<std::unique_ptr<DeviceLayernorm<F16, F16, F16, F32, F16, Pass, 2, 1>>>& instances)
 {
-    add_device_operation_instances(instances, device_layernorm_f16_instances<2, 1>{});
+    add_device_operation_instances(instances, device_layernorm_f16_instances<Pass, 2, 1>{});
 }
 
-void add_device_layernorm_f16_rank4_instances(
-    std::vector<DeviceLayernormPtr<F16, F16, F16, F32, F16, Pass, 4, 3>>& instances)
+void add_device_layernorm_rank_4_3_f16_instances(
+    std::vector<std::unique_ptr<DeviceLayernorm<F16, F16, F16, F32, F16, Pass, 4, 3>>>& instances)
 {
-    add_device_operation_instances(instances, device_layernorm_f16_instances<4, 3>{});
+    add_device_operation_instances(instances, device_layernorm_f16_instances<Pass, 4, 3>{});
 }
 
+void add_device_layernorm_rank_5_3_f16_instances(
+    std::vector<std::unique_ptr<DeviceLayernorm<F16, F16, F16, F32, F16, Pass, 5, 3>>>& instances)
+{
+    add_device_operation_instances(instances, device_layernorm_f16_instances<Pass, 5, 3>{});
+}
+
 } // namespace instance
@@ -16,33 +16,39 @@ using F32 = float;

using Pass = ck::tensor_operation::element_wise::PassThrough;

template <index_t Rank, index_t Reduce>
template <typename OutElementwise, index_t Rank, index_t Reduce>
using device_layernorm_f32_instances = std::tuple<
    // clang-format off
    // XDataType, GammaDataType, BetaDataType, AccDataType, YDataType, Rank, NumReduceDim, BlockSize, MThreadClusterSize, KThreadClusterSize, MThreadSliceSize, KThreadSliceSize, XYSrcVectorDim, XSrcVectorSize, GammaSrcVectorSize, BetaSrcVectorSize, YDstVectorSize>
    DeviceLayernormImpl<F32, F32, F32, F32, F32, Pass, Rank, Reduce, 256, 8, 32, 1, 8, 1, 1, 1, 1, 1>, // fallback kernel
    DeviceLayernormImpl<F32, F32, F32, F32, F32, Pass, Rank, Reduce, 256, 8, 32, 1, 8, 1, 2, 2, 2, 2>, // fallback kernel
    DeviceLayernormImpl<F32, F32, F32, F32, F32, Pass, Rank, Reduce, 256, 8, 32, 1, 8, 1, 4, 4, 4, 4>,
    DeviceLayernormImpl<F32, F32, F32, F32, F32, Pass, Rank, Reduce, 256, 4, 64, 1, 8, 1, 4, 4, 4, 4>,
    DeviceLayernormImpl<F32, F32, F32, F32, F32, Pass, Rank, Reduce, 256, 2, 128, 1, 8, 1, 4, 4, 4, 4>,
    DeviceLayernormImpl<F32, F32, F32, F32, F32, Pass, Rank, Reduce, 256, 2, 128, 1, 16, 1, 4, 4, 4, 4>,
    DeviceLayernormImpl<F32, F32, F32, F32, F32, Pass, Rank, Reduce, 256, 2, 128, 1, 32, 1, 4, 4, 4, 4>,
    DeviceLayernormImpl<F32, F32, F32, F32, F32, Pass, Rank, Reduce, 256, 1, 256, 1, 8, 1, 4, 4, 4, 4>,
    DeviceLayernormImpl<F32, F32, F32, F32, F32, Pass, Rank, Reduce, 256, 1, 256, 1, 16, 1, 4, 4, 4, 4>,
    DeviceLayernormImpl<F32, F32, F32, F32, F32, Pass, Rank, Reduce, 256, 1, 256, 1, 32, 1, 4, 4, 4, 4>
    DeviceLayernormImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 8, 32, 1, 8, 1, 1, 1, 1, 1, 1, 1>, // fallback kernel
    DeviceLayernormImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 8, 32, 1, 8, 1, 2, 1, 2, 1, 2, 2>, // fallback kernel
    DeviceLayernormImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 8, 32, 1, 8, 1, 4, 1, 4, 1, 4, 4>,
    DeviceLayernormImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 4, 64, 1, 8, 1, 4, 1, 4, 1, 4, 4>,
    DeviceLayernormImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 2, 128, 1, 8, 1, 4, 1, 4, 1, 4, 4>,
    DeviceLayernormImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 2, 128, 1, 16, 1, 4, 1, 4, 1, 4, 4>,
    DeviceLayernormImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 2, 128, 1, 32, 1, 4, 1, 4, 1, 4, 4>,
    DeviceLayernormImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 8, 1, 4, 1, 4, 1, 4, 4>,
    DeviceLayernormImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 16, 1, 4, 1, 4, 1, 4, 4>,
    DeviceLayernormImpl<F32, F32, F32, F32, F32, OutElementwise, Rank, Reduce, 256, 1, 256, 1, 32, 1, 4, 1, 4, 1, 4, 4>
    // clang-format on
    >;

void add_device_layernorm_f32_rank2_instances(
    std::vector<DeviceLayernormPtr<F32, F32, F32, F32, F32, Pass, 2, 1>>& instances)
void add_device_layernorm_rank_2_1_f32_instances(
    std::vector<std::unique_ptr<DeviceLayernorm<F32, F32, F32, F32, F32, Pass, 2, 1>>>& instances)
{
    add_device_operation_instances(instances, device_layernorm_f32_instances<2, 1>{});
    add_device_operation_instances(instances, device_layernorm_f32_instances<Pass, 2, 1>{});
}

void add_device_layernorm_f32_rank4_instances(
    std::vector<DeviceLayernormPtr<F32, F32, F32, F32, F32, Pass, 4, 3>>& instances)
void add_device_layernorm_rank_4_3_f32_instances(
    std::vector<std::unique_ptr<DeviceLayernorm<F32, F32, F32, F32, F32, Pass, 4, 3>>>& instances)
{
    add_device_operation_instances(instances, device_layernorm_f32_instances<4, 3>{});
    add_device_operation_instances(instances, device_layernorm_f32_instances<Pass, 4, 3>{});
}

void add_device_layernorm_rank_5_3_f32_instances(
    std::vector<std::unique_ptr<DeviceLayernorm<F32, F32, F32, F32, F32, Pass, 5, 3>>>& instances)
{
    add_device_operation_instances(instances, device_layernorm_f32_instances<Pass, 5, 3>{});
}

} // namespace instance
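
These registration functions are consumed through the operation-instance factory. As a minimal sketch (mirroring the DeviceOperationInstanceFactory call used in profile_groupnorm_impl.hpp later in this diff), a client recovers the rank-5/3 f32 instances like so:

// Sketch: pull back every registered f32 rank-5/3 layernorm instance.
using DeviceOp =
    ck::tensor_operation::device::DeviceLayernorm<F32, F32, F32, F32, F32, Pass, 5, 3>;

const auto op_ptrs = ck::tensor_operation::device::instance::
    DeviceOperationInstanceFactory<DeviceOp>::GetInstances();
// Each pointer is then filtered with IsSupportedArgument(...) and launched via
// MakeInvokerPointer(), exactly as the profiler loop below does.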
@@ -23,6 +23,7 @@ set(PROFILER_SOURCE
    src/profile_conv_bwd_weight.cpp
    src/profile_grouped_conv_fwd.cpp
    src/profile_reduce.cpp
    src/profile_groupnorm.cpp
    src/profile_layernorm.cpp
    src/profile_normalization.cpp
)

207
profiler/include/profile_groupnorm_impl.hpp
Normal file
@@ -0,0 +1,207 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include <iomanip>

#include "ck/ck.hpp"

#include "ck/library/tensor_operation_instance/gpu/layernorm.hpp"

#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_groupnorm.hpp"

namespace ck {
namespace profiler {

template <typename XDataType,
          typename GammaDataType,
          typename BetaDataType,
          typename AccDataType,
          typename YDataType>
bool profile_groupnorm_impl(int do_verification,
                            int init_method,
                            bool do_log,
                            bool time_kernel,
                            std::vector<index_t> length)
{
    using PassThrough = ck::tensor_operation::element_wise::PassThrough;

    if(length.size() != 5)
        return false;

    index_t G = length[3];
    index_t C = length[4];

    std::vector<index_t> reduce_dim = {1, 2, 4};
    std::vector<index_t> gammaBetaLength = {G, C};
    std::vector<index_t> gammaBetaStride = {0, 0, 0, C, 1};

    Tensor<XDataType> x(length);
    Tensor<GammaDataType> gamma(gammaBetaLength);
    Tensor<BetaDataType> beta(gammaBetaLength);
    Tensor<YDataType> y(length);
    Tensor<YDataType> host_y(length);

    switch(init_method)
    {
    case 0:
        x.GenerateTensorValue(GeneratorTensor_1<XDataType>{});
        gamma.GenerateTensorValue(GeneratorTensor_1<GammaDataType>{});
        beta.GenerateTensorValue(GeneratorTensor_1<BetaDataType>{});
        break;
    case 1:
        x.GenerateTensorValue(GeneratorTensor_2<XDataType>{-5, 5});
        gamma.GenerateTensorValue(GeneratorTensor_2<GammaDataType>{-5, 5});
        beta.GenerateTensorValue(GeneratorTensor_2<BetaDataType>{-5, 5});
        break;
    default:
        x.GenerateTensorValue(GeneratorTensor_3<XDataType>{0, 1});
        gamma.GenerateTensorValue(GeneratorTensor_3<GammaDataType>{-0.5, 0.5});
        beta.GenerateTensorValue(GeneratorTensor_3<BetaDataType>{-0.5, 0.5});
    }

    DeviceMem x_dev(sizeof(XDataType) * x.mDesc.GetElementSpaceSize());
    DeviceMem gamma_dev(sizeof(GammaDataType) * gamma.mDesc.GetElementSpaceSize());
    DeviceMem beta_dev(sizeof(BetaDataType) * beta.mDesc.GetElementSpaceSize());
    DeviceMem y_dev(sizeof(YDataType) * y.mDesc.GetElementSpaceSize());

    x_dev.ToDevice(x.mData.data());
    gamma_dev.ToDevice(gamma.mData.data());
    beta_dev.ToDevice(beta.mData.data());

    // add device normalization instances
    using DeviceOp = ck::tensor_operation::device::DeviceLayernorm<XDataType,
                                                                   GammaDataType,
                                                                   BetaDataType,
                                                                   AccDataType,
                                                                   YDataType,
                                                                   PassThrough,
                                                                   5,
                                                                   3>;

    // get device op instances
    const auto instance_ptrs =
        ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
            DeviceOp>::GetInstances();

    std::cout << "found " << instance_ptrs.size() << " instances" << std::endl;

    std::string best_instance_name;
    float best_avg_time = std::numeric_limits<float>::max();
    float best_gb_per_sec = 0;

    if(do_verification)
    {
        using ReferenceInstance = ck::tensor_operation::host::ReferenceGroupnorm<XDataType,
                                                                                 GammaDataType,
                                                                                 BetaDataType,
                                                                                 YDataType,
                                                                                 AccDataType,
                                                                                 PassThrough>;

        ReferenceInstance ref;
        auto ref_argument = ref.MakeArgument(x, gamma, beta, host_y, PassThrough{}, length, 1e-6);
        auto ref_invoker  = ref.MakeInvoker();
        ref_invoker.Run(ref_argument);
    }

    int num_kernel = 0;

    for(auto& inst_ptr : instance_ptrs)
    {
        auto argument_ptr = inst_ptr->MakeArgumentPointer(
            length,
            std::vector<ck::index_t>{x.mDesc.GetStrides().begin(), x.mDesc.GetStrides().end()},
            gammaBetaStride,
            gammaBetaStride,
            std::vector<ck::index_t>{y.mDesc.GetStrides().begin(), y.mDesc.GetStrides().end()},
            reduce_dim,
            1e-6,
            x_dev.GetDeviceBuffer(),
            gamma_dev.GetDeviceBuffer(),
            beta_dev.GetDeviceBuffer(),
            y_dev.GetDeviceBuffer(),
            PassThrough{});

        if(inst_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            ++num_kernel;
        }
        else
        {
            continue;
        }

        auto invoker_ptr = inst_ptr->MakeInvokerPointer();

        float avg_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});

        std::size_t num_bytes = x.mDesc.GetElementSize() * sizeof(XDataType) +
                                gamma.mDesc.GetElementSize() * sizeof(GammaDataType) +
                                beta.mDesc.GetElementSize() * sizeof(BetaDataType) +
                                y.mDesc.GetElementSize() * sizeof(YDataType);

        float gb_per_sec = num_bytes / 1.E6 / avg_time;

        if(time_kernel)
            std::cout << "Perf: " << std::setw(10) << avg_time << " ms, " << gb_per_sec << " GB/s, "
                      << inst_ptr->GetTypeString() << std::endl;

        if(avg_time < best_avg_time)
        {
            best_instance_name = inst_ptr->GetTypeString();
            best_avg_time      = avg_time;
            best_gb_per_sec    = gb_per_sec;
        }

        if(do_verification)
        {
            y_dev.FromDevice(y.mData.data());

            bool pass =
                ck::utils::check_err(y.mData, host_y.mData, "Error: Incorrect results", 1e-3, 1e-3);

            if(do_log)
            {
                LogRangeAsType<float>(std::cout << "x : ", x.mData, ",") << std::endl;
                LogRangeAsType<float>(std::cout << "host_y : ", host_y.mData, ",") << std::endl;
                LogRangeAsType<float>(std::cout << "y : ", y.mData, ",") << std::endl;
            }

            if(!pass)
            {
                std::cout << inst_ptr->GetTypeString() << " failed verification: ";
                LogRange(std::cout << "lengths = [", length, ", ") << "]." << std::endl;
                return false;
            }
            else
            {
                if(time_kernel)
                    std::cout << "pass" << std::endl;
            }
        }
    }

    if(time_kernel)
    {
        LogRange(std::cout << "length = ", length, ",") << ", ";
        std::cout << "num_kernel = " << num_kernel << ", best perf = " << best_avg_time << " ms, "
                  << best_gb_per_sec << " GB/s, " << best_instance_name << std::endl;
    }

    if(num_kernel == 0)
    {
        std::cout << "Error: No kernel is tested" << std::endl;
        return false;
    }

    return true;
}

} // namespace profiler
} // namespace ck
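
The gammaBetaStride = {0, 0, 0, C, 1} above is what lets a (G, C) gamma/beta tensor drive the rank-5 layernorm kernel: zero strides on the N, H, W axes broadcast the same per-group scale and shift to every spatial position. A minimal sketch of the resulting index arithmetic (a hypothetical helper for illustration, not part of the kernel):

// With strides {0, 0, 0, C, 1}, the rank-5 offset collapses to g * C + c,
// so gamma and beta only need G * C elements each.
inline ck::index_t gamma_beta_offset(
    ck::index_t n, ck::index_t h, ck::index_t w, ck::index_t g, ck::index_t c, ck::index_t C)
{
    return n * 0 + h * 0 + w * 0 + g * C + c * 1; // == g * C + c
}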
@@ -6,8 +6,8 @@
#include <iomanip>

#include "ck/ck.hpp"
#include "profiler/include/data_type_enum.hpp"
#include "ck/tensor_operation/gpu/device/device_layernorm_impl.hpp"

#include "ck/library/tensor_operation_instance/gpu/layernorm.hpp"

#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
@@ -15,26 +15,6 @@
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_layernorm.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

using F16 = ck::half_t;
using F32 = float;
using PassThrough = ck::tensor_operation::element_wise::PassThrough;

void add_device_layernorm_f16_rank2_instances(
    std::vector<DeviceLayernormPtr<F16, F16, F16, F32, F16, PassThrough, 2, 1>>&);

void add_device_layernorm_f32_rank2_instances(
    std::vector<DeviceLayernormPtr<F32, F32, F32, F32, F32, PassThrough, 2, 1>>&);

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck

namespace ck {
namespace profiler {

@@ -53,8 +33,6 @@ void profile_layernorm_impl(int do_verification,
                            std::vector<index_t> strideGamma,
                            std::vector<index_t> strideBeta)
{
    using F16 = ck::half_t;
    using F32 = float;
    using PassThrough = ck::tensor_operation::element_wise::PassThrough;

    if(length.size() < 2)
@@ -103,37 +81,24 @@ void profile_layernorm_impl(int do_verification,
    gamma_dev.ToDevice(gamma.mData.data());
    beta_dev.ToDevice(beta.mData.data());

    // add device normalization instances
    constexpr int NumReduceDim = Rank - 1;
    std::vector<tensor_operation::device::DeviceLayernormPtr<XDataType,
                                                             GammaDataType,
                                                             BetaDataType,
                                                             AccDataType,
                                                             YDataType,
                                                             PassThrough,
                                                             Rank,
                                                             NumReduceDim>>
        instances;

    if constexpr(is_same<XDataType, F16>::value && is_same<GammaDataType, F16>::value &&
                 is_same<BetaDataType, F16>::value && is_same<YDataType, F16>::value &&
                 is_same<AccDataType, F32>::value)
    {
        if(length.size() == 2)
            tensor_operation::device::instance::add_device_layernorm_f16_rank2_instances(instances);
    }
    else if constexpr(is_same<XDataType, F32>::value && is_same<GammaDataType, F32>::value &&
                      is_same<BetaDataType, F32>::value && is_same<YDataType, F32>::value &&
                      is_same<AccDataType, F32>::value)
    {
        if(length.size() == 2)
            tensor_operation::device::instance::add_device_layernorm_f32_rank2_instances(instances);
    }
    // add device normalization instances
    using DeviceOp = ck::tensor_operation::device::DeviceLayernorm<XDataType,
                                                                   GammaDataType,
                                                                   BetaDataType,
                                                                   AccDataType,
                                                                   YDataType,
                                                                   PassThrough,
                                                                   Rank,
                                                                   NumReduceDim>;

    if(instances.size() <= 0)
    {
        throw std::runtime_error("wrong! no device normalization instance found");
    }
    // get device op instances
    const auto instance_ptrs =
        ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
            DeviceOp>::GetInstances();

    std::cout << "found " << instance_ptrs.size() << " instances" << std::endl;

    std::string best_instance_name;
    float best_avg_time = std::numeric_limits<float>::max();
@@ -157,7 +122,7 @@ void profile_layernorm_impl(int do_verification,
        ref_invoker.Run(ref_argument);
    }

    for(auto& inst_ptr : instances)
    for(auto& inst_ptr : instance_ptrs)
    {
        auto argument_ptr = inst_ptr->MakeArgumentPointer(length,
                                                          strideXY,
@@ -175,9 +140,9 @@ void profile_layernorm_impl(int do_verification,
        if(!inst_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            std::cout << inst_ptr->GetTypeString() << " skipped due to unsupported argument: ";
            LogRange(std::cout << "input lengths = [", length, "], ") << std::endl;
            LogRange(std::cout << "input lengths = ", length, ", ") << std::endl;

            return;
            continue;
        }

        auto invoker_ptr = inst_ptr->MakeInvokerPointer();

106
profiler/src/profile_groupnorm.cpp
Normal file
@@ -0,0 +1,106 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <vector>
#include <unordered_map>

#include "profiler/include/data_type_enum.hpp"
#include "profiler/include/profile_groupnorm_impl.hpp"

using ck::index_t;

struct GroupnormArgParser
{
    std::unordered_map<std::string, std::vector<int>> long_opts = {{"length", {}}};

    bool parse_opt(int argc, char* argv[], const std::string& key, int i)
    {
        if(std::string("--") + key == argv[i])
        {
            int pos = i;
            while(++i < argc && argv[i][0] != '-') {}
            int end = i;
            for(int j = pos + 1; j < end; j++)
            {
                long_opts[key].push_back(std::stoi(argv[j]));
            }
            return true;
        }
        return false;
    }

    void operator()(int argc, char* argv[])
    {
        for(auto& kv : long_opts)
        {
            for(int i = 1; i < argc; i++)
            {
                if(parse_opt(argc, argv, kv.first, i))
                    break;
            }
        }
    }
};

void print_help_groupnorm()
{
    std::cout << "arg1: tensor operation (groupnorm: Group normalization)\n"
              << "arg2: data type (0: fp16; 1: fp32)\n"
              << "arg3: verification (0: no; 1: yes)\n"
              << "arg4: initialization (0: no init; 1: integer value; 2: decimal value)\n"
              << "arg5: print tensor value (0: no; 1: yes)\n"
              << "arg6: time kernel (0=no, 1=yes)\n"
              << "--length: tensor extents (e.g, --length 1 16 16 32 40) \n"
              << std::endl;
}

int profile_groupnorm(int argc, char* argv[])
{
    ck::DataTypeEnum data_type = ck::DataTypeEnum::Half;
    bool do_verification = false;
    int init_method = 0;
    bool do_log = 0;
    bool time_kernel = 1;
    std::vector<index_t> length = {64, 16, 16, 32, 40};

    if(argc != 1 && argc != 13)
    {
        print_help_groupnorm();
        return 0;
    }

    if(argc == 13)
    {
        data_type = static_cast<ck::DataTypeEnum>(std::stoi(argv[2]));
        do_verification = std::stoi(argv[3]);
        init_method = std::stoi(argv[4]);
        do_log = std::stoi(argv[5]);
        time_kernel = std::stoi(argv[6]);

        // parse the long options
        GroupnormArgParser arg_parser;
        arg_parser(argc, argv);
        length = arg_parser.long_opts["length"];
    }

    using F16 = ck::half_t;
    using F32 = float;

    if(data_type == ck::DataTypeEnum::Float)
    {
        ck::profiler::profile_groupnorm_impl<F32, F32, F32, F32, F32>(
            do_verification, init_method, do_log, time_kernel, length);
    }
    else if(data_type == ck::DataTypeEnum::Half)
    {
        ck::profiler::profile_groupnorm_impl<F16, F16, F16, F32, F16>(
            do_verification, init_method, do_log, time_kernel, length);
    }
    else
    {
        throw std::runtime_error("not implemented yet");
    }

    return 0;
}
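
For reference, the argc == 13 check above corresponds to exactly one invocation shape: program name, operation name, five positional flags, the "--length" switch, and five extents. An illustrative run (binary name and values are examples, assuming the profiler builds as the ckProfiler mentioned in the commit message):

# argv[0..12]: ckProfiler groupnorm <dtype> <verify> <init> <log> <time> --length N H W G C
./ckProfiler groupnorm 0 1 2 0 1 --length 64 16 16 32 40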
@@ -5,6 +5,7 @@
#include <vector>
#include <unordered_map>

#include "profiler/include/data_type_enum.hpp"
#include "profiler/include/profile_layernorm_impl.hpp"

using ck::index_t;
@@ -49,7 +50,7 @@ void print_help_layernorm()
              << "arg2: verification (0: no; 1: yes)\n"
              << "arg3: initialization (0: no init; 1: integer value; 2: decimal value)\n"
              << "arg4: print tensor value (0: no; 1: yes)\n"
              << "arg5: time kernel (0=n0, 1=yes)\n"
              << "arg5: time kernel (0=no, 1=yes)\n"
              << "--length: tensor extents (e.g, --length 1024 1024) \n"
              << "--strideXY: tensor strides (e.g, --strideXY 1024 1)\n"
              << "--strideGamma: tensor strides (e.g, --strideGamma 1)\n"
@@ -114,10 +115,3 @@ int profile_layernorm(int argc, char* argv[])

    return 0;
}

// hijack main() for quick debugging
// int main(int argc, char* argv[])
// {
//     profile_layernorm(argc, argv);
//     return 0;
// }

@@ -3,26 +3,27 @@

#include <cstring>

int profile_gemm(int, char*[]);
int profile_gemm_splitk(int, char*[]);
int profile_gemm_bilinear(int, char*[]);
int profile_gemm_add_add_fastgelu(int, char*[]);
int profile_gemm_reduce(int, char*[]);
int profile_gemm_bias_add_reduce(int, char*[]);
int profile_batched_gemm(int, char*[]);
int profile_batched_gemm_gemm(int, char*[]);
int profile_batched_gemm_add_relu_gemm_add(int, char*[]);
int profile_batched_gemm_reduce(int, char*[]);
int profile_grouped_gemm(int, char*[]);
int profile_conv_fwd(int, char*[]);
int profile_conv_fwd_bias_relu(int, char*[]);
int profile_conv_fwd_bias_relu_add(int, char*[]);
int profile_conv_bwd_data(int, char*[]);
int profile_conv_bwd_weight(int, char*[]);
int profile_grouped_conv_fwd(int, char*[]);
int profile_normalization(int, char*[]);
// int profile_gemm(int, char*[]);
// int profile_gemm_splitk(int, char*[]);
// int profile_gemm_bilinear(int, char*[]);
// int profile_gemm_add_add_fastgelu(int, char*[]);
// int profile_gemm_reduce(int, char*[]);
// int profile_gemm_bias_add_reduce(int, char*[]);
// int profile_batched_gemm(int, char*[]);
// int profile_batched_gemm_gemm(int, char*[]);
// int profile_batched_gemm_add_relu_gemm_add(int, char*[]);
// int profile_batched_gemm_reduce(int, char*[]);
// int profile_grouped_gemm(int, char*[]);
// int profile_conv_fwd(int, char*[]);
// int profile_conv_fwd_bias_relu(int, char*[]);
// int profile_conv_fwd_bias_relu_add(int, char*[]);
// int profile_conv_bwd_data(int, char*[]);
// int profile_conv_bwd_weight(int, char*[]);
// int profile_grouped_conv_fwd(int, char*[]);
// int profile_normalization(int, char*[]);
int profile_layernorm(int, char*[]);
int profile_reduce(int, char*[]);
int profile_groupnorm(int, char*[]);
// int profile_reduce(int, char*[]);

static void print_helper_message()
{
@@ -56,6 +57,7 @@ int main(int argc, char* argv[])

        return 0;
    }
#if 0
    else if(strcmp(argv[1], "gemm") == 0)
    {
        return profile_gemm(argc, argv);
@@ -132,10 +134,15 @@ int main(int argc, char* argv[])
    {
        return profile_normalization(argc, argv);
    }
#endif
    else if(strcmp(argv[1], "layernorm") == 0)
    {
        return profile_layernorm(argc, argv);
    }
    else if(strcmp(argv[1], "groupnorm") == 0)
    {
        return profile_groupnorm(argc, argv);
    }
    else
    {
        print_helper_message();

@@ -1,10 +1,17 @@
add_custom_target(test_layernorm)

add_gtest_executable(test_layernorm_fp32 test_layernorm_fp32.cpp)
add_gtest_executable(test_layernorm_fp16 test_layernorm_fp16.cpp)
add_gtest_executable(test_layernorm2d_fp32 test_layernorm2d_fp32.cpp)
add_gtest_executable(test_layernorm2d_fp16 test_layernorm2d_fp16.cpp)
add_gtest_executable(test_groupnorm_fp16 test_groupnorm_fp16.cpp)
add_gtest_executable(test_groupnorm_fp32 test_groupnorm_fp32.cpp)

target_link_libraries(test_layernorm_fp32 PRIVATE utility)
target_link_libraries(test_layernorm_fp16 PRIVATE utility)
target_link_libraries(test_layernorm2d_fp32 PRIVATE utility)
target_link_libraries(test_layernorm2d_fp16 PRIVATE utility)
target_link_libraries(test_groupnorm_fp16 PRIVATE utility device_normalization_instance)
target_link_libraries(test_groupnorm_fp32 PRIVATE utility device_normalization_instance)

add_dependencies(test_layernorm test_layernorm2d_fp32)
add_dependencies(test_layernorm test_layernorm2d_fp16)
add_dependencies(test_layernorm test_groupnorm_fp16)
add_dependencies(test_layernorm test_groupnorm_fp32)

add_dependencies(test_layernorm test_layernorm_fp32)
add_dependencies(test_layernorm test_layernorm_fp16)

56
test/layernorm/test_groupnorm_fp16.cpp
Normal file
@@ -0,0 +1,56 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#include "gtest/gtest.h"
#include "profiler/include/profile_groupnorm_impl.hpp"

using F16 = ck::half_t;
using F32 = float;
using ck::index_t;

template <typename Tuple>
class TestGroupnorm : public ::testing::Test
{
    protected:
    using XDataType = std::tuple_element_t<0, Tuple>;
    using GammaDataType = std::tuple_element_t<1, Tuple>;
    using BetaDataType = std::tuple_element_t<2, Tuple>;
    using AccDataType = std::tuple_element_t<3, Tuple>;
    using YDataType = std::tuple_element_t<4, Tuple>;

    void Run()
    {
        // N, H, W, G, C
        std::vector<std::vector<ck::index_t>> lengths = {{1, 1, 1, 1, 1},
                                                         {1, 2, 3, 4, 5},
                                                         {256, 9, 9, 9, 9},
                                                         {1, 64, 64, 32, 10},
                                                         {1, 32, 32, 32, 20},
                                                         {1, 16, 16, 32, 40}};

        for(auto length : lengths)
        {
            bool success =
                ck::profiler::profile_groupnorm_impl<XDataType,
                                                     GammaDataType,
                                                     BetaDataType,
                                                     AccDataType,
                                                     YDataType>(true, 2, false, false, length);
            EXPECT_TRUE(success);
        }
    }
};

using KernelTypes = ::testing::Types<
    // XDataType, GammaDataType, BetaDataType, AccDataType, YDataType>
    std::tuple<F16, F16, F16, F32, F16>,
    std::tuple<F16, F16, F16, F32, F16>,
    std::tuple<F16, F16, F16, F32, F16>,
    std::tuple<F16, F16, F16, F32, F16>,
    std::tuple<F16, F16, F16, F32, F16>,
    std::tuple<F16, F16, F16, F32, F16>,
    std::tuple<F16, F16, F16, F32, F16>,
    std::tuple<F16, F16, F16, F32, F16>>;

TYPED_TEST_SUITE(TestGroupnorm, KernelTypes);
TYPED_TEST(TestGroupnorm, Test_FP16) { this->Run(); }
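
The test shapes above are (N, H, W, G, C), and the profiler reduces over dims {1, 2, 4}, i.e. H, W and the channels within each group, so statistics are computed per (n, g) pair. A naive scalar sketch of that semantics (an assumption for illustration; the actual ReferenceGroupnorm implementation is not part of this diff):

#include <cmath>
#include <vector>

// Hypothetical NHWGC groupnorm reference; names and layout are illustrative
// assumptions, not the library's ReferenceGroupnorm code. y must be presized.
void groupnorm_nhwgc_ref(const std::vector<float>& x, const std::vector<float>& gamma,
                         const std::vector<float>& beta, std::vector<float>& y,
                         int N, int H, int W, int G, int C, float eps)
{
    auto idx = [&](int n, int h, int w, int g, int c) {
        return (((n * H + h) * W + w) * G + g) * C + c; // packed NHWGC offset
    };
    for(int n = 0; n < N; ++n)
        for(int g = 0; g < G; ++g)
        {
            // accumulate mean and second moment over (H, W, C) for this (n, g)
            double sum = 0, sq = 0;
            for(int h = 0; h < H; ++h)
                for(int w = 0; w < W; ++w)
                    for(int c = 0; c < C; ++c)
                    {
                        double v = x[idx(n, h, w, g, c)];
                        sum += v;
                        sq += v * v;
                    }
            double cnt  = double(H) * W * C;
            double mean = sum / cnt;
            double var  = sq / cnt - mean * mean;
            // normalize, then apply the broadcast (G, C) scale and shift
            for(int h = 0; h < H; ++h)
                for(int w = 0; w < W; ++w)
                    for(int c = 0; c < C; ++c)
                    {
                        int i = idx(n, h, w, g, c);
                        y[i]  = gamma[g * C + c] *
                                   float((x[i] - mean) / std::sqrt(var + eps)) +
                               beta[g * C + c];
                    }
        }
}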
56
test/layernorm/test_groupnorm_fp32.cpp
Normal file
@@ -0,0 +1,56 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#include "gtest/gtest.h"
#include "profiler/include/profile_groupnorm_impl.hpp"

using F16 = ck::half_t;
using F32 = float;
using ck::index_t;

template <typename Tuple>
class TestGroupnorm : public ::testing::Test
{
    protected:
    using XDataType = std::tuple_element_t<0, Tuple>;
    using GammaDataType = std::tuple_element_t<1, Tuple>;
    using BetaDataType = std::tuple_element_t<2, Tuple>;
    using AccDataType = std::tuple_element_t<3, Tuple>;
    using YDataType = std::tuple_element_t<4, Tuple>;

    void Run()
    {
        // N, H, W, G, C
        std::vector<std::vector<ck::index_t>> lengths = {{1, 1, 1, 1, 1},
                                                         {1, 2, 3, 4, 5},
                                                         {256, 9, 9, 9, 9},
                                                         {1, 64, 64, 32, 10},
                                                         {1, 32, 32, 32, 20},
                                                         {1, 16, 16, 32, 40}};

        for(auto length : lengths)
        {
            bool success =
                ck::profiler::profile_groupnorm_impl<XDataType,
                                                     GammaDataType,
                                                     BetaDataType,
                                                     AccDataType,
                                                     YDataType>(true, 2, false, false, length);
            EXPECT_TRUE(success);
        }
    }
};

using KernelTypes = ::testing::Types<
    // XDataType, GammaDataType, BetaDataType, AccDataType, YDataType>
    std::tuple<F32, F32, F32, F32, F32>,
    std::tuple<F32, F32, F32, F32, F32>,
    std::tuple<F32, F32, F32, F32, F32>,
    std::tuple<F32, F32, F32, F32, F32>,
    std::tuple<F32, F32, F32, F32, F32>,
    std::tuple<F32, F32, F32, F32, F32>,
    std::tuple<F32, F32, F32, F32, F32>,
    std::tuple<F32, F32, F32, F32, F32>>;

TYPED_TEST_SUITE(TestGroupnorm, KernelTypes);
TYPED_TEST(TestGroupnorm, Test_FP32) { this->Run(); }
@@ -2,28 +2,28 @@
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#include "gtest/gtest.h"
#include "test_layernorm_util.hpp"
#include "test_layernorm2d_util.hpp"

template <ck::index_t N>
using I = ck::Number<N>;

template <typename Tuple>
class TestLayernormFP16 : public ck::TestLayernorm<Tuple>
class TestLayernorm2dFP16 : public ck::TestLayernorm2d<Tuple>
{
};

// clang-format off
using KernelTypes = ::testing::Types<
    // XDataType, GammaDataType, BetaDataType, AccDataType, YDataType, Rank, NumReduceDim, BlockSize, MThreadClusterSize, KThreadClusterSize, MThreadSliceSize, KThreadSliceSize, XYSrcVectorDim, XSrcVectorSize, , GammaSrcVectorSize, BetaSrcVectorSize, YDstVectorSize>
    std::tuple<ck::half_t, ck::half_t, ck::half_t, float, ck::half_t, I<2>, I<1>, I<256>, I<8>, I<32>, I<1>, I<8>, I<1>, I<8>, I<8>, I<8>, I<8>>,
    std::tuple<ck::half_t, ck::half_t, ck::half_t, float, ck::half_t, I<2>, I<1>, I<256>, I<8>, I<32>, I<2>, I<8>, I<1>, I<8>, I<8>, I<8>, I<8>>,
    std::tuple<ck::half_t, ck::half_t, ck::half_t, float, ck::half_t, I<2>, I<1>, I<256>, I<4>, I<64>, I<1>, I<8>, I<1>, I<8>, I<8>, I<8>, I<8>>,
    std::tuple<ck::half_t, ck::half_t, ck::half_t, float, ck::half_t, I<2>, I<1>, I<256>, I<4>, I<64>, I<2>, I<8>, I<1>, I<8>, I<8>, I<8>, I<8>>,
    std::tuple<ck::half_t, ck::half_t, ck::half_t, float, ck::half_t, I<2>, I<1>, I<256>, I<2>, I<128>, I<1>, I<8>, I<1>, I<8>, I<8>, I<8>, I<8>>,
    std::tuple<ck::half_t, ck::half_t, ck::half_t, float, ck::half_t, I<2>, I<1>, I<256>, I<2>, I<128>, I<2>, I<8>, I<1>, I<8>, I<8>, I<8>, I<8>>,
    std::tuple<ck::half_t, ck::half_t, ck::half_t, float, ck::half_t, I<2>, I<1>, I<256>, I<1>, I<256>, I<1>, I<8>, I<1>, I<8>, I<8>, I<8>, I<8>>,
    std::tuple<ck::half_t, ck::half_t, ck::half_t, float, ck::half_t, I<2>, I<1>, I<256>, I<1>, I<256>, I<2>, I<8>, I<1>, I<8>, I<8>, I<8>, I<8>>
    // XDataType, GammaDataType, BetaDataType, AccDataType, YDataType, Rank, NumReduceDim, BlockSize, MThreadClusterSize, KThreadClusterSize, MThreadSliceSize, KThreadSliceSize, XYSrcVectorDim, XSrcVectorSize, GammaSrcVectorDim, GammaSrcVectorSize, BetaSrcVectorDim, BetaSrcVectorSize, YDstVectorSize>
    std::tuple<ck::half_t, ck::half_t, ck::half_t, float, ck::half_t, I<2>, I<1>, I<256>, I<8>, I<32>, I<1>, I<8>, I<1>, I<8>, I<1>, I<8>, I<1>, I<8>, I<8>>,
    std::tuple<ck::half_t, ck::half_t, ck::half_t, float, ck::half_t, I<2>, I<1>, I<256>, I<8>, I<32>, I<2>, I<8>, I<1>, I<8>, I<1>, I<8>, I<1>, I<8>, I<8>>,
    std::tuple<ck::half_t, ck::half_t, ck::half_t, float, ck::half_t, I<2>, I<1>, I<256>, I<4>, I<64>, I<1>, I<8>, I<1>, I<8>, I<1>, I<8>, I<1>, I<8>, I<8>>,
    std::tuple<ck::half_t, ck::half_t, ck::half_t, float, ck::half_t, I<2>, I<1>, I<256>, I<4>, I<64>, I<2>, I<8>, I<1>, I<8>, I<1>, I<8>, I<1>, I<8>, I<8>>,
    std::tuple<ck::half_t, ck::half_t, ck::half_t, float, ck::half_t, I<2>, I<1>, I<256>, I<2>, I<128>, I<1>, I<8>, I<1>, I<8>, I<1>, I<8>, I<1>, I<8>, I<8>>,
    std::tuple<ck::half_t, ck::half_t, ck::half_t, float, ck::half_t, I<2>, I<1>, I<256>, I<2>, I<128>, I<2>, I<8>, I<1>, I<8>, I<1>, I<8>, I<1>, I<8>, I<8>>,
    std::tuple<ck::half_t, ck::half_t, ck::half_t, float, ck::half_t, I<2>, I<1>, I<256>, I<1>, I<256>, I<1>, I<8>, I<1>, I<8>, I<1>, I<8>, I<1>, I<8>, I<8>>,
    std::tuple<ck::half_t, ck::half_t, ck::half_t, float, ck::half_t, I<2>, I<1>, I<256>, I<1>, I<256>, I<2>, I<8>, I<1>, I<8>, I<1>, I<8>, I<1>, I<8>, I<8>>
    >;
// clang-format on
TYPED_TEST_SUITE(TestLayernormFP16, KernelTypes);
TYPED_TEST(TestLayernormFP16, Test_FP16) { this->Run(); }
TYPED_TEST_SUITE(TestLayernorm2dFP16, KernelTypes);
TYPED_TEST(TestLayernorm2dFP16, Test_FP16) { this->Run(); }
@@ -2,28 +2,28 @@
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#include "gtest/gtest.h"
#include "test_layernorm_util.hpp"
#include "test_layernorm2d_util.hpp"

template <ck::index_t N>
using I = ck::Number<N>;

template <typename Tuple>
class TestLayernormFP32 : public ck::TestLayernorm<Tuple>
class TestLayernorm2dFP32 : public ck::TestLayernorm2d<Tuple>
{
};

// clang-format off
using KernelTypes = ::testing::Types<
    // XDataType, GammaDataType, BetaDataType, AccDataType, YDataType, Rank, NumReduceDim, BlockSize, MThreadClusterSize, KThreadClusterSize, MThreadSliceSize, KThreadSliceSize, XYSrcVectorDim, XSrcVectorSize, , GammaSrcVectorSize, BetaSrcVectorSize, YDstVectorSize>
    std::tuple<float, float, float, float, float, I<2>, I<1>, I<256>, I<8>, I<32>, I<1>, I<8>, I<1>, I<4>, I<4>, I<4>, I<4>>,
    std::tuple<float, float, float, float, float, I<2>, I<1>, I<256>, I<8>, I<32>, I<2>, I<8>, I<1>, I<4>, I<4>, I<4>, I<4>>,
    std::tuple<float, float, float, float, float, I<2>, I<1>, I<256>, I<4>, I<64>, I<1>, I<8>, I<1>, I<4>, I<4>, I<4>, I<4>>,
    std::tuple<float, float, float, float, float, I<2>, I<1>, I<256>, I<4>, I<64>, I<2>, I<8>, I<1>, I<4>, I<4>, I<4>, I<4>>,
    std::tuple<float, float, float, float, float, I<2>, I<1>, I<256>, I<2>, I<128>, I<1>, I<8>, I<1>, I<4>, I<4>, I<4>, I<4>>,
    std::tuple<float, float, float, float, float, I<2>, I<1>, I<256>, I<2>, I<128>, I<2>, I<8>, I<1>, I<4>, I<4>, I<4>, I<4>>,
    std::tuple<float, float, float, float, float, I<2>, I<1>, I<256>, I<1>, I<256>, I<1>, I<8>, I<1>, I<4>, I<4>, I<4>, I<4>>,
    std::tuple<float, float, float, float, float, I<2>, I<1>, I<256>, I<1>, I<256>, I<2>, I<8>, I<1>, I<4>, I<4>, I<4>, I<4>>
    // XDataType, GammaDataType, BetaDataType, AccDataType, YDataType, Rank, NumReduceDim, BlockSize, MThreadClusterSize, KThreadClusterSize, MThreadSliceSize, KThreadSliceSize, XYSrcVectorDim, XSrcVectorSize, GammaSrcVectorDim, GammaSrcVectorSize, BetaSrcVectorDim, BetaSrcVectorSize, YDstVectorSize>
    std::tuple<float, float, float, float, float, I<2>, I<1>, I<256>, I<8>, I<32>, I<1>, I<8>, I<1>, I<4>, I<1>, I<4>, I<1>, I<4>, I<4>>,
    std::tuple<float, float, float, float, float, I<2>, I<1>, I<256>, I<8>, I<32>, I<2>, I<8>, I<1>, I<4>, I<1>, I<4>, I<1>, I<4>, I<4>>,
    std::tuple<float, float, float, float, float, I<2>, I<1>, I<256>, I<4>, I<64>, I<1>, I<8>, I<1>, I<4>, I<1>, I<4>, I<1>, I<4>, I<4>>,
    std::tuple<float, float, float, float, float, I<2>, I<1>, I<256>, I<4>, I<64>, I<2>, I<8>, I<1>, I<4>, I<1>, I<4>, I<1>, I<4>, I<4>>,
    std::tuple<float, float, float, float, float, I<2>, I<1>, I<256>, I<2>, I<128>, I<1>, I<8>, I<1>, I<4>, I<1>, I<4>, I<1>, I<4>, I<4>>,
    std::tuple<float, float, float, float, float, I<2>, I<1>, I<256>, I<2>, I<128>, I<2>, I<8>, I<1>, I<4>, I<1>, I<4>, I<1>, I<4>, I<4>>,
    std::tuple<float, float, float, float, float, I<2>, I<1>, I<256>, I<1>, I<256>, I<1>, I<8>, I<1>, I<4>, I<1>, I<4>, I<1>, I<4>, I<4>>,
    std::tuple<float, float, float, float, float, I<2>, I<1>, I<256>, I<1>, I<256>, I<2>, I<8>, I<1>, I<4>, I<1>, I<4>, I<1>, I<4>, I<4>>
    >;
// clang-format on
TYPED_TEST_SUITE(TestLayernormFP32, KernelTypes);
TYPED_TEST(TestLayernormFP32, Test_FP32) { this->Run(); }
TYPED_TEST_SUITE(TestLayernorm2dFP32, KernelTypes);
TYPED_TEST(TestLayernorm2dFP32, Test_FP32) { this->Run(); }
@@ -31,7 +31,7 @@ std::string serialize_range(const Range& range)
}

template <typename Tuple>
class TestLayernorm : public ::testing::Test
class TestLayernorm2d : public ::testing::Test
{
    protected:
    using XDataType = std::tuple_element_t<0, Tuple>;
@@ -48,9 +48,11 @@ class TestLayernorm : public ::testing::Test
    static constexpr index_t KThreadSliceSize = std::tuple_element_t<11, Tuple>{}.value;
    static constexpr index_t XYSrcVectorDim = std::tuple_element_t<12, Tuple>{}.value;
    static constexpr index_t XSrcVectorSize = std::tuple_element_t<13, Tuple>{}.value;
    static constexpr index_t GammaSrcVectorSize = std::tuple_element_t<14, Tuple>{}.value;
    static constexpr index_t BetaSrcVectorSize = std::tuple_element_t<15, Tuple>{}.value;
    static constexpr index_t YDstVectorSize = std::tuple_element_t<16, Tuple>{}.value;
    static constexpr index_t GammaSrcVectorDim = std::tuple_element_t<14, Tuple>{}.value;
    static constexpr index_t GammaSrcVectorSize = std::tuple_element_t<15, Tuple>{}.value;
    static constexpr index_t BetaSrcVectorDim = std::tuple_element_t<16, Tuple>{}.value;
    static constexpr index_t BetaSrcVectorSize = std::tuple_element_t<17, Tuple>{}.value;
    static constexpr index_t YDstVectorSize = std::tuple_element_t<18, Tuple>{}.value;

    using PassThrough = ck::tensor_operation::element_wise::PassThrough;

@@ -78,23 +80,24 @@ class TestLayernorm : public ::testing::Test
                        KThreadSliceSize,
                        XYSrcVectorDim,
                        XSrcVectorSize,
                        GammaSrcVectorDim,
                        GammaSrcVectorSize,
                        BetaSrcVectorDim,
                        BetaSrcVectorSize,
                        YDstVectorSize>;

    TestLayernorm() : ref_instance_invoker_(ReferenceInstance{}.MakeInvoker()) {}
    TestLayernorm2d() : ref_instance_invoker_(ReferenceInstance{}.MakeInvoker()) {}

    void RunSingle(std::vector<index_t> lengths, std::vector<index_t> reduceDims)
    void RunSingle(const std::vector<index_t>& lengths,
                   const std::vector<index_t>& reduceDims,
                   const std::vector<index_t>& GammaLength,
                   const std::vector<index_t>& GammaStride,
                   const std::vector<index_t>& BetaLength,
                   const std::vector<index_t>& BetaStride)
    {
        std::vector<index_t> reduceLength(reduceDims.size());
        for(int i = 0; i < NumReduceDim; ++i)
        {
            reduceLength[i] = lengths[reduceDims[i]];
        }

        Tensor<XDataType> x(lengths);
        Tensor<GammaDataType> gamma(reduceLength);
        Tensor<BetaDataType> beta(reduceLength);
        Tensor<GammaDataType> gamma(GammaLength);
        Tensor<BetaDataType> beta(BetaLength);
        Tensor<YDataType> y(lengths);
        Tensor<YDataType> y_ref(lengths);

@@ -115,10 +118,8 @@ class TestLayernorm : public ::testing::Test
        auto argument_ptr = device_instance.MakeArgumentPointer(
            lengths,
            std::vector<ck::index_t>{x.mDesc.GetStrides().begin(), x.mDesc.GetStrides().end()},
            std::vector<ck::index_t>{gamma.mDesc.GetStrides().begin(),
                                     gamma.mDesc.GetStrides().end()},
            std::vector<ck::index_t>{beta.mDesc.GetStrides().begin(),
                                     beta.mDesc.GetStrides().end()},
            GammaStride,
            BetaStride,
            std::vector<ck::index_t>{y.mDesc.GetStrides().begin(), y.mDesc.GetStrides().end()},
            reduceDims,
            1e-4,
@@ -163,17 +164,16 @@ class TestLayernorm : public ::testing::Test

    void Run()
    {
        for(auto length : this->lengths_)
        std::vector<std::vector<index_t>> lengths = {
            {4, 256}, {8, 511}, {9, 1032}, {4, 2048}, {1, 8192}, {4000, 2000}};

        for(auto length : lengths)
        {
            this->RunSingle(length, reduceDims_[0]);
            this->RunSingle(length, {1}, {length[1]}, {0, 1}, {length[1]}, {0, 1});
        }
    }

    std::vector<std::vector<index_t>> lengths_ = {
        {4, 256}, {8, 511}, {9, 1032}, {4, 2048}, {1, 8192}, {4000, 2000}};

    std::vector<std::vector<index_t>> reduceDims_ = {{1}};

    typename ReferenceInstance::Invoker ref_instance_invoker_;
};

} // namespace ck