// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2025, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck_tile/core.hpp"
#include "ck_tile/ops/gemm/warp/warp_gemm_dispatcher.hpp"
#include "ck_tile/ops/common/tensor_layout.hpp"

namespace ck_tile {

template <typename ADataType_,
          typename BDataType_,
          typename DsDataType_,
          typename AccDataType_,
          typename ODataType_,
          typename DsLayout_,
          typename ELayout_,
          typename CDElementwise_,
          index_t kBlockSize_,
          index_t kM_,
          index_t kN_,
          index_t MWave_,
          index_t NWave_,
          index_t MPerXdl_,
          index_t NPerXdl_,
          index_t KPerXdl_,
          bool isCTransposed_,
          memory_operation_enum MemoryOperation_,
          index_t kNumWaveGroups_ = 1,
          bool FixedVectorSize_ = false,
          index_t VectorSizeC_ = 1,
          bool TiledMMAPermuteN_ = false>
struct CShuffleEpilogueProblem
{
    using ADataType     = remove_cvref_t<ADataType_>;
    using BDataType     = remove_cvref_t<BDataType_>;
    using AccDataType   = remove_cvref_t<AccDataType_>;
    using ODataType     = remove_cvref_t<ODataType_>;
    using DsDataType    = remove_cvref_t<DsDataType_>;
    using DsLayout      = remove_cvref_t<DsLayout_>;
    using ELayout       = remove_cvref_t<ELayout_>;
    using CDElementwise = remove_cvref_t<CDElementwise_>;
    static constexpr index_t kBlockSize = kBlockSize_;
    static constexpr index_t kMPerBlock = kM_;
    static constexpr index_t kNPerBlock = kN_;
    static constexpr index_t MWave      = MWave_;
    static constexpr index_t NWave      = NWave_;
    static constexpr index_t MPerXdl    = MPerXdl_;
    static constexpr index_t NPerXdl    = NPerXdl_;
    static constexpr index_t KPerXdl    = KPerXdl_;
    static constexpr index_t isCTransposed                 = isCTransposed_;
    static constexpr memory_operation_enum MemoryOperation = MemoryOperation_;
    static constexpr bool FixedVectorSize                  = FixedVectorSize_;
    static constexpr index_t VectorSizeC                   = VectorSizeC_;
    static constexpr bool TiledMMAPermuteN                 = TiledMMAPermuteN_;
    static constexpr index_t kNumWaveGroups                = kNumWaveGroups_;
    static constexpr index_t NumDTensor                    = DsDataType::size();

    static_assert(NumDTensor == DsLayout::size(),
                  "The size of DsDataType and DsLayout should be the same");
};
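
// Illustrative only: a minimal sketch of how a problem/epilogue pair might be
// instantiated, assuming an fp16 GEMM with fp32 accumulation, no fused D tensors
// and a row-major output. "PassThroughOp" is a placeholder for whatever
// CDElementwise functor the caller actually uses; it is not defined in this header.
//
//   using Problem = CShuffleEpilogueProblem<half_t,             // ADataType
//                                           half_t,             // BDataType
//                                           tuple<>,            // DsDataType (no D tensors)
//                                           float,              // AccDataType
//                                           half_t,             // ODataType
//                                           tuple<>,            // DsLayout
//                                           tensor_layout::gemm::RowMajor, // ELayout
//                                           PassThroughOp,      // CDElementwise (placeholder)
//                                           256,                // kBlockSize (4 waves of 64 lanes)
//                                           128, 128,           // kM, kN per block
//                                           2, 2,               // MWave, NWave
//                                           32, 32, 16,         // MPerXdl, NPerXdl, KPerXdl
//                                           false,              // isCTransposed
//                                           memory_operation_enum::set>;
//   using Epilogue = CShuffleEpilogue<Problem>;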

template <typename Problem_, typename Policy_ = void>
struct CShuffleEpilogue
{
    using Problem     = remove_cvref_t<Problem_>;
    using ADataType   = remove_cvref_t<typename Problem::ADataType>;
    using BDataType   = remove_cvref_t<typename Problem::BDataType>;
    using AccDataType = remove_cvref_t<typename Problem::AccDataType>;
    using ODataType   = remove_cvref_t<typename Problem::ODataType>;
    using DsDataType  = remove_cvref_t<typename Problem::DsDataType>;
    using DsLayout    = remove_cvref_t<typename Problem::DsLayout>;
    // For the weight-only quantization kernel, B is dequantized to the same data type as A.
    using BTypeToUse =
        std::conditional_t<std::is_same_v<BDataType, pk_int4_t>, ADataType, BDataType>;
    using ELayout       = remove_cvref_t<typename Problem::ELayout>;
    using CDElementwise = remove_cvref_t<typename Problem::CDElementwise>;
    static constexpr memory_operation_enum MemoryOperation = Problem::MemoryOperation;
    static constexpr index_t kBlockSize      = Problem::kBlockSize;
    static constexpr index_t kMPerBlock      = Problem::kMPerBlock;
    static constexpr index_t kNPerBlock      = Problem::kNPerBlock;
    static constexpr index_t MWave           = Problem::MWave;
    static constexpr index_t NWave           = Problem::NWave;
    static constexpr index_t MPerXdl         = Problem::MPerXdl;
    static constexpr index_t NPerXdl         = Problem::NPerXdl;
    static constexpr index_t KPerXdl         = Problem::KPerXdl;
    static constexpr index_t isCTransposed   = Problem::isCTransposed;
    static constexpr bool FixedVectorSize    = Problem::FixedVectorSize;
    static constexpr bool TiledMMAPermuteN   = Problem::TiledMMAPermuteN;
    static constexpr index_t VectorSizeC     = Problem::VectorSizeC;
    static constexpr index_t MPerIteration   = MPerXdl * MWave;
    static constexpr index_t NPerIteration   = NPerXdl * NWave;
    static constexpr index_t NumDTensor      = Problem::NumDTensor;
    static constexpr index_t MRepeat         = kMPerBlock / (MPerXdl * MWave);
    static constexpr index_t NRepeat         = kNPerBlock / (NPerXdl * NWave);

    static_assert(NumDTensor == DsLayout::size(),
                  "The size of DsDataType and DsLayout should be the same");
    /**
     * @brief Get the vector store size for the C tensor.
     *
     * @note The vector store size for the output C tensor depends on multiple factors,
     *       such as its data layout and warp GEMM C transposition. In general it is
     *       the number of consecutive elements in the contiguous C dimension held by
     *       a single thread.
     *
     * @return The vector store size for the C tensor.
     */
    CK_TILE_HOST_DEVICE static constexpr index_t GetVectorSizeC()
    {
        if constexpr(FixedVectorSize)
        {
            return VectorSizeC;
        }
        constexpr index_t max_vector_size = 16;
        if constexpr(std::is_same_v<ELayout, tensor_layout::gemm::RowMajor>)
        {
            return std::min(static_cast<int>(NPerIteration),
                            static_cast<int>(max_vector_size / sizeof(ODataType)));
        }
        else if constexpr(std::is_same_v<ELayout, tensor_layout::gemm::ColumnMajor>)
        {
            return std::min(static_cast<int>(MPerIteration),
                            static_cast<int>(max_vector_size / sizeof(ODataType)));
        }
        else
        {
            static_assert(false, "Unsupported ELayout!");
        }
    }
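
    // Worked example (illustrative, not an additional constraint): with the 16-byte
    // maximum vector width above, a half_t/bf16_t output (2 bytes per element) yields
    // min(NPerIteration, 8) elements per store for a row-major E, while an fp32 output
    // (4 bytes) yields min(NPerIteration, 4). When FixedVectorSize is set, the
    // problem-supplied VectorSizeC is returned unchanged.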

    /**
     * @brief Get the vector access size for the Di tensor.
     *
     * @return The vector access (load) size for the Di tensor.
     */
    template <index_t I>
    CK_TILE_HOST_DEVICE static constexpr index_t GetVectorSizeD(number<I> index)
    {
        constexpr index_t max_vector_size = 16;
        using DiDataType = remove_cvref_t<std::tuple_element_t<index.value, DsDataType>>;
        using DiLayout   = remove_cvref_t<std::tuple_element_t<index.value, DsLayout>>;
        if constexpr(std::is_same_v<DiLayout, tensor_layout::gemm::RowMajor>)
        {
            return std::min(static_cast<int>(NPerIteration),
                            static_cast<int>(max_vector_size / sizeof(DiDataType)));
        }
        else if constexpr(std::is_same_v<DiLayout, tensor_layout::gemm::ColumnMajor>)
        {
            return std::min(static_cast<int>(MPerIteration),
                            static_cast<int>(max_vector_size / sizeof(DiDataType)));
        }
        else
        {
            static_assert(false, "Unsupported DLayout!");
        }
        // Fallback return; not reached for the supported layouts above.
        return max_vector_size / sizeof(DiDataType);
    }
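
    // Illustrative example: for a row-major fp32 D tensor the load width works out to
    // min(NPerIteration, 16 / 4) = 4 elements per thread access; for a half_t D tensor
    // it would be min(NPerIteration, 8).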
    /**
     * @brief Shuffle tile configuration parameters
     *
     * @details These parameters control the number of XDL tiles processed per wave in each shuffle
     * iteration:
     * - NumMXdlPerWavePerShuffle: Number of XDL tiles in M dimension processed per wave
     * - NumNXdlPerWavePerShuffle: Number of XDL tiles in N dimension processed per wave
     */
    static constexpr auto shuffle_tile_tuple = [] {
        constexpr index_t elem_per_thread = MPerXdl * NPerXdl / get_warp_size();
        if constexpr(elem_per_thread >= GetVectorSizeC())
        {
            return std::make_tuple(1, 1);
        }
        else
        {
            constexpr index_t num_xdl_shuffles = GetVectorSizeC() / elem_per_thread;
            if constexpr(std::is_same_v<ELayout, tensor_layout::gemm::RowMajor>)
            {
                static_assert((kMPerBlock % (MPerXdl * MWave) == 0) &&
                                  (kMPerBlock % num_xdl_shuffles == 0),
                              "kMPerBlock must be divisible by MPerXdl*MWave and "
                              "num_xdl_shuffles for CShuffleEpilogue");
                return std::make_tuple(min(num_xdl_shuffles, kMPerBlock / (MPerXdl * MWave)), 1);
            }
            else
            {
                static_assert((kNPerBlock % (NPerXdl * NWave) == 0) &&
                                  (kNPerBlock % num_xdl_shuffles == 0),
                              "kNPerBlock must be divisible by NPerXdl*NWave and "
                              "num_xdl_shuffles for CShuffleEpilogue");
                return std::make_tuple(1, min(num_xdl_shuffles, kNPerBlock / (NPerXdl * NWave)));
            }
        }
    }();
    static constexpr index_t NumMXdlPerWavePerShuffle = std::get<0>(shuffle_tile_tuple);
    static constexpr index_t NumNXdlPerWavePerShuffle = std::get<1>(shuffle_tile_tuple);

    static constexpr auto MNPerIterationShuffle = [] {
        constexpr index_t m_val = MPerXdl * MWave * NumMXdlPerWavePerShuffle;
        constexpr index_t n_val = NPerXdl * NWave * NumNXdlPerWavePerShuffle;
        if constexpr(kMPerBlock % m_val != 0 || kNPerBlock % n_val != 0)
            return std::make_tuple(MPerXdl * MWave, NPerXdl * NWave);
        else
            return std::make_tuple(m_val, n_val);
    }();
    static constexpr index_t MPerIterationShuffle = std::get<0>(MNPerIterationShuffle);
    static constexpr index_t NPerIterationShuffle = std::get<1>(MNPerIterationShuffle);
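
    // Illustrative example of the selection above, assuming a 64-lane wavefront: with
    // MPerXdl = NPerXdl = 32, each thread holds 32 * 32 / 64 = 16 accumulator elements
    // per XDL tile. For a half_t output GetVectorSizeC() is at most 8, so
    // elem_per_thread >= vector size and a single (1, 1) XDL tile is shuffled per
    // iteration. With MPerXdl = NPerXdl = 16, elem_per_thread is 4, so an 8-wide store
    // groups num_xdl_shuffles = 2 XDL tiles along M (for a row-major E), provided the
    // block tile divides evenly as the static_asserts require.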

    using WG = WarpGemmMfmaDispatcher<ADataType,
                                      BTypeToUse,
                                      AccDataType,
                                      MPerXdl,
                                      NPerXdl,
                                      KPerXdl,
                                      isCTransposed>;

    using CWarpDstr   = typename WG::CWarpDstr;
    using CWarpTensor = typename WG::CWarpTensor;

    template <typename Problem>
    CK_TILE_HOST_DEVICE static constexpr auto MakeLdsBlockDescriptor()
    {
        // N is contiguous dimension
        if constexpr(std::is_same_v<ELayout, tensor_layout::gemm::RowMajor>)
        {
            return make_naive_tensor_descriptor(
                make_tuple(number<MPerIterationShuffle>{}, number<NPerIterationShuffle>{}),
                make_tuple(number<NPerIterationShuffle>{}, number<1>{}));
        }
        // M is contiguous dimension
        else if constexpr(std::is_same_v<ELayout, tensor_layout::gemm::ColumnMajor>)
        {
            return make_naive_tensor_descriptor(
                make_tuple(number<MPerIterationShuffle>{}, number<NPerIterationShuffle>{}),
                make_tuple(number<1>{}, number<MPerIterationShuffle>{}));
        }
        else
        {
            static_assert(false, "Unsupported ELayout!");
        }
    }
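
    // Illustrative mapping (assuming a row-major E): the LDS staging tile is a plain
    // MPerIterationShuffle x NPerIterationShuffle array, so element (m, n) lands at
    // offset m * NPerIterationShuffle + n. For the column-major case the strides are
    // swapped, i.e. offset n * MPerIterationShuffle + m.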

    CK_TILE_DEVICE static constexpr auto MakeLdsDistributionEncode()
    {
        // Outer encoding: the MWave x NWave waves are spread over the P (thread-partition)
        // dimension, while the per-wave XDL repeats become Y dimensions. Embedding the
        // warp-level C distribution then yields the per-thread layout of one shuffle tile.
        constexpr auto block_outer_dstr_encoding =
            tile_distribution_encoding<sequence<>,
                                       tuple<sequence<NumMXdlPerWavePerShuffle, MWave>,
                                             sequence<NumNXdlPerWavePerShuffle, NWave>>,
                                       tuple<sequence<1, 2>>,
                                       tuple<sequence<1, 1>>,
                                       sequence<1, 2>,
                                       sequence<0, 0>>{};
        constexpr auto block_dstr_encoding = detail::make_embed_tile_distribution_encoding(
            block_outer_dstr_encoding, typename CWarpDstr::DstrEncode{});

        return block_dstr_encoding;
    }

    CK_TILE_HOST_DEVICE static constexpr index_t GetSmemSize()
    {
        return MPerIterationShuffle * NPerIterationShuffle * sizeof(ODataType);
    }
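
    // Sizing example (illustrative): a 64 x 64 shuffle tile of half_t output needs
    // 64 * 64 * 2 = 8 KiB of LDS. The same LDS window is reused for every iteration of
    // the space-filling curve below, so this is also the total epilogue LDS requirement.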

    /**
     * @brief Register-only output path for TiledMMAPermuteN: accumulators are converted
     *        to ODataType and written out one M repeat at a time in the N-permuted
     *        layout; p_smem is unused and the D windows are only advanced, not read.
     */
    template <typename ODramWindow,
              typename OAccTile,
              typename DsDramWindows,
              int EnablePermuateN_ = TiledMMAPermuteN,
              std::enable_if_t<EnablePermuateN_, int> = 0>
    CK_TILE_DEVICE auto operator()(ODramWindow& out_dram_window,
                                   const OAccTile& o_acc_tile,
                                   const DsDramWindows& ds_dram_windows,
                                   void* p_smem)
    {
        constexpr int kM0 = MWave;
        constexpr int kM2 = 4;
        constexpr int kM1 = MPerXdl / kM2;

        constexpr int kN0 = NWave;
        constexpr int kN1 = NPerXdl;
        constexpr int kN2 = NRepeat;

        using IntrThreadShuffleEncode =
            tile_distribution_encoding<sequence<>,
                                       tuple<sequence<kM0, kM1, kM2>, sequence<kN0, kN1, kN2>>,
                                       tuple<sequence<1, 2>, sequence<1, 2>>,
                                       tuple<sequence<0, 0>, sequence<1, 1>>,
                                       sequence<1, 2>,
                                       sequence<2, 2>>;
        constexpr auto dram_tile_distribution =
            make_static_tile_distribution(IntrThreadShuffleEncode{});

        auto d_dram_windows = generate_tuple(
            [&](auto idx) {
                return make_tile_window(ds_dram_windows[idx], dram_tile_distribution);
            },
            number<NumDTensor>{});

        constexpr auto c_warp_y_lengths =
            to_sequence(CWarpDstr{}.get_ys_to_d_descriptor().get_lengths());
        constexpr auto c_warp_y_index_zeros = uniform_sequence_gen_t<CWarpDstr::NDimY, 0>{};

        auto shuffle_acc  = make_static_distributed_tensor<AccDataType>(dram_tile_distribution);
        auto c_out_tensor = make_static_distributed_tensor<ODataType>(dram_tile_distribution);
        // auto c_out_tensor = make_static_distributed_tensor<ODataType>(dram_tile_distribution);

        static_for<0, MRepeat, 1>{}([&](auto mIter) {
            shuffle_acc.get_thread_buffer() = o_acc_tile.get_y_sliced_thread_data(
                merge_sequences(sequence<mIter, 0>{}, c_warp_y_index_zeros),
                merge_sequences(sequence<1, NRepeat>{}, c_warp_y_lengths));

            static_for<0, NRepeat, 1>{}([&](auto n_idx) {
                c_out_tensor.get_thread_buffer()[n_idx + 0 * NRepeat] = type_convert<ODataType>(
                    shuffle_acc.get_thread_buffer()[n_idx * c_warp_y_lengths.product() + 0]);
                c_out_tensor.get_thread_buffer()[n_idx + 1 * NRepeat] = type_convert<ODataType>(
                    shuffle_acc.get_thread_buffer()[n_idx * c_warp_y_lengths.product() + 1]);
                c_out_tensor.get_thread_buffer()[n_idx + 2 * NRepeat] = type_convert<ODataType>(
                    shuffle_acc.get_thread_buffer()[n_idx * c_warp_y_lengths.product() + 2]);
                c_out_tensor.get_thread_buffer()[n_idx + 3 * NRepeat] = type_convert<ODataType>(
                    shuffle_acc.get_thread_buffer()[n_idx * c_warp_y_lengths.product() + 3]);
            });

            // c_out_tensor = cast_tile<ODataType>(c_out_tensor_fp32);

            if constexpr(MemoryOperation == memory_operation_enum::set)
            {
                store_tile(out_dram_window, c_out_tensor);
            }
            else
            {
                update_tile(out_dram_window, c_out_tensor);
            }
            move_tile_window(out_dram_window, {number<MPerXdl * MWave>{}, number<0>{}});

            static_for<0, NumDTensor, 1>{}([&](auto idx) {
                move_tile_window(d_dram_windows[idx], {number<MPerXdl * MWave>{}, number<0>{}});
            });
        });
    }
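
    // Illustrative view of the permute step above, assuming a warp GEMM where each lane
    // holds 4 accumulator values per XDL tile (c_warp_y_lengths.product() == 4) and
    // NRepeat == 2: the thread-local accumulator order
    //   [a0 a1 a2 a3 | b0 b1 b2 b3]   (a = N-tile 0, b = N-tile 1)
    // is written to c_out_tensor as
    //   [a0 b0 | a1 b1 | a2 b2 | a3 b3],
    // i.e. the NRepeat index becomes the fastest-varying output index.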

    /**
     * @brief General C-shuffle path: each shuffle tile of accumulators is staged through
     *        LDS to regroup it for wide stores, combined with the loaded D tensors via
     *        CDElementwise, and then written to the output window according to
     *        MemoryOperation (store or update).
     */
    template <typename ODramWindow,
              typename OAccTile,
              typename DsDramWindows,
              int EnablePermuateN_ = TiledMMAPermuteN,
              std::enable_if_t<!EnablePermuateN_, int> = 0>
    CK_TILE_DEVICE auto operator()(ODramWindow& out_dram_window,
                                   const OAccTile& o_acc_tile,
                                   const DsDramWindows& ds_dram_windows,
                                   void* p_smem)
    {
        constexpr auto LdsTileDistr = make_static_tile_distribution(MakeLdsDistributionEncode());

        auto lds_tile = make_static_distributed_tensor<AccDataType>(LdsTileDistr);

        constexpr auto lds_block_desc = MakeLdsBlockDescriptor<Problem>();
        auto o_lds_block = make_tensor_view<address_space_enum::lds>(
            static_cast<ODataType*>(p_smem), lds_block_desc);

        auto in_lds_window = make_tile_window(
            o_lds_block,
            make_tuple(number<MPerIterationShuffle>{}, number<NPerIterationShuffle>{}),
            {0, 0},
            LdsTileDistr);

        auto out_lds_window = make_tile_window(
            o_lds_block,
            make_tuple(number<MPerIterationShuffle>{}, number<NPerIterationShuffle>{}),
            {0, 0});

        using SFC = space_filling_curve<sequence<kMPerBlock, kNPerBlock>,
                                        sequence<0, 1>,
                                        sequence<MPerIterationShuffle, NPerIterationShuffle>>;
        constexpr index_t num_access = SFC::get_num_of_access();

        static_assert(std::is_same_v<ELayout, tensor_layout::gemm::RowMajor>,
                      "Currently, the CShuffle Epilogue only supports the Row Major Output layout");

        using TileEncodingPattern =
            TileDistributionEncodingPattern2D<kBlockSize,
                                              MPerIterationShuffle,
                                              NPerIterationShuffle,
                                              GetVectorSizeC(),
                                              tile_distribution_pattern::thread_raked,
                                              Problem::kNumWaveGroups>;
        constexpr auto dram_tile_distribution = TileEncodingPattern::Make2DStaticTileDistribution();

        auto d_dram_windows = generate_tuple(
            [&](auto idx) {
                return make_tile_window(ds_dram_windows[idx], dram_tile_distribution);
            },
            number<NumDTensor>{});

        constexpr auto c_warp_y_lengths =
            to_sequence(CWarpDstr{}.get_ys_to_d_descriptor().get_lengths());
        constexpr auto c_warp_y_index_zeros = uniform_sequence_gen_t<CWarpDstr::NDimY, 0>{};

        static_for<0, num_access, 1>{}([&](auto iAccess) {
            block_sync_lds();
            constexpr auto idx_y_start = SFC::get_index(iAccess);

            constexpr auto mIter = number<idx_y_start.at(number<0>{}) / (MPerIterationShuffle)>{};
            constexpr auto nIter = number<idx_y_start.at(number<1>{}) / (NPerIterationShuffle)>{};

            lds_tile.get_thread_buffer() = o_acc_tile.get_y_sliced_thread_data(
                merge_sequences(
                    sequence<mIter * NumMXdlPerWavePerShuffle, nIter * NumNXdlPerWavePerShuffle>{},
                    c_warp_y_index_zeros),
                merge_sequences(sequence<NumMXdlPerWavePerShuffle, NumNXdlPerWavePerShuffle>{},
                                c_warp_y_lengths));

            const auto c_warptile_in_tensor_casted = cast_tile<ODataType>(lds_tile);

            store_tile(in_lds_window, c_warptile_in_tensor_casted);
            block_sync_lds();

            auto c_out_tensor = load_tile(make_tile_window(out_lds_window, dram_tile_distribution));

            const auto ds_tensor = generate_tuple(
                [&](auto idx) { return load_tile(d_dram_windows[idx]); }, number<NumDTensor>{});

            const auto c_ds_tiles = concat_tuple_of_reference(
                tie(c_out_tensor, c_out_tensor),
                generate_tie([&](auto idx) -> const auto& { return ds_tensor[idx]; },
                             number<NumDTensor>{}));

            tile_elementwise_inout_unpack(typename Problem::CDElementwise{}, c_ds_tiles);

            if constexpr(MemoryOperation == memory_operation_enum::set)
            {
                store_tile(out_dram_window, c_out_tensor);
            }
            else
            {
                update_tile(out_dram_window, c_out_tensor);
            }
            if constexpr(iAccess != num_access - 1)
            {
                constexpr auto step = SFC::get_forward_step(iAccess);

                move_tile_window(out_dram_window, {step.at(number<0>{}), step.at(number<1>{})});

                static_for<0, NumDTensor, 1>{}([&](auto idx) {
                    move_tile_window(d_dram_windows[idx],
                                     {step.at(number<0>{}), step.at(number<1>{})});
                });
            }
        });
    }
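
    // Minimal usage sketch (illustrative; names such as Epilogue, e_dram_window,
    // acc_tile, ds_windows and smem are placeholders supplied by the calling GEMM
    // kernel, not defined in this header):
    //
    //   __shared__ char smem[Epilogue::GetSmemSize()];
    //   Epilogue{}(e_dram_window, // writable tile window over the E/C output
    //              acc_tile,      // block-distributed accumulator tile from the GEMM pipeline
    //              ds_windows,    // tuple of tile windows over the D tensors (empty if none)
    //              smem);         // LDS scratch of at least GetSmemSize() bytes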

    /**
     * @brief TiledMMAPermuteN output path with per-row (scale_m) and per-column (scale_n)
     *        scaling: the accumulators are scaled in registers and written out directly,
     *        without going through LDS.
     */
    template <typename ODramWindow,
              typename OAccTile,
              typename DsDramWindows,
              typename ScaleM,
              typename ScaleN,
              int EnablePermuateN_ = TiledMMAPermuteN,
              std::enable_if_t<EnablePermuateN_, int> = 0>
    CK_TILE_DEVICE auto operator()(ODramWindow& out_dram_window,
                                   const OAccTile& o_acc_tile,
                                   const DsDramWindows& ds_dram_windows,
                                   void* p_smem,
                                   ScaleM scale_m,
                                   ScaleN scale_n)
    {
        constexpr int kM0 = MWave;
        constexpr int kM2 = 4;
        constexpr int kM1 = MPerXdl / kM2;
        static_assert(MPerXdl == 16, "TiledMMAPermuteN only supports MPerXdl = 16 now");

        constexpr int kN0 = NWave;
        constexpr int kN1 = NPerXdl;
        constexpr int kN2 = NRepeat;

        using IntrThreadShuffleEncode =
            tile_distribution_encoding<sequence<>,
                                       tuple<sequence<kM0, kM1, kM2>, sequence<kN0, kN1, kN2>>,
                                       tuple<sequence<1, 2>, sequence<1, 2>>,
                                       tuple<sequence<0, 0>, sequence<1, 1>>,
                                       sequence<1, 2>,
                                       sequence<2, 2>>;
        constexpr auto dram_tile_distribution =
            make_static_tile_distribution(IntrThreadShuffleEncode{});

        auto d_dram_windows = generate_tuple(
            [&](auto idx) {
                return make_tile_window(ds_dram_windows[idx], dram_tile_distribution);
            },
            number<NumDTensor>{});

        constexpr auto c_warp_y_lengths =
            to_sequence(CWarpDstr{}.get_ys_to_d_descriptor().get_lengths());
        constexpr auto c_warp_y_index_zeros = uniform_sequence_gen_t<CWarpDstr::NDimY, 0>{};

        using ShuffleAcc =
            decltype(make_static_distributed_tensor<AccDataType>(dram_tile_distribution));
        ShuffleAcc shuffle_acc[MRepeat];
        auto c_out_tensor_fp32 =
            make_static_distributed_tensor<AccDataType>(dram_tile_distribution);
        auto c_out_tensor = make_static_distributed_tensor<ODataType>(dram_tile_distribution);

        const index_t iMWarp = get_warp_id() / NWave;
        const index_t iNWarp = get_warp_id() - iMWarp * NWave;
        const index_t iMLane = get_lane_id() / NPerXdl;
        const index_t iNLane = get_lane_id() % NPerXdl;

        float vec_scale_A[kM2 * MRepeat];
        float vec_scale_B[NRepeat];

        // Each lane preloads the per-column (B) scales for the NRepeat columns it owns...
        _Pragma("unroll") for(int i = 0; i < NRepeat; ++i)
        {
            vec_scale_B[i] = scale_n[i + iNLane * NRepeat + iNWarp * NRepeat * NPerXdl];
        }
        // ...and the per-row (A) scales for the kM2 rows it owns in every M repeat.
        _Pragma("unroll") for(int i = 0; i < MRepeat; ++i)
        {
            vec_scale_A[i * kM2 + 0] =
                scale_m[0 + iMLane * kM2 + iMWarp * MPerXdl + i * MPerXdl * MWave];
            vec_scale_A[i * kM2 + 1] =
                scale_m[1 + iMLane * kM2 + iMWarp * MPerXdl + i * MPerXdl * MWave];
            vec_scale_A[i * kM2 + 2] =
                scale_m[2 + iMLane * kM2 + iMWarp * MPerXdl + i * MPerXdl * MWave];
            vec_scale_A[i * kM2 + 3] =
                scale_m[3 + iMLane * kM2 + iMWarp * MPerXdl + i * MPerXdl * MWave];
        }

        static_for<0, MRepeat, 1>{}([&](auto mIter) {
            shuffle_acc[mIter].get_thread_buffer() = o_acc_tile.get_y_sliced_thread_data(
                merge_sequences(sequence<mIter, 0>{}, c_warp_y_index_zeros),
                merge_sequences(sequence<1, NRepeat>{}, c_warp_y_lengths));
            static_for<0, NRepeat, 1>{}([&](auto n_idx) {
                shuffle_acc[mIter].get_thread_buffer()[n_idx * kM2 + 0] *= vec_scale_B[n_idx];
                shuffle_acc[mIter].get_thread_buffer()[n_idx * kM2 + 1] *= vec_scale_B[n_idx];
                shuffle_acc[mIter].get_thread_buffer()[n_idx * kM2 + 2] *= vec_scale_B[n_idx];
                shuffle_acc[mIter].get_thread_buffer()[n_idx * kM2 + 3] *= vec_scale_B[n_idx];
            });
        });

        static_for<0, MRepeat, 1>{}([&](auto mIter) {
            static_for<0, NRepeat, 1>{}([&](auto n_idx) {
                c_out_tensor_fp32.get_thread_buffer()[n_idx + 0 * NRepeat] =
                    shuffle_acc[mIter].get_thread_buffer()[n_idx * c_warp_y_lengths.product() + 0] *
                    vec_scale_A[mIter * kM2 + 0];
                c_out_tensor_fp32.get_thread_buffer()[n_idx + 1 * NRepeat] =
                    shuffle_acc[mIter].get_thread_buffer()[n_idx * c_warp_y_lengths.product() + 1] *
                    vec_scale_A[mIter * kM2 + 1];
                c_out_tensor_fp32.get_thread_buffer()[n_idx + 2 * NRepeat] =
                    shuffle_acc[mIter].get_thread_buffer()[n_idx * c_warp_y_lengths.product() + 2] *
                    vec_scale_A[mIter * kM2 + 2];
                c_out_tensor_fp32.get_thread_buffer()[n_idx + 3 * NRepeat] =
                    shuffle_acc[mIter].get_thread_buffer()[n_idx * c_warp_y_lengths.product() + 3] *
                    vec_scale_A[mIter * kM2 + 3];
            });

            c_out_tensor = cast_tile<ODataType>(c_out_tensor_fp32);

            if constexpr(MemoryOperation == memory_operation_enum::set)
            {
                store_tile(out_dram_window, c_out_tensor);
            }
            else
            {
                update_tile(out_dram_window, c_out_tensor);
            }
            move_tile_window(out_dram_window, {number<MPerXdl * MWave>{}, number<0>{}});

            static_for<0, NumDTensor, 1>{}([&](auto idx) {
                move_tile_window(d_dram_windows[idx], {number<MPerXdl * MWave>{}, number<0>{}});
            });
        });
    }

    /**
     * @brief General C-shuffle path with per-row (scale_m) and per-column (scale_n)
     *        scaling. The scaled accumulators are double-buffered in registers
     *        (lds_tile[0]/lds_tile[1]) so that scaling of the next shuffle tile can be
     *        issued while the current one makes its round trip through LDS, then they
     *        are combined with the D tensors via CDElementwise and written out along
     *        the space-filling curve.
     */
    template <typename ODramWindow,
              typename OAccTile,
              typename DsDramWindows,
              typename ScaleM,
              typename ScaleN,
              int EnablePermuateN_ = TiledMMAPermuteN,
              std::enable_if_t<!EnablePermuateN_, int> = 0>
    CK_TILE_DEVICE auto operator()(ODramWindow& out_dram_window,
                                   const OAccTile& o_acc_tile,
                                   const DsDramWindows& ds_dram_windows,
                                   void* p_smem,
                                   ScaleM scale_m,
                                   ScaleN scale_n)
    {
        constexpr auto LdsTileDistr = make_static_tile_distribution(MakeLdsDistributionEncode());

        using LDSTileTensor = decltype(make_static_distributed_tensor<AccDataType>(LdsTileDistr));
        LDSTileTensor lds_tile[2];

        constexpr auto lds_block_desc = MakeLdsBlockDescriptor<Problem>();
        auto o_lds_block = make_tensor_view<address_space_enum::lds>(
            static_cast<ODataType*>(p_smem), lds_block_desc);

        auto in_lds_window = make_tile_window(
            o_lds_block,
            make_tuple(number<MPerIterationShuffle>{}, number<NPerIterationShuffle>{}),
            {0, 0},
            LdsTileDistr);

        auto out_lds_window = make_tile_window(
            o_lds_block,
            make_tuple(number<MPerIterationShuffle>{}, number<NPerIterationShuffle>{}),
            {0, 0});

        using SFC = space_filling_curve<sequence<kMPerBlock, kNPerBlock>,
                                        sequence<0, 1>,
                                        sequence<MPerIterationShuffle, NPerIterationShuffle>>;
        constexpr index_t num_access = SFC::get_num_of_access();

        static_assert(std::is_same_v<ELayout, tensor_layout::gemm::RowMajor>,
                      "Currently, the CShuffle Epilogue only supports the Row Major Output layout");

        using TileEncodingPattern =
            TileDistributionEncodingPattern2D<kBlockSize,
                                              MPerIterationShuffle,
                                              NPerIterationShuffle,
                                              GetVectorSizeC(),
                                              tile_distribution_pattern::thread_raked,
                                              Problem::kNumWaveGroups>;
        constexpr auto dram_tile_distribution = TileEncodingPattern::Make2DStaticTileDistribution();

        auto d_dram_windows = generate_tuple(
            [&](auto idx) {
                return make_tile_window(ds_dram_windows[idx], dram_tile_distribution);
            },
            number<NumDTensor>{});

        constexpr auto c_warp_y_lengths =
            to_sequence(CWarpDstr{}.get_ys_to_d_descriptor().get_lengths());
        constexpr auto c_warp_y_index_zeros = uniform_sequence_gen_t<CWarpDstr::NDimY, 0>{};

        constexpr int kM2 = 4;                   // contiguous M values per lane
        constexpr int kM1 = (64 / NPerXdl);      // lanes along M
        constexpr int kM0 = MPerXdl / kM1 / kM2; // groups of M values per lane

        const index_t iMWarp = get_warp_id() / NWave;
        const index_t iNWarp = get_warp_id() - iMWarp * NWave;
        const index_t iMLane = get_lane_id() / NPerXdl;
        const index_t iNLane = get_lane_id() % NPerXdl;

        float vec_scale_A[kM0 * kM2 * MRepeat];
        float vec_scale_B[NRepeat];

        _Pragma("unroll") for(int i = 0; i < NRepeat; ++i)
        {
            vec_scale_B[i] = scale_n[i * NWave * NPerXdl + iNWarp * NPerXdl + iNLane];
        }
        _Pragma("unroll") for(int i = 0; i < MRepeat; ++i)
        {
            _Pragma("unroll") for(int m0 = 0; m0 < kM0; ++m0)
            {
                vec_scale_A[i * kM0 * kM2 + m0 * kM2 + 0] =
                    scale_m[0 + iMLane * kM2 + m0 * kM2 * kM1 + iMWarp * MPerXdl +
                            i * MPerXdl * MWave];
                vec_scale_A[i * kM0 * kM2 + m0 * kM2 + 1] =
                    scale_m[1 + iMLane * kM2 + m0 * kM2 * kM1 + iMWarp * MPerXdl +
                            i * MPerXdl * MWave];
                vec_scale_A[i * kM0 * kM2 + m0 * kM2 + 2] =
                    scale_m[2 + iMLane * kM2 + m0 * kM2 * kM1 + iMWarp * MPerXdl +
                            i * MPerXdl * MWave];
                vec_scale_A[i * kM0 * kM2 + m0 * kM2 + 3] =
                    scale_m[3 + iMLane * kM2 + m0 * kM2 * kM1 + iMWarp * MPerXdl +
                            i * MPerXdl * MWave];
            }
        }

        // Pre-scale the first shuffle tile into stage 0 before the pipelined loop starts.
        lds_tile[0].get_thread_buffer() = o_acc_tile.get_y_sliced_thread_data(
            merge_sequences(sequence<0 * NumMXdlPerWavePerShuffle, 0 * NumNXdlPerWavePerShuffle>{},
                            c_warp_y_index_zeros),
            merge_sequences(sequence<NumMXdlPerWavePerShuffle, NumNXdlPerWavePerShuffle>{},
                            c_warp_y_lengths));
        static_for<0, NumNXdlPerWavePerShuffle, 1>{}([&](auto n_xdl) {
            static_for<0, NumMXdlPerWavePerShuffle, 1>{}([&](auto m_xdl) {
                constexpr int acc_xdl_offset =
                    (m_xdl + n_xdl * NumMXdlPerWavePerShuffle) * c_warp_y_lengths.product();
                _Pragma("unroll") for(int m0 = 0; m0 < kM0; ++m0)
                {
                    lds_tile[0].get_thread_buffer()[acc_xdl_offset + m0 * kM2 + 0] *=
                        vec_scale_A[m_xdl * kM0 * kM2 + m0 * kM2 + 0] * vec_scale_B[n_xdl];
                    lds_tile[0].get_thread_buffer()[acc_xdl_offset + m0 * kM2 + 1] *=
                        vec_scale_A[m_xdl * kM0 * kM2 + m0 * kM2 + 1] * vec_scale_B[n_xdl];
                    lds_tile[0].get_thread_buffer()[acc_xdl_offset + m0 * kM2 + 2] *=
                        vec_scale_A[m_xdl * kM0 * kM2 + m0 * kM2 + 2] * vec_scale_B[n_xdl];
                    lds_tile[0].get_thread_buffer()[acc_xdl_offset + m0 * kM2 + 3] *=
                        vec_scale_A[m_xdl * kM0 * kM2 + m0 * kM2 + 3] * vec_scale_B[n_xdl];
                }
            });
        });

        static_for<0, num_access, 1>{}([&](auto iAccess) {
            constexpr int read_stage  = iAccess % 2;
            constexpr int write_stage = read_stage ^ 1;

            block_sync_lds();
            constexpr auto idx_y_start = SFC::get_index(number<iAccess.value + 1>{});

            constexpr auto mIter = number<idx_y_start.at(number<0>{}) / (MPerIterationShuffle)>{};
            constexpr auto nIter = number<idx_y_start.at(number<1>{}) / (NPerIterationShuffle)>{};

            const auto c_warptile_in_tensor_casted = cast_tile<ODataType>(lds_tile[read_stage]);

            store_tile(in_lds_window, c_warptile_in_tensor_casted);

            if constexpr(iAccess < num_access - 1)
            {
                // Pre-scale the next tile into the other stage while the current one is in LDS.
                lds_tile[write_stage].get_thread_buffer() = o_acc_tile.get_y_sliced_thread_data(
                    merge_sequences(sequence<mIter * NumMXdlPerWavePerShuffle,
                                             nIter * NumNXdlPerWavePerShuffle>{},
                                    c_warp_y_index_zeros),
                    merge_sequences(sequence<NumMXdlPerWavePerShuffle, NumNXdlPerWavePerShuffle>{},
                                    c_warp_y_lengths));
                static_for<0, NumNXdlPerWavePerShuffle, 1>{}([&](auto n_xdl) {
                    static_for<0, NumMXdlPerWavePerShuffle, 1>{}([&](auto m_xdl) {
                        constexpr int acc_xdl_offset =
                            (m_xdl + n_xdl * NumMXdlPerWavePerShuffle) * c_warp_y_lengths.product();
                        _Pragma("unroll") for(int m0 = 0; m0 < kM0; ++m0)
                        {
                            lds_tile[write_stage]
                                .get_thread_buffer()[acc_xdl_offset + m0 * kM2 + 0] *=
                                vec_scale_A[mIter * NumMXdlPerWavePerShuffle * kM0 * kM2 +
                                            m_xdl * kM0 * kM2 + m0 * kM2 + 0] *
                                vec_scale_B[nIter * NumNXdlPerWavePerShuffle + n_xdl];
                            lds_tile[write_stage]
                                .get_thread_buffer()[acc_xdl_offset + m0 * kM2 + 1] *=
                                vec_scale_A[mIter * NumMXdlPerWavePerShuffle * kM0 * kM2 +
                                            m_xdl * kM0 * kM2 + m0 * kM2 + 1] *
                                vec_scale_B[nIter * NumNXdlPerWavePerShuffle + n_xdl];
                            lds_tile[write_stage]
                                .get_thread_buffer()[acc_xdl_offset + m0 * kM2 + 2] *=
                                vec_scale_A[mIter * NumMXdlPerWavePerShuffle * kM0 * kM2 +
                                            m_xdl * kM0 * kM2 + m0 * kM2 + 2] *
                                vec_scale_B[nIter * NumNXdlPerWavePerShuffle + n_xdl];
                            lds_tile[write_stage]
                                .get_thread_buffer()[acc_xdl_offset + m0 * kM2 + 3] *=
                                vec_scale_A[mIter * NumMXdlPerWavePerShuffle * kM0 * kM2 +
                                            m_xdl * kM0 * kM2 + m0 * kM2 + 3] *
                                vec_scale_B[nIter * NumNXdlPerWavePerShuffle + n_xdl];
                        }
                    });
                });
            }

            block_sync_lds();

            auto c_out_tensor = load_tile(make_tile_window(out_lds_window, dram_tile_distribution));

            const auto ds_tensor = generate_tuple(
                [&](auto idx) { return load_tile(d_dram_windows[idx]); }, number<NumDTensor>{});

            const auto c_ds_tiles = concat_tuple_of_reference(
                tie(c_out_tensor, c_out_tensor),
                generate_tie([&](auto idx) -> const auto& { return ds_tensor[idx]; },
                             number<NumDTensor>{}));

            tile_elementwise_inout_unpack(typename Problem::CDElementwise{}, c_ds_tiles);

            if constexpr(MemoryOperation == memory_operation_enum::set)
            {
                store_tile(out_dram_window, c_out_tensor);
            }
            else
            {
                update_tile(out_dram_window, c_out_tensor);
            }
            if constexpr(iAccess != num_access - 1)
            {
                constexpr auto step = SFC::get_forward_step(iAccess);

                move_tile_window(out_dram_window, {step.at(number<0>{}), step.at(number<1>{})});

                static_for<0, NumDTensor, 1>{}([&](auto idx) {
                    move_tile_window(d_dram_windows[idx],
                                     {step.at(number<0>{}), step.at(number<1>{})});
                });
            }
        });
    }
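
    // Pipelining sketch for the scaled LDS path above (illustrative): with the two
    // register stages, access i casts and stores stage i % 2 to LDS while the
    // accumulators for access i + 1 are sliced and pre-scaled into the other stage,
    // so the scaling of the next tile can proceed while the LDS writes of the current
    // tile are still in flight (before the barrier). Stage 0 is primed once before
    // the loop.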
};
} // namespace ck_tile