mirror of
https://github.com/ROCm/composable_kernel.git
synced 2026-05-04 05:31:24 +00:00
[CK-tile] add more tests for batched transpose testing the rectangular block tile sizes (#2634)
* add failing tests
* swap out and reference
* add constraint assert to transpose input distribution
* test both pipelines with rectangular block tile
* print mismatched indices
* add a smaller failing test for old pipeline
* print grid and block
* fill output before operating on it
* swap m/n tile sizes and make one test pass
* add device syncs
* add one more flipped test case
* flip block tile at host arg init
* fix tiles for lds pipeline
* clang-format
* rename tests
* roll back error check
* remove device syncs
* reduce large test case's size
This commit is contained in:
@@ -49,9 +49,11 @@ struct BatchedTransposeKernel
|
||||
|
||||
CK_TILE_HOST static constexpr auto GridSize(const Hargs& host_args)
|
||||
{
|
||||
size_t grid_size_x = (host_args.height + host_args.dim_block_h - 1) / host_args.dim_block_h;
|
||||
size_t grid_size_y = (host_args.width + host_args.dim_block_w - 1) / host_args.dim_block_w;
|
||||
size_t grid_size_z = host_args.batch;
|
||||
const size_t grid_size_x =
|
||||
ck_tile::integer_divide_ceil(host_args.height, host_args.dim_block_h);
|
||||
const size_t grid_size_y =
|
||||
ck_tile::integer_divide_ceil(host_args.width, host_args.dim_block_w);
|
||||
const size_t grid_size_z = host_args.batch;
|
||||
return dim3(grid_size_x, grid_size_y, grid_size_z);
|
||||
}
|
||||
|
||||
@@ -71,41 +73,43 @@ struct BatchedTransposeKernel
|
||||
|
||||
CK_TILE_DEVICE void operator()(Kargs kargs) const
|
||||
{
|
||||
static constexpr ck_tile::index_t kMPerBlock = Problem::kMPerBlock;
|
||||
static constexpr ck_tile::index_t kNPerBlock = Problem::kNPerBlock;
|
||||
static constexpr bool kPadM = Problem::kPadM;
|
||||
static constexpr bool kPadN = Problem::kPadN;
|
||||
static constexpr ck_tile::index_t VectorSizeInput = Problem::VectorSizeInput;
|
||||
static constexpr ck_tile::index_t VectorSizeOutput = Problem::VectorSizeOutput;
|
||||
static constexpr ck_tile::index_t kMPerBlock = Problem::kMPerBlock;
|
||||
static constexpr ck_tile::index_t kNPerBlock = Problem::kNPerBlock;
|
||||
static constexpr bool kPadM = Problem::kPadM;
|
||||
static constexpr bool kPadN = Problem::kPadN;
|
||||
static constexpr ck_tile::index_t VectorSizeInput = Problem::VectorSizeInput;
|
||||
static constexpr ck_tile::index_t VectorStrideInput = 1;
|
||||
static constexpr ck_tile::index_t VectorSizeOutput = Problem::VectorSizeOutput;
|
||||
static constexpr ck_tile::index_t VectorStrideOutput = 1;
|
||||
|
||||
const auto iM = __builtin_amdgcn_readfirstlane(blockIdx.x * kMPerBlock);
|
||||
const auto iN = __builtin_amdgcn_readfirstlane(blockIdx.y * kNPerBlock);
|
||||
const auto iDim = blockIdx.z;
|
||||
const auto iM = __builtin_amdgcn_readfirstlane(blockIdx.x * kMPerBlock);
|
||||
const auto iN = __builtin_amdgcn_readfirstlane(blockIdx.y * kNPerBlock);
|
||||
const auto offset = __builtin_amdgcn_readfirstlane(blockIdx.z * kargs.height * kargs.width);
|
||||
|
||||
const auto x_m_n = [&]() {
|
||||
const auto x_dram_naive = make_naive_tensor_view<address_space_enum::global>(
|
||||
static_cast<const Type*>(kargs.p_input) + iDim * kargs.dim_stride,
|
||||
static_cast<const Type*>(kargs.p_input) + offset,
|
||||
make_tuple(kargs.height, kargs.width),
|
||||
make_tuple(kargs.width, 1),
|
||||
number<VectorSizeInput>{},
|
||||
number<1>{});
|
||||
number<VectorStrideInput>{});
|
||||
|
||||
return pad_tensor_view(x_dram_naive,
|
||||
make_tuple(number<kMPerBlock>{}, number<kNPerBlock>{}),
|
||||
sequence<kPadN, kPadM>{});
|
||||
sequence<kPadM, kPadN>{});
|
||||
}();
|
||||
|
||||
const auto y_n_m = [&]() {
|
||||
const auto y_dram_naive = make_naive_tensor_view<address_space_enum::global>(
|
||||
static_cast<Type*>(kargs.p_output) + iDim * kargs.dim_stride,
|
||||
static_cast<Type*>(kargs.p_output) + offset,
|
||||
make_tuple(kargs.width, kargs.height),
|
||||
make_tuple(kargs.height, 1),
|
||||
number<VectorSizeOutput>{},
|
||||
number<1>{});
|
||||
number<VectorStrideOutput>{});
|
||||
|
||||
return pad_tensor_view(y_dram_naive,
|
||||
make_tuple(number<kNPerBlock>{}, number<kMPerBlock>{}),
|
||||
sequence<kPadM, kPadN>{});
|
||||
sequence<kPadN, kPadM>{});
|
||||
}();
|
||||
|
||||
auto x_block_window = make_tile_window(
|
||||
|
||||
@@ -15,15 +15,15 @@ struct BatchedTransposeCommonPolicy
|
||||
template <typename Problem>
|
||||
CK_TILE_DEVICE static constexpr auto MakeInputDistribution()
|
||||
{
|
||||
constexpr index_t BlockSize = Problem::kBlockSize;
|
||||
constexpr index_t LeadDimPerBlock = Problem::kMPerBlock;
|
||||
constexpr index_t SecondDimPerBlock = Problem::kNPerBlock;
|
||||
constexpr index_t kBlockSize = Problem::kBlockSize;
|
||||
constexpr index_t kLeadDimPerBlock = Problem::kNPerBlock;
|
||||
constexpr index_t kSecondDimPerBlock = Problem::kMPerBlock;
|
||||
|
||||
constexpr index_t kVectorSize = Problem::VectorSizeOutput;
|
||||
|
||||
using TileEncodingPattern = TileDistributionEncodingPattern2D<BlockSize,
|
||||
SecondDimPerBlock,
|
||||
LeadDimPerBlock,
|
||||
constexpr index_t kVectorSize = Problem::VectorSizeInput;
|
||||
static_assert((kLeadDimPerBlock * kVectorSize) % kBlockSize == 0, "");
|
||||
using TileEncodingPattern = TileDistributionEncodingPattern2D<kBlockSize,
|
||||
kSecondDimPerBlock,
|
||||
kLeadDimPerBlock,
|
||||
kVectorSize,
|
||||
TileAccessPattern>;
|
||||
return TileEncodingPattern::Make2DStaticTileDistribution();
|
||||
|
||||
@@ -18,19 +18,19 @@ struct BatchedTransposeLdsProblem
|
||||
{
|
||||
using DataType = remove_cvref_t<DataType_>;
|
||||
|
||||
static constexpr index_t kRowWarps_ = NumWarps::at(number<1>{});
|
||||
static constexpr index_t kColWarps_ = NumWarps::at(number<0>{});
|
||||
static constexpr index_t kRowWarps_ = NumWarps::at(number<0>{});
|
||||
static constexpr index_t kColWarps_ = NumWarps::at(number<1>{});
|
||||
static constexpr index_t kBlockSize_ = get_warp_size() * kRowWarps_ * kColWarps_;
|
||||
static constexpr index_t kRowPerBlock_ = BlockTile::at(number<1>{});
|
||||
static constexpr index_t kColPerBlock_ = BlockTile::at(number<0>{});
|
||||
static constexpr index_t kRowPerBlock_ = BlockTile::at(number<0>{});
|
||||
static constexpr index_t kColPerBlock_ = BlockTile::at(number<1>{});
|
||||
|
||||
static constexpr index_t kBlockSize = kBlockSize_;
|
||||
// warps per block
|
||||
static constexpr index_t kLeadNumWarps = kRowWarps_;
|
||||
static constexpr index_t kSecondNumWarps = kColWarps_;
|
||||
static constexpr index_t kLeadNumWarps = kColWarps_;
|
||||
static constexpr index_t kSecondNumWarps = kRowWarps_;
|
||||
|
||||
static constexpr index_t kLeadSizePerBlock = kRowPerBlock_;
|
||||
static constexpr index_t kSecondSizePerBlock = kColPerBlock_;
|
||||
static constexpr index_t kLeadSizePerBlock = kColPerBlock_;
|
||||
static constexpr index_t kSecondSizePerBlock = kRowPerBlock_;
|
||||
|
||||
static constexpr index_t kQuadrantLeadDim = LaneGroupTransposeTraits<DataType>::kleadDim;
|
||||
static constexpr index_t kQuadrantSecondDim = LaneGroupTransposeTraits<DataType>::ksecondDim;
|
||||
@@ -60,8 +60,8 @@ struct BatchedTransposeLdsProblem
|
||||
static constexpr bool kPadM = kPadM_;
|
||||
static constexpr bool kPadN = kPadN_;
|
||||
|
||||
static constexpr auto kMPerBlock = kLeadSizePerBlock;
|
||||
static constexpr auto kNPerBlock = kSecondSizePerBlock;
|
||||
static constexpr auto kMPerBlock = kSecondSizePerBlock;
|
||||
static constexpr auto kNPerBlock = kLeadSizePerBlock;
|
||||
|
||||
// 128-bit is the max single-instruction bandwidth for load/store
|
||||
static constexpr index_t MaxLoadStoreSize = 16;
|
||||
|
||||
@@ -19,8 +19,8 @@ struct BatchedTransposePolicy : public BatchedTransposeCommonPolicy
|
||||
constexpr index_t VecLoadSize = Problem::VectorSizeOutput;
|
||||
|
||||
using TileEncodingPattern = TileDistributionEncodingPattern2D<BlockSize,
|
||||
NPerBlock,
|
||||
MPerBlock,
|
||||
NPerBlock,
|
||||
VecLoadSize,
|
||||
TileAccessPattern>;
|
||||
return TileEncodingPattern::MakeShuffled2DStaticTileDistribution();
|
||||
|
||||
Reference in New Issue
Block a user