[CK-Tile] Remove usage of tile partitioner's full gemm shape (#3204)

The gemm shape should be taken from the pipeline instead (which obtains it from a problem description struct).
This commit is contained in:
Max Podkorytov
2025-11-18 09:56:40 -08:00
committed by GitHub
parent ac70206b2c
commit a3a4eb12bd
9 changed files with 31 additions and 36 deletions

View File

@@ -724,8 +724,8 @@ struct GroupedConvolutionBackwardDataKernel
const GroupedConvBwdDataKernelArgsSpecialized& kargs,
const index_t group_id)
{
-        static_assert(!TilePartitioner::BlockGemmShape::PermuteA, "Not implemented!");
-        static_assert(!TilePartitioner::BlockGemmShape::PermuteB, "Not implemented!");
+        static_assert(!GemmPipeline::BlockGemmShape::PermuteA, "Not implemented!");
+        static_assert(!GemmPipeline::BlockGemmShape::PermuteB, "Not implemented!");
const auto& a_tensor_view = [&]() {
return make_tensor_view<address_space_enum::global>(
a_ptr,

View File

@@ -464,7 +464,7 @@ struct GroupedConvolutionBackwardWeightKernel
__device__ SplitKBatchOffset(const GroupedConvBwdWeightKernelArgsSpecialized& kargs,
const std::size_t k_id = blockIdx.z)
{
-            constexpr auto K1 = TilePartitioner::BlockGemmShape::WarpTile::at(number<2>{});
+            constexpr auto K1 = GemmPipeline::BlockGemmShape::WarpTile::at(number<2>{});
const index_t K_t = amd_wave_read_first_lane(kargs.k_batch * K1);
const index_t KRead = amd_wave_read_first_lane((kargs.GemmK + K_t - 1) / K_t * K1);
@@ -646,8 +646,8 @@ struct GroupedConvolutionBackwardWeightKernel
WeiDataType* c_ptr,
const GroupedConvBwdWeightKernelArgsSpecialized& kargs)
{
-        static_assert(!TilePartitioner::BlockGemmShape::PermuteA, "Not implemented!");
-        static_assert(!TilePartitioner::BlockGemmShape::PermuteB, "Not implemented!");
+        static_assert(!GemmPipeline::BlockGemmShape::PermuteA, "Not implemented!");
+        static_assert(!GemmPipeline::BlockGemmShape::PermuteB, "Not implemented!");
const auto& a_tensor_view = [&]() {
return make_tensor_view<address_space_enum::global>(a_ptr,
kargs.a_grid_desc_k_m); // A: out

View File

@@ -745,8 +745,8 @@ struct GroupedConvolutionForwardKernel
const BDescType& b_desc,
const CDescType& c_desc)
{
-        static_assert(!TilePartitioner::BlockGemmShape::PermuteA, "Not implemented!");
-        static_assert(!TilePartitioner::BlockGemmShape::PermuteB, "Not implemented!");
+        static_assert(!GemmPipeline::BlockGemmShape::PermuteA, "Not implemented!");
+        static_assert(!GemmPipeline::BlockGemmShape::PermuteB, "Not implemented!");
const auto& a_tensor_view = [&]() {
return make_tensor_view<address_space_enum::global>(a_ptr, a_desc);
}();