[CK_BUILDER] Ck Tile Grouped convolution factory (#3352)

* [BUILDER] Ck Tile Grouped convolution factory

* Part 2

* Fixes after rebase

* Remove leftovers
This commit is contained in:
Bartłomiej Kocot
2025-12-08 10:32:56 +01:00
committed by GitHub
parent 8fec8054b2
commit 04612c30ce
55 changed files with 1431 additions and 92 deletions

View File

@@ -90,7 +90,7 @@ add_ck_builder_test(test_ckb_conv_builder
# Tests convolution trait selection and configuration
# Tests convolution trait selection and configuration.
# Note: sources moved under conv/ck/ in this change; the stale pre-move
# path was dropped (it no longer exists and would fail at configure time).
add_ck_builder_test(test_ckb_conv_traits
  conv/ck/test_conv_traits.cpp)
# Tests convolution problem description and parameter handling
add_ck_builder_test(test_ckb_conv_description
@@ -119,19 +119,22 @@ add_ck_builder_test(test_ckb_instance_string
# Tests the forward convolution builder across multiple data types and dimensions.
# Individual tests are split into separate files to enable parallel compilation.
# Tests the convolution builder across data types, dimensions and directions.
# Individual tests are split into separate files to enable parallel compilation.
# CK (legacy) instances live under conv/ck/, CK Tile instances under
# conv/ck_tile/; the stale pre-move conv/ paths were removed (those files no
# longer exist and would fail at configure time).
add_ck_builder_test(test_ckb_build_fwd_instances
  conv/ck/test_ckb_conv_fwd_2d_bf16_scaleadd_relu.cpp
  conv/ck/test_ckb_conv_fwd_1d_fp16.cpp
  conv/ck/test_ckb_conv_fwd_1d_bf16.cpp
  conv/ck/test_ckb_conv_fwd_1d_i8.cpp
  conv/ck/test_ckb_conv_fwd_2d_fp8.cpp
  conv/ck/test_ckb_conv_fwd_2d_bf16.cpp
  conv/ck/test_ckb_conv_fwd_2d_fp16.cpp
  conv/ck/test_ckb_conv_fwd_2d_fp32.cpp
  conv/ck/test_ckb_conv_fwd_2d_dl_fp16.cpp
  conv/ck/test_ckb_conv_fwd_2d_large_tensor_fp16.cpp
  conv/ck/test_ckb_conv_fwd_3d_bf16.cpp
  conv/ck/test_ckb_conv_fwd_3d_fp16.cpp
  conv/ck/test_ckb_conv_fwd_3d_fp32.cpp
  conv/ck_tile/test_ckb_conv_fwd_2d_fp16_v3.cpp
  conv/ck_tile/test_ckb_conv_bwd_weight_2d_fp16_v3.cpp
  conv/ck_tile/test_ckb_conv_bwd_data_2d_fp16_v3.cpp
)

View File

@@ -0,0 +1,52 @@
// Copyright (c) Advanced Micro Devices, Inc., or its affiliates.
// SPDX-License-Identifier: MIT
#include "utils/ckb_conv_tile_test_configs.hpp"
#include "utils/ckb_conv_test_utils.hpp"
namespace {

using namespace ck_tile::builder::test_utils;

// Checks that the CK Tile builder emits a 2D fp16 grouped convolution
// BACKWARD_DATA kernel whose type string contains the expected components.
// The suite/test name reflects the backward-data direction: the original
// copy-pasted name collided with the identically named forward and
// backward-weight tests linked into the same gtest binary, and gtest
// aborts on duplicate TEST(suite, name) registrations.
TEST(BwdDataConvInstances, Create_ConvAlgorithm_Tile_GroupedConvolutionKernel_2D_FP16_NHWGC)
{
    // 2D backward-data convolution, fp16 data with fp32 accumulation,
    // grouped NHWGC (input) / GKYXC (weight) / NHWGK (output) layouts.
    constexpr ConvSignature BwdDataConvSignature{
        .spatial_dim = 2,
        .direction = ConvDirection::BACKWARD_DATA,
        .data_type = DataType::FP16,
        .accumulation_data_type = DataType::FP32,
        .input = {.config = {.layout = TensorLayout::NHWGC}},
        .weight = {.config = {.layout = TensorLayout::GKYXC}},
        .output = {.config = {.layout = TensorLayout::NHWGK}}};

    // 64x64x64 thread-block tile, 16x16 warp tile with pipeline V3 and
    // intrawave scheduling, 4-wide vector transfers, no extra optimizations.
    constexpr auto BwdDataConvAlgorithm =
        ConvAlgorithm_Tile_GroupedConvolutionKernel{}
            .with_tile_specializations(TileConvSpecialization::DEFAULT)
            .with_tile_thread_block(FwdTileThreadBlock_64x64x64)
            .with_tile_block_gemm(TileBlockGemmDesc_16x16_v3_intrawave)
            .with_tile_transfer(FwdTileTransfer_4x4x4)
            .with_tile_optimizations(TileOptimizations{
                .num_groups_to_merge = 1, .split_image = false, .explicit_gemm = false});

    using Builder = ConvBuilder<BwdDataConvSignature, BwdDataConvAlgorithm>;

    // Substrings that must appear in the generated kernel type string.
    run_ck_tile_test<Builder>({
        "grouped_convolution_backward_data",
        "fp16",
        "NHWGC_GKYXC_NHWGK",
        "64x64x64",
        "2x2",
        "16x16x16",
        // "4x4x4", // TODO: Enable this check
        "Default",
        "Intrawave",
        "CShuffleEpilogue",
        "set",
        "pipeline_AgBgCrCompV3",
        "DoubleSmemBuffer_0",
        "NumWaveGroups_1",
        "MergedGroups_1",
        "SplitImage_0",
        "ExplicitGemm_0",
    });
}

} // namespace

View File

@@ -0,0 +1,52 @@
// Copyright (c) Advanced Micro Devices, Inc., or its affiliates.
// SPDX-License-Identifier: MIT
#include "utils/ckb_conv_tile_test_configs.hpp"
#include "utils/ckb_conv_test_utils.hpp"
namespace {

using namespace ck_tile::builder::test_utils;

// Checks that the CK Tile builder emits a 2D fp16 grouped convolution
// BACKWARD_WEIGHT kernel whose type string contains the expected components.
// The suite/test name reflects the backward-weight direction: the original
// copy-pasted name collided with the identically named forward and
// backward-data tests linked into the same gtest binary, and gtest aborts
// on duplicate TEST(suite, name) registrations.
TEST(BwdWeightConvInstances, Create_ConvAlgorithm_Tile_GroupedConvolutionKernel_2D_FP16_NHWGC)
{
    // 2D backward-weight convolution, fp16 data with fp32 accumulation,
    // grouped NHWGC (input) / GKYXC (weight) / NHWGK (output) layouts.
    constexpr ConvSignature BwdWeightConvSignature{
        .spatial_dim = 2,
        .direction = ConvDirection::BACKWARD_WEIGHT,
        .data_type = DataType::FP16,
        .accumulation_data_type = DataType::FP32,
        .input = {.config = {.layout = TensorLayout::NHWGC}},
        .weight = {.config = {.layout = TensorLayout::GKYXC}},
        .output = {.config = {.layout = TensorLayout::NHWGK}}};

    // 64x64x64 thread-block tile, 16x16 warp tile with pipeline V3 and
    // intrawave scheduling, 4-wide vector transfers, no extra optimizations.
    constexpr auto BwdWeightConvAlgorithm =
        ConvAlgorithm_Tile_GroupedConvolutionKernel{}
            .with_tile_specializations(TileConvSpecialization::DEFAULT)
            .with_tile_thread_block(FwdTileThreadBlock_64x64x64)
            .with_tile_block_gemm(TileBlockGemmDesc_16x16_v3_intrawave)
            .with_tile_transfer(FwdTileTransfer_4x4x4)
            .with_tile_optimizations(TileOptimizations{
                .num_groups_to_merge = 1, .split_image = false, .explicit_gemm = false});

    using Builder = ConvBuilder<BwdWeightConvSignature, BwdWeightConvAlgorithm>;

    // Substrings that must appear in the generated kernel type string.
    run_ck_tile_test<Builder>({
        "grouped_convolution_backward_weight",
        "fp16",
        "NHWGC_GKYXC_NHWGK",
        "64x64x64",
        "2x2",
        "16x16x16",
        // "4x4x4", // TODO: Enable this check
        "Default",
        "Intrawave",
        "CShuffleEpilogue",
        "set",
        "pipeline_AgBgCrCompV3",
        "DoubleSmemBuffer_0",
        "NumWaveGroups_1",
        "MergedGroups_1",
        "SplitImage_0",
        "ExplicitGemm_0",
    });
}

} // namespace

View File

@@ -0,0 +1,52 @@
// Copyright (c) Advanced Micro Devices, Inc., or its affiliates.
// SPDX-License-Identifier: MIT
#include "utils/ckb_conv_tile_test_configs.hpp"
#include "utils/ckb_conv_test_utils.hpp"
namespace {

using namespace ck_tile::builder::test_utils;

// Verifies the generated type string of a 2D fp16 forward grouped
// convolution kernel built through the CK Tile factory.
TEST(FwdConvInstances, Create_ConvAlgorithm_Tile_GroupedConvolutionKernel_2D_FP16_NHWGC)
{
    // Problem description: 2D forward conv, fp16 data / fp32 accumulation,
    // grouped NHWGC / GKYXC / NHWGK tensor layouts.
    constexpr ConvSignature Signature{
        .spatial_dim = 2,
        .direction = ConvDirection::FORWARD,
        .data_type = DataType::FP16,
        .accumulation_data_type = DataType::FP32,
        .input = {.config = {.layout = TensorLayout::NHWGC}},
        .weight = {.config = {.layout = TensorLayout::GKYXC}},
        .output = {.config = {.layout = TensorLayout::NHWGK}}};

    // Algorithm configuration, assembled from a default-constructed
    // factory via the fluent with_tile_* setters.
    constexpr auto BaseAlgorithm = ConvAlgorithm_Tile_GroupedConvolutionKernel{};
    constexpr auto Algorithm =
        BaseAlgorithm.with_tile_specializations(TileConvSpecialization::DEFAULT)
            .with_tile_thread_block(FwdTileThreadBlock_64x64x64)
            .with_tile_block_gemm(TileBlockGemmDesc_16x16_v3_intrawave)
            .with_tile_transfer(FwdTileTransfer_4x4x4)
            .with_tile_optimizations(TileOptimizations{
                .num_groups_to_merge = 1, .split_image = false, .explicit_gemm = false});

    using Builder = ConvBuilder<Signature, Algorithm>;

    // Substrings that must appear in the generated kernel type string.
    const std::vector<std::string> expected_components{
        "grouped_convolution_forward",
        "fp16",
        "NHWGC_GKYXC_NHWGK",
        "64x64x64",
        "2x2",
        "16x16x16",
        // "4x4x4", // TODO: Enable this check
        "Default",
        "Intrawave",
        "CShuffleEpilogue",
        "set",
        "pipeline_AgBgCrCompV3",
        "DoubleSmemBuffer_0",
        "NumWaveGroups_1",
        "MergedGroups_1",
        "SplitImage_0",
        "ExplicitGemm_0",
    };
    run_ck_tile_test<Builder>(expected_components);
}

} // namespace

View File

@@ -243,6 +243,73 @@ struct LargeTensorWrapper
ConvAlgorithmSpecialization::LARGE_TENSOR;
};
// Specify thread block dimensions for a GEMM (CK Tile).
struct TileThreadBlock
{
// Size of the submatrix problem in a thread block.
MNK<size_t> tile_size;
};
// Compile-time check that this aggregate satisfies the builder's
// TileThreadBlockDescriptor concept.
static_assert(ckb::TileThreadBlockDescriptor<TileThreadBlock>);
// Vector widths (scalars per vector) used when transferring the
// A, B and C tensors (CK Tile).
struct TileTransfer
{
// Scalars per vector for the A tensor.
size_t a_scalar_per_vector;
// Scalars per vector for the B tensor.
size_t b_scalar_per_vector;
// Scalars per vector for the C tensor.
size_t c_scalar_per_vector;
};
// Compile-time check against the builder's TileTransferDescriptor concept.
static_assert(ckb::TileTransferDescriptor<TileTransfer>);
// Block-level GEMM configuration for a CK Tile kernel.
struct TileBlockGemm
{
// Number of warps per each dimension.
MNK<int> warps;
// Number of data processed per each dimension for each XDL/WMMA instruction.
MNK<int> warp_tile;
// Double LDS buffer.
bool double_smem_buffer;
// Waves grouping (Ping-Pong scheduler).
int num_wave_groups;
// GEMM pipeline variant; the available values (V1..V5) are defined by the
// builder's PipelineVersion enum.
PipelineVersion pipeline_version;
// Wave scheduling policy (e.g. intrawave) as defined by PipelineScheduler.
PipelineScheduler scheduler;
};
// Compile-time check against the builder's TileBlockGemmDescriptor concept.
static_assert(ckb::TileBlockGemmDescriptor<TileBlockGemm>);
// Optional optimizations for CK Tile grouped convolution kernels.
struct TileOptimizations
{
// Number of convolution groups processed per one workgroup
int num_groups_to_merge;
// Split image for large tensors
bool split_image;
// Explicit gemm for 1x1, stride=0, pad=0 cases
bool explicit_gemm;
};
// Compile-time check against the builder's TileOptimizationsDescriptor concept.
static_assert(ckb::TileOptimizationsDescriptor<TileOptimizations>);
// Mixin wrappers: each struct carries exactly one configuration component.
// ConvAlgorithmTemplate inherits any combination of them, and the
// with_tile_* setters detect a component's presence via
// std::is_base_of_v on the corresponding wrapper type.
struct TileConvSpecialization_
{
TileConvSpecialization specialization;
};
struct TileThreadBlock_
{
TileThreadBlock thread_block;
};
struct TileTransfer_
{
TileTransfer transfer;
};
struct TileBlockGemm_
{
TileBlockGemm block_gemm;
};
struct TileOptimizations_
{
TileOptimizations optimizations;
};
// Factory
template <typename... Components>
@@ -339,6 +406,51 @@ struct ConvAlgorithmTemplate : Components...
result.transfer = t;
return result;
}
// Returns a copy of this algorithm with the tile conv specialization set.
// Compiles only when this template includes the TileConvSpecialization_ mixin.
template <typename S>
constexpr auto with_tile_specializations(const S& s) const
{
// Reject use on templates that lack the corresponding component.
static_assert(std::is_base_of_v<TileConvSpecialization_, ConvAlgorithmTemplate>);
auto result = *this;
result.specialization = s;
return result;
}
// Returns a copy of this algorithm with the tile thread-block config set.
// Compiles only when this template includes the TileThreadBlock_ mixin.
template <typename TB>
constexpr auto with_tile_thread_block(const TB& tb) const
{
// Reject use on templates that lack the corresponding component.
static_assert(std::is_base_of_v<TileThreadBlock_, ConvAlgorithmTemplate>);
auto result = *this;
result.thread_block = tb;
return result;
}
// Returns a copy of this algorithm with the tile block-GEMM config set.
// Compiles only when this template includes the TileBlockGemm_ mixin.
template <typename BG>
constexpr auto with_tile_block_gemm(const BG& bg) const
{
// Reject use on templates that lack the corresponding component.
static_assert(std::is_base_of_v<TileBlockGemm_, ConvAlgorithmTemplate>);
auto result = *this;
result.block_gemm = bg;
return result;
}
// Returns a copy of this algorithm with the tile transfer config set.
// Compiles only when this template includes the TileTransfer_ mixin.
template <typename T>
constexpr auto with_tile_transfer(const T& t) const
{
// Reject use on templates that lack the corresponding component.
static_assert(std::is_base_of_v<TileTransfer_, ConvAlgorithmTemplate>);
auto result = *this;
result.transfer = t;
return result;
}
// Returns a copy of this algorithm with the tile optimizations set.
// Compiles only when this template includes the TileOptimizations_ mixin.
template <typename O>
constexpr auto with_tile_optimizations(const O& o) const
{
// Reject use on templates that lack the corresponding component.
static_assert(std::is_base_of_v<TileOptimizations_, ConvAlgorithmTemplate>);
auto result = *this;
result.optimizations = o;
return result;
}
};
// Algorithm types
@@ -361,4 +473,10 @@ using ConvAlgorithm_DeviceGroupedConvFwdDlMultipleD_NHWC_KYXC_NHWK =
using ConvAlgorithm_DeviceGroupedConvFwdMultipleD_Xdl_CShuffle_Large_Tensor =
LargeTensorWrapper<ConvAlgorithm_DeviceGroupedConvFwdMultipleABD_Xdl_CShuffle>;
// CK Tile grouped convolution algorithm: a ConvAlgorithmTemplate composed
// of all five tile-configuration mixins declared above, so every
// with_tile_* setter is usable on it.
using ConvAlgorithm_Tile_GroupedConvolutionKernel = ConvAlgorithmTemplate<TileThreadBlock_,
TileBlockGemm_,
TileTransfer_,
TileConvSpecialization_,
TileOptimizations_>;
} // namespace ck_tile::builder::test

View File

@@ -4,7 +4,7 @@
#include <gtest/gtest.h>
#include <type_traits>
#include "ck_tile/builder/factory/helpers/conv_elementwise_op.hpp"
#include "ck_tile/builder/factory/helpers/ck/conv_elementwise_op.hpp"
namespace {

View File

@@ -4,7 +4,7 @@
#include <gtest/gtest.h>
#include <type_traits>
#include "ck_tile/builder/factory/helpers/conv_tensor_layout.hpp"
#include "ck_tile/builder/factory/helpers/ck/conv_tensor_layout.hpp"
#include "impl/conv_signature_types.hpp"
namespace {

View File

@@ -4,7 +4,7 @@
#include <gtest/gtest.h>
#include <type_traits>
#include "ck_tile/builder/factory/helpers/conv_tensor_type.hpp"
#include "ck_tile/builder/factory/helpers/ck/conv_tensor_type.hpp"
namespace {

View File

@@ -2,7 +2,7 @@
// SPDX-License-Identifier: MIT
#include <gtest/gtest.h>
#include "ck_tile/builder/factory/helpers/conv_thread_block.hpp"
#include "ck_tile/builder/factory/helpers/ck/conv_thread_block.hpp"
namespace {

View File

@@ -3,7 +3,7 @@
#include <gtest/gtest.h>
#include "ck_tile/builder/factory/helpers/conv_tuning_params.hpp"
#include "ck_tile/builder/factory/helpers/ck/conv_tuning_params.hpp"
namespace {

View File

@@ -28,4 +28,20 @@ constexpr void run_test(const std::vector<std::string>& kernel_instance_componen
}
}
// Common CK Tile test implementation.
// Instantiates the Builder's kernel instance and checks that its generated
// type string is non-empty and contains every expected component substring.
// The leftover second print of the type string was removed, std::endl was
// replaced with '\n' to avoid needless flushes.
template <typename Builder>
constexpr void run_ck_tile_test(const std::vector<std::string>& kernel_instance_components)
{
    auto instance = typename Builder::Instance{};
    const auto kernel_string = instance.GetTypeString();
    // Log the full type string once to simplify debugging of mismatches.
    std::cout << "Generated kernel: " << kernel_string << '\n';
    EXPECT_FALSE(kernel_string.empty());
    for(const auto& component : kernel_instance_components)
    {
        EXPECT_THAT(kernel_string, ::testing::HasSubstr(component));
    }
}
} // namespace ck_tile::builder::test_utils

View File

@@ -0,0 +1,85 @@
// Copyright (c) Advanced Micro Devices, Inc., or its affiliates.
// SPDX-License-Identifier: MIT
#pragma once
#include "impl/conv_algorithm_types.hpp"
#include "impl/conv_signature_types.hpp"
#include "ck_tile/builder/conv_builder.hpp"
namespace ck_tile::builder::test_utils {

using namespace ck_tile::builder;
using namespace test;

// Transfer configs: scalars-per-vector for the A/B/C tensors.
constexpr TileTransfer FwdTileTransfer_1x1x1{
    .a_scalar_per_vector = 1,
    .b_scalar_per_vector = 1,
    .c_scalar_per_vector = 1,
};
constexpr TileTransfer FwdTileTransfer_4x4x4{
    .a_scalar_per_vector = 4,
    .b_scalar_per_vector = 4,
    .c_scalar_per_vector = 4,
};
constexpr TileTransfer FwdTileTransfer_8x8x8{
    .a_scalar_per_vector = 8,
    .b_scalar_per_vector = 8,
    .c_scalar_per_vector = 8,
};

// Thread-block tile sizes (MxNxK).
constexpr TileThreadBlock FwdTileThreadBlock_256x256x32{.tile_size = {.m = 256, .n = 256, .k = 32}};
constexpr TileThreadBlock FwdTileThreadBlock_256x128x32{.tile_size = {.m = 256, .n = 128, .k = 32}};
constexpr TileThreadBlock FwdTileThreadBlock_128x128x32{.tile_size = {.m = 128, .n = 128, .k = 32}};
constexpr TileThreadBlock FwdTileThreadBlock_128x128x16{.tile_size = {.m = 128, .n = 128, .k = 16}};
constexpr TileThreadBlock FwdTileThreadBlock_64x32x32{.tile_size = {.m = 64, .n = 32, .k = 32}};
constexpr TileThreadBlock FwdTileThreadBlock_64x64x64{.tile_size = {.m = 64, .n = 64, .k = 64}};

// All block-GEMM configs below share a 2x2x1 warp layout, a 16x16x16 warp
// tile, a single LDS buffer, one wave group and intrawave scheduling; they
// differ only in the pipeline version, so they are built through one helper
// instead of five copy-pasted initializers.
constexpr TileBlockGemm make_tile_block_gemm_16x16_intrawave(PipelineVersion version)
{
    return TileBlockGemm{.warps = {.m = 2, .n = 2, .k = 1},
                         .warp_tile = {.m = 16, .n = 16, .k = 16},
                         .double_smem_buffer = false,
                         .num_wave_groups = 1,
                         .pipeline_version = version,
                         .scheduler = PipelineScheduler::INTRAWAVE};
}
constexpr TileBlockGemm TileBlockGemmDesc_16x16_v1_intrawave =
    make_tile_block_gemm_16x16_intrawave(PipelineVersion::V1);
constexpr TileBlockGemm TileBlockGemmDesc_16x16_v2_intrawave =
    make_tile_block_gemm_16x16_intrawave(PipelineVersion::V2);
constexpr TileBlockGemm TileBlockGemmDesc_16x16_v3_intrawave =
    make_tile_block_gemm_16x16_intrawave(PipelineVersion::V3);
constexpr TileBlockGemm TileBlockGemmDesc_16x16_v4_intrawave =
    make_tile_block_gemm_16x16_intrawave(PipelineVersion::V4);
constexpr TileBlockGemm TileBlockGemmDesc_16x16_v5_intrawave =
    make_tile_block_gemm_16x16_intrawave(PipelineVersion::V5);

} // namespace ck_tile::builder::test_utils