[CK_BUILDER] Improve CK Builder and CK Builder tests (#3382)

* Remove stale documentation.

* Add placeholder for conv algorithm design description. Add link to conv factory description.

* Improve testing transfer parameters.

* Python script to check the block tilings.

* Improve tests and conv types serialization.

* Change representation of boolean values from 1/0 to true/false in instance strings.

* Change representation of boolean values from 1/0 to true/false in conv algorithm types.

* Test code improvements.

* Improve conv descriptions tests.

* Improve conv signature definition in conv fwd builder tests.

* clang-format.

* Remove obsolete script.

* Revert StaticAssertTypeEq changes in conv layout tests.

* Remove obsolete using declaration.

---------

Co-authored-by: Ville Pietilä <>
This commit is contained in:
Ville Pietilä
2025-12-11 09:50:00 +02:00
committed by GitHub
parent 6d25525adc
commit d66e5f667c
33 changed files with 1568 additions and 1042 deletions

View File

@@ -9,27 +9,34 @@
namespace {
namespace ckb = ::ck_tile::builder;
using ::ck_tile::builder::DataType;
using ::ck_tile::builder::ElementwiseOperation;
using ::ck_tile::builder::TensorLayout;
using ::ck_tile::builder::factory::internal::AuxiliaryTensorLayouts;
using ::ck_tile::builder::factory::internal::ConvTensorLayouts;
using ::ck_tile::builder::factory::internal::LayoutToCK;
namespace ckb = ck_tile::builder;
using ck_tile::builder::DataType;
using ck_tile::builder::ElementwiseOperation;
using ck_tile::builder::TensorLayout;
using ck_tile::builder::factory::internal::AuxiliaryTensorLayouts;
using ck_tile::builder::factory::internal::ConvTensorLayouts;
using ck_tile::builder::factory::internal::LayoutToCK;
using ck_tile::builder::test::ConvolutionTensor;
using ck_tile::builder::test::ConvSignature;
using ck_tile::builder::test::TensorConfig;
using ck_tile::builder::test::TensorOperation;
using namespace ::ck_tile::builder::test;
using enum ::ck_tile::builder::ConvDirection;
namespace enums {
using enum ck_tile::builder::ConvDirection;
using enum ck_tile::builder::TensorLayout;
using enum ck_tile::builder::DataType;
} // namespace enums
TEST(ConvTensorLayout, AssignsLayoutsFor1D_NWGC_GKXC_NWGK)
{
static constexpr auto sig =
ConvSignature<>{.spatial_dim = 1,
.direction = FORWARD,
.data_type = DataType::FP16,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = TensorLayout::NWGC}},
.weight = {.config = {.layout = TensorLayout::GKXC}},
.output = {.config = {.layout = TensorLayout::NWGK}}};
using namespace enums;
static constexpr auto sig = ConvSignature<>{.spatial_dim = 1,
.direction = FORWARD,
.data_type = FP16,
.accumulation_data_type = FP32,
.input = {.config = {.layout = NWGC}},
.weight = {.config = {.layout = GKXC}},
.output = {.config = {.layout = NWGK}}};
using TensorLayouts = ConvTensorLayouts<sig, 1, FORWARD>;
@@ -41,14 +48,14 @@ TEST(ConvTensorLayout, AssignsLayoutsFor1D_NWGC_GKXC_NWGK)
TEST(ConvTensorLayout, AssignsLayoutsFor1D_NGCW_GKXC_NGKW)
{
static constexpr auto sig =
ConvSignature<>{.spatial_dim = 1,
.direction = FORWARD,
.data_type = DataType::FP16,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = TensorLayout::NGCW}},
.weight = {.config = {.layout = TensorLayout::GKXC}},
.output = {.config = {.layout = TensorLayout::NGKW}}};
using namespace enums;
static constexpr auto sig = ConvSignature<>{.spatial_dim = 1,
.direction = FORWARD,
.data_type = FP16,
.accumulation_data_type = FP32,
.input = {.config = {.layout = NGCW}},
.weight = {.config = {.layout = GKXC}},
.output = {.config = {.layout = NGKW}}};
using TensorLayouts = ConvTensorLayouts<sig, 1, FORWARD>;
@@ -60,14 +67,14 @@ TEST(ConvTensorLayout, AssignsLayoutsFor1D_NGCW_GKXC_NGKW)
TEST(ConvTensorLayout, AssignsLayoutsFor1D_GNWC_GKXC_GNWK)
{
static constexpr auto sig =
ConvSignature<>{.spatial_dim = 1,
.direction = FORWARD,
.data_type = DataType::FP16,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = TensorLayout::GNWC}},
.weight = {.config = {.layout = TensorLayout::GKXC}},
.output = {.config = {.layout = TensorLayout::GNWK}}};
using namespace enums;
static constexpr auto sig = ConvSignature<>{.spatial_dim = 1,
.direction = FORWARD,
.data_type = FP16,
.accumulation_data_type = FP32,
.input = {.config = {.layout = GNWC}},
.weight = {.config = {.layout = GKXC}},
.output = {.config = {.layout = GNWK}}};
using TensorLayouts = ConvTensorLayouts<sig, 1, FORWARD>;
@@ -79,14 +86,14 @@ TEST(ConvTensorLayout, AssignsLayoutsFor1D_GNWC_GKXC_GNWK)
TEST(ConvTensorLayout, AssignsLayoutsFor1D_NGCW_GKCX_NGKW)
{
static constexpr auto sig =
ConvSignature<>{.spatial_dim = 1,
.direction = FORWARD,
.data_type = DataType::FP16,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = TensorLayout::NGCW}},
.weight = {.config = {.layout = TensorLayout::GKCX}},
.output = {.config = {.layout = TensorLayout::NGKW}}};
using namespace enums;
static constexpr auto sig = ConvSignature<>{.spatial_dim = 1,
.direction = FORWARD,
.data_type = FP16,
.accumulation_data_type = FP32,
.input = {.config = {.layout = NGCW}},
.weight = {.config = {.layout = GKCX}},
.output = {.config = {.layout = NGKW}}};
using TensorLayouts = ConvTensorLayouts<sig, 1, FORWARD>;
@@ -98,14 +105,14 @@ TEST(ConvTensorLayout, AssignsLayoutsFor1D_NGCW_GKCX_NGKW)
TEST(ConvTensorLayout, AssignsLayoutsFor2D_NGCHW_GKYXC_NGKHW)
{
static constexpr auto sig =
ConvSignature<>{.spatial_dim = 2,
.direction = FORWARD,
.data_type = DataType::FP16,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = TensorLayout::NGCHW}},
.weight = {.config = {.layout = TensorLayout::GKYXC}},
.output = {.config = {.layout = TensorLayout::NGKHW}}};
using namespace enums;
static constexpr auto sig = ConvSignature<>{.spatial_dim = 2,
.direction = FORWARD,
.data_type = FP16,
.accumulation_data_type = FP32,
.input = {.config = {.layout = NGCHW}},
.weight = {.config = {.layout = GKYXC}},
.output = {.config = {.layout = NGKHW}}};
using TensorLayouts = ConvTensorLayouts<sig, 2, FORWARD>;
@@ -117,14 +124,14 @@ TEST(ConvTensorLayout, AssignsLayoutsFor2D_NGCHW_GKYXC_NGKHW)
TEST(ConvTensorLayout, AssignsLayoutsFor2D_NHWGC_GKYXC_NHWGK)
{
static constexpr auto sig =
ConvSignature<>{.spatial_dim = 2,
.direction = FORWARD,
.data_type = DataType::FP16,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = TensorLayout::NHWGC}},
.weight = {.config = {.layout = TensorLayout::GKYXC}},
.output = {.config = {.layout = TensorLayout::NHWGK}}};
using namespace enums;
static constexpr auto sig = ConvSignature<>{.spatial_dim = 2,
.direction = FORWARD,
.data_type = DataType::FP16,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = NHWGC}},
.weight = {.config = {.layout = GKYXC}},
.output = {.config = {.layout = NHWGK}}};
using TensorLayouts = ConvTensorLayouts<sig, 2, FORWARD>;
@@ -136,14 +143,14 @@ TEST(ConvTensorLayout, AssignsLayoutsFor2D_NHWGC_GKYXC_NHWGK)
TEST(ConvTensorLayout, AssignsLayoutsFor2D_GNHWC_GKYXC_GNHWK)
{
static constexpr auto sig =
ConvSignature<>{.spatial_dim = 2,
.direction = FORWARD,
.data_type = DataType::FP16,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = TensorLayout::GNHWC}},
.weight = {.config = {.layout = TensorLayout::GKYXC}},
.output = {.config = {.layout = TensorLayout::GNHWK}}};
using namespace enums;
static constexpr auto sig = ConvSignature<>{.spatial_dim = 2,
.direction = FORWARD,
.data_type = DataType::FP16,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = GNHWC}},
.weight = {.config = {.layout = GKYXC}},
.output = {.config = {.layout = GNHWK}}};
using TensorLayouts = ConvTensorLayouts<sig, 2, FORWARD>;
@@ -155,14 +162,14 @@ TEST(ConvTensorLayout, AssignsLayoutsFor2D_GNHWC_GKYXC_GNHWK)
TEST(ConvTensorLayout, AssignsLayoutsFor2D_NGCHW_GKCYX_NGKHW)
{
static constexpr auto sig =
ConvSignature<>{.spatial_dim = 2,
.direction = FORWARD,
.data_type = DataType::FP16,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = TensorLayout::NGCHW}},
.weight = {.config = {.layout = TensorLayout::GKCYX}},
.output = {.config = {.layout = TensorLayout::NGKHW}}};
using namespace enums;
static constexpr auto sig = ConvSignature<>{.spatial_dim = 2,
.direction = FORWARD,
.data_type = DataType::FP16,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = NGCHW}},
.weight = {.config = {.layout = GKCYX}},
.output = {.config = {.layout = NGKHW}}};
using TensorLayouts = ConvTensorLayouts<sig, 2, FORWARD>;
@@ -174,14 +181,14 @@ TEST(ConvTensorLayout, AssignsLayoutsFor2D_NGCHW_GKCYX_NGKHW)
TEST(ConvTensorLayout, AssignsLayoutsFor3D_NGCDHW_GKCZYX_NGKDHW)
{
static constexpr auto sig =
ConvSignature<>{.spatial_dim = 3,
.direction = FORWARD,
.data_type = DataType::FP16,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = TensorLayout::NGCDHW}},
.weight = {.config = {.layout = TensorLayout::GKCZYX}},
.output = {.config = {.layout = TensorLayout::NGKDHW}}};
using namespace enums;
static constexpr auto sig = ConvSignature<>{.spatial_dim = 3,
.direction = FORWARD,
.data_type = DataType::FP16,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = NGCDHW}},
.weight = {.config = {.layout = GKCZYX}},
.output = {.config = {.layout = NGKDHW}}};
using TensorLayouts = ConvTensorLayouts<sig, 3, FORWARD>;
@@ -193,14 +200,14 @@ TEST(ConvTensorLayout, AssignsLayoutsFor3D_NGCDHW_GKCZYX_NGKDHW)
TEST(ConvTensorLayout, AssignsLayoutsFor3D_NDHWGC_GKZYXC_NDHWGK)
{
static constexpr auto sig =
ConvSignature<>{.spatial_dim = 3,
.direction = FORWARD,
.data_type = DataType::FP16,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = TensorLayout::NDHWGC}},
.weight = {.config = {.layout = TensorLayout::GKZYXC}},
.output = {.config = {.layout = TensorLayout::NDHWGK}}};
using namespace enums;
static constexpr auto sig = ConvSignature<>{.spatial_dim = 3,
.direction = FORWARD,
.data_type = DataType::FP16,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = NDHWGC}},
.weight = {.config = {.layout = GKZYXC}},
.output = {.config = {.layout = NDHWGK}}};
using TensorLayouts = ConvTensorLayouts<sig, 3, FORWARD>;
@@ -212,14 +219,14 @@ TEST(ConvTensorLayout, AssignsLayoutsFor3D_NDHWGC_GKZYXC_NDHWGK)
TEST(ConvTensorLayout, AssignsLayoutsFor3D_GNDHWC_GKZYXC_GNDHWK)
{
static constexpr auto sig =
ConvSignature<>{.spatial_dim = 3,
.direction = FORWARD,
.data_type = DataType::FP16,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = TensorLayout::GNDHWC}},
.weight = {.config = {.layout = TensorLayout::GKZYXC}},
.output = {.config = {.layout = TensorLayout::GNDHWK}}};
using namespace enums;
static constexpr auto sig = ConvSignature<>{.spatial_dim = 3,
.direction = FORWARD,
.data_type = DataType::FP16,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = GNDHWC}},
.weight = {.config = {.layout = GKZYXC}},
.output = {.config = {.layout = GNDHWK}}};
using TensorLayouts = ConvTensorLayouts<sig, 3, FORWARD>;
@@ -261,8 +268,10 @@ struct MockAuxiliaryTensorConfig
TEST(AuxiliaryTensorLayoutIntegration, SingleBiasTensorWithG_K_Layout)
{
using namespace enums;
static constexpr std::array<MockAuxiliaryTensorConfig, 1> aux_configs = {
MockAuxiliaryTensorConfig{.layout = TensorLayout::G_K_strided}};
MockAuxiliaryTensorConfig{.layout = G_K_strided}};
using AuxLayouts = AuxiliaryTensorLayouts<aux_configs, 2, FORWARD>;
@@ -273,6 +282,8 @@ TEST(AuxiliaryTensorLayoutIntegration, SingleBiasTensorWithG_K_Layout)
TEST(AuxiliaryTensorLayoutIntegration, SingleBiasTensorWithGC_Layout)
{
using namespace enums;
static constexpr std::array<MockAuxiliaryTensorConfig, 1> aux_configs = {
MockAuxiliaryTensorConfig{.layout = TensorLayout::GC}};
@@ -285,8 +296,10 @@ TEST(AuxiliaryTensorLayoutIntegration, SingleBiasTensorWithGC_Layout)
TEST(AuxiliaryTensorLayoutIntegration, SingleBiasTensorWithG_C_Layout)
{
using namespace enums;
static constexpr std::array<MockAuxiliaryTensorConfig, 1> aux_configs = {
MockAuxiliaryTensorConfig{.layout = TensorLayout::G_C_strided}};
MockAuxiliaryTensorConfig{.layout = G_C_strided}};
using AuxLayouts = AuxiliaryTensorLayouts<aux_configs, 2, FORWARD>;
@@ -297,9 +310,11 @@ TEST(AuxiliaryTensorLayoutIntegration, SingleBiasTensorWithG_C_Layout)
TEST(AuxiliaryTensorLayoutIntegration, TwoAuxiliaryTensors)
{
using namespace enums;
static constexpr std::array<MockAuxiliaryTensorConfig, 2> aux_configs = {
MockAuxiliaryTensorConfig{.layout = TensorLayout::G_K_strided},
MockAuxiliaryTensorConfig{.layout = TensorLayout::GC}};
MockAuxiliaryTensorConfig{.layout = GC}};
using AuxLayouts = AuxiliaryTensorLayouts<aux_configs, 2, FORWARD>;
@@ -311,10 +326,12 @@ TEST(AuxiliaryTensorLayoutIntegration, TwoAuxiliaryTensors)
TEST(AuxiliaryTensorLayoutIntegration, ThreeAuxiliaryTensors)
{
using namespace enums;
static constexpr std::array<MockAuxiliaryTensorConfig, 3> aux_configs = {
MockAuxiliaryTensorConfig{.layout = TensorLayout::G_K_strided},
MockAuxiliaryTensorConfig{.layout = TensorLayout::GC},
MockAuxiliaryTensorConfig{.layout = TensorLayout::G_C_strided}};
MockAuxiliaryTensorConfig{.layout = G_K_strided},
MockAuxiliaryTensorConfig{.layout = GC},
MockAuxiliaryTensorConfig{.layout = G_C_strided}};
using AuxLayouts = AuxiliaryTensorLayouts<aux_configs, 2, FORWARD>;
@@ -327,8 +344,10 @@ TEST(AuxiliaryTensorLayoutIntegration, ThreeAuxiliaryTensors)
TEST(AuxiliaryTensorLayoutIntegration, WorksWith1DConvolution)
{
using namespace enums;
static constexpr std::array<MockAuxiliaryTensorConfig, 1> aux_configs = {
MockAuxiliaryTensorConfig{.layout = TensorLayout::G_K_strided}};
MockAuxiliaryTensorConfig{.layout = G_K_strided}};
using AuxLayouts = AuxiliaryTensorLayouts<aux_configs, 1, FORWARD>;
@@ -339,8 +358,10 @@ TEST(AuxiliaryTensorLayoutIntegration, WorksWith1DConvolution)
TEST(AuxiliaryTensorLayoutIntegration, WorksWith3DConvolution)
{
using namespace enums;
static constexpr std::array<MockAuxiliaryTensorConfig, 1> aux_configs = {
MockAuxiliaryTensorConfig{.layout = TensorLayout::GC}};
MockAuxiliaryTensorConfig{.layout = GC}};
using AuxLayouts = AuxiliaryTensorLayouts<aux_configs, 3, FORWARD>;
@@ -351,7 +372,8 @@ TEST(AuxiliaryTensorLayoutIntegration, WorksWith3DConvolution)
TEST(ConvTensorLayoutsWithAuxiliary, Conv2DWithSingleBiasG_K)
{
using OutputOp = TensorOperation<TensorConfig{.layout = TensorLayout::G_K_strided}>;
using namespace enums;
using OutputOp = TensorOperation<TensorConfig{.layout = G_K_strided}>;
static constexpr auto sig =
ConvSignature<ConvolutionTensor<>, ConvolutionTensor<>, ConvolutionTensor<OutputOp>>{
@@ -359,9 +381,9 @@ TEST(ConvTensorLayoutsWithAuxiliary, Conv2DWithSingleBiasG_K)
.direction = FORWARD,
.data_type = DataType::FP16,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = TensorLayout::NGCHW}},
.weight = {.config = {.layout = TensorLayout::GKYXC}},
.output = {.config = {.layout = TensorLayout::NGKHW},
.input = {.config = {.layout = NGCHW}},
.weight = {.config = {.layout = GKYXC}},
.output = {.config = {.layout = NGKHW},
.operation =
OutputOp{.elementwise_operation = ElementwiseOperation::SCALE}}};
@@ -377,7 +399,8 @@ TEST(ConvTensorLayoutsWithAuxiliary, Conv2DWithSingleBiasG_K)
TEST(ConvTensorLayoutsWithAuxiliary, Conv2DWithSingleBiasGC)
{
using OutputOp = TensorOperation<TensorConfig{.layout = TensorLayout::GC}>;
using namespace enums;
using OutputOp = TensorOperation<TensorConfig{.layout = GC}>;
static constexpr auto sig =
ConvSignature<ConvolutionTensor<>, ConvolutionTensor<>, ConvolutionTensor<OutputOp>>{
@@ -385,9 +408,9 @@ TEST(ConvTensorLayoutsWithAuxiliary, Conv2DWithSingleBiasGC)
.direction = FORWARD,
.data_type = DataType::BF16,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = TensorLayout::NHWGC}},
.weight = {.config = {.layout = TensorLayout::GKYXC}},
.output = {.config = {.layout = TensorLayout::NHWGK},
.input = {.config = {.layout = NHWGC}},
.weight = {.config = {.layout = GKYXC}},
.output = {.config = {.layout = NHWGK},
.operation =
OutputOp{.elementwise_operation = ElementwiseOperation::SCALE}}};
@@ -403,8 +426,9 @@ TEST(ConvTensorLayoutsWithAuxiliary, Conv2DWithSingleBiasGC)
TEST(ConvTensorLayoutsWithAuxiliary, Conv2DWithTwoAuxiliaryTensors)
{
using OutputOp = TensorOperation<TensorConfig{.layout = TensorLayout::G_K_strided},
TensorConfig{.layout = TensorLayout::GC}>;
using namespace enums;
using OutputOp =
TensorOperation<TensorConfig{.layout = G_K_strided}, TensorConfig{.layout = GC}>;
static constexpr auto sig =
ConvSignature<ConvolutionTensor<>, ConvolutionTensor<>, ConvolutionTensor<OutputOp>>{
@@ -412,9 +436,9 @@ TEST(ConvTensorLayoutsWithAuxiliary, Conv2DWithTwoAuxiliaryTensors)
.direction = FORWARD,
.data_type = DataType::FP16,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = TensorLayout::GNHWC}},
.weight = {.config = {.layout = TensorLayout::GKYXC}},
.output = {.config = {.layout = TensorLayout::GNHWK},
.input = {.config = {.layout = GNHWC}},
.weight = {.config = {.layout = GKYXC}},
.output = {.config = {.layout = GNHWK},
.operation = OutputOp{.elementwise_operation =
ElementwiseOperation::SCALEADD_SCALEADD_RELU}}};
@@ -431,7 +455,8 @@ TEST(ConvTensorLayoutsWithAuxiliary, Conv2DWithTwoAuxiliaryTensors)
TEST(ConvTensorLayoutsWithAuxiliary, Conv1DWithBias)
{
using OutputOp = TensorOperation<TensorConfig{.layout = TensorLayout::G_K_strided}>;
using namespace enums;
using OutputOp = TensorOperation<TensorConfig{.layout = G_K_strided}>;
static constexpr auto sig =
ConvSignature<ConvolutionTensor<>, ConvolutionTensor<>, ConvolutionTensor<OutputOp>>{
@@ -439,9 +464,9 @@ TEST(ConvTensorLayoutsWithAuxiliary, Conv1DWithBias)
.direction = FORWARD,
.data_type = DataType::FP32,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = TensorLayout::NWGC}},
.weight = {.config = {.layout = TensorLayout::GKXC}},
.output = {.config = {.layout = TensorLayout::NWGK},
.input = {.config = {.layout = NWGC}},
.weight = {.config = {.layout = GKXC}},
.output = {.config = {.layout = NWGK},
.operation =
OutputOp{.elementwise_operation = ElementwiseOperation::SCALE}}};
@@ -457,7 +482,8 @@ TEST(ConvTensorLayoutsWithAuxiliary, Conv1DWithBias)
TEST(ConvTensorLayoutsWithAuxiliary, Conv3DWithBias)
{
using OutputOp = TensorOperation<TensorConfig{.layout = TensorLayout::G_C_strided}>;
using namespace enums;
using OutputOp = TensorOperation<TensorConfig{.layout = G_C_strided}>;
static constexpr auto sig =
ConvSignature<ConvolutionTensor<>, ConvolutionTensor<>, ConvolutionTensor<OutputOp>>{
@@ -465,9 +491,9 @@ TEST(ConvTensorLayoutsWithAuxiliary, Conv3DWithBias)
.direction = FORWARD,
.data_type = DataType::FP16,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = TensorLayout::NDHWGC}},
.weight = {.config = {.layout = TensorLayout::GKZYXC}},
.output = {.config = {.layout = TensorLayout::NDHWGK},
.input = {.config = {.layout = NDHWGC}},
.weight = {.config = {.layout = GKZYXC}},
.output = {.config = {.layout = NDHWGK},
.operation = OutputOp{.elementwise_operation =
ElementwiseOperation::BIAS_BNORM_CLAMP}}};