composable_kernel/experimental/builder/test/validation/test_reference_execution.cpp
Robin Voetter 1040d9b1f5 [CK_BUILDER] Replace reference conv with old ck implementation (#3604)
* ck-builder: remove SPATIAL_DIM parameter from ConvTensorLayouts

This information is already in the SIGNATURE, so it's pointless to pass it
separately. This streamlines the interface of those functions a bit. It also
touches up the style of those files in general.

* ck-builder: implement reference conv using old ck

The old ck implementation is more fully featured and better tested.

* ck-builder: replace test_reference_execution reference with old ck

This strips out the ck-tile GPU reference implementation completely.

* ck-builder: clean up test_reference_execution

- Remove unnecessary messages
- Replace EXPECT_TRUE(true) with EXPECT_NO_THROW()
2026-01-21 19:18:47 +01:00


// Copyright (c) Advanced Micro Devices, Inc., or its affiliates.
// SPDX-License-Identifier: MIT
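// Smoke tests and numerical checks for the ck-builder reference convolution algorithm,
// which wraps the old ck naive GPU reference kernels (naive_conv_fwd / bwd_data / bwd_weight).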
#include "ck_tile/builder/conv_builder.hpp"
#include "ck_tile/builder/types.hpp"
#include "impl/conv_algorithm_types.hpp"
#include "utils/ckb_conv_test_configs.hpp"
#include "ck/library/reference_tensor_operation/gpu/naive_conv_fwd_gpu.hpp"
#include "ck/library/reference_tensor_operation/gpu/naive_conv_bwd_weight_gpu.hpp"
#include "ck/library/reference_tensor_operation/gpu/naive_conv_bwd_data_gpu.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/check_err.hpp"
#include <gtest/gtest.h>
#include <vector>
namespace {
using namespace ck_tile::builder;
using namespace ck_tile::builder::test; // For ConvAlgorithm_Reference
using namespace ck_tile::builder::test_utils;
TEST(ReferenceExecution, Forward_2D_FP16)
{
// Note: When you don't specify .operation, it defaults to PassThrough
// Reference implementation only supports PassThrough elementwise operations
constexpr ConvSignature sig{.spatial_dim = 2,
.direction = ConvDirection::FORWARD,
.data_type = DataType::FP16,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = TensorLayout::GNHWC}},
.weight = {.config = {.layout = TensorLayout::GKYXC}},
.output = {.config = {.layout = TensorLayout::GNHWK}}};
constexpr auto ref_alg = ConvAlgorithm_Reference{};
using RefKernel = ConvBuilder<sig, ref_alg>::Instance;
// Simple dimensions
const int G = 1, N = 2, C = 4, K = 4, H = 3, W = 3;
// Allocate minimal device memory (just to test API)
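// A 3x3 filter with stride 1, dilation 1 and padding 1 (set below) preserves the
// spatial dims, so the output buffer uses H and W directly.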
const size_t in_size = G * N * C * H * W * sizeof(ck::half_t);
const size_t wei_size = G * K * C * 3 * 3 * sizeof(ck::half_t);
const size_t out_size = G * N * K * H * W * sizeof(ck::half_t);
ck::DeviceMem in_dev(in_size);
ck::DeviceMem wei_dev(wei_size);
ck::DeviceMem out_dev(out_size);
in_dev.SetZero();
wei_dev.SetZero();
out_dev.SetZero();
// Prepare parameters for Run()
std::vector<ck_tile::long_index_t> input_spatial{H, W};
std::vector<ck_tile::long_index_t> filter_spatial{3, 3};
std::vector<ck_tile::long_index_t> strides{1, 1};
std::vector<ck_tile::long_index_t> dilations{1, 1};
std::vector<ck_tile::long_index_t> left_pads{1, 1};
std::vector<ck_tile::long_index_t> right_pads{1, 1};
RefKernel ref_kernel;
EXPECT_NO_THROW(ref_kernel.Run(reinterpret_cast<const ck::half_t*>(in_dev.GetDeviceBuffer()),
reinterpret_cast<const ck::half_t*>(wei_dev.GetDeviceBuffer()),
reinterpret_cast<ck::half_t*>(out_dev.GetDeviceBuffer()),
G,
N,
K,
C,
input_spatial,
filter_spatial,
strides,
dilations,
left_pads,
right_pads));
}
TEST(ReferenceExecution, BackwardData_2D_FP16)
{
// Note: When you don't specify .operation, it defaults to PassThrough
// Reference implementation only supports PassThrough elementwise operations
constexpr ConvSignature sig{.spatial_dim = 2,
.direction = ConvDirection::BACKWARD_DATA,
.data_type = DataType::FP16,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = TensorLayout::GNHWC}},
.weight = {.config = {.layout = TensorLayout::GKYXC}},
.output = {.config = {.layout = TensorLayout::GNHWK}}};
constexpr auto ref_alg = ConvAlgorithm_Reference{};
using RefKernel = ConvBuilder<sig, ref_alg>::Instance;
const int G = 1, N = 2, C = 4, K = 4, H = 3, W = 3;
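// For backward data the input gradient is the buffer written by the kernel;
// the weight and output gradient buffers are read-only inputs.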
const size_t in_grad_size = G * N * C * H * W * sizeof(ck::half_t);
const size_t wei_size = G * K * C * 3 * 3 * sizeof(ck::half_t);
const size_t out_grad_size = G * N * K * H * W * sizeof(ck::half_t);
ck::DeviceMem in_grad_dev(in_grad_size);
ck::DeviceMem wei_dev(wei_size);
ck::DeviceMem out_grad_dev(out_grad_size);
in_grad_dev.SetZero();
wei_dev.SetZero();
out_grad_dev.SetZero();
std::vector<ck_tile::long_index_t> input_spatial{H, W};
std::vector<ck_tile::long_index_t> filter_spatial{3, 3};
std::vector<ck_tile::long_index_t> strides{1, 1};
std::vector<ck_tile::long_index_t> dilations{1, 1};
std::vector<ck_tile::long_index_t> left_pads{1, 1};
std::vector<ck_tile::long_index_t> right_pads{1, 1};
RefKernel ref_kernel;
EXPECT_NO_THROW(
ref_kernel.Run(reinterpret_cast<ck::half_t*>(in_grad_dev.GetDeviceBuffer()),
reinterpret_cast<const ck::half_t*>(wei_dev.GetDeviceBuffer()),
reinterpret_cast<const ck::half_t*>(out_grad_dev.GetDeviceBuffer()),
G,
N,
K,
C,
input_spatial,
filter_spatial,
strides,
dilations,
left_pads,
right_pads));
}
TEST(ReferenceExecution, BackwardWeight_2D_FP16)
{
// Note: When you don't specify .operation, it defaults to PassThrough
// Reference implementation only supports PassThrough elementwise operations
constexpr ConvSignature sig{.spatial_dim = 2,
.direction = ConvDirection::BACKWARD_WEIGHT,
.data_type = DataType::FP16,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = TensorLayout::GNHWC}},
.weight = {.config = {.layout = TensorLayout::GKYXC}},
.output = {.config = {.layout = TensorLayout::GNHWK}}};
constexpr auto ref_alg = ConvAlgorithm_Reference{};
using RefKernel = ConvBuilder<sig, ref_alg>::Instance;
const int G = 1, N = 2, C = 4, K = 4, H = 3, W = 3;
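// For backward weight the weight gradient is the buffer written by the kernel;
// the input and output gradient buffers are read-only inputs.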
const size_t in_size = G * N * C * H * W * sizeof(ck::half_t);
const size_t wei_grad_size = G * K * C * 3 * 3 * sizeof(ck::half_t);
const size_t out_grad_size = G * N * K * H * W * sizeof(ck::half_t);
ck::DeviceMem in_dev(in_size);
ck::DeviceMem wei_grad_dev(wei_grad_size);
ck::DeviceMem out_grad_dev(out_grad_size);
in_dev.SetZero();
wei_grad_dev.SetZero();
out_grad_dev.SetZero();
std::vector<ck_tile::long_index_t> input_spatial{H, W};
std::vector<ck_tile::long_index_t> filter_spatial{3, 3};
std::vector<ck_tile::long_index_t> strides{1, 1};
std::vector<ck_tile::long_index_t> dilations{1, 1};
std::vector<ck_tile::long_index_t> left_pads{1, 1};
std::vector<ck_tile::long_index_t> right_pads{1, 1};
RefKernel ref_kernel;
EXPECT_NO_THROW(
ref_kernel.Run(reinterpret_cast<const ck::half_t*>(in_dev.GetDeviceBuffer()),
reinterpret_cast<ck::half_t*>(wei_grad_dev.GetDeviceBuffer()),
reinterpret_cast<const ck::half_t*>(out_grad_dev.GetDeviceBuffer()),
G,
N,
K,
C,
input_spatial,
filter_spatial,
strides,
dilations,
left_pads,
right_pads));
}
// Test Builder Reference vs Direct GPU Reference with RANDOM INPUT
TEST(ReferenceExecution, Forward_2D_FP16_Builder_vs_DirectGPUReference_Random)
{
constexpr ConvSignature sig{.spatial_dim = 2,
.direction = ConvDirection::FORWARD,
.data_type = DataType::FP16,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = TensorLayout::NHWGC}},
.weight = {.config = {.layout = TensorLayout::GKYXC}},
.output = {.config = {.layout = TensorLayout::NHWGK}}};
constexpr auto ref_alg = ConvAlgorithm_Reference{};
using RefKernel = ConvBuilder<sig, ref_alg>::Instance;
const int G = 1, N = 2, C = 16, K = 16, H = 14, W = 14; // Small for fast testing
const size_t in_size = G * N * C * H * W * sizeof(ck::half_t);
const size_t wei_size = G * K * C * 3 * 3 * sizeof(ck::half_t);
const size_t out_size = G * N * K * H * W * sizeof(ck::half_t);
// Create host buffers with random data
const size_t in_elements = G * N * C * H * W;
const size_t wei_elements = G * K * C * 3 * 3;
const size_t out_elements = G * N * K * H * W;
std::vector<ck::half_t> in_host(in_elements);
std::vector<ck::half_t> wei_host(wei_elements);
// Fill with random values
std::srand(12345); // Fixed seed for reproducibility
for(size_t i = 0; i < in_elements; i++)
{
in_host[i] = ck::half_t(static_cast<float>(std::rand()) / RAND_MAX * 2.0f - 1.0f);
}
for(size_t i = 0; i < wei_elements; i++)
{
wei_host[i] = ck::half_t(static_cast<float>(std::rand()) / RAND_MAX * 2.0f - 1.0f);
}
// Allocate GPU memory
ck::DeviceMem in_dev(in_size);
ck::DeviceMem wei_dev(wei_size);
ck::DeviceMem out_builder_dev(out_size);
ck::DeviceMem out_naive_dev(out_size);
// Transfer random data to GPU
in_dev.ToDevice(in_host.data());
wei_dev.ToDevice(wei_host.data());
out_builder_dev.SetZero();
out_naive_dev.SetZero();
std::vector<ck_tile::long_index_t> input_spatial{H, W};
std::vector<ck_tile::long_index_t> filter_spatial{3, 3};
std::vector<ck_tile::long_index_t> strides{1, 1};
std::vector<ck_tile::long_index_t> dilations{1, 1};
std::vector<ck_tile::long_index_t> left_pads{1, 1};
std::vector<ck_tile::long_index_t> right_pads{1, 1};
RefKernel builder_kernel;
// Run 1: Builder Reference Factory
builder_kernel.Run(reinterpret_cast<const ck::half_t*>(in_dev.GetDeviceBuffer()),
reinterpret_cast<const ck::half_t*>(wei_dev.GetDeviceBuffer()),
reinterpret_cast<ck::half_t*>(out_builder_dev.GetDeviceBuffer()),
G,
N,
K,
C,
input_spatial,
filter_spatial,
strides,
dilations,
left_pads,
right_pads);
// Run 2: Direct GPU Reference (same kernel the Builder calls internally!)
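// Note: ConvParam is constructed with the filter spatial lengths before the input
// spatial lengths, the reverse of the argument order used by RefKernel::Run above.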
ck::ref::naive_conv_fwd<ck::tensor_layout::convolution::NHWGC,
ck::tensor_layout::convolution::GKYXC,
ck::tensor_layout::convolution::NHWGK,
ck::half_t,
ck::half_t,
ck::half_t,
ck::tensor_operation::element_wise::PassThrough,
ck::tensor_operation::element_wise::PassThrough,
ck::tensor_operation::element_wise::PassThrough>(
reinterpret_cast<const ck::half_t*>(in_dev.GetDeviceBuffer()),
reinterpret_cast<const ck::half_t*>(wei_dev.GetDeviceBuffer()),
reinterpret_cast<ck::half_t*>(out_naive_dev.GetDeviceBuffer()),
ck::utils::conv::ConvParam(2,
G,
N,
K,
C,
filter_spatial,
input_spatial,
strides,
dilations,
left_pads,
right_pads));
// Copy results back
std::vector<ck::half_t> out_builder_result(out_elements);
std::vector<ck::half_t> out_naive_result(out_elements);
out_builder_dev.FromDevice(out_builder_result.data());
out_naive_dev.FromDevice(out_naive_result.data());
// Compare - should be IDENTICAL (both call same kernel)
EXPECT_TRUE(ck::utils::check_err(out_builder_result,
out_naive_result,
"Error: Builder Reference != Direct GPU Reference",
1e-6,
1e-6)); // Very tight tolerance!
}
// Test Builder Reference vs Direct GPU Reference with RANDOM INPUT - Backward Data
TEST(ReferenceExecution, BackwardData_2D_FP16_Builder_vs_DirectGPUReference_Random)
{
constexpr ConvSignature sig{.spatial_dim = 2,
.direction = ConvDirection::BACKWARD_DATA,
.data_type = DataType::FP16,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = TensorLayout::NHWGC}},
.weight = {.config = {.layout = TensorLayout::GKYXC}},
.output = {.config = {.layout = TensorLayout::NHWGK}}};
constexpr auto ref_alg = ConvAlgorithm_Reference{};
using RefKernel = ConvBuilder<sig, ref_alg>::Instance;
const int G = 1, N = 2, C = 16, K = 16, H = 14, W = 14;
const size_t in_grad_size = G * N * C * H * W * sizeof(ck::half_t);
const size_t wei_size = G * K * C * 3 * 3 * sizeof(ck::half_t);
const size_t out_grad_size = G * N * K * H * W * sizeof(ck::half_t);
const size_t in_grad_elements = G * N * C * H * W;
const size_t wei_elements = G * K * C * 3 * 3;
const size_t out_grad_elements = G * N * K * H * W;
std::vector<ck::half_t> wei_host(wei_elements);
std::vector<ck::half_t> out_grad_host(out_grad_elements);
// Fill with random values
std::srand(12346);
for(size_t i = 0; i < wei_elements; i++)
{
wei_host[i] = ck::half_t(static_cast<float>(std::rand()) / RAND_MAX * 2.0f - 1.0f);
}
for(size_t i = 0; i < out_grad_elements; i++)
{
out_grad_host[i] = ck::half_t(static_cast<float>(std::rand()) / RAND_MAX * 2.0f - 1.0f);
}
ck::DeviceMem in_grad_builder_dev(in_grad_size);
ck::DeviceMem in_grad_naive_dev(in_grad_size);
ck::DeviceMem wei_dev(wei_size);
ck::DeviceMem out_grad_dev(out_grad_size);
wei_dev.ToDevice(wei_host.data());
out_grad_dev.ToDevice(out_grad_host.data());
in_grad_builder_dev.SetZero();
in_grad_naive_dev.SetZero();
std::vector<ck_tile::long_index_t> input_spatial{H, W};
std::vector<ck_tile::long_index_t> filter_spatial{3, 3};
std::vector<ck_tile::long_index_t> strides{1, 1};
std::vector<ck_tile::long_index_t> dilations{1, 1};
std::vector<ck_tile::long_index_t> left_pads{1, 1};
std::vector<ck_tile::long_index_t> right_pads{1, 1};
RefKernel builder_kernel;
// Run 1: Builder Reference Factory
builder_kernel.Run(reinterpret_cast<ck::half_t*>(in_grad_builder_dev.GetDeviceBuffer()),
reinterpret_cast<const ck::half_t*>(wei_dev.GetDeviceBuffer()),
reinterpret_cast<const ck::half_t*>(out_grad_dev.GetDeviceBuffer()),
G,
N,
K,
C,
input_spatial,
filter_spatial,
strides,
dilations,
left_pads,
right_pads);
// Run 2: Direct GPU Reference
ck::ref::naive_conv_bwd_data<ck::tensor_layout::convolution::NHWGC,
ck::tensor_layout::convolution::GKYXC,
ck::tensor_layout::convolution::NHWGK,
ck::half_t,
ck::half_t,
ck::half_t,
ck::tensor_operation::element_wise::PassThrough,
ck::tensor_operation::element_wise::PassThrough,
ck::tensor_operation::element_wise::PassThrough>(
reinterpret_cast<ck::half_t*>(in_grad_naive_dev.GetDeviceBuffer()),
reinterpret_cast<const ck::half_t*>(wei_dev.GetDeviceBuffer()),
reinterpret_cast<const ck::half_t*>(out_grad_dev.GetDeviceBuffer()),
ck::utils::conv::ConvParam(2,
G,
N,
K,
C,
filter_spatial,
input_spatial,
strides,
dilations,
left_pads,
right_pads));
// Compare
std::vector<ck::half_t> in_grad_builder_result(in_grad_elements);
std::vector<ck::half_t> in_grad_naive_result(in_grad_elements);
in_grad_builder_dev.FromDevice(in_grad_builder_result.data());
in_grad_naive_dev.FromDevice(in_grad_naive_result.data());
EXPECT_TRUE(ck::utils::check_err(in_grad_builder_result,
in_grad_naive_result,
"Error: Builder Backward Data != Direct GPU Reference",
1e-6,
1e-6));
}
// Test Builder Reference vs Direct GPU Reference with RANDOM INPUT - Backward Weight
TEST(ReferenceExecution, BackwardWeight_2D_FP16_Builder_vs_DirectGPUReference_Random)
{
constexpr ConvSignature sig{.spatial_dim = 2,
.direction = ConvDirection::BACKWARD_WEIGHT,
.data_type = DataType::FP16,
.accumulation_data_type = DataType::FP32,
.input = {.config = {.layout = TensorLayout::NHWGC}},
.weight = {.config = {.layout = TensorLayout::GKYXC}},
.output = {.config = {.layout = TensorLayout::NHWGK}}};
constexpr auto ref_alg = ConvAlgorithm_Reference{};
using RefKernel = ConvBuilder<sig, ref_alg>::Instance;
const int G = 1, N = 2, C = 16, K = 16, H = 14, W = 14;
const size_t in_size = G * N * C * H * W * sizeof(ck::half_t);
const size_t wei_grad_size = G * K * C * 3 * 3 * sizeof(ck::half_t);
const size_t out_grad_size = G * N * K * H * W * sizeof(ck::half_t);
const size_t in_elements = G * N * C * H * W;
const size_t wei_grad_elements = G * K * C * 3 * 3;
const size_t out_grad_elements = G * N * K * H * W;
std::vector<ck::half_t> in_host(in_elements);
std::vector<ck::half_t> out_grad_host(out_grad_elements);
// Fill with random values
std::srand(12347);
for(size_t i = 0; i < in_elements; i++)
{
in_host[i] = ck::half_t(static_cast<float>(std::rand()) / RAND_MAX * 2.0f - 1.0f);
}
for(size_t i = 0; i < out_grad_elements; i++)
{
out_grad_host[i] = ck::half_t(static_cast<float>(std::rand()) / RAND_MAX * 2.0f - 1.0f);
}
ck::DeviceMem in_dev(in_size);
ck::DeviceMem wei_grad_builder_dev(wei_grad_size);
ck::DeviceMem wei_grad_naive_dev(wei_grad_size);
ck::DeviceMem out_grad_dev(out_grad_size);
in_dev.ToDevice(in_host.data());
out_grad_dev.ToDevice(out_grad_host.data());
wei_grad_builder_dev.SetZero();
wei_grad_naive_dev.SetZero();
std::vector<ck_tile::long_index_t> input_spatial{H, W};
std::vector<ck_tile::long_index_t> filter_spatial{3, 3};
std::vector<ck_tile::long_index_t> strides{1, 1};
std::vector<ck_tile::long_index_t> dilations{1, 1};
std::vector<ck_tile::long_index_t> left_pads{1, 1};
std::vector<ck_tile::long_index_t> right_pads{1, 1};
RefKernel builder_kernel;
// Run 1: Builder Reference Factory
builder_kernel.Run(reinterpret_cast<const ck::half_t*>(in_dev.GetDeviceBuffer()),
reinterpret_cast<ck::half_t*>(wei_grad_builder_dev.GetDeviceBuffer()),
reinterpret_cast<const ck::half_t*>(out_grad_dev.GetDeviceBuffer()),
G,
N,
K,
C,
input_spatial,
filter_spatial,
strides,
dilations,
left_pads,
right_pads);
// Run 2: Direct GPU Reference
ck::ref::naive_conv_bwd_weight<ck::tensor_layout::convolution::NHWGC,
ck::tensor_layout::convolution::GKYXC,
ck::tensor_layout::convolution::NHWGK,
ck::half_t,
ck::half_t,
ck::half_t,
ck::tensor_operation::element_wise::PassThrough,
ck::tensor_operation::element_wise::PassThrough,
ck::tensor_operation::element_wise::PassThrough>(
reinterpret_cast<const ck::half_t*>(in_dev.GetDeviceBuffer()),
reinterpret_cast<ck::half_t*>(wei_grad_naive_dev.GetDeviceBuffer()),
reinterpret_cast<const ck::half_t*>(out_grad_dev.GetDeviceBuffer()),
ck::utils::conv::ConvParam(2,
G,
N,
K,
C,
filter_spatial,
input_spatial,
strides,
dilations,
left_pads,
right_pads));
// Compare
std::vector<ck::half_t> wei_grad_builder_result(wei_grad_elements);
std::vector<ck::half_t> wei_grad_naive_result(wei_grad_elements);
wei_grad_builder_dev.FromDevice(wei_grad_builder_result.data());
wei_grad_naive_dev.FromDevice(wei_grad_naive_result.data());
EXPECT_TRUE(ck::utils::check_err(wei_grad_builder_result,
wei_grad_naive_result,
"Error: Builder Backward Weight != Direct GPU Reference",
1e-6,
1e-6));
}
} // namespace