Add grouped convnd dataset tests for bwd_data, bwd_weight and make them parallel (#3380)

* Parallelization in dataset generation

* Parallelizable tests for fwd, bwd data, bwd weight with datasets

* .gitignore generated datasets

* Test parallelization script with round-robin GPU scheduling

* Parallelization updates to test generation and running

* Dataset paths relative to executable

* Update output from test generation

* Default to one GPU in test generation

* Add small dataset tests to Jenkins

* Update copyright lines

* Update test_data/generate_test_dataset.sh

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Move trap disable

* Common get path function

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

[ROCm/composable_kernel commit: fe35ba5dac]
This commit is contained in:
Johannes Graner
2025-12-15 13:38:25 +01:00
committed by GitHub
parent a45c051ac9
commit 2fe4c8acec
10 changed files with 2349 additions and 331 deletions

5
.gitignore vendored
View File

@@ -83,6 +83,11 @@ __pycache__/
.cache/
# Generated test data
test_data/*
!test_data/*.py
!test_data/*.sh
# Exceptions to build* patterns above
# The experimental/builder directory should be tracked despite matching build*
!experimental/builder

10
Jenkinsfile vendored
View File

@@ -1476,15 +1476,19 @@ pipeline {
setup_args = "NO_CK_BUILD"
execute_args = """ cd ../build && \
../script/cmake-ck-dev.sh ../ gfx90a && \
make -j64 test_grouped_convnd_fwd_dataset_xdl && \
make -j64 test_grouped_convnd_fwd_dataset_xdl \
test_grouped_convnd_bwd_data_dataset_xdl \
test_grouped_convnd_bwd_weight_dataset_xdl && \
cd ../test_data && \
# Dataset generation modes:
# - small: ~60 test cases (minimal, quick testing - 3 models, 2 batch sizes, 2 image sizes)
# - half: ~300 test cases (moderate coverage - 16 models, 3 batch sizes, 5 image sizes), ~ 17 hours testing time
# - full: ~600 test cases (comprehensive - 16 models, 5 batch sizes, 9 image sizes), ~ 40 hours testing time
./generate_test_dataset.sh half && \
./generate_test_dataset.sh small && \
cd ../build && \
./bin/test_grouped_convnd_fwd_dataset_xdl"""
./bin/test_grouped_convnd_fwd_dataset_xdl && \
./bin/test_grouped_convnd_bwd_data_dataset_xdl && \
./bin/test_grouped_convnd_bwd_weight_dataset_xdl"""
}
steps{
buildHipClangJobAndReboot(setup_args:setup_args, build_type: 'Release', execute_cmd: execute_args)

View File

@@ -0,0 +1,246 @@
// Copyright (c) Advanced Micro Devices, Inc., or its affiliates.
// SPDX-License-Identifier: MIT
#pragma once
#include <fstream>
#include <filesystem>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
#include "ck/library/utility/convolution_parameter.hpp"
namespace ck {
namespace test {
namespace fs = std::filesystem;
// Helper function to find the test_data directory relative to the test binary.
// Walks up from the executable's directory looking for a "test_data" folder, so
// the lookup works regardless of build-directory depth.
// Returns the absolute path on success, or "" (after logging an error) on failure.
static std::string GetTestDataPath()
{
    namespace fs = std::filesystem;
    // Resolve the running executable via /proc/self/exe (Linux-specific).
    // Use the error_code overload so a failure is reported instead of throwing.
    std::error_code ec;
    const fs::path exe_path = fs::read_symlink("/proc/self/exe", ec);
    if(ec)
    {
        std::cerr << "ERROR: Could not resolve executable path: " << ec.message() << std::endl;
        return "";
    }
    // Search for test_data by going up the directory tree. Unlike the previous
    // loop condition (current != root), this also checks the root directory
    // itself before giving up.
    fs::path current_dir = exe_path.parent_path();
    while(true)
    {
        const fs::path candidate = current_dir / "test_data";
        if(fs::exists(candidate, ec) && fs::is_directory(candidate, ec))
        {
            return candidate.string();
        }
        if(current_dir == current_dir.root_path())
        {
            break; // root reached and checked; nothing found
        }
        current_dir = current_dir.parent_path();
    }
    std::cerr << "ERROR: Could not find test_data directory relative to executable" << std::endl;
    return "";
}
// CSV Reader Function for Loading Test Cases
// Reads convolution parameters from a CSV file and returns a vector of ConvParam
// structures. Comment lines (starting with '#'), empty lines and the header line
// are skipped; malformed lines are reported and skipped.
//
// CSV Format (2D):
// NDim,Groups,BatchSize,OutChannels,InChannels,KernelH,KernelW,InputH,InputW,OutputH,OutputW,StrideH,StrideW,DilationH,DilationW,LeftPadH,LeftPadW,RightPadH,RightPadW,TestName
// 1D rows use only the W column of each spatial group; 3D rows add a leading D
// column to each spatial group.
//
// Fix: column validation is now performed per NDim. The previous code rejected
// every row with fewer than 19 columns before inspecting NDim, which made the 1D
// branch unreachable for legitimate 1D rows, while the 1D branch's own "< 13"
// check was too weak (it reads up to row[17], i.e. needs 18 columns).
inline std::vector<ck::utils::conv::ConvParam> load_csv_test_cases(const std::string& filename)
{
    std::vector<ck::utils::conv::ConvParam> conv_params; // Return vector
    std::ifstream file(filename);                        // Open CSV file (closed by RAII)
    if(!file.is_open())
    {
        std::cerr << "ERROR: Cannot open CSV file: " << filename << std::endl;
        return conv_params; // Return empty vector on error
    }
    std::string line;
    int line_number = 0;
    // Read file line by line
    while(std::getline(file, line))
    {
        line_number++;
        // Skip comment lines (starting with #) and empty lines
        if(line.empty() || line[0] == '#')
        {
            continue;
        }
        // Skip header line (contains column names)
        if(line.find("NDim,Groups,BatchSize") != std::string::npos)
        {
            continue;
        }
        // Split line by commas
        std::stringstream ss(line);
        std::string cell;
        std::vector<std::string> row;
        while(std::getline(ss, cell, ','))
        {
            row.push_back(cell);
        }
        // Need at least the five descriptor columns (NDim..InChannels)
        if(row.size() < 5)
        {
            std::cerr << "WARNING: Line " << line_number << " has insufficient columns ("
                      << row.size() << "), skipping" << std::endl;
            continue;
        }
        try
        {
            // Parse column i as int (std::stoi throws on malformed cells; the
            // catch block below reports and skips the line)
            const auto geti = [&row](std::size_t i) { return std::stoi(row[i]); };

            const int NDim        = geti(0);
            const int Groups      = geti(1);
            const int BatchSize   = geti(2);
            const int OutChannels = geti(3);
            const int InChannels  = geti(4);

            if(NDim < 1 || NDim > 3)
            {
                std::cerr << "WARNING: Unsupported NDim=" << NDim << " on line " << line_number
                          << ", skipping" << std::endl;
                continue;
            }
            // Highest column index read per dimension:
            // 1D -> row[17] (18 cols), 2D -> row[18] (19 cols), 3D -> row[25] (26 cols)
            const std::size_t required_cols = (NDim == 1) ? 18 : (NDim == 2) ? 19 : 26;
            if(row.size() < required_cols)
            {
                std::cerr << "WARNING: " << NDim << "D convolution on line " << line_number
                          << " needs " << required_cols << "+ columns, has " << row.size()
                          << ", skipping" << std::endl;
                continue;
            }
            if(NDim == 1)
            {
                // 1D Convolution: {NDim, Groups, BatchSize, OutChannels, InChannels,
                // {KernelW}, {InputW}, {StrideW}, {DilationW}, {LeftPadW}, {RightPadW}}
                ck::utils::conv::ConvParam param = {NDim,
                                                    Groups,
                                                    BatchSize,
                                                    OutChannels,
                                                    InChannels,
                                                    {geti(5)},   // Kernel: {W}
                                                    {geti(7)},   // Input: {W}
                                                    {geti(11)},  // Stride: {W}
                                                    {geti(13)},  // Dilation: {W}
                                                    {geti(15)},  // Left pad: {W}
                                                    {geti(17)}}; // Right pad: {W}
                conv_params.push_back(param);
            }
            else if(NDim == 2)
            {
                // 2D Convolution: {NDim, Groups, BatchSize, OutChannels, InChannels,
                // {KernelH,KernelW}, {InputH,InputW}, {StrideH,StrideW},
                // {DilationH,DilationW}, {LeftPadH,LeftPadW}, {RightPadH,RightPadW}}
                ck::utils::conv::ConvParam param = {NDim,
                                                    Groups,
                                                    BatchSize,
                                                    OutChannels,
                                                    InChannels,
                                                    {geti(5), geti(6)},    // Kernel: {H, W}
                                                    {geti(7), geti(8)},    // Input: {H, W}
                                                    {geti(11), geti(12)},  // Stride: {H, W}
                                                    {geti(13), geti(14)},  // Dilation: {H, W}
                                                    {geti(15), geti(16)},  // Left pad: {H, W}
                                                    {geti(17), geti(18)}}; // Right pad: {H, W}
                conv_params.push_back(param);
            }
            else // NDim == 3
            {
                // 3D Convolution: {NDim, Groups, BatchSize, OutChannels, InChannels,
                // {KernelD,KernelH,KernelW}, {InputD,InputH,InputW},
                // {StrideD,StrideH,StrideW}, {DilationD,DilationH,DilationW},
                // {LeftPadD,LeftPadH,LeftPadW}, {RightPadD,RightPadH,RightPadW}}
                ck::utils::conv::ConvParam param = {
                    NDim,
                    Groups,
                    BatchSize,
                    OutChannels,
                    InChannels,
                    {geti(5), geti(6), geti(7)},     // Kernel: {D, H, W}
                    {geti(8), geti(9), geti(10)},    // Input: {D, H, W}
                    {geti(14), geti(15), geti(16)},  // Stride: {D, H, W}
                    {geti(17), geti(18), geti(19)},  // Dilation: {D, H, W}
                    {geti(20), geti(21), geti(22)},  // Left pad: {D, H, W}
                    {geti(23), geti(24), geti(25)}}; // Right pad: {D, H, W}
                conv_params.push_back(param);
            }
        }
        catch(const std::exception& e)
        {
            std::cerr << "ERROR: Failed to parse line " << line_number << ": " << e.what()
                      << std::endl;
            continue;
        }
    }
    std::cout << "Loaded " << conv_params.size() << " test cases from " << filename << std::endl;
    return conv_params;
}
// Helper function to load CSV test cases and populate conv_params vector.
// Each candidate path is tried in order; the first one yielding a non-empty
// dataset wins. Returns true if loading succeeded, false otherwise.
inline bool load_and_populate_test_cases(const std::vector<std::string>& csv_paths,
                                         std::vector<ck::utils::conv::ConvParam>& conv_params,
                                         const std::string& dimension_label)
{
    for(const auto& candidate : csv_paths)
    {
        const auto loaded_cases = load_csv_test_cases(candidate);
        if(loaded_cases.empty())
        {
            continue; // try the next candidate location
        }
        // Append every parsed case to the caller's vector in one shot
        conv_params.insert(conv_params.end(), loaded_cases.begin(), loaded_cases.end());
        std::cout << "Loaded " << loaded_cases.size() << " " << dimension_label
                  << " test cases from " << candidate << std::endl;
        return true;
    }
    // No candidate produced data - report every location that was tried
    std::cerr << "ERROR: Failed to load CSV test data from any of these locations:" << std::endl;
    for(const auto& path : csv_paths)
    {
        std::cerr << " - " << path << std::endl;
    }
    std::cerr << "\nPlease ensure CSV test data exists in one of these locations." << std::endl;
    std::cerr << "Run generate_test_dataset.sh in test_data/ to create test datasets." << std::endl;
    return false;
}
} // namespace test
} // namespace ck

View File

@@ -9,6 +9,10 @@ if(GPU_TARGETS MATCHES "gfx9|gfx11|gfx12")
add_executable(test_grouped_convnd_bwd_data_xdl_large_cases test_grouped_convnd_bwd_data_xdl_large_cases.cpp)
target_compile_options(test_grouped_convnd_bwd_data_xdl_large_cases PRIVATE -Wno-global-constructors -Wno-undef)
target_link_libraries(test_grouped_convnd_bwd_data_xdl_large_cases PRIVATE gtest_main getopt::getopt utility device_grouped_conv2d_bwd_data_instance device_grouped_conv3d_bwd_data_instance)
add_executable(test_grouped_convnd_bwd_data_dataset_xdl test_grouped_convnd_bwd_data_dataset_xdl.cpp)
target_compile_options(test_grouped_convnd_bwd_data_dataset_xdl PRIVATE -Wno-global-constructors -Wno-undef)
target_link_libraries(test_grouped_convnd_bwd_data_dataset_xdl PRIVATE gtest_main getopt::getopt utility device_grouped_conv2d_bwd_data_instance device_grouped_conv3d_bwd_data_instance)
endif()
add_gtest_executable(test_grouped_convnd_bwd_data_wmma test_grouped_convnd_bwd_data_wmma.cpp)
if(result EQUAL 0)

View File

@@ -0,0 +1,317 @@
// Copyright (c) Advanced Micro Devices, Inc., or its affiliates.
// SPDX-License-Identifier: MIT
#include <cstdlib> // Standard C library (exit codes, malloc)
#include <iostream> // C++ I/O streams (cout, cerr)
#include <initializer_list> // C++ initializer list support (unused here)
#include <vector> // C++ vector container - stores test cases
#include <string> // String operations
#include <gtest/gtest.h> // Google Test framework - provides TEST_P, INSTANTIATE_TEST_SUITE_P
#include "profiler/profile_grouped_conv_bwd_data_impl.hpp" // The actual GPU profiler that does convolution work
#include "../common/csv_test_loader.hpp" // Shared CSV test case loader
using namespace ck::tensor_layout::convolution; // Import tensor layout names (GNHWK, GKYXC, etc.)
// Load CSV data for 2D tests
static std::vector<ck::utils::conv::ConvParam> Get2DTestCases()
{
static std::vector<ck::utils::conv::ConvParam> test_cases;
if(test_cases.empty())
{
std::string test_data_dir = ck::test::GetTestDataPath();
if(test_data_dir.empty())
{
std::cerr << "FATAL: test_data directory not found" << std::endl;
return test_cases;
}
std::vector<std::string> csv_paths = {test_data_dir + "/conv_test_set_2d_dataset.csv"};
bool loaded = ck::test::load_and_populate_test_cases(csv_paths, test_cases, "2D");
if(!loaded)
{
std::cerr << "FATAL: Failed to load 2D test cases from " << csv_paths[0] << std::endl;
}
}
return test_cases;
}
// Load CSV data for 3D tests
static std::vector<ck::utils::conv::ConvParam> Get3DTestCases()
{
static std::vector<ck::utils::conv::ConvParam> test_cases;
if(test_cases.empty())
{
std::string test_data_dir = ck::test::GetTestDataPath();
if(test_data_dir.empty())
{
std::cerr << "FATAL: test_data directory not found" << std::endl;
return test_cases;
}
std::vector<std::string> csv_paths = {test_data_dir + "/conv_test_set_3d_dataset.csv"};
bool loaded = ck::test::load_and_populate_test_cases(csv_paths, test_cases, "3D");
if(!loaded)
{
std::cerr << "FATAL: Failed to load 3D test cases from " << csv_paths[0] << std::endl;
}
}
return test_cases;
}
// Helper template to run a single backward data convolution test with split_k.
// Wraps the profiler entry point with named, self-documenting argument values.
template <ck::index_t NDimSpatial,
          typename OutLayout,
          typename WeiLayout,
          typename InLayout,
          typename DataType>
bool RunConvBwdDataTest(const ck::utils::conv::ConvParam& param, ck::index_t split_k)
{
    constexpr bool do_verification = true;  // compare against host reference
    constexpr int init_method      = 1;     // tensor initialization scheme
    constexpr bool do_log          = false; // no verbose tensor dumps
    constexpr bool time_kernel     = false; // correctness run, no timing
    constexpr int instance_index   = -1;    // exercise all kernel instances
    return ck::profiler::profile_grouped_conv_bwd_data_impl<NDimSpatial,
                                                            OutLayout,
                                                            WeiLayout,
                                                            InLayout,
                                                            DataType,
                                                            DataType,
                                                            DataType>(
        do_verification, init_method, do_log, time_kernel, param, split_k, instance_index);
}
// Parameterized dataset test suites.
// Each CK_BWD_DATA_DATASET_TEST invocation declares the gtest fixture, the
// ConvTest body, and the "Dataset" instantiation for one
// (layout, data type, split_k) combination. Suite names are identical to the
// hand-expanded originals, so test filtering and reporting are unchanged.
#define CK_BWD_DATA_DATASET_TEST(suite_name, ndim, out_layout, wei_layout, in_layout, dtype, split_k, case_getter) \
    class suite_name : public ::testing::TestWithParam<ck::utils::conv::ConvParam>                                 \
    {                                                                                                              \
    };                                                                                                             \
    TEST_P(suite_name, ConvTest)                                                                                   \
    {                                                                                                              \
        EXPECT_TRUE((RunConvBwdDataTest<ndim, out_layout, wei_layout, in_layout, dtype>(                           \
            GetParam(), split_k)));                                                                                \
    }                                                                                                              \
    INSTANTIATE_TEST_SUITE_P(Dataset, suite_name, ::testing::ValuesIn(case_getter()))

// 2D Tests - GNHWK layout - {Float, Half, BFloat16} x {SplitK=1, SplitK=2}
CK_BWD_DATA_DATASET_TEST(TestGroupedConvndBwdData2dGNHWKFloatSplitK1, 2, GNHWK, GKYXC, GNHWC, float, 1, Get2DTestCases);
CK_BWD_DATA_DATASET_TEST(TestGroupedConvndBwdData2dGNHWKFloatSplitK2, 2, GNHWK, GKYXC, GNHWC, float, 2, Get2DTestCases);
CK_BWD_DATA_DATASET_TEST(TestGroupedConvndBwdData2dGNHWKHalfSplitK1, 2, GNHWK, GKYXC, GNHWC, ck::half_t, 1, Get2DTestCases);
CK_BWD_DATA_DATASET_TEST(TestGroupedConvndBwdData2dGNHWKHalfSplitK2, 2, GNHWK, GKYXC, GNHWC, ck::half_t, 2, Get2DTestCases);
CK_BWD_DATA_DATASET_TEST(TestGroupedConvndBwdData2dGNHWKBFloat16SplitK1, 2, GNHWK, GKYXC, GNHWC, ck::bhalf_t, 1, Get2DTestCases);
CK_BWD_DATA_DATASET_TEST(TestGroupedConvndBwdData2dGNHWKBFloat16SplitK2, 2, GNHWK, GKYXC, GNHWC, ck::bhalf_t, 2, Get2DTestCases);

// 2D Tests - NHWGK layout - {Float, Half, BFloat16} x {SplitK=1, SplitK=2}
CK_BWD_DATA_DATASET_TEST(TestGroupedConvndBwdData2dNHWGKFloatSplitK1, 2, NHWGK, GKYXC, NHWGC, float, 1, Get2DTestCases);
CK_BWD_DATA_DATASET_TEST(TestGroupedConvndBwdData2dNHWGKFloatSplitK2, 2, NHWGK, GKYXC, NHWGC, float, 2, Get2DTestCases);
CK_BWD_DATA_DATASET_TEST(TestGroupedConvndBwdData2dNHWGKHalfSplitK1, 2, NHWGK, GKYXC, NHWGC, ck::half_t, 1, Get2DTestCases);
CK_BWD_DATA_DATASET_TEST(TestGroupedConvndBwdData2dNHWGKHalfSplitK2, 2, NHWGK, GKYXC, NHWGC, ck::half_t, 2, Get2DTestCases);
CK_BWD_DATA_DATASET_TEST(TestGroupedConvndBwdData2dNHWGKBFloat16SplitK1, 2, NHWGK, GKYXC, NHWGC, ck::bhalf_t, 1, Get2DTestCases);
CK_BWD_DATA_DATASET_TEST(TestGroupedConvndBwdData2dNHWGKBFloat16SplitK2, 2, NHWGK, GKYXC, NHWGC, ck::bhalf_t, 2, Get2DTestCases);

// 3D Tests - NDHWGK layout - {Float, Half, BFloat16} x {SplitK=1, SplitK=2}
CK_BWD_DATA_DATASET_TEST(TestGroupedConvndBwdData3dNDHWGKFloatSplitK1, 3, NDHWGK, GKZYXC, NDHWGC, float, 1, Get3DTestCases);
CK_BWD_DATA_DATASET_TEST(TestGroupedConvndBwdData3dNDHWGKFloatSplitK2, 3, NDHWGK, GKZYXC, NDHWGC, float, 2, Get3DTestCases);
CK_BWD_DATA_DATASET_TEST(TestGroupedConvndBwdData3dNDHWGKHalfSplitK1, 3, NDHWGK, GKZYXC, NDHWGC, ck::half_t, 1, Get3DTestCases);
CK_BWD_DATA_DATASET_TEST(TestGroupedConvndBwdData3dNDHWGKHalfSplitK2, 3, NDHWGK, GKZYXC, NDHWGC, ck::half_t, 2, Get3DTestCases);
CK_BWD_DATA_DATASET_TEST(TestGroupedConvndBwdData3dNDHWGKBFloat16SplitK1, 3, NDHWGK, GKZYXC, NDHWGC, ck::bhalf_t, 1, Get3DTestCases);
CK_BWD_DATA_DATASET_TEST(TestGroupedConvndBwdData3dNDHWGKBFloat16SplitK2, 3, NDHWGK, GKZYXC, NDHWGC, ck::bhalf_t, 2, Get3DTestCases);

#undef CK_BWD_DATA_DATASET_TEST

View File

@@ -4,6 +4,10 @@
if(GPU_TARGETS MATCHES "gfx9|gfx11|gfx12")
add_gtest_executable(test_grouped_convnd_bwd_weight test_grouped_convnd_bwd_weight.cpp)
target_link_libraries(test_grouped_convnd_bwd_weight PRIVATE utility device_grouped_conv1d_bwd_weight_instance device_grouped_conv2d_bwd_weight_instance device_grouped_conv3d_bwd_weight_instance device_grouped_convnd_bwd_weight_instance)
add_executable(test_grouped_convnd_bwd_weight_dataset_xdl test_grouped_convnd_bwd_weight_dataset_xdl.cpp)
target_compile_options(test_grouped_convnd_bwd_weight_dataset_xdl PRIVATE -Wno-global-constructors -Wno-undef)
target_link_libraries(test_grouped_convnd_bwd_weight_dataset_xdl PRIVATE gtest_main getopt::getopt utility device_grouped_conv1d_bwd_weight_instance device_grouped_conv2d_bwd_weight_instance device_grouped_conv3d_bwd_weight_instance device_grouped_convnd_bwd_weight_instance)
elseif(DL_KERNELS)
add_gtest_executable(test_grouped_convnd_bwd_weight test_grouped_convnd_bwd_weight.cpp)
target_link_libraries(test_grouped_convnd_bwd_weight PRIVATE utility device_grouped_conv1d_bwd_weight_instance device_grouped_conv2d_bwd_weight_instance device_grouped_conv3d_bwd_weight_instance)

View File

@@ -0,0 +1,258 @@
// Copyright (c) Advanced Micro Devices, Inc., or its affiliates.
// SPDX-License-Identifier: MIT
#include <cstdlib> // Standard C library (exit codes, malloc)
#include <iostream> // C++ I/O streams (cout, cerr)
#include <initializer_list> // C++ initializer list support (unused here)
#include <vector> // C++ vector container - stores test cases
#include <string> // String operations
#include <gtest/gtest.h> // Google Test framework - provides TEST_P, INSTANTIATE_TEST_SUITE_P
#include "ck/utility/common_header.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/host_utility/device_prop.hpp"
#include "profiler/profile_grouped_conv_bwd_weight_impl.hpp" // The actual GPU profiler that does convolution work
#include "../common/csv_test_loader.hpp" // Shared CSV test case loader
using namespace ck::tensor_layout::convolution;
// Load CSV data for 2D tests
static std::vector<ck::utils::conv::ConvParam> Get2DTestCases()
{
static std::vector<ck::utils::conv::ConvParam> test_cases;
if(test_cases.empty())
{
std::string test_data_dir = ck::test::GetTestDataPath();
if(test_data_dir.empty())
{
std::cerr << "FATAL: test_data directory not found" << std::endl;
return test_cases;
}
std::vector<std::string> csv_paths = {test_data_dir + "/conv_test_set_2d_dataset.csv"};
bool loaded = ck::test::load_and_populate_test_cases(csv_paths, test_cases, "2D");
if(!loaded)
{
std::cerr << "FATAL: Failed to load 2D test cases from " << csv_paths[0] << std::endl;
}
}
return test_cases;
}
// Load CSV data for 3D tests
static std::vector<ck::utils::conv::ConvParam> Get3DTestCases()
{
static std::vector<ck::utils::conv::ConvParam> test_cases;
if(test_cases.empty())
{
std::string test_data_dir = ck::test::GetTestDataPath();
if(test_data_dir.empty())
{
std::cerr << "FATAL: test_data directory not found" << std::endl;
return test_cases;
}
std::vector<std::string> csv_paths = {test_data_dir + "/conv_test_set_3d_dataset.csv"};
bool loaded = ck::test::load_and_populate_test_cases(csv_paths, test_cases, "3D");
if(!loaded)
{
std::cerr << "FATAL: Failed to load 3D test cases from " << csv_paths[0] << std::endl;
}
}
return test_cases;
}
// Helper template to run a single backward weight convolution test.
// Wraps the profiler entry point with named, self-documenting argument values;
// note the profiler expects the split_k value as a string.
template <ck::index_t NDimSpatial,
          typename InLayout,
          typename WeiLayout,
          typename OutLayout,
          typename InDataType,
          typename WeiDataType,
          typename OutDataType>
bool RunConvBwdWeightTest(const ck::utils::conv::ConvParam& param, ck::index_t split_k)
{
    constexpr bool do_verification = true;  // compare against host reference
    constexpr int init_method      = 1;     // tensor initialization scheme
    constexpr bool do_log          = false; // no verbose tensor dumps
    constexpr bool time_kernel     = false; // correctness run, no timing
    constexpr int instance_index   = -1;    // exercise all kernel instances
    return ck::profiler::profile_grouped_conv_bwd_weight_impl<NDimSpatial,
                                                              InLayout,
                                                              WeiLayout,
                                                              OutLayout,
                                                              InDataType,
                                                              WeiDataType,
                                                              OutDataType>(do_verification,
                                                                           init_method,
                                                                           do_log,
                                                                           time_kernel,
                                                                           param,
                                                                           std::to_string(split_k),
                                                                           instance_index);
}
// Parameterized dataset test suites.
// Each CK_BWD_WEIGHT_DATASET_TEST invocation declares the gtest fixture, the
// ConvTest body, and the "Dataset" instantiation for one
// (layout, data types, split_k) combination. Suite names are identical to the
// hand-expanded originals, so test filtering and reporting are unchanged.
#define CK_BWD_WEIGHT_DATASET_TEST(suite_name, ndim, in_layout, wei_layout, out_layout, in_type, wei_type, out_type, split_k, case_getter) \
    class suite_name : public ::testing::TestWithParam<ck::utils::conv::ConvParam>                                                         \
    {                                                                                                                                      \
    };                                                                                                                                     \
    TEST_P(suite_name, ConvTest)                                                                                                           \
    {                                                                                                                                      \
        EXPECT_TRUE(                                                                                                                       \
            (RunConvBwdWeightTest<ndim, in_layout, wei_layout, out_layout, in_type, wei_type, out_type>(                                   \
                GetParam(), split_k)));                                                                                                    \
    }                                                                                                                                      \
    INSTANTIATE_TEST_SUITE_P(Dataset, suite_name, ::testing::ValuesIn(case_getter()))

// 2D Tests - NHWGK layout - {Float, Half, BFloat16} x {SplitK=1, SplitK=2}
// (BFloat16 accumulates weights in float, matching the profiler's supported combo)
CK_BWD_WEIGHT_DATASET_TEST(TestGroupedConvndBwdWeight2dNHWGKFloatSplitK1, 2, NHWGC, GKYXC, NHWGK, float, float, float, 1, Get2DTestCases);
CK_BWD_WEIGHT_DATASET_TEST(TestGroupedConvndBwdWeight2dNHWGKFloatSplitK2, 2, NHWGC, GKYXC, NHWGK, float, float, float, 2, Get2DTestCases);
CK_BWD_WEIGHT_DATASET_TEST(TestGroupedConvndBwdWeight2dNHWGKHalfSplitK1, 2, NHWGC, GKYXC, NHWGK, ck::half_t, ck::half_t, ck::half_t, 1, Get2DTestCases);
CK_BWD_WEIGHT_DATASET_TEST(TestGroupedConvndBwdWeight2dNHWGKHalfSplitK2, 2, NHWGC, GKYXC, NHWGK, ck::half_t, ck::half_t, ck::half_t, 2, Get2DTestCases);
CK_BWD_WEIGHT_DATASET_TEST(TestGroupedConvndBwdWeight2dNHWGKBFloat16SplitK1, 2, NHWGC, GKYXC, NHWGK, ck::bhalf_t, float, ck::bhalf_t, 1, Get2DTestCases);
CK_BWD_WEIGHT_DATASET_TEST(TestGroupedConvndBwdWeight2dNHWGKBFloat16SplitK2, 2, NHWGC, GKYXC, NHWGK, ck::bhalf_t, float, ck::bhalf_t, 2, Get2DTestCases);

// 3D Tests - NDHWGK layout - {Float, Half, BFloat16} x {SplitK=1, SplitK=2}
CK_BWD_WEIGHT_DATASET_TEST(TestGroupedConvndBwdWeight3dNDHWGKFloatSplitK1, 3, NDHWGC, GKZYXC, NDHWGK, float, float, float, 1, Get3DTestCases);
CK_BWD_WEIGHT_DATASET_TEST(TestGroupedConvndBwdWeight3dNDHWGKFloatSplitK2, 3, NDHWGC, GKZYXC, NDHWGK, float, float, float, 2, Get3DTestCases);
CK_BWD_WEIGHT_DATASET_TEST(TestGroupedConvndBwdWeight3dNDHWGKHalfSplitK1, 3, NDHWGC, GKZYXC, NDHWGK, ck::half_t, ck::half_t, ck::half_t, 1, Get3DTestCases);
CK_BWD_WEIGHT_DATASET_TEST(TestGroupedConvndBwdWeight3dNDHWGKHalfSplitK2, 3, NDHWGC, GKZYXC, NDHWGK, ck::half_t, ck::half_t, ck::half_t, 2, Get3DTestCases);
CK_BWD_WEIGHT_DATASET_TEST(TestGroupedConvndBwdWeight3dNDHWGKBFloat16SplitK1, 3, NDHWGC, GKZYXC, NDHWGK, ck::bhalf_t, float, ck::bhalf_t, 1, Get3DTestCases);
CK_BWD_WEIGHT_DATASET_TEST(TestGroupedConvndBwdWeight3dNDHWGKBFloat16SplitK2, 3, NDHWGC, GKZYXC, NDHWGK, ck::bhalf_t, float, ck::bhalf_t, 2, Get3DTestCases);

#undef CK_BWD_WEIGHT_DATASET_TEST

View File

@@ -5,330 +5,165 @@
#include <iostream> // C++ I/O streams (cout, cerr)
#include <initializer_list> // C++ initializer list support (unused here)
#include <vector> // C++ vector container - stores test cases
#include <fstream> // File I/O for CSV reading
#include <sstream> // String stream for CSV parsing
#include <string> // String operations
#include <gtest/gtest.h> // Google Test framework - provides TYPED_TEST, EXPECT_TRUE
#include <gtest/gtest.h> // Google Test framework - provides TEST_P, INSTANTIATE_TEST_SUITE_P
#include "profiler/profile_grouped_conv_fwd_impl.hpp" // The actual GPU profiler that does convolution work
// CSV Reader Function for Loading Test Cases
// Reads convolution parameters from a CSV file and returns a vector of ConvParam structures.
//
// Accepted row formats (a trailing TestName column is allowed and ignored):
//   2D (19+ columns):
//     NDim,Groups,BatchSize,OutChannels,InChannels,KernelH,KernelW,InputH,InputW,
//     OutputH,OutputW,StrideH,StrideW,DilationH,DilationW,LeftPadH,LeftPadW,
//     RightPadH,RightPadW[,TestName]
//   3D (26+ columns): the same fields, with every spatial value as a {D,H,W} triple.
// The output-size columns are intentionally not parsed: ConvParam derives the
// output size from input/kernel/stride/dilation/padding.
// Comment lines (leading '#'), blank lines and the header line are skipped.
// Malformed lines are reported on stderr and skipped; parsing continues.
std::vector<ck::utils::conv::ConvParam> load_csv_test_cases(const std::string& filename)
{
    std::vector<ck::utils::conv::ConvParam> conv_params; // Return vector
    std::ifstream file(filename);                        // Open CSV file
    if(!file.is_open())
    {
        std::cerr << "ERROR: Cannot open CSV file: " << filename << std::endl;
        return conv_params; // Return empty vector on error
    }
    std::string line;
    int line_number = 0;
    // Read file line by line
    while(std::getline(file, line))
    {
        line_number++;
        // Robustness fix: strip a trailing '\r' so CRLF-encoded files parse the
        // same as LF-encoded ones (otherwise the last numeric column would carry
        // the '\r' into std::stoi and header/comment detection could misfire).
        if(!line.empty() && line.back() == '\r')
        {
            line.pop_back();
        }
        // Skip comment lines (starting with #) and empty lines
        if(line.empty() || line[0] == '#')
        {
            continue;
        }
        // Skip header line (contains column names)
        if(line.find("NDim,Groups,BatchSize") != std::string::npos)
        {
            continue;
        }
        // Parse CSV line using stringstream
        std::stringstream ss(line);
        std::string cell;
        std::vector<std::string> row;
        // Split line by commas
        while(std::getline(ss, cell, ','))
        {
            row.push_back(cell);
        }
        // Validate row has correct number of columns
        if(row.size() < 19)
        { // Need at least 19 columns for 2D (excluding TestName)
            std::cerr << "WARNING: Line " << line_number << " has insufficient columns ("
                      << row.size() << "), skipping" << std::endl;
            continue;
        }
        try
        {
            // Common leading columns shared by 2D and 3D rows
            int NDim        = std::stoi(row[0]);
            int Groups      = std::stoi(row[1]);
            int BatchSize   = std::stoi(row[2]);
            int OutChannels = std::stoi(row[3]);
            int InChannels  = std::stoi(row[4]);
            if(NDim == 2)
            {
                // 2D: columns 5..18 hold {H,W} pairs; output columns 9/10 are skipped.
                ck::utils::conv::ConvParam param = {
                    NDim,        // NDim = 2
                    Groups,      // Groups
                    BatchSize,   // Batch size
                    OutChannels, // Output channels
                    InChannels,  // Input channels
                    {std::stoi(row[5]), std::stoi(row[6])},   // Kernel: {H, W}
                    {std::stoi(row[7]), std::stoi(row[8])},   // Input: {H, W}
                    {std::stoi(row[11]), std::stoi(row[12])}, // Stride: {H, W}
                    {std::stoi(row[13]), std::stoi(row[14])}, // Dilation: {H, W}
                    {std::stoi(row[15]), std::stoi(row[16])}, // Left pad: {H, W}
                    {std::stoi(row[17]), std::stoi(row[18])}  // Right pad: {H, W}
                };
                conv_params.push_back(param);
            }
            else if(NDim == 3)
            {
                // 3D rows need the wider column set
                if(row.size() < 26)
                {
                    std::cerr << "WARNING: 3D convolution on line " << line_number
                              << " needs 26+ columns, has " << row.size() << ", skipping"
                              << std::endl;
                    continue;
                }
                // 3D: columns 5..25 hold {D,H,W} triples; output columns 11-13 are skipped.
                ck::utils::conv::ConvParam param = {
                    NDim,        // NDim = 3
                    Groups,      // Groups
                    BatchSize,   // Batch size
                    OutChannels, // Output channels
                    InChannels,  // Input channels
                    {std::stoi(row[5]), std::stoi(row[6]), std::stoi(row[7])},    // Kernel: {D, H, W}
                    {std::stoi(row[8]), std::stoi(row[9]), std::stoi(row[10])},   // Input: {D, H, W}
                    {std::stoi(row[14]), std::stoi(row[15]), std::stoi(row[16])}, // Stride: {D, H, W}
                    {std::stoi(row[17]), std::stoi(row[18]), std::stoi(row[19])}, // Dilation: {D, H, W}
                    {std::stoi(row[20]), std::stoi(row[21]), std::stoi(row[22])}, // Left pad: {D, H, W}
                    {std::stoi(row[23]), std::stoi(row[24]), std::stoi(row[25])}  // Right pad: {D, H, W}
                };
                conv_params.push_back(param);
            }
            else
            {
                std::cerr << "WARNING: Unsupported NDim=" << NDim << " on line " << line_number
                          << ", skipping" << std::endl;
            }
        }
        catch(const std::exception& e)
        {
            std::cerr << "ERROR: Failed to parse line " << line_number << ": " << e.what()
                      << std::endl;
            continue;
        }
    }
    // std::ifstream closes itself on destruction (RAII); no explicit close() needed.
    std::cout << "Loaded " << conv_params.size() << " test cases from " << filename << std::endl;
    return conv_params;
}
// Base fixture parameterized on a tuple of
// (data type, input layout, weight layout, output layout).
template <typename Tuple>
class TestGroupedConvndFwd : public ::testing::Test // Inherit from Google Test base class
{
    protected:
    // Tuple element 0: tensor data type (fp32, fp16, bf16, int8).
    using DataType = std::tuple_element_t<0, Tuple>;
    // Tuple element 1: input tensor layout (NHWGC, NDHWGC, ...).
    using InLayout = std::tuple_element_t<1, Tuple>;
    // Tuple element 2: weight tensor layout (GKYXC, GKZYXC, ...).
    using WeiLayout = std::tuple_element_t<2, Tuple>;
    // Tuple element 3: output tensor layout (NHWGK, NDHWGK, ...).
    using OutLayout = std::tuple_element_t<3, Tuple>;
    // 64-bit integer type used for tensor dimension indexing.
    using IndexType = ck::long_index_t;

    // Test cases to execute; populated by concrete tests before calling Run().
    std::vector<ck::utils::conv::ConvParam> conv_params;

    // Profile every registered test case with NDimSpatial spatial dimensions (2 or 3).
    template <ck::index_t NDimSpatial>
    void Run()
    {
        EXPECT_FALSE(conv_params.empty()); // Require at least one registered case.
        bool all_passed = true;
        for(auto& param : conv_params)
        {
            // Preserves the original short-circuit semantics: once one case
            // fails, the remaining cases are not profiled.
            if(!all_passed)
            {
                break;
            }
            // Launch the GPU profiler and verify against the CPU reference.
            all_passed = ck::profiler::profile_grouped_conv_fwd_impl<NDimSpatial,
                                                                     InLayout,  // input layout
                                                                     WeiLayout, // weight layout
                                                                     OutLayout, // output layout
                                                                     DataType,  // input type
                                                                     DataType,  // weight type
                                                                     DataType,  // output type
                                                                     DataType,  // accumulation type
                                                                     DataType,  // bias type
                                                                     IndexType>(
                true,   // do_verification: compare GPU result with CPU reference
                1,      // init_method: uniform random initialization
                false,  // do_log: no detailed tensor dumps
                false,  // time_kernel: correctness only, no timing
                param); // convolution problem description
        }
        EXPECT_TRUE(all_passed); // Every executed case must pass.
    }
};
#include "../common/csv_test_loader.hpp" // Shared CSV test case loader
using namespace ck::tensor_layout::convolution; // Import tensor layout names (NHWGC, GKYXC, etc.)
// GOOGLE TEST TYPE COMBINATIONS: Define what data types and layouts to test
// This creates 4 separate test instances for 2D convolution:
using KernelTypes2d =
::testing::Types<std::tuple<float, NHWGC, GKYXC, NHWGK>, // fp32 test
std::tuple<ck::half_t, NHWGC, GKYXC, NHWGK>, // fp16 test
std::tuple<ck::bhalf_t, NHWGC, GKYXC, NHWGK>, // bfloat16 test
std::tuple<int8_t, NHWGC, GKYXC, NHWGK>>; // int8 test
// This creates 3 separate test instances for 3D convolution (no int8 support for 3D):
using KernelTypes3d =
::testing::Types<std::tuple<float, NDHWGC, GKZYXC, NDHWGK>, // fp32 3D test
std::tuple<ck::half_t, NDHWGC, GKZYXC, NDHWGK>, // fp16 3D test
std::tuple<ck::bhalf_t, NDHWGC, GKZYXC, NDHWGK>>; // bfloat16 3D test
// Create specialized test classes that inherit from the base template class
template <typename Tuple>
class TestGroupedConvndFwd2d : public TestGroupedConvndFwd<Tuple> // 2D convolution test class
// Load CSV data for 2D tests
static std::vector<ck::utils::conv::ConvParam> Get2DTestCases()
{
};
template <typename Tuple>
class TestGroupedConvndFwd3d : public TestGroupedConvndFwd<Tuple> // 3D convolution test class
{
};
// GOOGLE TEST MAGIC: Create test suites
// This tells Google Test to create 4 test instances for 2D (fp32, fp16, bf16, int8)
TYPED_TEST_SUITE(TestGroupedConvndFwd2d, KernelTypes2d);
// This tells Google Test to create 3 test instances for 3D (fp32, fp16, bf16)
TYPED_TEST_SUITE(TestGroupedConvndFwd3d, KernelTypes3d);
// THE ACTUAL 2D TEST - This runs 4 times (once for each data type: fp32, fp16, bf16, int8)
TYPED_TEST(TestGroupedConvndFwd2d, Test2D)
{
// LOAD TEST CASES FROM CSV FILE instead of hardcoded cases
// Try different locations for the CSV file (build directory vs source directory)
std::vector<std::string> csv_paths = {
"../test_data/conv_test_set_2d_dataset.csv", // From build directory to source
};
bool loaded = false;
for(const auto& csv_path : csv_paths)
static std::vector<ck::utils::conv::ConvParam> test_cases;
if(test_cases.empty())
{
auto csv_cases = load_csv_test_cases(csv_path);
if(!csv_cases.empty())
std::string test_data_dir = ck::test::GetTestDataPath();
if(test_data_dir.empty())
{
// Successfully loaded CSV data - add all test cases to conv_params
for(const auto& test_case : csv_cases)
{
this->conv_params.push_back(test_case);
}
std::cout << "Loaded " << csv_cases.size() << " 2D test cases from " << csv_path
<< std::endl;
loaded = true;
break;
std::cerr << "FATAL: test_data directory not found" << std::endl;
return test_cases;
}
std::vector<std::string> csv_paths = {test_data_dir + "/conv_test_set_2d_dataset.csv"};
bool loaded = ck::test::load_and_populate_test_cases(csv_paths, test_cases, "2D");
if(!loaded)
{
std::cerr << "FATAL: Failed to load 2D test cases from " << csv_paths[0] << std::endl;
}
}
// FAIL if CSV loading fails - no fallback!
if(!loaded)
{
std::cerr << "ERROR: Failed to load CSV test data from any of these locations:"
<< std::endl;
for(const auto& path : csv_paths)
{
std::cerr << " - " << path << std::endl;
}
std::cerr << "\nPlease ensure CSV test data exists in one of these locations." << std::endl;
std::cerr << "Run generate_test_dataset.sh in test_data/ to create test datasets."
<< std::endl;
// Force test failure - no test cases means test should fail
EXPECT_TRUE(loaded) << "CSV test data loading failed";
}
// Execute all test cases with 2D convolution
// This calls Run<2>() which loops through conv_params and calls GPU profiler for each
this->template Run<2>();
return test_cases;
}
// THE ACTUAL 3D TEST - This runs 3 times (once for each data type: fp32, fp16, bf16)
TYPED_TEST(TestGroupedConvndFwd3d, Test3D)
// Load CSV data for 3D tests
static std::vector<ck::utils::conv::ConvParam> Get3DTestCases()
{
// LOAD TEST CASES FROM CSV FILE instead of hardcoded cases
// Try different locations for the CSV file (build directory vs source directory)
std::vector<std::string> csv_paths = {
"../test_data/conv_test_set_3d_dataset.csv", // From build directory to source
};
bool loaded = false;
for(const auto& csv_path : csv_paths)
static std::vector<ck::utils::conv::ConvParam> test_cases;
if(test_cases.empty())
{
auto csv_cases = load_csv_test_cases(csv_path);
if(!csv_cases.empty())
std::string test_data_dir = ck::test::GetTestDataPath();
if(test_data_dir.empty())
{
// Successfully loaded CSV data - add all test cases to conv_params
for(const auto& test_case : csv_cases)
{
this->conv_params.push_back(test_case);
}
std::cout << "Loaded " << csv_cases.size() << " 3D test cases from " << csv_path
<< std::endl;
loaded = true;
break;
std::cerr << "FATAL: test_data directory not found" << std::endl;
return test_cases;
}
std::vector<std::string> csv_paths = {test_data_dir + "/conv_test_set_3d_dataset.csv"};
bool loaded = ck::test::load_and_populate_test_cases(csv_paths, test_cases, "3D");
if(!loaded)
{
std::cerr << "FATAL: Failed to load 3D test cases from " << csv_paths[0] << std::endl;
}
}
// FAIL if CSV loading fails - no fallback!
if(!loaded)
{
std::cerr << "ERROR: Failed to load CSV test data from any of these locations:"
<< std::endl;
for(const auto& path : csv_paths)
{
std::cerr << " - " << path << std::endl;
}
std::cerr << "\nPlease ensure CSV test data exists in one of these locations." << std::endl;
std::cerr << "Run generate_test_dataset.sh in test_data/ to create test datasets."
<< std::endl;
// Force test failure - no test cases means test should fail
EXPECT_TRUE(loaded) << "CSV test data loading failed";
}
// Execute all test cases with 3D convolution
// This calls Run<3>() which loops through conv_params and calls GPU profiler for each
this->template Run<3>();
return test_cases;
}
// Helper template that profiles a single grouped forward convolution case.
// Returns true when the GPU result matches the CPU reference.
// The same DataType is used for input, weight, output, accumulation and bias.
template <ck::index_t NDimSpatial,
          typename InLayout,
          typename WeiLayout,
          typename OutLayout,
          typename DataType>
bool RunConvTest(const ck::utils::conv::ConvParam& param)
{
    // 64-bit indexing for tensor dimensions.
    using IndexType = ck::long_index_t;
    // Fixed profiler options: verify against CPU, random init, no logging, no timing.
    constexpr bool do_verification = true;
    constexpr int init_method      = 1;
    constexpr bool do_log          = false;
    constexpr bool time_kernel     = false;
    return ck::profiler::profile_grouped_conv_fwd_impl<NDimSpatial,
                                                       InLayout,
                                                       WeiLayout,
                                                       OutLayout,
                                                       DataType,
                                                       DataType,
                                                       DataType,
                                                       DataType,
                                                       DataType,
                                                       IndexType>(
        do_verification, init_method, do_log, time_kernel, param);
}
// 2D Tests - Float
class TestGroupedConvndFwd2dFloat : public ::testing::TestWithParam<ck::utils::conv::ConvParam>
{
};
TEST_P(TestGroupedConvndFwd2dFloat, ConvTest)
{
EXPECT_TRUE((RunConvTest<2, NHWGC, GKYXC, NHWGK, float>(GetParam())));
}
INSTANTIATE_TEST_SUITE_P(Dataset,
TestGroupedConvndFwd2dFloat,
::testing::ValuesIn(Get2DTestCases()));
// 2D Tests - Half
class TestGroupedConvndFwd2dHalf : public ::testing::TestWithParam<ck::utils::conv::ConvParam>
{
};
TEST_P(TestGroupedConvndFwd2dHalf, ConvTest)
{
    // Profile one dataset-provided 2D case in fp16 (NHWGC/GKYXC/NHWGK layouts).
    const auto& conv_param = GetParam();
    const bool passed      = RunConvTest<2, NHWGC, GKYXC, NHWGK, ck::half_t>(conv_param);
    EXPECT_TRUE(passed);
}
// One test instance per row loaded from the 2D dataset CSV.
INSTANTIATE_TEST_SUITE_P(Dataset,
                         TestGroupedConvndFwd2dHalf,
                         ::testing::ValuesIn(Get2DTestCases()));
// 2D Tests - BFloat16
class TestGroupedConvndFwd2dBFloat16 : public ::testing::TestWithParam<ck::utils::conv::ConvParam>
{
};
TEST_P(TestGroupedConvndFwd2dBFloat16, ConvTest)
{
    // Profile one dataset-provided 2D case in bf16 (NHWGC/GKYXC/NHWGK layouts).
    const auto& conv_param = GetParam();
    const bool passed      = RunConvTest<2, NHWGC, GKYXC, NHWGK, ck::bhalf_t>(conv_param);
    EXPECT_TRUE(passed);
}
// One test instance per row loaded from the 2D dataset CSV.
INSTANTIATE_TEST_SUITE_P(Dataset,
                         TestGroupedConvndFwd2dBFloat16,
                         ::testing::ValuesIn(Get2DTestCases()));
// 2D Tests - Int8
class TestGroupedConvndFwd2dInt8 : public ::testing::TestWithParam<ck::utils::conv::ConvParam>
{
};
TEST_P(TestGroupedConvndFwd2dInt8, ConvTest)
{
    // Profile one dataset-provided 2D case in int8 (NHWGC/GKYXC/NHWGK layouts).
    const auto& conv_param = GetParam();
    const bool passed      = RunConvTest<2, NHWGC, GKYXC, NHWGK, int8_t>(conv_param);
    EXPECT_TRUE(passed);
}
// One test instance per row loaded from the 2D dataset CSV.
INSTANTIATE_TEST_SUITE_P(Dataset,
                         TestGroupedConvndFwd2dInt8,
                         ::testing::ValuesIn(Get2DTestCases()));
// 3D Tests - Float
class TestGroupedConvndFwd3dFloat : public ::testing::TestWithParam<ck::utils::conv::ConvParam>
{
};
TEST_P(TestGroupedConvndFwd3dFloat, ConvTest)
{
EXPECT_TRUE((RunConvTest<3, NDHWGC, GKZYXC, NDHWGK, float>(GetParam())));
}
INSTANTIATE_TEST_SUITE_P(Dataset,
TestGroupedConvndFwd3dFloat,
::testing::ValuesIn(Get3DTestCases()));
// 3D Tests - Half
class TestGroupedConvndFwd3dHalf : public ::testing::TestWithParam<ck::utils::conv::ConvParam>
{
};
TEST_P(TestGroupedConvndFwd3dHalf, ConvTest)
{
    // Profile one dataset-provided 3D case in fp16 (NDHWGC/GKZYXC/NDHWGK layouts).
    const auto& conv_param = GetParam();
    const bool passed      = RunConvTest<3, NDHWGC, GKZYXC, NDHWGK, ck::half_t>(conv_param);
    EXPECT_TRUE(passed);
}
// One test instance per row loaded from the 3D dataset CSV.
INSTANTIATE_TEST_SUITE_P(Dataset,
                         TestGroupedConvndFwd3dHalf,
                         ::testing::ValuesIn(Get3DTestCases()));
// 3D Tests - BFloat16
class TestGroupedConvndFwd3dBFloat16 : public ::testing::TestWithParam<ck::utils::conv::ConvParam>
{
};
TEST_P(TestGroupedConvndFwd3dBFloat16, ConvTest)
{
    // Profile one dataset-provided 3D case in bf16 (NDHWGC/GKZYXC/NDHWGK layouts).
    const auto& conv_param = GetParam();
    const bool passed      = RunConvTest<3, NDHWGC, GKZYXC, NDHWGK, ck::bhalf_t>(conv_param);
    EXPECT_TRUE(passed);
}
// One test instance per row loaded from the 3D dataset CSV.
INSTANTIATE_TEST_SUITE_P(Dataset,
                         TestGroupedConvndFwd3dBFloat16,
                         ::testing::ValuesIn(Get3DTestCases()));

View File

@@ -8,6 +8,20 @@
set -e # Exit on error
set +x # Disable command echo (even if called with bash -x)
# Ensure no background jobs survive the script when it exits or is interrupted.
cleanup() {
    echo ""
    echo "Cleaning up background processes..."
    # Collect the PIDs of every job this shell started and terminate them;
    # already-finished PIDs are ignored.
    local pids
    pids=$(jobs -p)
    if [ -n "$pids" ]; then
        kill $pids 2>/dev/null || true
    fi
    wait 2>/dev/null || true
    echo "Cleanup complete."
    exit 1
}
# Run cleanup on interrupt, termination, or any exit
# (the EXIT trap is removed later on successful completion).
trap cleanup SIGINT SIGTERM EXIT
echo "=========================================="
echo "CK Convolution Test Dataset Generator"
echo "=========================================="
@@ -18,7 +32,7 @@ if ! python3 -c "import torch" 2>/dev/null; then
echo "PyTorch not found. Creating virtual environment..."
# Create a virtual environment in the current directory
VENV_DIR="./pytorch_venv"
VENV_DIR="./.venv"
if [ ! -d "$VENV_DIR" ]; then
python3 -m venv $VENV_DIR || {
echo "ERROR: Failed to create virtual environment."
@@ -66,11 +80,71 @@ if ! $PYTHON_CMD -c "import torch; import sys; sys.exit(0 if torch.cuda.is_avail
echo "Continuing anyway to generate placeholder data..."
fi
# Parse command line arguments
# Defaults: full dataset, single job, one GPU.
CONFIG_MODE="full" # Default configuration mode: 'small', 'half' or 'full'
MAX_PARALLEL_JOBS=1 # Default number of parallel jobs
NUM_GPUS=1 # Number of GPUs to use (0 means no GPU assignment)
# Process arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        -j)
            # Separated form: "-j 4" — job count is the next argument.
            MAX_PARALLEL_JOBS="$2"
            shift 2
            ;;
        -j*)
            # Attached form: "-j4" — strip the "-j" prefix.
            # NOTE: this pattern must stay AFTER the bare "-j" case above,
            # since case patterns are matched in order.
            MAX_PARALLEL_JOBS="${1#-j}"
            shift
            ;;
        --gpus)
            # Number of GPUs for round-robin assignment (next argument).
            NUM_GPUS="$2"
            shift 2
            ;;
        small|half|full)
            # Positional dataset size selector.
            CONFIG_MODE="$1"
            shift
            ;;
        *)
            # Unknown argument: print usage and abort.
            echo "Usage: $0 [small|half|full] [-j <num_jobs>] [--gpus <num_gpus>]"
            echo " Configuration modes: small, half, full (default: full)"
            echo " -j <num_jobs>: Number of parallel jobs (default: 1)"
            echo " --gpus <num_gpus>: Number of GPUs to use (e.g., 8 for GPUs 0-7)"
            exit 1
            ;;
    esac
done
# Setup GPU array if GPUs are requested
if [ $NUM_GPUS -gt 0 ]; then
    # Auto-detect available GPUs
    # NOTE(review): relies on rocm-smi printing "GPU[N]" lines; if rocm-smi is
    # missing or prints nothing, the count comes out as 0.
    AVAILABLE_GPUS_COUNT=$(rocm-smi --showid 2>/dev/null | grep -oP 'GPU\[\K[0-9]+' | wc -l)
    if [ "$AVAILABLE_GPUS_COUNT" -gt 0 ]; then
        MAX_AVAILABLE=$AVAILABLE_GPUS_COUNT
    else
        MAX_AVAILABLE=0
    fi
    # Validate requested GPU count — never claim more GPUs than detected.
    if [ $NUM_GPUS -gt $MAX_AVAILABLE ]; then
        echo "WARNING: Requested $NUM_GPUS GPUs but only $MAX_AVAILABLE available. Using $MAX_AVAILABLE GPUs."
        NUM_GPUS=$MAX_AVAILABLE
    fi
    # Build GPU array (0 to NUM_GPUS-1) used for round-robin job assignment below.
    GPU_ARRAY=()
    for ((i=0; i<NUM_GPUS; i++)); do
        GPU_ARRAY+=($i)
    done
    echo "Using $NUM_GPUS GPU(s): ${GPU_ARRAY[*]}"
else
    # --gpus 0: leave HIP_VISIBLE_DEVICES untouched for all jobs.
    echo "No GPU assignment specified, using default GPU behavior"
    GPU_ARRAY=()
fi
# Configuration
OUTPUT_DIR="generated_datasets"
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
# Get configuration mode from command line argument (default: full)
CONFIG_MODE="${1:-full}" # Configuration mode: 'small', 'half' or 'full'
# Colors
RED='\033[0;31m'
@@ -128,7 +202,8 @@ rocm-smi --showdriverversion || true
echo ""
echo "Step 2: Running 2D/3D models and capturing MIOpen commands"
echo "-----------------------------------------"
echo "Using up to $MAX_PARALLEL_JOBS parallel jobs"
echo ""
# Process 2D models from CSV configuration file
echo "Processing 2D models from $OUTPUT_DIR/model_configs_2d.csv..."
@@ -140,6 +215,11 @@ CURRENT_CONFIG=0
echo "Total configurations to process: $TOTAL_CONFIGS"
echo ""
# Array to track background job PIDs
declare -a job_pids=()
# Counter for round-robin GPU assignment
GPU_COUNTER=0
# Read 2D configurations from CSV (skip comments and header)
while IFS=',' read -r config_name model batch_size channels height width precision; do
# Skip comments and empty lines
@@ -150,21 +230,57 @@ while IFS=',' read -r config_name model batch_size channels height width precisi
# Increment counter
CURRENT_CONFIG=$((CURRENT_CONFIG + 1))
# Build configuration command
CONFIG="--model $model --batch-size $batch_size --channels $channels --height $height --width $width --precision $precision"
CONFIG_NAME="$config_name"
echo -e "${GREEN}[${CURRENT_CONFIG}/${TOTAL_CONFIGS}]${NC} ${CYAN}2D${NC} ${YELLOW}$CONFIG_NAME${NC}"
# Assign GPU in round-robin fashion if GPUs are specified
if [ $NUM_GPUS -gt 0 ]; then
GPU_ID=${GPU_ARRAY[$((GPU_COUNTER % NUM_GPUS))]}
GPU_COUNTER=$((GPU_COUNTER + 1))
echo -e "${GREEN}[${CURRENT_CONFIG}/${TOTAL_CONFIGS}]${NC} ${CYAN}2D${NC} ${YELLOW}$CONFIG_NAME${NC} ${PURPLE}[GPU ${GPU_ID}]${NC} - Starting in background"
else
GPU_ID=""
echo -e "${GREEN}[${CURRENT_CONFIG}/${TOTAL_CONFIGS}]${NC} ${CYAN}2D${NC} ${YELLOW}$CONFIG_NAME${NC} - Starting in background"
fi
# Actual run with logging (suppress stdout, only capture stderr with MIOpen commands)
MIOPEN_ENABLE_LOGGING_CMD=1 $PYTHON_CMD run_model_with_miopen.py \
--model $model --batch-size $batch_size --channels $channels --height $height --width $width --precision $precision \
> /dev/null 2>> $OUTPUT_DIR/${model}_miopen_log_2d.txt || true
# Run in background
(
# Set HIP_VISIBLE_DEVICES if GPU was assigned
if [ -n "$GPU_ID" ]; then
export HIP_VISIBLE_DEVICES=$GPU_ID
fi
MIOPEN_ENABLE_LOGGING_CMD=1 $PYTHON_CMD run_model_with_miopen.py \
--model $model --batch-size $batch_size --channels $channels --height $height --width $width --precision $precision \
> /dev/null 2>> $OUTPUT_DIR/${model}_miopen_log_2d.txt || true
echo -e "${GREEN}[DONE]${NC} ${CYAN}2D${NC} ${YELLOW}$CONFIG_NAME${NC}"
) &
job_pids+=($!)
# Limit number of parallel jobs
if [ ${#job_pids[@]} -ge $MAX_PARALLEL_JOBS ]; then
# Wait for any job to complete
wait -n
# Remove completed jobs from array
for i in "${!job_pids[@]}"; do
if ! kill -0 "${job_pids[$i]}" 2>/dev/null; then
unset 'job_pids[$i]'
fi
done
job_pids=("${job_pids[@]}") # Re-index array
fi
done < $OUTPUT_DIR/model_configs_2d.csv
# Wait for all remaining 2D jobs to complete
echo "Waiting for remaining 2D jobs to complete..."
wait
echo "All 2D models processed!"
echo ""
# Process 3D models from CSV configuration file
echo "Processing 3D models from $OUTPUT_DIR/model_configs_3d.csv..."
@@ -175,6 +291,10 @@ CURRENT_3D_CONFIG=0
echo "Total 3D configurations to process: $TOTAL_3D_CONFIGS"
echo ""
# Reset job tracking array
declare -a job_pids=()
# GPU counter continues from 2D models for round-robin assignment
# Read 3D configurations from CSV (skip comments and header)
while IFS=',' read -r config_name model batch_size channels temporal_size height width precision; do
# Skip comments and empty lines
@@ -185,21 +305,59 @@ while IFS=',' read -r config_name model batch_size channels temporal_size height
# Increment counter
CURRENT_3D_CONFIG=$((CURRENT_3D_CONFIG + 1))
# Build configuration command for 3D models
CONFIG="--model $model --batch-size $batch_size --channels $channels --temporal-size $temporal_size --height $height --width $width --precision $precision"
CONFIG_NAME="$config_name"
echo -e "${GREEN}[${CURRENT_3D_CONFIG}/${TOTAL_3D_CONFIGS}]${NC} ${CYAN}3D${NC} ${YELLOW}$CONFIG_NAME${NC}"
# Assign GPU in round-robin fashion if GPUs are specified
if [ $NUM_GPUS -gt 0 ]; then
GPU_ID=${GPU_ARRAY[$((GPU_COUNTER % NUM_GPUS))]}
GPU_COUNTER=$((GPU_COUNTER + 1))
echo -e "${GREEN}[${CURRENT_3D_CONFIG}/${TOTAL_3D_CONFIGS}]${NC} ${CYAN}3D${NC} ${YELLOW}$CONFIG_NAME${NC} ${PURPLE}[GPU ${GPU_ID}]${NC} - Starting in background"
else
GPU_ID=""
echo -e "${GREEN}[${CURRENT_3D_CONFIG}/${TOTAL_3D_CONFIGS}]${NC} ${CYAN}3D${NC} ${YELLOW}$CONFIG_NAME${NC} - Starting in background"
fi
# Run in background
(
# Set HIP_VISIBLE_DEVICES if GPU was assigned
if [ -n "$GPU_ID" ]; then
export HIP_VISIBLE_DEVICES=$GPU_ID
fi
MIOPEN_ENABLE_LOGGING_CMD=1 $PYTHON_CMD run_model_with_miopen.py \
--model $model --batch-size $batch_size --channels $channels --temporal-size $temporal_size --height $height --width $width --precision $precision \
> /dev/null 2>> $OUTPUT_DIR/${model}_miopen_log_3d.txt || true
echo -e "${GREEN}[DONE]${NC} ${CYAN}3D${NC} ${YELLOW}$CONFIG_NAME${NC}"
) &
# Actual run with logging (suppress stdout, only capture stderr with MIOpen commands)
MIOPEN_ENABLE_LOGGING_CMD=1 $PYTHON_CMD run_model_with_miopen.py \
--model $model --batch-size $batch_size --channels $channels --temporal-size $temporal_size --height $height --width $width --precision $precision \
> /dev/null 2>> $OUTPUT_DIR/${model}_miopen_log_3d.txt || true
job_pids+=($!)
# Limit number of parallel jobs
if [ ${#job_pids[@]} -ge $MAX_PARALLEL_JOBS ]; then
# Wait for any job to complete
wait -n
# Remove completed jobs from array
for i in "${!job_pids[@]}"; do
if ! kill -0 "${job_pids[$i]}" 2>/dev/null; then
unset 'job_pids[$i]'
fi
done
job_pids=("${job_pids[@]}") # Re-index array
fi
done < $OUTPUT_DIR/model_configs_3d.csv
# Wait for all remaining 3D jobs to complete
echo "Waiting for remaining 3D jobs to complete..."
wait
echo "All 3D models processed!"
echo ""
# Disable trap on successful completion
trap - SIGINT SIGTERM EXIT
echo ""
echo "Step 3: Converting MIOpen commands to CSV test cases"
@@ -311,7 +469,7 @@ if [ $COUNT_3D -gt 0 ]; then
fi
echo " - Intermediate files in: $OUTPUT_DIR/"
echo ""
echo "To use these datasets:"
echo " 1. Build the test: cd ../script && make -j64 test_grouped_convnd_fwd_dataset_xdl"
echo " 2. Run the test: ./bin/test_grouped_convnd_fwd_dataset_xdl"
echo "To use these datasets for direction (bwd_data, bwd_weight, or fwd):"
echo " 1. Build the test: cd ../script && make -j64 test_grouped_convnd_<direction>_dataset_xdl"
echo " 2. Run the test: ./bin/test_grouped_convnd_<direction>_dataset_xdl"
echo ""

1187
test_data/gtest_parallel.py Normal file

File diff suppressed because it is too large Load Diff