CSV-driven convolution test pipeline (#2581)

* Add CSV-driven convolution test pipeline

- Add test_grouped_convnd_fwd_dataset_xdl.cpp with CSV reader functionality
- Add complete dataset generation toolchain in test_data/
- Add Jenkins integration with RUN_CONV_COMPREHENSIVE_DATASET parameter
- Ready for comprehensive convolution testing with scalable datasets

* Update convolution test dataset generation pipeline

* Add 2D and 3D dataset CSV files

* Remove CSV test dataset files from repository

* Update generate_test_dataset.sh

* Fix channel division for MIOpen to CK conversion

* Remove unnecessary test files

* Fix clang-format-18 formatting issues

---------

Co-authored-by: Bartłomiej Kocot <barkocot@amd.com>
This commit is contained in:
JH-Leon-KIM-AMD
2025-08-13 17:24:34 +03:00
committed by GitHub
parent 3142562c22
commit b963478759
7 changed files with 1298 additions and 0 deletions

31
Jenkinsfile vendored
View File

@@ -892,6 +892,10 @@ pipeline {
name: "RUN_GROUPED_CONV_LARGE_CASES_TESTS",
defaultValue: false,
description: "Run the grouped conv large cases tests (default: OFF)")
booleanParam(
name: "RUN_CONV_COMPREHENSIVE_DATASET",
defaultValue: false,
description: "Run comprehensive convolution dataset tests before important changes (default: OFF)")
booleanParam(
name: "RUN_CODEGEN_TESTS",
defaultValue: true,
@@ -1090,6 +1094,33 @@ pipeline {
}
}
}
// Opt-in CI stage: exercises the CSV-driven convolution dataset pipeline end to end
// (dataset generation -> CMake configure -> build -> run) on a gfx90a node.
stage("Run Comprehensive Convolution Dataset Tests")
{
parallel
{
stage("Run Comprehensive Dataset Tests on gfx90a")
{
when {
beforeAgent true
// Gated behind the RUN_CONV_COMPREHENSIVE_DATASET build parameter (default: OFF)
expression { params.RUN_CONV_COMPREHENSIVE_DATASET.toBoolean() }
}
agent{ label rocmnode("gfx90a")}
environment{
// NO_CK_BUILD: skip the default CK build; this stage drives its own build below
setup_args = "NO_CK_BUILD"
// Generate the CSV datasets first, then configure/build/run the dataset test binary
execute_args = """ cd test_data && \
./generate_test_dataset.sh && \
cd ../script && \
../script/cmake-ck-dev.sh ../ gfx90a && \
make -j64 test_grouped_convnd_fwd_dataset_xdl && \
./bin/test_grouped_convnd_fwd_dataset_xdl"""
}
steps{
buildHipClangJobAndReboot(setup_args:setup_args, no_reboot:true, build_type: 'Release', execute_cmd: execute_args)
cleanWs()
}
}
}
}
stage("Run Codegen Tests")
{
parallel

View File

@@ -11,6 +11,10 @@ if(GPU_TARGETS MATCHES "gfx9")
add_executable(test_grouped_convnd_fwd_large_cases_xdl test_grouped_convnd_fwd_large_cases_xdl.cpp)
target_compile_options(test_grouped_convnd_fwd_large_cases_xdl PRIVATE -Wno-global-constructors -Wno-undef)
target_link_libraries(test_grouped_convnd_fwd_large_cases_xdl PRIVATE gtest_main getopt::getopt utility device_grouped_conv1d_fwd_instance device_grouped_conv2d_fwd_instance device_grouped_conv3d_fwd_instance)
add_executable(test_grouped_convnd_fwd_dataset_xdl test_grouped_convnd_fwd_dataset_xdl.cpp)
target_compile_options(test_grouped_convnd_fwd_dataset_xdl PRIVATE -Wno-global-constructors -Wno-undef)
target_link_libraries(test_grouped_convnd_fwd_dataset_xdl PRIVATE gtest_main getopt::getopt utility device_grouped_conv1d_fwd_instance device_grouped_conv2d_fwd_instance device_grouped_conv3d_fwd_instance)
endif()
add_gtest_executable(test_grouped_convnd_fwd_multi_ab_interface test_grouped_convnd_fwd_multi_ab_interface.cpp)

View File

@@ -0,0 +1,335 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2024-2025, Advanced Micro Devices, Inc. All rights reserved.
#include <cstdlib> // Standard C library (exit codes, malloc)
#include <iostream> // C++ I/O streams (cout, cerr)
#include <initializer_list> // C++ initializer list support (unused here)
#include <vector> // C++ vector container - stores test cases
#include <fstream> // File I/O for CSV reading
#include <sstream> // String stream for CSV parsing
#include <string> // String operations
#include <gtest/gtest.h> // Google Test framework - provides TYPED_TEST, EXPECT_TRUE
#include "profiler/profile_grouped_conv_fwd_impl.hpp" // The actual GPU profiler that does convolution work
// CSV Reader Function for Loading Test Cases
// Reads convolution parameters from a CSV file and returns a vector of ConvParam structures.
//
// Expected CSV columns ('#' comment lines and the header line are skipped):
//   NDim,Groups,BatchSize,OutChannels,InChannels,KernelH,KernelW,InputH,InputW,
//   OutputH,OutputW,StrideH,StrideW,DilationH,DilationW,LeftPadH,LeftPadW,
//   RightPadH,RightPadW[,...extra 3D columns...][,TestName]
// Malformed rows are reported to stderr and skipped. Returns an empty vector
// when the file cannot be opened.
std::vector<ck::utils::conv::ConvParam> load_csv_test_cases(const std::string& filename)
{
    std::vector<ck::utils::conv::ConvParam> conv_params; // Return vector
    std::ifstream file(filename);                        // Open CSV file (closed by RAII)
    if(!file.is_open())
    {
        std::cerr << "ERROR: Cannot open CSV file: " << filename << std::endl;
        return conv_params; // Empty vector signals failure to the caller
    }
    std::string line;
    int line_number = 0;
    // Read file line by line
    while(std::getline(file, line))
    {
        line_number++;
        // Strip a trailing carriage return so CSV files with Windows (CRLF)
        // line endings parse identically to Unix ones.
        if(!line.empty() && line.back() == '\r')
        {
            line.pop_back();
        }
        // Skip comment lines (starting with #) and empty lines
        if(line.empty() || line[0] == '#')
        {
            continue;
        }
        // Skip the header line (contains column names)
        if(line.find("NDim,Groups,BatchSize") != std::string::npos)
        {
            continue;
        }
        // Split the line by commas
        std::stringstream ss(line);
        std::string cell;
        std::vector<std::string> row;
        while(std::getline(ss, cell, ','))
        {
            row.push_back(cell);
        }
        // A 2D row needs at least 19 numeric columns (TestName excluded)
        if(row.size() < 19)
        {
            std::cerr << "WARNING: Line " << line_number << " has insufficient columns ("
                      << row.size() << "), skipping" << std::endl;
            continue;
        }
        try
        {
            int NDim        = std::stoi(row[0]);
            int Groups      = std::stoi(row[1]);
            int BatchSize   = std::stoi(row[2]);
            int OutChannels = std::stoi(row[3]);
            int InChannels  = std::stoi(row[4]);
            if(NDim == 2)
            {
                // Columns 9/10 (OutputH/OutputW) are derived values, not needed by ConvParam.
                ck::utils::conv::ConvParam param = {
                    NDim,                                     // NDim = 2
                    Groups,                                   // Groups
                    BatchSize,                                // Batch size
                    OutChannels,                              // Output channels (per group)
                    InChannels,                               // Input channels (per group)
                    {std::stoi(row[5]), std::stoi(row[6])},   // Kernel:    {H, W}
                    {std::stoi(row[7]), std::stoi(row[8])},   // Input:     {H, W}
                    {std::stoi(row[11]), std::stoi(row[12])}, // Stride:    {H, W}
                    {std::stoi(row[13]), std::stoi(row[14])}, // Dilation:  {H, W}
                    {std::stoi(row[15]), std::stoi(row[16])}, // Left pad:  {H, W}
                    {std::stoi(row[17]), std::stoi(row[18])}  // Right pad: {H, W}
                };
                conv_params.push_back(param);
            }
            else if(NDim == 3)
            {
                // 3D rows carry an extra depth value per dimension group.
                if(row.size() < 26)
                {
                    std::cerr << "WARNING: 3D convolution on line " << line_number
                              << " needs 26+ columns, has " << row.size() << ", skipping"
                              << std::endl;
                    continue;
                }
                // Columns 11-13 (OutputD/H/W) are derived values, not needed by ConvParam.
                ck::utils::conv::ConvParam param = {
                    NDim,        // NDim = 3
                    Groups,      // Groups
                    BatchSize,   // Batch size
                    OutChannels, // Output channels (per group)
                    InChannels,  // Input channels (per group)
                    {std::stoi(row[5]), std::stoi(row[6]), std::stoi(row[7])},    // Kernel: {D,H,W}
                    {std::stoi(row[8]), std::stoi(row[9]), std::stoi(row[10])},   // Input:  {D,H,W}
                    {std::stoi(row[14]), std::stoi(row[15]), std::stoi(row[16])}, // Stride: {D,H,W}
                    {std::stoi(row[17]), std::stoi(row[18]), std::stoi(row[19])}, // Dilation: {D,H,W}
                    {std::stoi(row[20]), std::stoi(row[21]), std::stoi(row[22])}, // Left pad: {D,H,W}
                    {std::stoi(row[23]), std::stoi(row[24]), std::stoi(row[25])}  // Right pad:{D,H,W}
                };
                conv_params.push_back(param);
            }
            else
            {
                std::cerr << "WARNING: Unsupported NDim=" << NDim << " on line " << line_number
                          << ", skipping" << std::endl;
            }
        }
        catch(const std::exception& e)
        {
            // std::stoi throws invalid_argument/out_of_range on non-numeric cells
            std::cerr << "ERROR: Failed to parse line " << line_number << ": " << e.what()
                      << std::endl;
            continue;
        }
    }
    // NOTE: the per-line debug print ("Line N: ...") was removed - it flooded
    // CI logs with one line per CSV row and carried no diagnostic value.
    std::cout << "Loaded " << conv_params.size() << " test cases from " << filename << std::endl;
    return conv_params;
}
// Template test fixture parameterized over a std::tuple of
// (data type, input layout, weight layout, output layout).
template <typename Tuple>
class TestGroupedConvndFwd : public ::testing::Test // Inherit from Google Test base class
{
    protected:
    using DataType  = std::tuple_element_t<0, Tuple>; // fp32 / fp16 / bf16 / int8
    using InLayout  = std::tuple_element_t<1, Tuple>; // e.g. NHWGC, NDHWGC
    using WeiLayout = std::tuple_element_t<2, Tuple>; // e.g. GKYXC, GKZYXC
    using OutLayout = std::tuple_element_t<3, Tuple>; // e.g. NHWGK, NDHWGK
    using IndexType = ck::long_index_t;               // 64-bit tensor index type

    // All test cases to execute; populated by the TYPED_TESTs before Run() is called.
    std::vector<ck::utils::conv::ConvParam> conv_params;

    // Runs every case in conv_params through the GPU profiler for an
    // NDimSpatial-dimensional (2 or 3) grouped forward convolution.
    template <ck::index_t NDimSpatial>
    void Run()
    {
        EXPECT_FALSE(conv_params.empty()); // Must have at least one test case
        bool pass = true;                  // Overall pass/fail across all cases
        for(const auto& param : conv_params)
        {
            // Evaluate the profiler call into a named result FIRST. The original
            // `pass = pass && profile(...)` short-circuited after the first
            // failure, silently skipping every remaining test case.
            const bool case_ok =
                ck::profiler::profile_grouped_conv_fwd_impl<NDimSpatial,
                                                            InLayout,  // Input tensor layout
                                                            WeiLayout, // Weight tensor layout
                                                            OutLayout, // Output tensor layout
                                                            DataType,  // Input data type
                                                            DataType,  // Weight data type
                                                            DataType,  // Output data type
                                                            DataType,  // Accumulation type
                                                            DataType,  // Bias type
                                                            IndexType>( // Index type (int64)
                    true,  // do_verification: compare GPU result with CPU reference
                    1,     // init_method: uniform random init
                    false, // do_log: no detailed tensor dumps
                    false, // time_kernel: correctness only, no timing
                    param);
            pass = pass && case_ok;
        }
        EXPECT_TRUE(pass); // ALL test cases must pass
    }
};
using namespace ck::tensor_layout::convolution; // Import tensor layout names (NHWGC, GKYXC, etc.)
// Type combinations under test: each std::tuple is (data type, input layout,
// weight layout, output layout) consumed by TestGroupedConvndFwd.
// Four instantiations for 2D convolution:
using KernelTypes2d =
::testing::Types<std::tuple<float, NHWGC, GKYXC, NHWGK>, // fp32 test
std::tuple<ck::half_t, NHWGC, GKYXC, NHWGK>, // fp16 test
std::tuple<ck::bhalf_t, NHWGC, GKYXC, NHWGK>, // bfloat16 test
std::tuple<int8_t, NHWGC, GKYXC, NHWGK>>; // int8 test
// Three instantiations for 3D convolution (int8 intentionally omitted for 3D):
using KernelTypes3d =
::testing::Types<std::tuple<float, NDHWGC, GKZYXC, NDHWGK>, // fp32 3D test
std::tuple<ck::half_t, NDHWGC, GKZYXC, NDHWGK>, // fp16 3D test
std::tuple<ck::bhalf_t, NDHWGC, GKZYXC, NDHWGK>>; // bfloat16 3D test
// Thin derived classes so 2D and 3D get independent typed test suites while
// sharing the fixture implementation above.
template <typename Tuple>
class TestGroupedConvndFwd2d : public TestGroupedConvndFwd<Tuple> // 2D convolution test class
{
};
template <typename Tuple>
class TestGroupedConvndFwd3d : public TestGroupedConvndFwd<Tuple> // 3D convolution test class
{
};
// Register the typed suites: 4 instances for 2D (fp32, fp16, bf16, int8)...
TYPED_TEST_SUITE(TestGroupedConvndFwd2d, KernelTypes2d);
// ...and 3 instances for 3D (fp32, fp16, bf16).
TYPED_TEST_SUITE(TestGroupedConvndFwd3d, KernelTypes3d);
// 2D test body - instantiated once per entry in KernelTypes2d (fp32, fp16, bf16, int8).
TYPED_TEST(TestGroupedConvndFwd2d, Test2D)
{
    // Candidate CSV locations, tried in order (relative to the build directory).
    std::vector<std::string> csv_paths = {
        "../test_data/conv_test_set_2d_dataset.csv", // From build directory to source
    };
    bool loaded = false;
    for(const auto& csv_path : csv_paths)
    {
        auto csv_cases = load_csv_test_cases(csv_path);
        if(!csv_cases.empty())
        {
            // Successfully loaded CSV data - add all test cases to conv_params
            for(const auto& test_case : csv_cases)
            {
                this->conv_params.push_back(test_case);
            }
            std::cout << "Loaded " << csv_cases.size() << " 2D test cases from " << csv_path
                      << std::endl;
            loaded = true;
            break;
        }
    }
    if(!loaded)
    {
        std::cerr << "ERROR: Failed to load CSV test data from any of these locations:"
                  << std::endl;
        for(const auto& path : csv_paths)
        {
            std::cerr << " - " << path << std::endl;
        }
        std::cerr << "\nPlease ensure CSV test data exists in one of these locations." << std::endl;
        std::cerr << "Run generate_test_dataset.sh in test_data/ to create test datasets."
                  << std::endl;
        // Abort the test here. The previous EXPECT_TRUE(loaded) recorded the
        // failure but fell through into Run<2>(), which then produced a second,
        // misleading failure from the empty-conv_params assertion.
        FAIL() << "CSV test data loading failed";
    }
    // Execute all loaded cases as 2D convolutions via the shared fixture.
    this->template Run<2>();
}
// 3D test body - instantiated once per entry in KernelTypes3d (fp32, fp16, bf16).
TYPED_TEST(TestGroupedConvndFwd3d, Test3D)
{
    // Candidate CSV locations, tried in order (relative to the build directory).
    std::vector<std::string> csv_paths = {
        "../test_data/conv_test_set_3d_dataset.csv", // From build directory to source
    };
    bool loaded = false;
    for(const auto& csv_path : csv_paths)
    {
        auto csv_cases = load_csv_test_cases(csv_path);
        if(!csv_cases.empty())
        {
            // Successfully loaded CSV data - add all test cases to conv_params
            for(const auto& test_case : csv_cases)
            {
                this->conv_params.push_back(test_case);
            }
            std::cout << "Loaded " << csv_cases.size() << " 3D test cases from " << csv_path
                      << std::endl;
            loaded = true;
            break;
        }
    }
    if(!loaded)
    {
        std::cerr << "ERROR: Failed to load CSV test data from any of these locations:"
                  << std::endl;
        for(const auto& path : csv_paths)
        {
            std::cerr << " - " << path << std::endl;
        }
        std::cerr << "\nPlease ensure CSV test data exists in one of these locations." << std::endl;
        std::cerr << "Run generate_test_dataset.sh in test_data/ to create test datasets."
                  << std::endl;
        // Abort the test here. The previous EXPECT_TRUE(loaded) recorded the
        // failure but fell through into Run<3>(), which then produced a second,
        // misleading failure from the empty-conv_params assertion.
        FAIL() << "CSV test data loading failed";
    }
    // Execute all loaded cases as 3D convolutions via the shared fixture.
    this->template Run<3>();
}

View File

@@ -0,0 +1,167 @@
#!/usr/bin/env python3
"""
Generate Model Configuration Combinations for MIOpen Testing
This script generates all possible combinations of model parameters
and saves them as CSV files that can be read by the shell script.
"""
import csv
import itertools
import argparse
def generate_2d_configs():
    """Generate all 2D model configuration combinations.

    Returns a list of dicts with keys (in this order, which fixes the CSV
    column order): config_name, model, batch_size, channels, height, width,
    precision. Combinations that would be too expensive (large batch with
    large images, mixed precision with tiny batches) are filtered out to keep
    the dataset manageable.
    """
    models_2d = [
        'resnet18', 'resnet34', 'resnet50',
        'mobilenet_v2', 'mobilenet_v3_large', 'mobilenet_v3_small',
        'vgg11', 'vgg16', 'vgg19',
        'alexnet', 'googlenet',
        'densenet121', 'densenet161',
        'squeezenet1_0', 'squeezenet1_1',
        'shufflenet_v2_x1_0'
    ]
    batch_sizes = [1, 4, 8, 16, 32]
    # Input dimensions: (height, width)
    input_dims = [
        (64, 64), (128, 128), (224, 224), (256, 256), (512, 512),  # Square
        (224, 320), (224, 448), (320, 224), (448, 224),            # Rectangular
        (227, 227),  # AlexNet preferred
        (299, 299)   # Inception preferred
    ]
    precisions = ['fp32']  # , 'fp16', 'bf16']
    channels = [3]  # Most models expect RGB

    configs = []
    # itertools.product preserves the original nested-loop iteration order,
    # which matters when callers slice the result with --limit.
    # (The unused `config_id` counter from the original version was removed.)
    for model, batch_size, (height, width), precision in itertools.product(
            models_2d, batch_sizes, input_dims, precisions):
        if batch_size > 16 and height > 256:
            continue  # Skip large batch + large image combinations
        if precision != 'fp32' and batch_size < 8:
            continue  # Skip mixed precision with tiny batches
        configs.append({
            'config_name': f"{model}_b{batch_size}_{height}x{width}_{precision}",
            'model': model,
            'batch_size': batch_size,
            'channels': channels[0],
            'height': height,
            'width': width,
            'precision': precision
        })
    return configs
def generate_3d_configs():
    """Generate all 3D (video) model configuration combinations.

    Returns a list of dicts with keys (in this order, which fixes the CSV
    column order): config_name, model, batch_size, channels, temporal_size,
    height, width, precision. Oversized combinations are skipped.
    """
    video_models = ['r3d_18', 'mc3_18', 'r2plus1d_18']
    batch_options = [1, 2, 4, 8]  # 3D models are more memory intensive
    temporal_options = [8, 16, 32]
    # Spatial dimensions: (height, width)
    spatial_options = [
        (112, 112), (224, 224), (256, 256),  # Standard sizes
        (224, 320), (320, 224)               # Rectangular
    ]
    precision_options = ['fp32']  # , 'fp16'] # Skip bf16 for 3D to reduce combinations
    rgb_channels = 3

    result = []
    for net in video_models:
        for nb in batch_options:
            for frames in temporal_options:
                for (h, w) in spatial_options:
                    for prec in precision_options:
                        # Skip very large combinations
                        if nb > 4 and frames > 16:
                            continue
                        if nb > 2 and h > 224:
                            continue
                        result.append({
                            'config_name': f"{net}_b{nb}_t{frames}_{h}x{w}_{prec}",
                            'model': net,
                            'batch_size': nb,
                            'channels': rgb_channels,
                            'temporal_size': frames,
                            'height': h,
                            'width': w,
                            'precision': prec
                        })
    return result
def save_configs_to_csv(configs, filename, config_type):
    """Save configurations to a CSV file.

    Writes two '#' comment header lines, then a CSV header row (column order
    taken from the first config dict) and one row per configuration. When
    `configs` is empty, prints a message and writes nothing.
    """
    if not configs:
        print(f"No {config_type} configurations generated")
        return
    fieldnames = list(configs[0].keys())
    with open(filename, 'w', newline='\n', encoding='utf-8') as csvfile:
        csvfile.write(f"# {config_type} Model Configurations\n")
        csvfile.write(f"# Generated {len(configs)} configurations\n")
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames, lineterminator='\n')
        writer.writeheader()
        for config in configs:
            writer.writerow(config)
    # Fix: the summary line previously printed a garbled "(unknown)"
    # placeholder instead of the destination path.
    print(f"Generated {len(configs)} {config_type} configurations → {filename}")
def main():
    """CLI entry point: generate the 2D and 3D config sets and write them to CSV."""
    cli = argparse.ArgumentParser(description='Generate model configuration combinations')
    cli.add_argument('--output-2d', type=str, default='model_configs_2d.csv',
                     help='Output file for 2D configurations')
    cli.add_argument('--output-3d', type=str, default='model_configs_3d.csv',
                     help='Output file for 3D configurations')
    cli.add_argument('--limit', type=int,
                     help='Limit number of configurations per type (for testing)')
    opts = cli.parse_args()

    print("Generating 2D model configurations...")
    configs_2d = generate_2d_configs()
    if opts.limit:
        configs_2d = configs_2d[:opts.limit]
    save_configs_to_csv(configs_2d, opts.output_2d, "2D")

    print("Generating 3D model configurations...")
    configs_3d = generate_3d_configs()
    if opts.limit:
        configs_3d = configs_3d[:opts.limit]
    save_configs_to_csv(configs_3d, opts.output_3d, "3D")

    grand_total = len(configs_2d) + len(configs_3d)
    print(f"\nTotal configurations: {len(configs_2d)} 2D + {len(configs_3d)} 3D = {grand_total}")
    print("\nTo use these configurations:")
    print(" Update generate_test_dataset.sh to read from these CSV files")


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,262 @@
#!/bin/bash
# Generate Comprehensive Convolution Test Dataset for CK
# This script captures MIOpen commands from PyTorch models and generates test cases
#
# Pipeline overview:
#   Step 1: generate model-configuration CSVs (generate_model_configs.py)
#   Step 2: run each configuration with MIOpen command logging enabled
#   Step 3: convert the captured MIOpen logs to per-model CSV test cases
#   Step 4: combine per-model CSVs into conv_test_set_{2d,3d}_dataset.csv
set -e # Exit on error
# Check if target files already exist (early-exit guard, currently disabled)
# if [ -f "conv_test_set_2d_dataset.csv" ] && [ -f "conv_test_set_3d_dataset.csv" ]; then
#     echo "Target files already exist:"
#     [ -f "conv_test_set_2d_dataset.csv" ] && echo "  - conv_test_set_2d_dataset.csv ($(wc -l < conv_test_set_2d_dataset.csv) lines)"
#     [ -f "conv_test_set_3d_dataset.csv" ] && echo "  - conv_test_set_3d_dataset.csv ($(wc -l < conv_test_set_3d_dataset.csv) lines)"
#     echo ""
#     echo "To regenerate, please remove these files first:"
#     echo "  rm conv_test_set_2d_dataset.csv conv_test_set_3d_dataset.csv"
#     exit 0
# fi
echo "=========================================="
echo "CK Convolution Test Dataset Generator"
echo "=========================================="
# Configuration
OUTPUT_DIR="generated_datasets"
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
MAX_ITERATIONS=0 # Maximum number of iterations per model type (set to 0 for unlimited)
# Colors for progress output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
PURPLE='\033[0;35m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color
# Create output directory (start from a clean slate on every run)
rm -rf "$OUTPUT_DIR"
mkdir -p $OUTPUT_DIR
echo ""
echo "Step 1: Generating model configurations"
echo "-----------------------------------------"
# Generate model configuration files (with limit for testing)
echo "Generating model configuration files..."
python3 generate_model_configs.py \
--output-2d $OUTPUT_DIR/model_configs_2d.csv \
--output-3d $OUTPUT_DIR/model_configs_3d.csv
if [ ! -f "$OUTPUT_DIR/model_configs_2d.csv" ] || [ ! -f "$OUTPUT_DIR/model_configs_3d.csv" ]; then
echo "ERROR: Failed to generate configuration files"
exit 1
fi
# Check if running on GPU (MIOpen only logs commands when models run on an AMD GPU)
if ! command -v rocm-smi &> /dev/null; then
echo "WARNING: ROCm not detected. Models will run on CPU (no MIOpen commands)."
echo "For actual MIOpen commands, run this on a system with AMD GPU."
fi
echo ""
echo "Step 2: Running 2D/3D models and capturing MIOpen commands"
echo "-----------------------------------------"
# Process 2D models from CSV configuration file
echo "Processing 2D models from $OUTPUT_DIR/model_configs_2d.csv..."
# Count total configurations (excluding comments and header)
TOTAL_CONFIGS=$(grep -v "^#" $OUTPUT_DIR/model_configs_2d.csv | tail -n +2 | wc -l)
CURRENT_CONFIG=0
echo "Total configurations to process: $TOTAL_CONFIGS"
echo ""
# Read 2D configurations from CSV (skip comments and header)
while IFS=',' read -r config_name model batch_size channels height width precision; do
# Skip comments and empty lines
[[ "$config_name" =~ ^#.*$ ]] && continue
[[ "$config_name" == "config_name" ]] && continue # Skip header
[[ -z "$config_name" ]] && continue
# Increment counter
CURRENT_CONFIG=$((CURRENT_CONFIG + 1))
# Stop after MAX_ITERATIONS if set
if [ $MAX_ITERATIONS -gt 0 ] && [ $CURRENT_CONFIG -gt $MAX_ITERATIONS ]; then
echo -e "${RED}Stopping after $MAX_ITERATIONS iterations (testing mode)${NC}"
break
fi
# Build configuration command
CONFIG="--model $model --batch-size $batch_size --channels $channels --height $height --width $width --precision $precision"
CONFIG_NAME="$config_name"
echo -e "${GREEN}[${CURRENT_CONFIG}/${TOTAL_CONFIGS}]${NC} ${PURPLE}Running MIOpenDriver${NC} ${CYAN}2D${NC} ${YELLOW}$CONFIG_NAME${NC}: ${BLUE}$CONFIG${NC}"
# Actual run with logging; `|| true` tolerates individual model failures so
# one bad configuration does not abort the whole dataset generation
MIOPEN_ENABLE_LOGGING_CMD=1 python3 run_model_with_miopen.py \
--model $model --batch-size $batch_size --channels $channels --height $height --width $width --precision $precision \
2>> $OUTPUT_DIR/${model}_miopen_log_2d.txt || true
done < $OUTPUT_DIR/model_configs_2d.csv
# Process 3D models from CSV configuration file
echo "Processing 3D models from $OUTPUT_DIR/model_configs_3d.csv..."
# Count total 3D configurations (excluding comments and header)
TOTAL_3D_CONFIGS=$(grep -v "^#" $OUTPUT_DIR/model_configs_3d.csv | tail -n +2 | wc -l)
CURRENT_3D_CONFIG=0
echo "Total 3D configurations to process: $TOTAL_3D_CONFIGS"
echo ""
# Read 3D configurations from CSV (skip comments and header); 3D rows carry an
# extra temporal_size column compared to the 2D schema
while IFS=',' read -r config_name model batch_size channels temporal_size height width precision; do
# Skip comments and empty lines
[[ "$config_name" =~ ^#.*$ ]] && continue
[[ "$config_name" == "config_name" ]] && continue # Skip header
[[ -z "$config_name" ]] && continue
# Increment counter
CURRENT_3D_CONFIG=$((CURRENT_3D_CONFIG + 1))
# Stop after MAX_ITERATIONS if set
if [ $MAX_ITERATIONS -gt 0 ] && [ $CURRENT_3D_CONFIG -gt $MAX_ITERATIONS ]; then
echo -e "${RED}Stopping after $MAX_ITERATIONS iterations (testing mode)${NC}"
break
fi
# Build configuration command for 3D models
CONFIG="--model $model --batch-size $batch_size --channels $channels --temporal-size $temporal_size --height $height --width $width --precision $precision"
CONFIG_NAME="$config_name"
echo -e "${GREEN}[${CURRENT_3D_CONFIG}/${TOTAL_3D_CONFIGS}]${NC} ${PURPLE}Running MIOpenDriver${NC} ${CYAN}3D${NC} ${YELLOW}$CONFIG_NAME${NC}: ${BLUE}$CONFIG${NC}"
# Actual run with logging (failures tolerated, see 2D loop above)
MIOPEN_ENABLE_LOGGING_CMD=1 python3 run_model_with_miopen.py \
--model $model --batch-size $batch_size --channels $channels --temporal-size $temporal_size --height $height --width $width --precision $precision \
2>> $OUTPUT_DIR/${model}_miopen_log_3d.txt || true
done < $OUTPUT_DIR/model_configs_3d.csv
echo ""
echo "Step 3: Converting MIOpen commands to CSV test cases"
echo "-----------------------------------------"
# Convert 2D MIOpen logs to CSV
echo "Converting 2D MIOpen logs to CSV..."
for log_file in $OUTPUT_DIR/*_miopen_log_2d.txt; do
if [ -f "$log_file" ]; then
# Extract model name from filename (e.g., resnet_miopen_log_2d.txt -> resnet)
base_name=$(basename "$log_file" _miopen_log_2d.txt)
output_csv="$OUTPUT_DIR/${base_name}_cases_2d.csv"
echo "  Converting $log_file -> $output_csv"
python3 miopen_to_csv.py \
--input "$log_file" \
--output-2d "$output_csv" \
--model-name "$base_name" \
--filter-duplicates || true
fi
done
# Convert 3D MIOpen logs to CSV
echo "Converting 3D MIOpen logs to CSV..."
for log_file in $OUTPUT_DIR/*_miopen_log_3d.txt; do
if [ -f "$log_file" ]; then
# Extract model name from filename (e.g., resnet3d_18_miopen_log_3d.txt -> resnet3d_18)
base_name=$(basename "$log_file" _miopen_log_3d.txt)
output_csv="$OUTPUT_DIR/${base_name}_cases_3d.csv"
echo "  Converting $log_file -> $output_csv"
python3 miopen_to_csv.py \
--input "$log_file" \
--output-3d "$output_csv" \
--model-name "$base_name" \
--filter-duplicates || true
fi
done
echo ""
echo "Step 4: Combining CSV files into final datasets"
echo "-----------------------------------------"
# Combine all 2D CSV files into one
echo "Combining all 2D test cases..."
# First create empty file with comment headers
echo "# 2D Convolution Test Cases" > conv_test_set_2d_dataset.csv
echo "# Combined from multiple models" >> conv_test_set_2d_dataset.csv
# Add header from first file as a comment (the C++ CSV reader skips '#' lines
# and the NDim header, so the column names are preserved for humans only)
first_2d_file=$(ls $OUTPUT_DIR/*_cases_2d.csv 2>/dev/null | head -1)
if [ -f "$first_2d_file" ]; then
# Get the CSV header line and prefix with #
header_line=$(grep "^NDim," "$first_2d_file" | head -1)
if [ ! -z "$header_line" ]; then
echo "# $header_line" >> conv_test_set_2d_dataset.csv
fi
fi
# Append all data rows (skip comment lines and CSV header) from all files
for csv_file in $OUTPUT_DIR/*_cases_2d.csv; do
if [ -f "$csv_file" ]; then
# Skip lines starting with # and the NDim header line
grep -v "^#" "$csv_file" | grep -v "^NDim," >> conv_test_set_2d_dataset.csv 2>/dev/null || true
fi
done
# Combine all 3D CSV files into one
echo "Combining all 3D test cases..."
# First create empty file with comment headers
echo "# 3D Convolution Test Cases" > conv_test_set_3d_dataset.csv
echo "# Combined from multiple models" >> conv_test_set_3d_dataset.csv
# Add header from first file as a comment
first_3d_file=$(ls $OUTPUT_DIR/*_cases_3d.csv 2>/dev/null | head -1)
if [ -f "$first_3d_file" ]; then
# Get the CSV header line and prefix with #
header_line=$(grep "^NDim," "$first_3d_file" | head -1)
if [ ! -z "$header_line" ]; then
echo "# $header_line" >> conv_test_set_3d_dataset.csv
fi
fi
# Append all data rows (skip comment lines and CSV header) from all files
for csv_file in $OUTPUT_DIR/*_cases_3d.csv; do
if [ -f "$csv_file" ]; then
# Skip lines starting with # and the NDim header line
grep -v "^#" "$csv_file" | grep -v "^NDim," >> conv_test_set_3d_dataset.csv 2>/dev/null || true
fi
done
# Count test cases in the combined datasets (comments and the blank header
# position excluded by grep/tail)
COUNT_2D=0
COUNT_3D=0
if [ -f "conv_test_set_2d_dataset.csv" ]; then
COUNT_2D=$(grep -v "^#" conv_test_set_2d_dataset.csv | tail -n +2 | wc -l)
fi
if [ -f "conv_test_set_3d_dataset.csv" ]; then
COUNT_3D=$(grep -v "^#" conv_test_set_3d_dataset.csv | tail -n +2 | wc -l)
fi
echo ""
echo "=========================================="
echo "Dataset Generation Complete!"
echo "=========================================="
echo ""
echo "Generated files:"
if [ $COUNT_2D -gt 0 ]; then
echo "  - conv_test_set_2d_dataset.csv: $COUNT_2D test cases"
fi
if [ $COUNT_3D -gt 0 ]; then
echo "  - conv_test_set_3d_dataset.csv: $COUNT_3D test cases"
fi
echo "  - Intermediate files in: $OUTPUT_DIR/"
echo ""
echo "To use these datasets:"
echo "  1. Build the test: cd ../script && make -j64 test_grouped_convnd_fwd_dataset_xdl"
echo "  2. Run the test: ./bin/test_grouped_convnd_fwd_dataset_xdl"
echo ""

363
test_data/miopen_to_csv.py Normal file
View File

@@ -0,0 +1,363 @@
#!/usr/bin/env python3
"""
Convert MIOpen Driver Commands to CSV Test Cases
Parses MIOpen driver commands from log files and converts them to CSV format
for CK convolution testing.
Usage:
python3 miopen_to_csv.py --input miopen_commands.txt --output conv_cases.csv
python3 miopen_to_csv.py --input miopen_log.txt --output-2d conv_2d.csv --output-3d conv_3d.csv
"""
import argparse
import csv
import re
import os
def parse_miopen_command(command_line):
    """Parse a MIOpen driver command line into a flag -> int dictionary.

    Example input:
        ./bin/MIOpenDriver conv -n 4 -c 3 -H 224 -W 224 -k 64 -y 3 -x 3 \
            -p 1 -q 1 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 1 -t 1

    Returns a dict of the recognized flags, or None when the line is not a
    MIOpenDriver conv command or no flag matched.
    """
    if not command_line.strip().startswith('./bin/MIOpenDriver conv'):
        return None
    # Flag -> regex table. Depth-related 3D flags accept both the short
    # (-D, -z, -w, -s, -r) and long (--in_d, --fil_d, ...) spellings.
    param_patterns = {
        'n': r'-n\s+(\d+)',                      # batch size
        'c': r'-c\s+(\d+)',                      # input channels
        'k': r'-k\s+(\d+)',                      # output channels
        'H': r'-H\s+(\d+)',                      # input height
        'W': r'-W\s+(\d+)',                      # input width
        'D': r'(?:-D|--in_d)\s+(\d+)',           # input depth (3D only)
        'y': r'-y\s+(\d+)',                      # kernel height
        'x': r'-x\s+(\d+)',                      # kernel width
        'z': r'(?:-z|--fil_d)\s+(\d+)',          # kernel depth (3D only)
        'u': r'-u\s+(\d+)',                      # stride height
        'v': r'-v\s+(\d+)',                      # stride width
        'w': r'(?:-w|--conv_stride_d)\s+(\d+)',  # stride depth (3D only)
        'p': r'-p\s+(\d+)',                      # pad height
        'q': r'-q\s+(\d+)',                      # pad width
        's': r'(?:-s|--pad_d)\s+(\d+)',          # pad depth (3D only)
        'l': r'-l\s+(\d+)',                      # dilation height
        'j': r'-j\s+(\d+)',                      # dilation width
        'r': r'(?:-r|--dilation_d)\s+(\d+)',     # dilation depth (3D only)
        'g': r'-g\s+(\d+)',                      # groups
        'F': r'-F\s+(\d+)',                      # direction (1=fwd, 2=bwd_weight, 4=bwd_data)
    }
    parsed = {}
    for flag, pattern in param_patterns.items():
        hit = re.search(pattern, command_line)
        if hit is not None:
            parsed[flag] = int(hit.group(1))
    # An empty dict means nothing matched -> treat as a parse failure
    return parsed or None
def _conv_output_size(input_size, pad, dilation, kernel, stride):
    """Output length of one spatial dimension for a (possibly dilated) convolution."""
    return (input_size + 2 * pad - dilation * (kernel - 1) - 1) // stride + 1


def miopen_to_conv_param(miopen_params):
    """
    Convert MIOpen parameters to CK ConvParam format.

    MIOpen reports total channel counts (C and K cover all groups) while CK
    expects per-group counts, so both are divided by the group count.

    Returns a CSV-ready dict, or None when miopen_params is empty/None or the
    configuration is invalid: channel counts not divisible by the group count
    (silent truncation would describe a different convolution), a non-positive
    stride, or a non-positive computed output size.
    """
    if not miopen_params:
        return None

    # A configuration is 3D if any depth-related flag was present
    is_3d = any(flag in miopen_params for flag in ('D', 'z', 'w', 'r', 's'))
    ndim = 3 if is_3d else 2
    groups = miopen_params.get('g', 1)
    batch_size = miopen_params.get('n', 1)

    # MIOpen uses total channels (C*G), CK uses channels per group
    out_channels_total = miopen_params.get('k', 64)
    in_channels_total = miopen_params.get('c', 3)
    # Reject configurations whose channel counts cannot be split evenly across
    # groups -- integer division here would silently change the problem.
    if groups <= 0 or out_channels_total % groups != 0 or in_channels_total % groups != 0:
        return None
    out_channels = out_channels_total // groups  # CK format: channels per group
    in_channels = in_channels_total // groups    # CK format: channels per group

    direction = miopen_params.get('F', 1)  # 1=fwd, 2=bwd_weight, 4=bwd_data
    direction_name = {1: 'fwd', 2: 'bwd_weight', 4: 'bwd_data'}.get(direction, 'fwd')

    if is_3d:
        # 3D convolution
        kernel_d = miopen_params.get('z', 3)
        kernel_h = miopen_params.get('y', 3)
        kernel_w = miopen_params.get('x', 3)
        input_d = miopen_params.get('D', 16)
        input_h = miopen_params.get('H', 32)
        input_w = miopen_params.get('W', 32)
        stride_d = miopen_params.get('w', 1)
        stride_h = miopen_params.get('u', 1)
        stride_w = miopen_params.get('v', 1)
        dilation_d = miopen_params.get('r', 1)
        dilation_h = miopen_params.get('l', 1)
        dilation_w = miopen_params.get('j', 1)
        pad_d = miopen_params.get('s', 0)
        pad_h = miopen_params.get('p', 0)
        pad_w = miopen_params.get('q', 0)

        # Guard against division by zero in the output-size formula
        if stride_d <= 0 or stride_h <= 0 or stride_w <= 0:
            return None

        # Calculate output dimensions
        output_d = _conv_output_size(input_d, pad_d, dilation_d, kernel_d, stride_d)
        output_h = _conv_output_size(input_h, pad_h, dilation_h, kernel_h, stride_h)
        output_w = _conv_output_size(input_w, pad_w, dilation_w, kernel_w, stride_w)

        # Skip invalid configurations
        if output_d <= 0 or output_h <= 0 or output_w <= 0:
            return None

        return {
            'NDim': ndim,
            'Groups': groups,
            'BatchSize': batch_size,
            'OutChannels': out_channels,
            'InChannels': in_channels,
            'KernelD': kernel_d, 'KernelH': kernel_h, 'KernelW': kernel_w,
            'InputD': input_d, 'InputH': input_h, 'InputW': input_w,
            'OutputD': output_d, 'OutputH': output_h, 'OutputW': output_w,
            'StrideD': stride_d, 'StrideH': stride_h, 'StrideW': stride_w,
            'DilationD': dilation_d, 'DilationH': dilation_h, 'DilationW': dilation_w,
            'LeftPadD': pad_d, 'LeftPadH': pad_h, 'LeftPadW': pad_w,
            'RightPadD': pad_d, 'RightPadH': pad_h, 'RightPadW': pad_w,
            'TestName': f'MIOpen_3D_{direction_name}'
        }
    else:
        # 2D convolution
        kernel_h = miopen_params.get('y', 3)
        kernel_w = miopen_params.get('x', 3)
        input_h = miopen_params.get('H', 32)
        input_w = miopen_params.get('W', 32)
        stride_h = miopen_params.get('u', 1)
        stride_w = miopen_params.get('v', 1)
        dilation_h = miopen_params.get('l', 1)
        dilation_w = miopen_params.get('j', 1)
        pad_h = miopen_params.get('p', 0)
        pad_w = miopen_params.get('q', 0)

        # Guard against division by zero in the output-size formula
        if stride_h <= 0 or stride_w <= 0:
            return None

        # Calculate output dimensions
        output_h = _conv_output_size(input_h, pad_h, dilation_h, kernel_h, stride_h)
        output_w = _conv_output_size(input_w, pad_w, dilation_w, kernel_w, stride_w)

        # Skip invalid configurations
        if output_h <= 0 or output_w <= 0:
            return None

        return {
            'NDim': ndim,
            'Groups': groups,
            'BatchSize': batch_size,
            'OutChannels': out_channels,
            'InChannels': in_channels,
            'KernelH': kernel_h, 'KernelW': kernel_w,
            'InputH': input_h, 'InputW': input_w,
            'OutputH': output_h, 'OutputW': output_w,
            'StrideH': stride_h, 'StrideW': stride_w,
            'DilationH': dilation_h, 'DilationW': dilation_w,
            'LeftPadH': pad_h, 'LeftPadW': pad_w,
            'RightPadH': pad_h, 'RightPadW': pad_w,
            'TestName': f'MIOpen_2D_{direction_name}'
        }
def write_csv_cases(test_cases, output_file, ndim):
    """Write test cases to CSV file"""
    if not test_cases:
        print(f"No {ndim}D test cases to write")
        return

    print(f"Writing {len(test_cases)} {ndim}D test cases to {output_file}")

    # Build the header list: five common columns, then each parameter family
    # expanded over the spatial axes (D only for 3D), then the test name.
    # This reproduces the exact column order the downstream reader expects.
    axes = ['H', 'W'] if ndim == 2 else ['D', 'H', 'W']
    families = ['Kernel', 'Input', 'Output', 'Stride', 'Dilation', 'LeftPad', 'RightPad']
    headers = (['NDim', 'Groups', 'BatchSize', 'OutChannels', 'InChannels']
               + [f'{family}{axis}' for family in families for axis in axes]
               + ['TestName'])

    with open(output_file, 'w', newline='') as csvfile:
        # Leading '#' comment lines describe the file for human readers
        csvfile.write(f"# {ndim}D Convolution Test Cases from MIOpen Commands\n")
        csvfile.write(f"# Generated {len(test_cases)} test cases\n")

        writer = csv.DictWriter(csvfile, fieldnames=headers)
        writer.writeheader()
        for case in test_cases:
            # Drop any keys that are not part of this dimension's header set
            writer.writerow({field: case[field] for field in headers if field in case})
def _extract_driver_command(line):
    """
    Normalize a log line to a canonical './bin/MIOpenDriver conv ...' command.

    Handles both direct commands and logged lines with arbitrary prefixes or a
    non-standard driver path. Returns the normalized command string, or None
    when the line does not contain a MIOpenDriver conv invocation.
    """
    if 'MIOpenDriver conv' not in line:
        return None
    command_start = line.find('./bin/MIOpenDriver conv')
    if command_start != -1:
        return line[command_start:]
    # Path prefix differs from './bin/' -- rebuild the standard format
    driver_start = line.find('MIOpenDriver conv')
    if driver_start != -1:
        return './bin/' + line[driver_start:]
    return None


def _dedup_key(case):
    """Tuple of the parameters that identify a unique test case."""
    if case['NDim'] == 2:
        return (case['Groups'], case['BatchSize'], case['OutChannels'], case['InChannels'],
                case['KernelH'], case['KernelW'], case['InputH'], case['InputW'],
                case['StrideH'], case['StrideW'])
    return (case['Groups'], case['BatchSize'], case['OutChannels'], case['InChannels'],
            case['KernelD'], case['KernelH'], case['KernelW'],
            case['InputD'], case['InputH'], case['InputW'],
            case['StrideD'], case['StrideH'], case['StrideW'])


def _dedup(cases):
    """Return cases with duplicates (by _dedup_key) removed, input order preserved."""
    seen = set()
    unique = []
    for case in cases:
        key = _dedup_key(case)
        if key not in seen:
            seen.add(key)
            unique.append(case)
    return unique


def main():
    """
    CLI entry point: parse MIOpen driver commands from --input and write the
    converted CK test cases either to one mixed CSV (--output) or to separate
    2D/3D CSVs (--output-2d / --output-3d). Returns a process exit code.
    """
    parser = argparse.ArgumentParser(description='Convert MIOpen commands to CSV test cases')
    parser.add_argument('--input', type=str, required=True,
                        help='Input file with MIOpen driver commands')
    parser.add_argument('--output', type=str,
                        help='Output CSV file (for mixed 2D/3D cases)')
    parser.add_argument('--output-2d', type=str, default='miopen_conv_2d.csv',
                        help='Output CSV file for 2D cases')
    parser.add_argument('--output-3d', type=str, default='miopen_conv_3d.csv',
                        help='Output CSV file for 3D cases')
    parser.add_argument('--filter-duplicates', action='store_true',
                        help='Remove duplicate test cases')
    parser.add_argument('--model-name', type=str, default='MIOpen',
                        help='Model name to use in test case names (default: MIOpen)')
    args = parser.parse_args()

    if not os.path.exists(args.input):
        print(f"ERROR: Input file not found: {args.input}")
        return 1

    print(f"Parsing MIOpen commands from {args.input}...")

    test_cases_2d = []
    test_cases_3d = []
    total_lines = 0
    parsed_lines = 0

    with open(args.input, 'r') as f:
        for line_num, line in enumerate(f, 1):
            total_lines += 1
            line = line.strip()
            # Skip empty lines and lines without a MIOpenDriver conv command
            if not line:
                continue
            command = _extract_driver_command(line)
            if command is None:
                continue

            try:
                miopen_params = parse_miopen_command(command)
                if not miopen_params:
                    continue
                conv_param = miopen_to_conv_param(miopen_params)
                if not conv_param:
                    continue

                # Re-brand the test name with the requested model name while
                # keeping the direction suffix (fwd/bwd_weight/bwd_data) that
                # miopen_to_conv_param derived from the -F flag. The previous
                # code hard-coded '_fwd' and mislabeled backward cases.
                direction_suffix = conv_param['TestName'].split('_', 2)[2]
                conv_param['TestName'] = f"{args.model_name}_{conv_param['NDim']}D_{direction_suffix}"

                # Separate 2D and 3D cases
                if conv_param['NDim'] == 2:
                    test_cases_2d.append(conv_param)
                else:
                    test_cases_3d.append(conv_param)
                parsed_lines += 1
            except Exception as e:
                print(f"WARNING: Failed to parse line {line_num}: {e}")
                continue

    print(f"Processed {total_lines} lines, parsed {parsed_lines} commands")
    print(f"Found {len(test_cases_2d)} 2D cases, {len(test_cases_3d)} 3D cases")

    if args.filter_duplicates:
        test_cases_2d = _dedup(test_cases_2d)
        test_cases_3d = _dedup(test_cases_3d)
        print(f"After deduplication: {len(test_cases_2d)} 2D cases, {len(test_cases_3d)} 3D cases")

    if args.output:
        # Write mixed cases to a single file. NOTE: the mixed file uses the 2D
        # header set with extrasaction='ignore', so 3D-only columns (depth) are
        # dropped for 3D cases -- extend mixed_headers if full 3D data is needed.
        all_cases = test_cases_2d + test_cases_3d
        if all_cases:
            print(f"Writing {len(all_cases)} total cases to {args.output}")
            mixed_headers = ['NDim', 'Groups', 'BatchSize', 'OutChannels', 'InChannels',
                             'KernelH', 'KernelW', 'InputH', 'InputW', 'OutputH', 'OutputW',
                             'StrideH', 'StrideW', 'DilationH', 'DilationW',
                             'LeftPadH', 'LeftPadW', 'RightPadH', 'RightPadW', 'TestName']
            with open(args.output, 'w', newline='') as csvfile:
                csvfile.write("# Mixed 2D/3D Convolution Test Cases from MIOpen Commands\n")
                writer = csv.DictWriter(csvfile, fieldnames=mixed_headers, extrasaction='ignore')
                writer.writeheader()
                for case in all_cases:
                    writer.writerow(case)
    else:
        # Write separate files for 2D and 3D
        if test_cases_2d:
            write_csv_cases(test_cases_2d, args.output_2d, 2)
        if test_cases_3d:
            write_csv_cases(test_cases_3d, args.output_3d, 3)

    print("Conversion completed!")
    return 0
# Script entry point: propagate main()'s return value as the process exit code.
if __name__ == "__main__":
    exit(main())

View File

@@ -0,0 +1,136 @@
#!/usr/bin/env python3
"""
PyTorch Model Runner with MIOpen Command Logging using torchvision models
Usage:
MIOPEN_ENABLE_LOGGING_CMD=1 python3 run_model_with_miopen.py --model resnet18 2> miopen_commands.txt
Available 2D models: alexnet, vgg11, vgg16, resnet18, resnet50, mobilenet_v2, etc.
Available 3D models: r3d_18, mc3_18, r2plus1d_18
"""
import torch
import torch.nn as nn
import torchvision.models as models
import torchvision.models.video as video_models
import argparse
import os
# Define available models
# 2D image-classification architectures loaded from torchvision.models.
# These names are passed to argparse `choices`, so only the entries listed
# here are accepted for --model.
MODELS_2D = [
    'alexnet', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', 'vgg19', 'vgg19_bn',
    'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
    'resnext50_32x4d', 'resnext101_32x8d', 'resnext101_64x4d',
    'wide_resnet50_2', 'wide_resnet101_2',
    'densenet121', 'densenet161', 'densenet169', 'densenet201',
    'inception_v3', 'googlenet',
    'shufflenet_v2_x0_5', 'shufflenet_v2_x1_0', 'shufflenet_v2_x1_5', 'shufflenet_v2_x2_0',
    'mobilenet_v2', 'mobilenet_v3_large', 'mobilenet_v3_small',
    'mnasnet0_5', 'mnasnet0_75', 'mnasnet1_0', 'mnasnet1_3',
    'squeezenet1_0', 'squeezenet1_1'
]
# 3D video-classification architectures from torchvision.models.video; these
# take a 5D (B, C, T, H, W) input instead of the 2D models' (B, C, H, W).
MODELS_3D = [
    'r3d_18', 'mc3_18', 'r2plus1d_18'
]
# Full set of model names offered on the command line.
ALL_MODELS = MODELS_2D + MODELS_3D
def _build_arg_parser():
    """Construct the command-line parser (extracted for readability)."""
    parser = argparse.ArgumentParser(description='PyTorch Model Runner with MIOpen Command Logging')
    # Model selection
    parser.add_argument('--model', choices=ALL_MODELS, default='resnet18',
                        help='Model to run')
    # Input tensor dimensions
    parser.add_argument('--batch-size', type=int, default=4,
                        help='Batch size')
    parser.add_argument('--channels', type=int, default=3,
                        help='Input channels (e.g., 3 for RGB, 1 for grayscale)')
    parser.add_argument('--height', type=int, default=224,
                        help='Input height')
    parser.add_argument('--width', type=int, default=224,
                        help='Input width')
    parser.add_argument('--input-size', type=int,
                        help='Input size (sets both height and width to same value)')
    parser.add_argument('--temporal-size', type=int, default=16,
                        help='Temporal dimension for 3D models')
    # Device and precision
    parser.add_argument('--device', choices=['cuda', 'cpu', 'auto'], default='auto',
                        help='Device to run on')
    parser.add_argument('--precision', choices=['fp32', 'fp16', 'bf16'], default='fp32',
                        help='Floating point precision')
    # Output control
    parser.add_argument('--quiet', action='store_true',
                        help='Suppress output except errors')
    parser.add_argument('--verbose', action='store_true',
                        help='Verbose output')
    return parser


def main():
    """Build the chosen torchvision model and run one forward pass so that
    MIOpen (with MIOPEN_ENABLE_LOGGING_CMD=1) logs its conv driver commands."""
    opts = _build_arg_parser().parse_args()
    quiet = opts.quiet

    # --input-size overrides both height and width
    if opts.input_size:
        opts.height = opts.input_size
        opts.width = opts.input_size

    # Without this env var MIOpen will not emit the driver commands we want
    if not os.environ.get('MIOPEN_ENABLE_LOGGING_CMD') and not quiet:
        print("WARNING: Set MIOPEN_ENABLE_LOGGING_CMD=1 to capture commands")

    # Pick the execution device ('auto' prefers CUDA/ROCm when available)
    if opts.device == 'auto':
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    else:
        device = torch.device(opts.device)
    if not quiet:
        print(f"Using device: {device}")

    # Instantiate the model (randomly initialized) and a matching random input
    if opts.model in MODELS_3D:
        # 3D video models expect (batch, channels, temporal, height, width)
        model = getattr(video_models, opts.model)(weights=None)
        input_tensor = torch.randn(opts.batch_size, opts.channels, opts.temporal_size, opts.height, opts.width)
        if not quiet:
            print(f"3D model: {opts.model}")
            print(f"Input shape: {input_tensor.shape} (B, C, T, H, W)")
    else:
        # 2D image models expect (batch, channels, height, width)
        model = getattr(models, opts.model)(weights=None)
        input_tensor = torch.randn(opts.batch_size, opts.channels, opts.height, opts.width)
        if not quiet:
            print(f"2D model: {opts.model}")
            print(f"Input shape: {input_tensor.shape} (B, C, H, W)")

    # Cast model and input together to the requested precision
    if opts.precision == 'fp16':
        model, input_tensor = model.half(), input_tensor.half()
    elif opts.precision == 'bf16':
        model, input_tensor = model.bfloat16(), input_tensor.bfloat16()

    model = model.to(device)
    input_tensor = input_tensor.to(device)

    if not quiet:
        print(f"Running {opts.model} model...")

    # Single inference pass -- this is what triggers the MIOpen conv calls
    model.eval()
    with torch.no_grad():
        output = model(input_tensor)

    if not quiet:
        print(f"Output shape: {output.shape}")
    if not quiet:
        print("Done! MIOpen commands logged to stderr")
# Script entry point (no exit-code propagation; main() returns None).
if __name__ == "__main__":
    main()