[rocm-libraries] ROCm/rocm-libraries#4797 (commit 1a30400)

[CK_TILE] Add CK Tile bwd weight profiler
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## Motivation

To compare old CK and CK Tile, we need to extend the current CK profiler
to also support running CK Tile instances with the same API. In order to
have the same instance coverage in CK Tile compared to the old CK, I've
added code generation from old CK configurations to CK Tile instances
using the CK Builder.

## Technical Details

- The codegen Python script for CK Tile fwd convs is extended to also
support bwd weight and bwd data.
- The generated instances are added to the CMake build (target
`device_grouped_conv_bwd_weight_tile_instances`).
- A new profiler op (`grouped_conv_bwd_weight_tile`) has been added to
the CK Profiler.
This commit is contained in:
Ville Pietilä
2026-03-04 21:50:29 +00:00
committed by assistant-librarian[bot]
parent fc1e1a5155
commit ae4e632c7d
68 changed files with 5194 additions and 196 deletions

View File

@@ -0,0 +1,229 @@
// Copyright (c) Advanced Micro Devices, Inc., or its affiliates.
// SPDX-License-Identifier: MIT
#pragma once
#include <iostream>
#include <tuple>
#include "../../experimental/builder/test/utils/conv_algorithm_type_utils.hpp"
#include "grouped_convolution_signatures.hpp"
#include "ck_tile/ref/naive_grouped_conv_bwd_weight_gpu.hpp"
#include "ck_tile/builder/testing/filter_extent.hpp"
#include "ck_tile/builder/testing/conv/fwd.hpp"
#include "ck_tile/builder/testing/conv/ck_tile.hpp"
#include "ck_tile/builder/testing/conv/reference.hpp"
#include "ck_tile/builder/conv_builder.hpp"
namespace ck_tile::builder::profiling {
namespace ckb = ck_tile::builder;
namespace ckt = ck_tile::builder::test;
#include "../../../experimental/grouped_convolution_tile_instances/instances/backward_weight/grouped_convolution_backward_weight_tile_ndhwgc_fp32.inc"
#include "../../../experimental/grouped_convolution_tile_instances/instances/backward_weight/grouped_convolution_backward_weight_tile_nhwgc_fp32.inc"
#include "../../../experimental/grouped_convolution_tile_instances/instances/backward_weight/grouped_convolution_backward_weight_tile_nhwgc_bf16.inc"
#include "../../../experimental/grouped_convolution_tile_instances/instances/backward_weight/grouped_convolution_backward_weight_tile_nhwgc_fp16.inc"
#include "../../../experimental/grouped_convolution_tile_instances/instances/backward_weight/grouped_convolution_backward_weight_tile_ndhwgc_bf16.inc"
#include "../../../experimental/grouped_convolution_tile_instances/instances/backward_weight/grouped_convolution_backward_weight_tile_ndhwgc_fp16.inc"
/// @brief Translate the profiler's `split_k` CLI argument into the list of
///        split-K values to benchmark.
///
/// "all" expands to a fixed sweep (where -1 requests automatic deduction);
/// any other string is parsed as a single integer. A parse failure prints the
/// error and terminates the process, matching the profiler's fail-fast CLI
/// handling.
std::vector<int> get_split_k_values(const std::string& split_k)
{
    if(split_k == "all")
    {
        // -1 asks the instance to deduce a suitable split-K value itself.
        return {-1, 1, 2, 4, 8, 16, 32, 64, 128};
    }
    try
    {
        return {std::stoi(split_k)};
    }
    catch(const std::exception& e)
    {
        std::cerr << e.what() << '\n';
        exit(EXIT_FAILURE);
    }
}
/// @brief Host-side comparison of the computed weight gradient against the
///        reference, via `ck_tile::check_err` (prints per-value mismatches).
///
/// Used as a fallback diagnostic when the device-side validation report flags
/// errors, to dump the actual differing values.
///
/// @param args      Problem description; used to derive the weight buffer size.
/// @param outputs   Device buffers produced by the instance under test.
/// @param reference Device buffers produced by the reference implementation.
template <auto SIGNATURE>
void run_cpu_validation(const ckt::Args<SIGNATURE>& args,
const ckt::Outputs<SIGNATURE>& outputs,
const ckt::Outputs<SIGNATURE>& reference)
{
// Map the signature's data-type enum onto the corresponding host type.
using DataType =
std::conditional_t<SIGNATURE.data_type == ckb::DataType::FP32,
float,
std::conditional_t<SIGNATURE.data_type == ckb::DataType::FP16,
ck_tile::half_t,
ck_tile::bfloat16_t>>;
const auto conv_param = args.to_ck_tile_conv_param();
const std::size_t weight_bytes_num = conv_param.template GetWeightByte<DataType>();
std::vector<DataType> wei(weight_bytes_num / sizeof(DataType));
std::vector<DataType> ref(weight_bytes_num / sizeof(DataType));
// Copy both device-resident weight buffers to the host before comparing.
HIP_CHECK_ERROR(
hipMemcpy(&ref.data()[0], reference.weight, weight_bytes_num, hipMemcpyDeviceToHost));
HIP_CHECK_ERROR(
hipMemcpy(&wei.data()[0], outputs.weight, weight_bytes_num, hipMemcpyDeviceToHost));
ck_tile::check_err(wei, ref, "\tError: Incorrect results!");
}
/// @brief Compute relative/absolute comparison thresholds for validation.
///
/// Thresholds are derived both from the per-split accumulation count at the
/// compute precision and from the extra error introduced by summing split-K
/// partial results at the weight precision; the looser of the two is used.
///
/// @param num_accums            Total accumulation steps per output element.
/// @param num_accums_split_k    Number of split-K partial results being summed.
/// @param max_accumulated_value Largest value observed in the reference output.
/// @return (rtol, atol) pair.
template <auto SIGNATURE>
std::tuple<double, double>
get_rtol_atol(const int num_accums, const int num_accums_split_k, const float max_accumulated_value)
{
// Map the signature's data-type enum onto the corresponding host type.
using WeiDataType =
std::conditional_t<SIGNATURE.data_type == ckb::DataType::FP32,
float,
std::conditional_t<SIGNATURE.data_type == ckb::DataType::FP16,
ck_tile::half_t,
ck_tile::bfloat16_t>>;
using ComputeType = WeiDataType;
using AccDataType = float;
// Error from the accumulation done within a single split-K slice.
auto rtol = ck_tile::get_relative_threshold<ComputeType, WeiDataType, AccDataType>(
num_accums / num_accums_split_k);
auto atol = ck_tile::get_absolute_threshold<ComputeType, WeiDataType, AccDataType>(
max_accumulated_value / num_accums_split_k, num_accums / num_accums_split_k);
// Calculate error due to split_k accumulation
auto rtol_split_k =
ck_tile::get_relative_threshold<WeiDataType, WeiDataType, WeiDataType>(num_accums_split_k);
auto atol_split_k = ck_tile::get_absolute_threshold<WeiDataType, WeiDataType, WeiDataType>(
max_accumulated_value, num_accums_split_k);
// Use higher threshold
rtol = std::max(rtol, rtol_split_k);
atol = std::max(atol, atol_split_k);
return std::make_tuple(rtol, atol);
}
/// @brief `run_grouped_conv_backward_weight_tile_algs()` runs all grouped conv
///        backward-weight CK Tile instances for the given signature, validates
///        each against the reference implementation, and reports the fastest
///        valid one.
///
/// @tparam SIGNATURE Backward-weight convolution signature.
///
/// @param args    Problem description (lengths, strides, pads, elementwise ops).
/// @param split_k "all" to sweep a fixed list of split-K values, otherwise a single value.
/// @param inputs  Device input buffers.
/// @param outputs Device output buffers written by the instances under test.
/// @param s_conf  Stream configuration used to launch and time the kernels.
///
/// @return (all instances valid, best average time [ms], best instance name,
///         best split-K value; -1 when no instance produced a valid result).
template <auto SIGNATURE>
std::tuple<bool, float, std::string, int>
run_grouped_conv_backward_weight_tile_algs(const ckt::Args<SIGNATURE>& args,
                                           const std::string& split_k,
                                           const ckt::Inputs<SIGNATURE>& inputs,
                                           const ckt::Outputs<SIGNATURE>& outputs,
                                           const ck_tile::stream_config& s_conf)
{
    float best_avg_time = std::numeric_limits<float>::max();
    std::string best_op_name, op_name;
    // Initialize everything the lambda below assigns: previously `best_split_k`
    // could be returned uninitialized (UB) when no instance was supported/valid.
    int best_split_k         = -1;
    bool is_supported        = false;
    float avg_time           = 0.0f;
    bool all_instances_valid = true;

    // Map the signature's data-type enum onto the corresponding host type.
    using DataType =
        std::conditional_t<SIGNATURE.data_type == ckb::DataType::FP32,
                           float,
                           std::conditional_t<SIGNATURE.data_type == ckb::DataType::FP16,
                                              ck_tile::half_t,
                                              ck_tile::bfloat16_t>>;

    // Run the reference implementation once; every instance is validated against it.
    auto reference = ckt::alloc_outputs(args);
    using ReferenceInstance =
        typename ckb::ConvBuilder<SIGNATURE, ckt::ConvAlgorithm_Reference{}>::Instance;
    auto ref_conv   = ReferenceInstance{};
    auto ref_result = ckt::run(ref_conv, args, inputs, reference.get());

    const auto conv_param = args.to_ck_tile_conv_param();

    // Get max possible value in the output (used to scale the error thresholds).
    const std::size_t weight_bytes_num = conv_param.template GetWeightByte<DataType>();
    std::vector<DataType> ref(weight_bytes_num / sizeof(DataType));
    HIP_CHECK_ERROR(
        hipMemcpy(&ref.data()[0], reference.get().weight, weight_bytes_num, hipMemcpyDeviceToHost));
    const float max_accumulated_value = *std::max_element(ref.begin(), ref.end());

    // Accumulation steps per weight element: N * product(output spatial lengths).
    const index_t num_accums = std::accumulate(std::begin(conv_param.output_spatial_lengths_),
                                               std::end(conv_param.output_spatial_lengths_),
                                               static_cast<std::size_t>(1),
                                               std::multiplies<std::size_t>()) *
                               conv_param.N_;

    const auto split_k_values = get_split_k_values(split_k);

    auto run_alg = [&](auto&& run_alg_func) {
        for(auto& k_batch : split_k_values)
        {
            // NOTE(review): `k_batch` is only used for tolerances and reporting —
            // it is not passed to `run_alg_func`. Confirm the generated calls pick
            // up the split-K value themselves, otherwise every iteration benchmarks
            // the same configuration.
            std::tie(is_supported, avg_time, op_name) = run_alg_func(args, inputs, outputs, s_conf);
            if(is_supported)
            {
                ckt::ValidationReport report;
                auto&& [rtol, atol] =
                    get_rtol_atol<SIGNATURE>(num_accums, k_batch, max_accumulated_value);
                // Compare every output tensor of the instance against the reference.
                ckt::Outputs<SIGNATURE>::reflect(
                    args,
                    [&](std::string_view name,
                        const auto& desc,
                        void* ckt::Outputs<SIGNATURE>::*ptr) {
                        report.check(name, desc, outputs.*ptr, reference.get().*ptr, rtol, atol);
                    });
                const bool valid = report.get_errors().empty();
                if(valid)
                {
                    // `<=` keeps the original tie-breaking behavior: on an exact
                    // tie the most recently measured instance wins.
                    if(avg_time <= best_avg_time)
                    {
                        best_avg_time = avg_time;
                        best_op_name  = op_name;
                        best_split_k  = k_batch;
                    }
                    std::cout << "[Valid] Perf: " << std::setw(10) << avg_time << " ms," << " "
                              << op_name << ", SplitK " << k_batch << std::endl;
                }
                else
                {
                    std::cout << "[Error] " << op_name << ", SplitK " << k_batch << std::endl;
                    for(const auto& error : report.get_errors())
                    {
                        std::cout << "\tNumber of incorrect values: " << error.wrong_elements
                                  << " Is all zero:" << error.is_all_zero()
                                  << " max err: " << error.max_error << std::endl;
                        // Check with cpu verification to get a values
                        run_cpu_validation<SIGNATURE>(args, outputs, reference.get());
                    }
                    all_instances_valid = false;
                }
            }
            else
            {
                std::cout << "[Not supported] " << op_name << ", SplitK " << k_batch << std::endl;
            }
        }
    };

    // Dispatch to the generated instance calls matching the signature.
    if constexpr(SIGNATURE == SIGNATURE_NHWGC_FP16_BWD_WEIGHT)
    {
#include "../../experimental/grouped_convolution_tile_instances/instances/backward_weight/grouped_convolution_backward_weight_tile_nhwgc_fp16_calls.inc"
    }
    else if constexpr(SIGNATURE == SIGNATURE_NHWGC_BF16_BWD_WEIGHT)
    {
#include "../../experimental/grouped_convolution_tile_instances/instances/backward_weight/grouped_convolution_backward_weight_tile_nhwgc_bf16_calls.inc"
    }
    else if constexpr(SIGNATURE == SIGNATURE_NHWGC_FP32_BWD_WEIGHT)
    {
#include "../../experimental/grouped_convolution_tile_instances/instances/backward_weight/grouped_convolution_backward_weight_tile_nhwgc_fp32_calls.inc"
    }
    else if constexpr(SIGNATURE == SIGNATURE_NDHWGC_FP16_BWD_WEIGHT)
    {
#include "../../experimental/grouped_convolution_tile_instances/instances/backward_weight/grouped_convolution_backward_weight_tile_ndhwgc_fp16_calls.inc"
    }
    else if constexpr(SIGNATURE == SIGNATURE_NDHWGC_BF16_BWD_WEIGHT)
    {
#include "../../experimental/grouped_convolution_tile_instances/instances/backward_weight/grouped_convolution_backward_weight_tile_ndhwgc_bf16_calls.inc"
    }
    else if constexpr(SIGNATURE == SIGNATURE_NDHWGC_FP32_BWD_WEIGHT)
    {
#include "../../experimental/grouped_convolution_tile_instances/instances/backward_weight/grouped_convolution_backward_weight_tile_ndhwgc_fp32_calls.inc"
    }
    else
    {
        std::cout << "Signature not supported" << std::endl;
        return std::make_tuple(false, best_avg_time, best_op_name, best_split_k);
    }
    return std::make_tuple(all_instances_valid, best_avg_time, best_op_name, best_split_k);
}
} // namespace ck_tile::builder::profiling

View File

@@ -23,79 +23,12 @@ namespace ck_tile::builder::profiling {
namespace ckb = ck_tile::builder;
namespace ckt = ck_tile::builder::test;
#include "../../experimental/grouped_convolution_tile_instances/grouped_convolution_forward_tile_nhwgc_fp32.inc"
#include "../../experimental/grouped_convolution_tile_instances/grouped_convolution_forward_tile_nhwgc_bf16.inc"
#include "../../experimental/grouped_convolution_tile_instances/grouped_convolution_forward_tile_nhwgc_fp16.inc"
#include "../../experimental/grouped_convolution_tile_instances/grouped_convolution_forward_tile_ndhwgc_fp32.inc"
#include "../../experimental/grouped_convolution_tile_instances/grouped_convolution_forward_tile_ndhwgc_bf16.inc"
#include "../../experimental/grouped_convolution_tile_instances/grouped_convolution_forward_tile_ndhwgc_fp16.inc"
/// @brief Parse grouped-convolution problem dimensions from the profiler CLI.
///
/// Consumes tokens starting at `arg_idx` in the order: G N K C, then
/// `spatial_dim`-sized groups of filter lengths, image lengths, strides,
/// dilations, left pads, and right pads.
///
/// @param arg_idx Index of the first dimension token in `argv`.
/// @param argv    CLI argument vector (bounds are the caller's responsibility).
/// @return A populated `ckt::Args<SIGNATURE>` describing the problem.
template <auto SIGNATURE>
auto parse_conv_args(int arg_idx, char* const argv[])
{
const std::size_t G = static_cast<size_t>(std::stol(argv[arg_idx++]));
const std::size_t N = static_cast<size_t>(std::stol(argv[arg_idx++]));
const std::size_t K = static_cast<size_t>(std::stol(argv[arg_idx++]));
const std::size_t C = static_cast<size_t>(std::stol(argv[arg_idx++]));
constexpr auto num_dim_spatial = SIGNATURE.spatial_dim;
std::vector<std::size_t> filter_spatial_lengths(num_dim_spatial);
std::vector<std::size_t> input_spatial_lengths(num_dim_spatial);
std::vector<std::size_t> conv_filter_strides(num_dim_spatial);
std::vector<std::size_t> conv_filter_dilations(num_dim_spatial);
std::vector<std::size_t> input_left_pads(num_dim_spatial);
std::vector<std::size_t> input_right_pads(num_dim_spatial);
// Each spatial parameter group occupies `num_dim_spatial` consecutive tokens.
for(int i = 0; i < num_dim_spatial; ++i)
{
filter_spatial_lengths[i] = static_cast<size_t>(std::stol(argv[arg_idx++]));
}
for(int i = 0; i < num_dim_spatial; ++i)
{
input_spatial_lengths[i] = static_cast<size_t>(std::stol(argv[arg_idx++]));
}
for(int i = 0; i < num_dim_spatial; ++i)
{
conv_filter_strides[i] = static_cast<size_t>(std::stol(argv[arg_idx++]));
}
for(int i = 0; i < num_dim_spatial; ++i)
{
conv_filter_dilations[i] = static_cast<size_t>(std::stol(argv[arg_idx++]));
}
for(int i = 0; i < num_dim_spatial; ++i)
{
input_left_pads[i] = static_cast<size_t>(std::stol(argv[arg_idx++]));
}
for(int i = 0; i < num_dim_spatial; ++i)
{
input_right_pads[i] = static_cast<size_t>(std::stol(argv[arg_idx++]));
}
// Assemble the builder-test Args aggregate from the parsed dimensions.
ckt::Args<SIGNATURE> args = {
.lengths =
{
.batch_size = N,
.groups = G,
.input_channels = C,
.output_channels = K,
.image = ckt::filter_extent_from_vector<num_dim_spatial>(input_spatial_lengths),
.filter = ckt::filter_extent_from_vector<num_dim_spatial>(filter_spatial_lengths),
},
.filter_strides = ckt::filter_extent_from_vector<num_dim_spatial>(conv_filter_strides),
.filter_dilation = ckt::filter_extent_from_vector<num_dim_spatial>(conv_filter_dilations),
.input_left_pad = ckt::filter_extent_from_vector<num_dim_spatial>(input_left_pads),
.input_right_pad = ckt::filter_extent_from_vector<num_dim_spatial>(input_right_pads),
.a_elementwise_op = {},
.b_elementwise_op = {},
.cde_elementwise_op = {},
};
return args;
}
#include "../../experimental/grouped_convolution_tile_instances/instances/forward/grouped_convolution_forward_tile_nhwgc_fp32.inc"
#include "../../experimental/grouped_convolution_tile_instances/instances/forward/grouped_convolution_forward_tile_nhwgc_bf16.inc"
#include "../../experimental/grouped_convolution_tile_instances/instances/forward/grouped_convolution_forward_tile_nhwgc_fp16.inc"
#include "../../experimental/grouped_convolution_tile_instances/instances/forward/grouped_convolution_forward_tile_ndhwgc_fp32.inc"
#include "../../experimental/grouped_convolution_tile_instances/instances/forward/grouped_convolution_forward_tile_ndhwgc_bf16.inc"
#include "../../experimental/grouped_convolution_tile_instances/instances/forward/grouped_convolution_forward_tile_ndhwgc_fp16.inc"
template <auto SIGNATURE>
void run_cpu_validation(const ckt::Args<SIGNATURE>& args,
@@ -189,27 +122,27 @@ run_grouped_conv_forward_tile_algs(const ckt::Args<SIGNATURE>& args,
if constexpr(SIGNATURE == SIGNATURE_NHWGC_FP16_FWD)
{
#include "../../experimental/grouped_convolution_tile_instances/grouped_convolution_forward_tile_nhwgc_fp16_calls.inc"
#include "../../experimental/grouped_convolution_tile_instances/instances/forward/grouped_convolution_forward_tile_nhwgc_fp16_calls.inc"
}
else if constexpr(SIGNATURE == SIGNATURE_NHWGC_BF16_FWD)
{
#include "../../experimental/grouped_convolution_tile_instances/grouped_convolution_forward_tile_nhwgc_bf16_calls.inc"
#include "../../experimental/grouped_convolution_tile_instances/instances/forward/grouped_convolution_forward_tile_nhwgc_bf16_calls.inc"
}
else if constexpr(SIGNATURE == SIGNATURE_NHWGC_FP32_FWD)
{
#include "../../experimental/grouped_convolution_tile_instances/grouped_convolution_forward_tile_nhwgc_fp32_calls.inc"
#include "../../experimental/grouped_convolution_tile_instances/instances/forward/grouped_convolution_forward_tile_nhwgc_fp32_calls.inc"
}
else if constexpr(SIGNATURE == SIGNATURE_NDHWGC_FP16_FWD)
{
#include "../../experimental/grouped_convolution_tile_instances/grouped_convolution_forward_tile_ndhwgc_fp16_calls.inc"
#include "../../experimental/grouped_convolution_tile_instances/instances/forward/grouped_convolution_forward_tile_ndhwgc_fp16_calls.inc"
}
else if constexpr(SIGNATURE == SIGNATURE_NDHWGC_BF16_FWD)
{
#include "../../experimental/grouped_convolution_tile_instances/grouped_convolution_forward_tile_ndhwgc_bf16_calls.inc"
#include "../../experimental/grouped_convolution_tile_instances/instances/forward/grouped_convolution_forward_tile_ndhwgc_bf16_calls.inc"
}
else if constexpr(SIGNATURE == SIGNATURE_NDHWGC_FP32_FWD)
{
#include "../../experimental/grouped_convolution_tile_instances/grouped_convolution_forward_tile_ndhwgc_fp32_calls.inc"
#include "../../experimental/grouped_convolution_tile_instances/instances/forward/grouped_convolution_forward_tile_ndhwgc_fp32_calls.inc"
}
else
{

View File

@@ -67,4 +67,62 @@ constexpr auto SIGNATURE_NDHWGC_FP16_FWD =
.weight = {.config = {.layout = ckb::TensorLayout::GKZYXC}},
.output = {.config = {.layout = ckb::TensorLayout::NDHWGK}}};
/////////////////////////////////////////
// BWD WEIGHT signatures
//////////////////////////////////////////
// 2D backward-weight signatures: NHWGC input / GKYXC weight / NHWGK output,
// accumulating in FP32.
constexpr auto SIGNATURE_NHWGC_BF16_BWD_WEIGHT =
ckt::ConvSignature{.spatial_dim = 2,
.direction = ckb::ConvDirection::BACKWARD_WEIGHT,
.data_type = ckb::DataType::BF16,
.accumulation_data_type = ckb::DataType::FP32,
.input = {.config = {.layout = ckb::TensorLayout::NHWGC}},
.weight = {.config = {.layout = ckb::TensorLayout::GKYXC}},
.output = {.config = {.layout = ckb::TensorLayout::NHWGK}}};
constexpr auto SIGNATURE_NHWGC_FP16_BWD_WEIGHT =
ckt::ConvSignature{.spatial_dim = 2,
.direction = ckb::ConvDirection::BACKWARD_WEIGHT,
.data_type = ckb::DataType::FP16,
.accumulation_data_type = ckb::DataType::FP32,
.input = {.config = {.layout = ckb::TensorLayout::NHWGC}},
.weight = {.config = {.layout = ckb::TensorLayout::GKYXC}},
.output = {.config = {.layout = ckb::TensorLayout::NHWGK}}};
constexpr auto SIGNATURE_NHWGC_FP32_BWD_WEIGHT =
ckt::ConvSignature{.spatial_dim = 2,
.direction = ckb::ConvDirection::BACKWARD_WEIGHT,
.data_type = ckb::DataType::FP32,
.accumulation_data_type = ckb::DataType::FP32,
.input = {.config = {.layout = ckb::TensorLayout::NHWGC}},
.weight = {.config = {.layout = ckb::TensorLayout::GKYXC}},
.output = {.config = {.layout = ckb::TensorLayout::NHWGK}}};
// 3D backward-weight signatures: NDHWGC input / GKZYXC weight / NDHWGK output,
// accumulating in FP32.
constexpr auto SIGNATURE_NDHWGC_BF16_BWD_WEIGHT =
ckt::ConvSignature{.spatial_dim = 3,
.direction = ckb::ConvDirection::BACKWARD_WEIGHT,
.data_type = ckb::DataType::BF16,
.accumulation_data_type = ckb::DataType::FP32,
.input = {.config = {.layout = ckb::TensorLayout::NDHWGC}},
.weight = {.config = {.layout = ckb::TensorLayout::GKZYXC}},
.output = {.config = {.layout = ckb::TensorLayout::NDHWGK}}};
constexpr auto SIGNATURE_NDHWGC_FP16_BWD_WEIGHT =
ckt::ConvSignature{.spatial_dim = 3,
.direction = ckb::ConvDirection::BACKWARD_WEIGHT,
.data_type = ckb::DataType::FP16,
.accumulation_data_type = ckb::DataType::FP32,
.input = {.config = {.layout = ckb::TensorLayout::NDHWGC}},
.weight = {.config = {.layout = ckb::TensorLayout::GKZYXC}},
.output = {.config = {.layout = ckb::TensorLayout::NDHWGK}}};
constexpr auto SIGNATURE_NDHWGC_FP32_BWD_WEIGHT =
ckt::ConvSignature{.spatial_dim = 3,
.direction = ckb::ConvDirection::BACKWARD_WEIGHT,
.data_type = ckb::DataType::FP32,
.accumulation_data_type = ckb::DataType::FP32,
.input = {.config = {.layout = ckb::TensorLayout::NDHWGC}},
.weight = {.config = {.layout = ckb::TensorLayout::GKZYXC}},
.output = {.config = {.layout = ckb::TensorLayout::NDHWGK}}};
} // namespace ck_tile::builder::profiling

View File

@@ -25,6 +25,58 @@
namespace ck {
namespace profiler {
namespace bwd_data {
template <ck::index_t NDimSpatial,
typename InLayout,
typename WeiLayout,
typename OutLayout,
typename InDataType,
typename WeiDataType,
typename OutDataType,
typename InElementOp,
typename WeiElementOp,
typename OutElementOp,
typename ComputeDataType>
void print_instances()
{
using DeviceOp =
ck::tensor_operation::device::DeviceGroupedConvBwdDataMultipleD<NDimSpatial,
OutLayout,
WeiLayout,
ck::Tuple<>,
InLayout,
OutDataType,
WeiDataType,
ck::Tuple<>,
InDataType,
OutElementOp,
WeiElementOp,
InElementOp,
ComputeDataType,
ComputeDataType>;
const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
DeviceOp>::GetInstances();
for(const auto& op_ptr : op_ptrs)
{
#ifdef CK_EXPERIMENTAL_BUILDER
const auto& instance_str = op_ptr->GetInstanceString();
if(!instance_str.empty())
{
std::cout << instance_str << std::endl;
}
else
{
std::cout << op_ptr->GetTypeString() << std::endl;
}
#else
std::cout << op_ptr->GetTypeString() << std::endl;
#endif
}
}
} // namespace bwd_data
template <ck::index_t NDimSpatial,
typename OutLayout,
typename WeiLayout,

View File

@@ -29,6 +29,56 @@
namespace ck {
namespace profiler {
namespace bwd_weight {
template <ck::index_t NDimSpatial,
typename InLayout,
typename WeiLayout,
typename OutLayout,
typename InDataType,
typename WeiDataType,
typename OutDataType,
typename InElementOp,
typename WeiElementOp,
typename OutElementOp,
typename ComputeTypeA,
typename ComputeTypeB>
void print_instances()
{
using DeviceOp = ck::tensor_operation::device::DeviceGroupedConvBwdWeight<NDimSpatial,
InLayout,
WeiLayout,
OutLayout,
InDataType,
WeiDataType,
OutDataType,
InElementOp,
WeiElementOp,
OutElementOp,
ComputeTypeA,
ComputeTypeB>;
const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
DeviceOp>::GetInstances();
for(const auto& op_ptr : op_ptrs)
{
#ifdef CK_EXPERIMENTAL_BUILDER
const auto& instance_str = op_ptr->GetInstanceString();
if(!instance_str.empty())
{
std::cout << instance_str << std::endl;
}
else
{
std::cout << op_ptr->GetTypeString() << std::endl;
}
#else
std::cout << op_ptr->GetTypeString() << std::endl;
#endif
}
}
} // namespace bwd_weight
template <ck::index_t NDimSpatial,
typename InLayout,
typename WeiLayout,

View File

@@ -30,6 +30,46 @@
namespace ck {
namespace profiler {
namespace fwd {
/// @brief Print the name of every registered grouped conv forward instance
///        matching the given layout/type configuration.
template <ck::index_t NDimSpatial,
          typename InLayout,
          typename WeiLayout,
          typename OutLayout,
          typename InDataType,
          typename WeiDataType,
          typename OutDataType,
          typename InElementOp,
          typename WeiElementOp,
          typename OutElementOp,
          typename ComputeTypeA,
          typename ComputeTypeB>
void print_instances()
{
    using DeviceOp = ck::tensor_operation::device::DeviceGroupedConvFwdMultipleABD<NDimSpatial,
                                                                                   InLayout,
                                                                                   WeiLayout,
                                                                                   ck::Tuple<>,
                                                                                   OutLayout,
                                                                                   InDataType,
                                                                                   WeiDataType,
                                                                                   ck::Tuple<>,
                                                                                   OutDataType,
                                                                                   InElementOp,
                                                                                   WeiElementOp,
                                                                                   OutElementOp,
                                                                                   ComputeTypeA,
                                                                                   ComputeTypeB>;
    const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
        DeviceOp>::GetInstances();
    for(const auto& op_ptr : op_ptrs)
    {
#ifdef CK_EXPERIMENTAL_BUILDER
        // Consistency fix: match the bwd-data/bwd-weight printers, which prefer the
        // builder-generated instance string when present.
        // NOTE(review): assumes forward instances also expose GetInstanceString()
        // under CK_EXPERIMENTAL_BUILDER — confirm before merging.
        const auto& instance_str = op_ptr->GetInstanceString();
        if(!instance_str.empty())
        {
            std::cout << instance_str << std::endl;
        }
        else
        {
            std::cout << op_ptr->GetTypeString() << std::endl;
        }
#else
        std::cout << op_ptr->GetTypeString() << std::endl;
#endif
    }
}
} // namespace fwd
template <ck::index_t NDimSpatial,
typename InLayout,
typename WeiLayout,

View File

@@ -0,0 +1,80 @@
// Copyright (c) Advanced Micro Devices, Inc., or its affiliates.
// SPDX-License-Identifier: MIT
#pragma once
#include <vector>
#include "../../experimental/builder/test/utils/conv_algorithm_type_utils.hpp"
namespace ck_tile::builder::profiling {
namespace ckt = ck_tile::builder::test;
/// @brief Parse grouped-convolution problem dimensions from the profiler CLI.
///
/// Consumes tokens starting at `arg_idx` in the order: G N K C, then
/// `spatial_dim`-sized groups of filter lengths, image lengths, strides,
/// dilations, left pads, and right pads.
///
/// @param arg_idx Index of the first dimension token in `argv`.
/// @param argv    CLI argument vector (bounds are the caller's responsibility).
/// @return A populated `ckt::Args<SIGNATURE>` describing the problem.
template <auto SIGNATURE>
auto parse_conv_args(int arg_idx, char* const argv[])
{
    constexpr auto num_dim_spatial = SIGNATURE.spatial_dim;

    // Consume the next CLI token as an unsigned extent.
    auto next_value = [&]() { return static_cast<std::size_t>(std::stol(argv[arg_idx++])); };
    // Consume `num_dim_spatial` consecutive tokens into a vector.
    auto next_spatial = [&]() {
        std::vector<std::size_t> values(num_dim_spatial);
        for(auto& v : values)
        {
            v = next_value();
        }
        return values;
    };

    const std::size_t G = next_value();
    const std::size_t N = next_value();
    const std::size_t K = next_value();
    const std::size_t C = next_value();

    const auto filter_spatial_lengths = next_spatial();
    const auto input_spatial_lengths  = next_spatial();
    const auto conv_filter_strides    = next_spatial();
    const auto conv_filter_dilations  = next_spatial();
    const auto input_left_pads        = next_spatial();
    const auto input_right_pads       = next_spatial();

    // Assemble the builder-test Args aggregate from the parsed dimensions.
    ckt::Args<SIGNATURE> args = {
        .lengths =
            {
                .batch_size      = N,
                .groups          = G,
                .input_channels  = C,
                .output_channels = K,
                .image  = ckt::filter_extent_from_vector<num_dim_spatial>(input_spatial_lengths),
                .filter = ckt::filter_extent_from_vector<num_dim_spatial>(filter_spatial_lengths),
            },
        .filter_strides  = ckt::filter_extent_from_vector<num_dim_spatial>(conv_filter_strides),
        .filter_dilation = ckt::filter_extent_from_vector<num_dim_spatial>(conv_filter_dilations),
        .input_left_pad  = ckt::filter_extent_from_vector<num_dim_spatial>(input_left_pads),
        .input_right_pad = ckt::filter_extent_from_vector<num_dim_spatial>(input_right_pads),
        .a_elementwise_op   = {},
        .b_elementwise_op   = {},
        .cde_elementwise_op = {},
    };
    return args;
}
} // namespace ck_tile::builder::profiling