mirror of
https://github.com/ROCm/composable_kernel.git
synced 2026-05-13 17:55:48 +00:00
[rocm-libraries] ROCm/rocm-libraries#5516 (commit ff3afda)
[CK_TILE, CK_BUILDER] Add bwd data to CK Tile profiler (#5516) ## Motivation We want to close the performance gap between old CK and CK Tile for bwd data convolutions. To achieve this, we need two things: - Configurations for the old CK kernel instances such that we can map them onto CK Tile instances. - Support in the CK profiler to run the CK Tile instances with the same API as for the old CK instances. ## Technical Details Extracted kernel configurations from old CK. The codegen Python script for CK Tile convs is extended to also support bwd data. The generated instances are added to the CMake build (target `device_grouped_conv_bwd_data_tile_instances`). A new profiler op (`grouped_conv_bwd_data_tile`) has been added to the CK Profiler. The API is the same as for old CK's profiler op `grouped_conv_bwd_data`.
This commit is contained in:
committed by
assistant-librarian[bot]
parent
1834e318da
commit
ec2dbfbfde
@@ -0,0 +1,204 @@
|
||||
// Copyright (c) Advanced Micro Devices, Inc., or its affiliates.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <iostream>
|
||||
#include <tuple>
|
||||
|
||||
#include "../../experimental/builder/test/utils/conv_algorithm_type_utils.hpp"
|
||||
#include "grouped_convolution_signatures.hpp"
|
||||
#include "ck_tile/ref/naive_grouped_conv_bwd_data_gpu.hpp"
|
||||
|
||||
#include "ck_tile/builder/testing/filter_extent.hpp"
|
||||
#include "ck_tile/builder/testing/conv/ck_tile.hpp"
|
||||
#include "ck_tile/builder/testing/conv/reference.hpp"
|
||||
#include "ck_tile/builder/conv_builder.hpp"
|
||||
#include "tile_profiler_utils.hpp"
|
||||
|
||||
namespace ck_tile::builder::profiling {
|
||||
|
||||
namespace ckb = ck_tile::builder;
|
||||
namespace ckt = ck_tile::builder::test;
|
||||
|
||||
#include "../../../experimental/grouped_convolution_tile_instances/instances/backward_data/grouped_convolution_backward_data_tile_ndhwgc_fp32.inc"
|
||||
#include "../../../experimental/grouped_convolution_tile_instances/instances/backward_data/grouped_convolution_backward_data_tile_nhwgc_fp32.inc"
|
||||
#include "../../../experimental/grouped_convolution_tile_instances/instances/backward_data/grouped_convolution_backward_data_tile_nhwgc_bf16.inc"
|
||||
#include "../../../experimental/grouped_convolution_tile_instances/instances/backward_data/grouped_convolution_backward_data_tile_nhwgc_fp16.inc"
|
||||
#include "../../../experimental/grouped_convolution_tile_instances/instances/backward_data/grouped_convolution_backward_data_tile_ndhwgc_bf16.inc"
|
||||
#include "../../../experimental/grouped_convolution_tile_instances/instances/backward_data/grouped_convolution_backward_data_tile_ndhwgc_fp16.inc"
|
||||
|
||||
template <auto SIGNATURE>
|
||||
void run_cpu_validation(const ckt::Args<SIGNATURE>& args,
|
||||
const ckt::Outputs<SIGNATURE>& outputs,
|
||||
const ckt::Outputs<SIGNATURE>& reference)
|
||||
{
|
||||
using DataType =
|
||||
std::conditional_t<SIGNATURE.data_type == ckb::DataType::FP32,
|
||||
float,
|
||||
std::conditional_t<SIGNATURE.data_type == ckb::DataType::FP16,
|
||||
ck_tile::half_t,
|
||||
ck_tile::bfloat16_t>>;
|
||||
const auto conv_param = args.to_ck_tile_conv_param();
|
||||
|
||||
const std::size_t input_bytes_num = conv_param.template GetInputByte<DataType>();
|
||||
std::vector<DataType> in(input_bytes_num / sizeof(DataType));
|
||||
std::vector<DataType> ref(input_bytes_num / sizeof(DataType));
|
||||
HIP_CHECK_ERROR(
|
||||
hipMemcpy(&ref.data()[0], reference.input, input_bytes_num, hipMemcpyDeviceToHost));
|
||||
HIP_CHECK_ERROR(
|
||||
hipMemcpy(&in.data()[0], outputs.input, input_bytes_num, hipMemcpyDeviceToHost));
|
||||
ck_tile::check_err(in, ref, "\tError: Incorrect results!");
|
||||
}
|
||||
|
||||
/// @brief `run_grouped_conv_backward_data_tile_algs()` run all grouped conv fwd instances.
|
||||
///
|
||||
/// @tparam SIGNATURE Forward convolution signature.
|
||||
///
|
||||
/// @see run_grouped_conv_backward_data_tile_algs()
|
||||
template <auto SIGNATURE>
|
||||
std::tuple<bool, float, std::string, int, int>
|
||||
run_grouped_conv_backward_data_tile_algs(const ckt::Args<SIGNATURE>& args,
|
||||
const std::string& split_k,
|
||||
const index_t instance_index,
|
||||
const ckt::Inputs<SIGNATURE>& inputs,
|
||||
const ckt::Outputs<SIGNATURE>& outputs,
|
||||
const ck_tile::stream_config& s_conf)
|
||||
{
|
||||
float best_avg_time = std::numeric_limits<float>::max();
|
||||
std::string best_op_name, op_name;
|
||||
int best_split_k = 0;
|
||||
ck::index_t best_instance_index = -1;
|
||||
bool is_supported = false;
|
||||
float avg_time;
|
||||
bool all_instances_valid = true;
|
||||
|
||||
using DataType =
|
||||
std::conditional_t<SIGNATURE.data_type == ckb::DataType::FP32,
|
||||
float,
|
||||
std::conditional_t<SIGNATURE.data_type == ckb::DataType::FP16,
|
||||
ck_tile::half_t,
|
||||
ck_tile::bfloat16_t>>;
|
||||
|
||||
auto reference = ckt::alloc_outputs(args);
|
||||
using ReferenceInstance =
|
||||
typename ckb::ConvBuilder<SIGNATURE, ckt::ConvAlgorithm_Reference{}>::Instance;
|
||||
auto ref_conv = ReferenceInstance{};
|
||||
auto ref_result = ckt::run(ref_conv, args, inputs, reference.get());
|
||||
|
||||
const auto conv_param = args.to_ck_tile_conv_param();
|
||||
|
||||
// Get max possible value in the output
|
||||
const std::size_t input_bytes_num = conv_param.template GetInputByte<DataType>();
|
||||
std::vector<DataType> ref(input_bytes_num / sizeof(DataType));
|
||||
HIP_CHECK_ERROR(
|
||||
hipMemcpy(&ref.data()[0], reference.get().input, input_bytes_num, hipMemcpyDeviceToHost));
|
||||
const float max_accumulated_value = *std::max_element(ref.begin(), ref.end());
|
||||
|
||||
const index_t num_accums = conv_param.K_;
|
||||
|
||||
// BWD data doesn't support split-K autodeduce value -1
|
||||
auto split_k_values = get_split_k_values(split_k);
|
||||
split_k_values.erase(std::remove(split_k_values.begin(), split_k_values.end(), -1),
|
||||
split_k_values.end());
|
||||
|
||||
index_t num_kernel = 0;
|
||||
auto run_alg = [&](auto&& run_alg_func) {
|
||||
num_kernel++;
|
||||
// Skip if a specific instance was requested and this isn't it
|
||||
const bool running_specific_instance = (instance_index != -1);
|
||||
const bool current_is_target = (num_kernel - 1 == instance_index);
|
||||
if(running_specific_instance && !current_is_target)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
for(auto& k_batch : split_k_values)
|
||||
{
|
||||
ckt::Args<SIGNATURE> args_k_batch = args;
|
||||
args_k_batch.k_batch = k_batch;
|
||||
std::tie(is_supported, avg_time, op_name) =
|
||||
run_alg_func(args_k_batch, inputs, outputs, s_conf);
|
||||
if(is_supported)
|
||||
{
|
||||
ckt::ValidationReport report;
|
||||
auto&& [rtol, atol] =
|
||||
get_rtol_atol<SIGNATURE>(num_accums, k_batch, max_accumulated_value);
|
||||
ckt::Outputs<SIGNATURE>::reflect(
|
||||
args_k_batch,
|
||||
[&](std::string_view name,
|
||||
const auto& desc,
|
||||
void* ckt::Outputs<SIGNATURE>::*ptr) {
|
||||
report.check(name, desc, outputs.*ptr, reference.get().*ptr, rtol, atol);
|
||||
});
|
||||
|
||||
const bool valid = report.get_errors().empty();
|
||||
if(valid)
|
||||
{
|
||||
if(avg_time < best_avg_time)
|
||||
{
|
||||
best_instance_index = num_kernel - 1;
|
||||
}
|
||||
best_avg_time = std::min(best_avg_time, avg_time);
|
||||
best_op_name = best_avg_time < avg_time ? best_op_name : op_name;
|
||||
best_split_k = best_avg_time < avg_time ? best_split_k : k_batch;
|
||||
std::cout << "[Valid] Perf: " << std::setw(10) << avg_time << " ms," << " "
|
||||
<< op_name << " (instance " << num_kernel - 1 << "), SplitK "
|
||||
<< k_batch << std::endl;
|
||||
}
|
||||
else
|
||||
{
|
||||
std::cout << "[Error] " << op_name << ", SplitK " << k_batch << std::endl;
|
||||
for(const auto& error : report.get_errors())
|
||||
{
|
||||
std::cout << "\tNumber of incorrect values: " << error.wrong_elements
|
||||
<< " Is all zero:" << error.is_all_zero()
|
||||
<< " max err: " << error.max_error << std::endl;
|
||||
// Check with cpu verification to get a values
|
||||
run_cpu_validation<SIGNATURE>(args_k_batch, outputs, reference.get());
|
||||
}
|
||||
all_instances_valid = false;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
std::cout << "[Not supported] " << op_name << ", SplitK " << k_batch << std::endl;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
if constexpr(SIGNATURE == SIGNATURE_NHWGC_FP16_BWD_DATA)
|
||||
{
|
||||
#include "../../experimental/grouped_convolution_tile_instances/instances/backward_data/grouped_convolution_backward_data_tile_nhwgc_fp16_calls.inc"
|
||||
}
|
||||
else if constexpr(SIGNATURE == SIGNATURE_NHWGC_BF16_BWD_DATA)
|
||||
{
|
||||
#include "../../experimental/grouped_convolution_tile_instances/instances/backward_data/grouped_convolution_backward_data_tile_nhwgc_bf16_calls.inc"
|
||||
}
|
||||
else if constexpr(SIGNATURE == SIGNATURE_NHWGC_FP32_BWD_DATA)
|
||||
{
|
||||
#include "../../experimental/grouped_convolution_tile_instances/instances/backward_data/grouped_convolution_backward_data_tile_nhwgc_fp32_calls.inc"
|
||||
}
|
||||
else if constexpr(SIGNATURE == SIGNATURE_NDHWGC_FP16_BWD_DATA)
|
||||
{
|
||||
#include "../../experimental/grouped_convolution_tile_instances/instances/backward_data/grouped_convolution_backward_data_tile_ndhwgc_fp16_calls.inc"
|
||||
}
|
||||
else if constexpr(SIGNATURE == SIGNATURE_NDHWGC_BF16_BWD_DATA)
|
||||
{
|
||||
#include "../../experimental/grouped_convolution_tile_instances/instances/backward_data/grouped_convolution_backward_data_tile_ndhwgc_bf16_calls.inc"
|
||||
}
|
||||
else if constexpr(SIGNATURE == SIGNATURE_NDHWGC_FP32_BWD_DATA)
|
||||
{
|
||||
#include "../../experimental/grouped_convolution_tile_instances/instances/backward_data/grouped_convolution_backward_data_tile_ndhwgc_fp32_calls.inc"
|
||||
}
|
||||
else
|
||||
{
|
||||
std::cout << "Signature not supported" << std::endl;
|
||||
return std::make_tuple(
|
||||
false, best_avg_time, best_op_name, best_split_k, best_instance_index);
|
||||
}
|
||||
return std::make_tuple(
|
||||
all_instances_valid, best_avg_time, best_op_name, best_split_k, best_instance_index);
|
||||
}
|
||||
|
||||
} // namespace ck_tile::builder::profiling
|
||||
@@ -15,6 +15,7 @@
|
||||
#include "ck_tile/builder/testing/conv/ck_tile.hpp"
|
||||
#include "ck_tile/builder/testing/conv/reference.hpp"
|
||||
#include "ck_tile/builder/conv_builder.hpp"
|
||||
#include "tile_profiler_utils.hpp"
|
||||
|
||||
namespace ck_tile::builder::profiling {
|
||||
|
||||
@@ -28,26 +29,6 @@ namespace ckt = ck_tile::builder::test;
|
||||
#include "../../../experimental/grouped_convolution_tile_instances/instances/backward_weight/grouped_convolution_backward_weight_tile_ndhwgc_bf16.inc"
|
||||
#include "../../../experimental/grouped_convolution_tile_instances/instances/backward_weight/grouped_convolution_backward_weight_tile_ndhwgc_fp16.inc"
|
||||
|
||||
std::vector<int> get_split_k_values(const std::string& split_k)
|
||||
{
|
||||
std::vector<int> split_k_list = {/*auto deduce value*/ -1, 1, 2, 4, 8, 16, 32, 64, 128};
|
||||
|
||||
if(split_k != "all")
|
||||
{
|
||||
try
|
||||
{
|
||||
int split_k_value = std::stoi(split_k);
|
||||
split_k_list = {split_k_value};
|
||||
}
|
||||
catch(const std::exception& e)
|
||||
{
|
||||
std::cerr << e.what() << '\n';
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
}
|
||||
return split_k_list;
|
||||
}
|
||||
|
||||
template <auto SIGNATURE>
|
||||
void run_cpu_validation(const ckt::Args<SIGNATURE>& args,
|
||||
const ckt::Outputs<SIGNATURE>& outputs,
|
||||
@@ -71,36 +52,6 @@ void run_cpu_validation(const ckt::Args<SIGNATURE>& args,
|
||||
ck_tile::check_err(wei, ref, "\tError: Incorrect results!");
|
||||
}
|
||||
|
||||
template <auto SIGNATURE>
|
||||
std::tuple<double, double>
|
||||
get_rtol_atol(const int num_accums, const int k_batch, const float max_accumulated_value)
|
||||
{
|
||||
using WeiDataType =
|
||||
std::conditional_t<SIGNATURE.data_type == ckb::DataType::FP32,
|
||||
float,
|
||||
std::conditional_t<SIGNATURE.data_type == ckb::DataType::FP16,
|
||||
ck_tile::half_t,
|
||||
ck_tile::bfloat16_t>>;
|
||||
using ComputeType = WeiDataType;
|
||||
using AccDataType = float;
|
||||
|
||||
// Assign middle value of the range for auto deduce
|
||||
const int num_accums_split_k = k_batch > 0 ? k_batch : 64;
|
||||
auto rtol = ck_tile::get_relative_threshold<ComputeType, WeiDataType, AccDataType>(
|
||||
num_accums / num_accums_split_k);
|
||||
auto atol = ck_tile::get_absolute_threshold<ComputeType, WeiDataType, AccDataType>(
|
||||
max_accumulated_value / num_accums_split_k, num_accums / num_accums_split_k);
|
||||
// Calculate error due to split_k accumulation
|
||||
auto rtol_split_k =
|
||||
ck_tile::get_relative_threshold<WeiDataType, WeiDataType, WeiDataType>(num_accums_split_k);
|
||||
auto atol_split_k = ck_tile::get_absolute_threshold<WeiDataType, WeiDataType, WeiDataType>(
|
||||
max_accumulated_value, num_accums_split_k);
|
||||
// Use higher threshold
|
||||
rtol = std::max(rtol, rtol_split_k);
|
||||
atol = std::max(atol, atol_split_k);
|
||||
return std::make_tuple(rtol, atol);
|
||||
}
|
||||
|
||||
/// @brief `run_grouped_conv_backward_weight_tile_algs()` runs all grouped conv
///        backward-weight tile instances.
///
/// @tparam SIGNATURE Backward-weight convolution signature.
||||
|
||||
@@ -5,124 +5,5 @@
|
||||
|
||||
#include <tuple>
|
||||
|
||||
#include "../../experimental/builder/test/impl/conv_signature_types.hpp"
|
||||
#include "../../experimental/grouped_convolution_tile_instances/include/signatures.hpp"
|
||||
#include "ck_tile/builder/testing/conv/ck_tile.hpp"
|
||||
|
||||
namespace ck_tile::builder::profiling {
|
||||
|
||||
namespace ckb = ck_tile::builder;
|
||||
namespace ckt = ck_tile::builder::test;
|
||||
|
||||
constexpr auto SIGNATURE_NHWGC_FP32_FWD =
|
||||
ckt::ConvSignature{.spatial_dim = 2,
|
||||
.direction = ckb::ConvDirection::FORWARD,
|
||||
.data_type = ckb::DataType::FP32,
|
||||
.accumulation_data_type = ckb::DataType::FP32,
|
||||
.input = {.config = {.layout = ckb::TensorLayout::NHWGC}},
|
||||
.weight = {.config = {.layout = ckb::TensorLayout::GKYXC}},
|
||||
.output = {.config = {.layout = ckb::TensorLayout::NHWGK}}};
|
||||
|
||||
constexpr auto SIGNATURE_NHWGC_BF16_FWD =
|
||||
ckt::ConvSignature{.spatial_dim = 2,
|
||||
.direction = ckb::ConvDirection::FORWARD,
|
||||
.data_type = ckb::DataType::BF16,
|
||||
.accumulation_data_type = ckb::DataType::FP32,
|
||||
.input = {.config = {.layout = ckb::TensorLayout::NHWGC}},
|
||||
.weight = {.config = {.layout = ckb::TensorLayout::GKYXC}},
|
||||
.output = {.config = {.layout = ckb::TensorLayout::NHWGK}}};
|
||||
|
||||
constexpr auto SIGNATURE_NHWGC_FP16_FWD =
|
||||
ckt::ConvSignature{.spatial_dim = 2,
|
||||
.direction = ckb::ConvDirection::FORWARD,
|
||||
.data_type = ckb::DataType::FP16,
|
||||
.accumulation_data_type = ckb::DataType::FP32,
|
||||
.input = {.config = {.layout = ckb::TensorLayout::NHWGC}},
|
||||
.weight = {.config = {.layout = ckb::TensorLayout::GKYXC}},
|
||||
.output = {.config = {.layout = ckb::TensorLayout::NHWGK}}};
|
||||
|
||||
constexpr auto SIGNATURE_NDHWGC_FP32_FWD =
|
||||
ckt::ConvSignature{.spatial_dim = 3,
|
||||
.direction = ckb::ConvDirection::FORWARD,
|
||||
.data_type = ckb::DataType::FP32,
|
||||
.accumulation_data_type = ckb::DataType::FP32,
|
||||
.input = {.config = {.layout = ckb::TensorLayout::NDHWGC}},
|
||||
.weight = {.config = {.layout = ckb::TensorLayout::GKZYXC}},
|
||||
.output = {.config = {.layout = ckb::TensorLayout::NDHWGK}}};
|
||||
|
||||
constexpr auto SIGNATURE_NDHWGC_BF16_FWD =
|
||||
ckt::ConvSignature{.spatial_dim = 3,
|
||||
.direction = ckb::ConvDirection::FORWARD,
|
||||
.data_type = ckb::DataType::BF16,
|
||||
.accumulation_data_type = ckb::DataType::FP32,
|
||||
.input = {.config = {.layout = ckb::TensorLayout::NDHWGC}},
|
||||
.weight = {.config = {.layout = ckb::TensorLayout::GKZYXC}},
|
||||
.output = {.config = {.layout = ckb::TensorLayout::NDHWGK}}};
|
||||
|
||||
constexpr auto SIGNATURE_NDHWGC_FP16_FWD =
|
||||
ckt::ConvSignature{.spatial_dim = 3,
|
||||
.direction = ckb::ConvDirection::FORWARD,
|
||||
.data_type = ckb::DataType::FP16,
|
||||
.accumulation_data_type = ckb::DataType::FP32,
|
||||
.input = {.config = {.layout = ckb::TensorLayout::NDHWGC}},
|
||||
.weight = {.config = {.layout = ckb::TensorLayout::GKZYXC}},
|
||||
.output = {.config = {.layout = ckb::TensorLayout::NDHWGK}}};
|
||||
|
||||
/////////////////////////////////////////
|
||||
// BWD WEIGHT signatures
|
||||
//////////////////////////////////////////
|
||||
|
||||
constexpr auto SIGNATURE_NHWGC_BF16_BWD_WEIGHT =
|
||||
ckt::ConvSignature{.spatial_dim = 2,
|
||||
.direction = ckb::ConvDirection::BACKWARD_WEIGHT,
|
||||
.data_type = ckb::DataType::BF16,
|
||||
.accumulation_data_type = ckb::DataType::FP32,
|
||||
.input = {.config = {.layout = ckb::TensorLayout::NHWGC}},
|
||||
.weight = {.config = {.layout = ckb::TensorLayout::GKYXC}},
|
||||
.output = {.config = {.layout = ckb::TensorLayout::NHWGK}}};
|
||||
|
||||
constexpr auto SIGNATURE_NHWGC_FP16_BWD_WEIGHT =
|
||||
ckt::ConvSignature{.spatial_dim = 2,
|
||||
.direction = ckb::ConvDirection::BACKWARD_WEIGHT,
|
||||
.data_type = ckb::DataType::FP16,
|
||||
.accumulation_data_type = ckb::DataType::FP32,
|
||||
.input = {.config = {.layout = ckb::TensorLayout::NHWGC}},
|
||||
.weight = {.config = {.layout = ckb::TensorLayout::GKYXC}},
|
||||
.output = {.config = {.layout = ckb::TensorLayout::NHWGK}}};
|
||||
|
||||
constexpr auto SIGNATURE_NHWGC_FP32_BWD_WEIGHT =
|
||||
ckt::ConvSignature{.spatial_dim = 2,
|
||||
.direction = ckb::ConvDirection::BACKWARD_WEIGHT,
|
||||
.data_type = ckb::DataType::FP32,
|
||||
.accumulation_data_type = ckb::DataType::FP32,
|
||||
.input = {.config = {.layout = ckb::TensorLayout::NHWGC}},
|
||||
.weight = {.config = {.layout = ckb::TensorLayout::GKYXC}},
|
||||
.output = {.config = {.layout = ckb::TensorLayout::NHWGK}}};
|
||||
|
||||
constexpr auto SIGNATURE_NDHWGC_BF16_BWD_WEIGHT =
|
||||
ckt::ConvSignature{.spatial_dim = 3,
|
||||
.direction = ckb::ConvDirection::BACKWARD_WEIGHT,
|
||||
.data_type = ckb::DataType::BF16,
|
||||
.accumulation_data_type = ckb::DataType::FP32,
|
||||
.input = {.config = {.layout = ckb::TensorLayout::NDHWGC}},
|
||||
.weight = {.config = {.layout = ckb::TensorLayout::GKZYXC}},
|
||||
.output = {.config = {.layout = ckb::TensorLayout::NDHWGK}}};
|
||||
|
||||
constexpr auto SIGNATURE_NDHWGC_FP16_BWD_WEIGHT =
|
||||
ckt::ConvSignature{.spatial_dim = 3,
|
||||
.direction = ckb::ConvDirection::BACKWARD_WEIGHT,
|
||||
.data_type = ckb::DataType::FP16,
|
||||
.accumulation_data_type = ckb::DataType::FP32,
|
||||
.input = {.config = {.layout = ckb::TensorLayout::NDHWGC}},
|
||||
.weight = {.config = {.layout = ckb::TensorLayout::GKZYXC}},
|
||||
.output = {.config = {.layout = ckb::TensorLayout::NDHWGK}}};
|
||||
|
||||
constexpr auto SIGNATURE_NDHWGC_FP32_BWD_WEIGHT =
|
||||
ckt::ConvSignature{.spatial_dim = 3,
|
||||
.direction = ckb::ConvDirection::BACKWARD_WEIGHT,
|
||||
.data_type = ckb::DataType::FP32,
|
||||
.accumulation_data_type = ckb::DataType::FP32,
|
||||
.input = {.config = {.layout = ckb::TensorLayout::NDHWGC}},
|
||||
.weight = {.config = {.layout = ckb::TensorLayout::GKZYXC}},
|
||||
.output = {.config = {.layout = ckb::TensorLayout::NDHWGK}}};
|
||||
|
||||
} // namespace ck_tile::builder::profiling
|
||||
|
||||
@@ -4,14 +4,70 @@
|
||||
#pragma once
|
||||
|
||||
#include <vector>
|
||||
#include <algorithm>
|
||||
#include <cstdlib>
|
||||
#include <iostream>
|
||||
#include <stdexcept>
|
||||
#include <string>
|
||||
#include <tuple>
|
||||
#include "../../experimental/builder/test/utils/conv_algorithm_type_utils.hpp"
|
||||
|
||||
namespace ck_tile::builder::profiling {
|
||||
|
||||
namespace ckt = ck_tile::builder::test;
|
||||
|
||||
inline std::vector<int> get_split_k_values(const std::string& split_k)
|
||||
{
|
||||
std::vector<int> split_k_list = {/*auto deduce value*/ -1, 1, 2, 4, 8, 16, 32, 64, 128};
|
||||
|
||||
if(split_k != "all")
|
||||
{
|
||||
try
|
||||
{
|
||||
int split_k_value = std::stoi(split_k);
|
||||
split_k_list = {split_k_value};
|
||||
}
|
||||
catch(const std::exception& e)
|
||||
{
|
||||
std::cerr << e.what() << '\n';
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
}
|
||||
return split_k_list;
|
||||
}
|
||||
|
||||
template <auto SIGNATURE>
|
||||
auto parse_conv_args(int arg_idx, char* const argv[])
|
||||
inline std::tuple<double, double>
|
||||
get_rtol_atol(const int num_accums, const int k_batch, const float max_accumulated_value)
|
||||
{
|
||||
using DataType =
|
||||
std::conditional_t<SIGNATURE.data_type == ckb::DataType::FP32,
|
||||
float,
|
||||
std::conditional_t<SIGNATURE.data_type == ckb::DataType::FP16,
|
||||
ck_tile::half_t,
|
||||
ck_tile::bfloat16_t>>;
|
||||
using ComputeType = DataType;
|
||||
using AccDataType = float;
|
||||
|
||||
// Assign middle value of the range for auto deduce
|
||||
const int num_accums_split_k = k_batch > 0 ? k_batch : 64;
|
||||
auto rtol = ck_tile::get_relative_threshold<ComputeType, DataType, AccDataType>(
|
||||
num_accums / num_accums_split_k);
|
||||
auto atol = ck_tile::get_absolute_threshold<ComputeType, DataType, AccDataType>(
|
||||
max_accumulated_value / num_accums_split_k, num_accums / num_accums_split_k);
|
||||
// Calculate error due to split_k accumulation
|
||||
auto rtol_split_k =
|
||||
ck_tile::get_relative_threshold<DataType, DataType, DataType>(num_accums_split_k);
|
||||
auto atol_split_k = ck_tile::get_absolute_threshold<DataType, DataType, DataType>(
|
||||
max_accumulated_value, num_accums_split_k);
|
||||
// Use higher threshold
|
||||
rtol = std::max(rtol, rtol_split_k);
|
||||
atol = std::max(atol, atol_split_k);
|
||||
return std::make_tuple(rtol, atol);
|
||||
}
|
||||
|
||||
template <auto SIGNATURE>
|
||||
inline ckt::Args<SIGNATURE> parse_conv_args(int arg_idx, char* const argv[])
|
||||
{
|
||||
const std::size_t G = static_cast<size_t>(std::stol(argv[arg_idx++]));
|
||||
const std::size_t N = static_cast<size_t>(std::stol(argv[arg_idx++]));
|
||||
|
||||
Reference in New Issue
Block a user