Mirror of https://github.com/ROCm/composable_kernel.git (synced 2026-04-19 22:39:03 +00:00)
Merge remote-tracking branch 'origin/develop' into jakpiase/pool2d_fwd_new
@@ -47,6 +47,14 @@ target_link_libraries(client_conv3d_fwd_convscale_add_fp8 PRIVATE composable_ker
add_executable(client_conv3d_fwd_convscale_relu_fp8
    grouped_convnd_fwd_convscale_relu/conv3d_fwd_convscale_relu_fp8.cpp)
target_link_libraries(client_conv3d_fwd_convscale_relu_fp8 PRIVATE composable_kernel::device_conv_operations)
# Fwd convscale + ReLU + AMAX
add_executable(client_conv3d_fwd_convscale_relu_amax_fp8
    grouped_convnd_fwd_convscale_reduce/conv3d_fwd_convscale_relu_amax_fp8.cpp)
target_link_libraries(client_conv3d_fwd_convscale_relu_amax_fp8
    PRIVATE composable_kernel::device_conv_operations
            composable_kernel::device_other_operations
            composable_kernel::device_reduction_operations
            utility)
# Fwd convscale
add_executable(client_conv3d_fwd_convscale_fp8
    grouped_convnd_fwd_convscale/conv3d_fwd_convscale_fp8.cpp)

@@ -0,0 +1,835 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
#include <algorithm>
|
||||
#include <cstdlib>
|
||||
#include <iomanip>
|
||||
#include <iostream>
|
||||
#include <iterator>
|
||||
#include <numeric>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "ck/ck.hpp"
|
||||
#include "ck/library/utility/algorithm.hpp"
|
||||
#include "ck/tensor_operation/gpu/device/device_elementwise.hpp"
|
||||
#include "ck/tensor_operation/gpu/device/device_reduce.hpp"
|
||||
#include "ck/tensor_operation/gpu/device/reduction_operator_mapping.hpp"
|
||||
#include "ck/tensor_operation/gpu/element/combined_element_wise_operation.hpp"
|
||||
#include "ck/tensor_operation/gpu/element/unary_element_wise_operation.hpp"
|
||||
#include "ck/utility/tuple.hpp"
|
||||
#include "ck/utility/type.hpp"
|
||||
#include "ck/library/tensor_operation_instance/gpu/grouped_convolution_forward_convscale_relu.hpp"
|
||||
#include "ck/utility/reduction_enums.hpp"
|
||||
#include "ck/library/tensor_operation_instance/gpu/permute_scale.hpp"
|
||||
#include "ck/library/tensor_operation_instance/gpu/reduce/reduce.hpp"
|
||||
#include "ck/library/utility/host_tensor.hpp"
|
||||
|
||||
namespace ew = ck::tensor_operation::element_wise;
|
||||
|
||||
using PassThrough = ew::PassThrough;
|
||||
using ConvScaleRelu = ew::UnaryCombinedOp<ew::Scale, ew::Scale, ew::Relu>;
|
||||
using ConvScale = ew::UnaryCombinedOp<ew::Scale, ew::Scale, PassThrough>;
|
||||
|
||||
struct SimpleDeviceMem
|
||||
{
|
||||
SimpleDeviceMem() = delete;
|
||||
|
||||
SimpleDeviceMem(std::size_t mem_size) : p_mem_{}
|
||||
{
|
||||
(void)hipMalloc(static_cast<void**>(&p_mem_), mem_size);
|
||||
}
|
||||
|
||||
void* GetDeviceBuffer() { return p_mem_; }
|
||||
|
||||
~SimpleDeviceMem() { (void)hipFree(p_mem_); }
|
||||
|
||||
void* p_mem_;
|
||||
};
|
||||
|
||||
template <ck::index_t NumDimSpatial, ck::index_t NumNonSpatialDim = 3>
|
||||
std::size_t
|
||||
GetFlops(const std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>& output_lengths,
|
||||
const std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>& weights_lengths,
|
||||
const std::size_t& ds_size)
|
||||
{
|
||||
// FLOPs = 2 * G * N * K * C * <output spatial lengths product> * <filter spatial lengths product>
//         + ds_size * <output tensor size>
//       = <output tensor size> * (2 * C * <filter spatial lengths product> + ds_size)
//       = G * N * K * <output spatial lengths product> * (2 * C * <filter spatial lengths product> + ds_size)
ck::index_t G = weights_lengths[0];
|
||||
ck::index_t N = output_lengths[1];
|
||||
ck::index_t K = weights_lengths[1];
|
||||
ck::index_t C = weights_lengths[2];
|
||||
|
||||
return G * N * K *
|
||||
std::accumulate(std::next(std::begin(output_lengths), NumNonSpatialDim),
|
||||
std::end(output_lengths),
|
||||
static_cast<std::size_t>(1),
|
||||
std::multiplies<>()) *
|
||||
(ds_size + static_cast<std::size_t>(2) * C *
|
||||
std::accumulate(std::next(std::begin(weights_lengths), NumNonSpatialDim),
|
||||
std::end(weights_lengths),
|
||||
static_cast<std::size_t>(1),
|
||||
std::multiplies<>()));
|
||||
}
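// For instance, with the default shapes used by the convscale_relu_amax FP8 example in this
// change (G=1, N=64, K=128, C=64, 3x3x3 filter, 28x28x3 output), the estimate above is
// 1 * 64 * 128 * (28 * 28 * 3) * (ds_size + 2 * 64 * 3 * 3 * 3) FLOPs.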
|
||||
|
||||
template <ck::index_t NumDimSpatial, ck::index_t NumNonSpatialDim = 3>
|
||||
std::size_t GetTensorSize(const std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>& lengths)
|
||||
{
|
||||
|
||||
return std::accumulate(std::begin(lengths),
|
||||
std::end(lengths),
|
||||
static_cast<std::size_t>(1),
|
||||
std::multiplies<std::size_t>());
|
||||
}
|
||||
|
||||
template <typename InDataType, ck::index_t NumDimSpatial, ck::index_t NumNonSpatialDim = 3>
|
||||
std::size_t
|
||||
GetInputByte(const std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>& input_lengths)
|
||||
{
|
||||
// sizeof(InDataType) * (G * N * C * <input spatial lengths product>) +
|
||||
return sizeof(InDataType) * GetTensorSize<NumDimSpatial>(input_lengths);
|
||||
}
|
||||
|
||||
template <typename WeiDataType, ck::index_t NumDimSpatial, ck::index_t NumNonSpatialDim = 3>
|
||||
std::size_t
|
||||
GetWeightByte(const std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>& weights_lengths)
|
||||
{
|
||||
// sizeof(WeiDataType) * (G * K * C * <filter spatial lengths product>) +
|
||||
return sizeof(WeiDataType) * GetTensorSize<NumDimSpatial>(weights_lengths);
|
||||
}
|
||||
|
||||
template <typename OutDataType, ck::index_t NumDimSpatial, ck::index_t NumNonSpatialDim = 3>
|
||||
std::size_t
|
||||
GetOutputByte(const std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>& output_lengths)
|
||||
{
|
||||
// sizeof(OutDataType) * (G * N * K * <output spatial lengths product>);
|
||||
return sizeof(OutDataType) * GetTensorSize<NumDimSpatial>(output_lengths);
|
||||
}
|
||||
|
||||
template <typename InDataType,
|
||||
typename WeiDataType,
|
||||
typename OutDataType,
|
||||
typename ConvElementOp,
|
||||
typename InLayout,
|
||||
typename WeiLayout,
|
||||
typename OutLayout,
|
||||
ck::index_t NumDimSpatial,
|
||||
ck::index_t NumNonSpatialDim = 3,
|
||||
typename AComputeType = InDataType,
|
||||
typename BComputeType = AComputeType>
|
||||
bool ConvolutionScale(SimpleDeviceMem& in,
|
||||
SimpleDeviceMem& wei,
|
||||
SimpleDeviceMem& out,
|
||||
ConvElementOp elementwise_op,
|
||||
const std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>& in_lengths,
|
||||
const std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>& in_strides,
|
||||
const std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>& wei_lengths,
|
||||
const std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>& wei_strides,
|
||||
const std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>& out_lengths,
|
||||
const std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>& out_strides);
|
||||
|
||||
template <typename InDataType,
|
||||
typename OutDataType,
|
||||
ck::index_t NumDimSpatial,
|
||||
ck::index_t NumNonSpatialDim = 3>
|
||||
bool TensorScaleConvert(SimpleDeviceMem& in,
|
||||
SimpleDeviceMem& out,
|
||||
float scale_out,
|
||||
const std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>& lengths,
|
||||
const std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>& strides);
|
||||
|
||||
template <typename InDataType,
|
||||
typename OutDataType,
|
||||
ck::ReduceTensorOp ReduceOpId,
|
||||
ck::index_t NumDimSpatial,
|
||||
ck::index_t NumNonSpatialDim = 3>
|
||||
bool TensorFullReduction(SimpleDeviceMem& tensor,
|
||||
SimpleDeviceMem& out_amax,
|
||||
const std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>& lengths,
|
||||
const std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>& strides);
|
||||
|
||||
template <ck::index_t NumDimSpatial,
|
||||
typename InDataType,
|
||||
typename WeiDataType,
|
||||
typename ConvOutDataType,
|
||||
typename OutDataType,
|
||||
typename ConvElementOp,
|
||||
ck::ReduceTensorOp ReduceOp,
|
||||
typename InLayout,
|
||||
typename WeiLayout,
|
||||
typename OutLayout,
|
||||
ck::index_t NumNonSpatialDim = 3,
|
||||
typename AComputeType = InDataType,
|
||||
typename BComputeType = AComputeType>
|
||||
bool run_grouped_conv_fwd_convscale_reduce(
|
||||
std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim> in_lengths,
|
||||
std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim> wei_lengths,
|
||||
std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim> out_lengths)
|
||||
{
|
||||
|
||||
namespace ctc = ck::tensor_layout::convolution;
|
||||
static_assert(NumDimSpatial == 3 && ck::is_same_v<InLayout, ctc::NDHWGC> &&
|
||||
ck::is_same_v<WeiLayout, ctc::GKZYXC> &&
|
||||
ck::is_same_v<OutLayout, ctc::NDHWGK>,
|
||||
"Unsupported configuration");
|
||||
|
||||
const ck::index_t G = in_lengths[4];
|
||||
const ck::index_t N = in_lengths[0];
|
||||
const ck::index_t K = wei_lengths[1];
|
||||
const ck::index_t C = in_lengths[5];
|
||||
const ck::index_t Z = wei_lengths[2];
|
||||
const ck::index_t Y = wei_lengths[3];
|
||||
const ck::index_t X = wei_lengths[4];
|
||||
const ck::index_t Di = in_lengths[1];
|
||||
const ck::index_t Hi = in_lengths[2];
|
||||
const ck::index_t Wi = in_lengths[3];
|
||||
const ck::index_t Do = out_lengths[1];
|
||||
const ck::index_t Ho = out_lengths[2];
|
||||
const ck::index_t Wo = out_lengths[3];
|
||||
|
||||
const std::size_t in_mem_size = sizeof(InDataType) * N * Di * Hi * Wi * G * C;
|
||||
const std::size_t wei_mem_size = sizeof(WeiDataType) * G * K * Z * Y * X * C;
|
||||
const std::size_t conv_out_mem_size = sizeof(ConvOutDataType) * N * Do * Ho * Wo * G * K;
|
||||
const std::size_t out_mem_size = sizeof(OutDataType) * N * Do * Ho * Wo * G * K;
|
||||
|
||||
SimpleDeviceMem in(in_mem_size);
|
||||
SimpleDeviceMem wei(wei_mem_size);
|
||||
SimpleDeviceMem conv_out(conv_out_mem_size);
|
||||
SimpleDeviceMem out(out_mem_size);
|
||||
|
||||
float scale_in = float(std::rand()) / float(RAND_MAX);
|
||||
float scale_wei = float(std::rand()) / float(RAND_MAX);
|
||||
float scale_out = float(std::rand()) / float(RAND_MAX);
|
||||
|
||||
// We have NDHWGC/GKZYXC/NDHWGK (x, weight, y) in memory space.
|
||||
// However, CK's API only accepts lengths and strides with order of GNCDHW/GKCZYX/GNKDHW.
|
||||
// Hence, we need to adjust the order of strides.
|
||||
const std::array<ck::index_t, NumDimSpatial + 3> input_lengths{G, N, C, Di, Hi, Wi};
|
||||
const std::array<ck::index_t, NumDimSpatial + 3> input_strides{
|
||||
C, Di * Hi * Wi * G * C, 1, Hi * Wi * G * C, Wi * G * C, G * C};
|
||||
const std::array<ck::index_t, NumDimSpatial + 3> weights_lengths{G, K, C, Z, Y, X};
|
||||
const std::array<ck::index_t, NumDimSpatial + 3> weights_strides{
|
||||
K * Z * Y * X * C, Z * Y * X * C, 1, Y * X * C, X * C, C};
|
||||
const std::array<ck::index_t, NumDimSpatial + 3> output_lengths{G, N, K, Do, Ho, Wo};
|
||||
const std::array<ck::index_t, NumDimSpatial + 3> output_strides{
|
||||
K, Do * Ho * Wo * G * K, 1, Ho * Wo * G * K, Wo * G * K, G * K};
|
||||
|
||||
/*
|
||||
* FP8 Convolution with Scaling
|
||||
*/
|
||||
std::cout << "\n\nConvolution with scale Benchmarking:" << std::endl;
|
||||
auto elementwise_op = ConvElementOp{ew::Scale{scale_in}, ew::Scale{scale_wei}, {}};
|
||||
auto conv_ok = ConvolutionScale<InDataType,
|
||||
WeiDataType,
|
||||
ConvOutDataType,
|
||||
ConvElementOp,
|
||||
InLayout,
|
||||
WeiLayout,
|
||||
OutLayout,
|
||||
NumDimSpatial>(in,
|
||||
wei,
|
||||
conv_out,
|
||||
elementwise_op,
|
||||
input_lengths,
|
||||
input_strides,
|
||||
weights_lengths,
|
||||
weights_strides,
|
||||
output_lengths,
|
||||
output_strides);
|
||||
|
||||
if(!conv_ok)
|
||||
return false;
|
||||
|
||||
/*
|
||||
* Scale with output weight and convert to FP8
|
||||
*/
|
||||
std::cout << "\n\nElement-wise scale + convert Benchmarking:" << std::endl;
|
||||
auto elem_wise_ok = TensorScaleConvert<ConvOutDataType, OutDataType, NumDimSpatial>(
|
||||
conv_out, out, scale_out, output_lengths, output_strides);
|
||||
|
||||
if(!elem_wise_ok)
|
||||
return false;
|
||||
|
||||
/*
|
||||
* Compute AMAX
|
||||
*/
|
||||
std::cout << "\n\nAMAX Benchmarking:" << std::endl;
|
||||
SimpleDeviceMem amax_device(sizeof(ConvOutDataType));
|
||||
auto reduction_ok =
|
||||
TensorFullReduction<ConvOutDataType,
|
||||
ConvOutDataType,
|
||||
ck::ReduceTensorOp::AMAX,
|
||||
NumDimSpatial>(conv_out, amax_device, output_lengths, output_strides);
|
||||
|
||||
if(!reduction_ok)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename InDataType,
|
||||
typename WeiDataType,
|
||||
typename OutDataType,
|
||||
typename ConvElementOp,
|
||||
typename InLayout,
|
||||
typename WeiLayout,
|
||||
typename OutLayout,
|
||||
ck::index_t NumDimSpatial,
|
||||
ck::index_t NumNonSpatialDim,
|
||||
typename AComputeType,
|
||||
typename BComputeType>
|
||||
bool ConvolutionScale(SimpleDeviceMem& in,
|
||||
SimpleDeviceMem& wei,
|
||||
SimpleDeviceMem& out,
|
||||
ConvElementOp elementwise_op,
|
||||
const std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>& in_lengths,
|
||||
const std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>& in_strides,
|
||||
const std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>& wei_lengths,
|
||||
const std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>& wei_strides,
|
||||
const std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>& out_lengths,
|
||||
const std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>& out_strides)
|
||||
{
|
||||
|
||||
const std::array<ck::index_t, NumDimSpatial> conv_filter_strides{1, 1, 1};
|
||||
const std::array<ck::index_t, NumDimSpatial> conv_filter_dilations{1, 1, 1};
|
||||
const std::array<ck::index_t, NumDimSpatial> input_left_pads{1, 1, 1};
|
||||
const std::array<ck::index_t, NumDimSpatial> input_right_pads{1, 1, 1};
|
||||
|
||||
const auto in_mem_size = GetInputByte<InDataType, NumDimSpatial>(in_lengths);
|
||||
const auto wei_mem_size = GetWeightByte<WeiDataType, NumDimSpatial>(wei_lengths);
|
||||
const auto out_mem_size = GetOutputByte<OutDataType, NumDimSpatial>(out_lengths);
|
||||
|
||||
std::size_t ds_size = 2; // 2 element-wise scale multipliers
|
||||
if constexpr(ck::is_same_v<ConvElementOp, ConvScaleRelu>)
|
||||
{
|
||||
ds_size += 1; // +1 element-wise relu
|
||||
}
|
||||
std::size_t flop = GetFlops<NumDimSpatial>(out_lengths, wei_lengths, ds_size);
|
||||
std::size_t num_bytes =
|
||||
in_mem_size + wei_mem_size + sizeof(float) + sizeof(float) + out_mem_size;
|
||||
|
||||
using ConvDeviceOp =
|
||||
ck::tensor_operation::device::DeviceGroupedConvFwdMultipleABD<NumDimSpatial,
|
||||
InLayout,
|
||||
WeiLayout,
|
||||
ck::Tuple<>,
|
||||
OutLayout,
|
||||
InDataType,
|
||||
WeiDataType,
|
||||
ck::Tuple<>,
|
||||
OutDataType,
|
||||
PassThrough,
|
||||
PassThrough,
|
||||
ConvElementOp,
|
||||
AComputeType,
|
||||
BComputeType>;
|
||||
// get device op instances
|
||||
const auto conv_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
||||
ConvDeviceOp>::GetInstances();
|
||||
|
||||
std::cout << "found " << conv_ptrs.size() << " instances" << std::endl;
|
||||
|
||||
std::string conv_best_op_name;
|
||||
int conv_best_op_id = -1;
|
||||
float conv_best_avg_time = std::numeric_limits<float>::max();
|
||||
float conv_best_gb_per_sec = 0;
|
||||
float conv_best_tflops = 0;
|
||||
|
||||
// profile device operation instances
|
||||
std::cout << "Run all convolution instances and do timing" << std::endl;
|
||||
|
||||
for(int i = 0; i < conv_ptrs.size(); ++i)
|
||||
{
|
||||
auto& op_ptr = conv_ptrs[i];
|
||||
auto argument_ptr = op_ptr->MakeArgumentPointer(
|
||||
in.GetDeviceBuffer(),
|
||||
wei.GetDeviceBuffer(),
|
||||
std::array<const void*, 0>{},
|
||||
out.GetDeviceBuffer(),
|
||||
in_lengths,
|
||||
in_strides,
|
||||
wei_lengths,
|
||||
wei_strides,
|
||||
std::array<std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>, 0>{},
|
||||
std::array<std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>, 0>{},
|
||||
out_lengths,
|
||||
out_strides,
|
||||
conv_filter_strides,
|
||||
conv_filter_dilations,
|
||||
input_left_pads,
|
||||
input_right_pads,
|
||||
PassThrough{},
|
||||
PassThrough{},
|
||||
elementwise_op);
|
||||
|
||||
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
||||
std::string op_name = op_ptr->GetTypeString();
|
||||
|
||||
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
||||
{
|
||||
float avg_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, true});
|
||||
|
||||
float tflops = static_cast<float>(flop) / 1.E9 / avg_time;
|
||||
float gb_per_sec = num_bytes / 1.E6 / avg_time;
|
||||
|
||||
std::cout << "Perf: " << std::setw(10) << avg_time << " ms, " << tflops << " TFlops, "
|
||||
<< gb_per_sec << " GB/s, " << op_name << std::endl;
|
||||
|
||||
if(tflops > conv_best_tflops)
|
||||
{
|
||||
conv_best_op_id = i;
|
||||
conv_best_op_name = op_name;
|
||||
conv_best_avg_time = avg_time;
|
||||
conv_best_gb_per_sec = gb_per_sec;
|
||||
conv_best_tflops = tflops;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
std::cerr << op_name << " does not support this problem" << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
if(conv_best_op_id < 0)
|
||||
{
|
||||
std::cerr << "no suitable instance" << std::endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
std::cout << "Best Perf: " << std::setw(10) << conv_best_avg_time << " ms, " << conv_best_tflops
|
||||
<< " TFlops, " << conv_best_gb_per_sec << " GB/s, " << conv_best_op_name << std::endl;
|
||||
|
||||
// run the best instance
|
||||
{
|
||||
auto& op_ptr = conv_ptrs[conv_best_op_id];
|
||||
std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString()
|
||||
<< std::endl;
|
||||
auto argument_ptr = op_ptr->MakeArgumentPointer(
|
||||
in.GetDeviceBuffer(),
|
||||
wei.GetDeviceBuffer(),
|
||||
std::array<const void*, 0>{},
|
||||
out.GetDeviceBuffer(),
|
||||
in_lengths,
|
||||
in_strides,
|
||||
wei_lengths,
|
||||
wei_strides,
|
||||
std::array<std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>, 0>{},
|
||||
std::array<std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>, 0>{},
|
||||
out_lengths,
|
||||
out_strides,
|
||||
conv_filter_strides,
|
||||
conv_filter_dilations,
|
||||
input_left_pads,
|
||||
input_right_pads,
|
||||
PassThrough{},
|
||||
PassThrough{},
|
||||
elementwise_op);
|
||||
|
||||
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
||||
|
||||
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
||||
{
|
||||
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, false});
|
||||
}
|
||||
|
||||
std::cout << "Done" << std::endl;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename InDataType,
|
||||
typename OutDataType,
|
||||
ck::index_t NumDimSpatial,
|
||||
ck::index_t NumNonSpatialDim>
|
||||
bool TensorScaleConvert(SimpleDeviceMem& in,
|
||||
SimpleDeviceMem& out,
|
||||
float scale_out,
|
||||
const std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>& lengths,
|
||||
const std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>& strides)
|
||||
{
|
||||
|
||||
const auto tensor_size = GetTensorSize<NumDimSpatial>(lengths);
|
||||
|
||||
const std::size_t in_mem_size = sizeof(InDataType) * tensor_size;
|
||||
const std::size_t out_mem_size = sizeof(OutDataType) * tensor_size;
|
||||
|
||||
std::size_t flop = 2 * tensor_size; // element-wise scale + convert
|
||||
|
||||
std::size_t bytes =
|
||||
in_mem_size + sizeof(float) + out_mem_size; // read from in, scale, write to out
|
||||
|
||||
using DeviceScaleConvert =
|
||||
ck::tensor_operation::device::DeviceElementwise<ck::Tuple<InDataType>,
|
||||
ck::Tuple<OutDataType>,
|
||||
ew::Scale,
|
||||
NumDimSpatial + NumNonSpatialDim>;
|
||||
|
||||
// get device op instances
|
||||
const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
||||
DeviceScaleConvert>::GetInstances();
|
||||
|
||||
std::cout << "found " << op_ptrs.size() << " instances" << std::endl;
|
||||
|
||||
std::string best_op_name;
|
||||
int best_op_id = -1;
|
||||
float best_avg_time = std::numeric_limits<float>::max();
|
||||
float best_gb_per_sec = 0;
|
||||
float best_tflops = 0;
|
||||
|
||||
// profile device operation instances
|
||||
std::cout << "Run all DeviceScaleConvert instances and do timing" << std::endl;
|
||||
|
||||
auto scale_convert = ew::Scale{scale_out};
|
||||
|
||||
for(int i = 0; i < op_ptrs.size(); ++i)
|
||||
{
|
||||
auto& op_ptr = op_ptrs[i];
|
||||
auto argument_ptr = op_ptr->MakeArgumentPointer(lengths,
|
||||
{strides},
|
||||
{strides},
|
||||
{in.GetDeviceBuffer()},
|
||||
{out.GetDeviceBuffer()},
|
||||
scale_convert);
|
||||
|
||||
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
||||
std::string op_name = op_ptr->GetTypeString();
|
||||
|
||||
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
||||
{
|
||||
float avg_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, true});
|
||||
|
||||
float tflops = static_cast<float>(flop) / 1.E9 / avg_time;
|
||||
float gb_per_sec = bytes / 1.E6 / avg_time;
|
||||
|
||||
std::cout << "Perf: " << std::setw(10) << avg_time << " ms, " << tflops << " TFlops, "
|
||||
<< gb_per_sec << " GB/s, " << op_name << std::endl;
|
||||
|
||||
if(tflops > best_tflops)
|
||||
{
|
||||
best_op_id = i;
|
||||
best_op_name = op_name;
|
||||
best_avg_time = avg_time;
|
||||
best_gb_per_sec = gb_per_sec;
|
||||
best_tflops = tflops;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
std::cerr << op_name << " does not support this problem" << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
if(best_op_id < 0)
|
||||
{
|
||||
std::cerr << "no suitable instance found." << std::endl;
|
||||
return false;
|
||||
}
|
||||
else
|
||||
{
|
||||
std::cout << "Best Perf: " << std::setw(10) << best_avg_time << " ms, " << best_tflops
|
||||
<< " TFlops, " << best_gb_per_sec << " GB/s, " << best_op_name << std::endl;
|
||||
|
||||
// run the best intance
|
||||
auto& op_ptr = op_ptrs[best_op_id];
|
||||
std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString()
|
||||
<< std::endl;
|
||||
auto argument_ptr = op_ptr->MakeArgumentPointer(lengths,
|
||||
{strides},
|
||||
{strides},
|
||||
{in.GetDeviceBuffer()},
|
||||
{out.GetDeviceBuffer()},
|
||||
scale_convert);
|
||||
|
||||
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
||||
|
||||
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
||||
{
|
||||
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, false});
|
||||
}
|
||||
|
||||
std::cout << "Done" << std::endl;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename InDataType,
|
||||
typename OutDataType,
|
||||
ck::ReduceTensorOp ReduceOpId,
|
||||
ck::index_t NumDimSpatial,
|
||||
ck::index_t NumNonSpatialDim>
|
||||
bool TensorFullReduction(SimpleDeviceMem& tensor,
|
||||
SimpleDeviceMem& out_amax,
|
||||
const std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>& lengths,
|
||||
const std::array<ck::index_t, NumDimSpatial + NumNonSpatialDim>& strides)
|
||||
{
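// AMAX over the full tensor is computed in two stages: first the non-spatial dimensions
// (G, N, K) are reduced into a partial tensor of spatial size, then the spatial dimensions
// of that partial result are reduced to a single value.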
|
||||
const auto spatial_dim_size = std::accumulate(std::next(std::begin(lengths), NumNonSpatialDim),
|
||||
std::end(lengths),
|
||||
static_cast<std::size_t>(1),
|
||||
std::multiplies<>());
|
||||
const auto tensor_size = GetTensorSize<NumDimSpatial>(lengths);
|
||||
|
||||
auto copy = [](const auto& x, auto& y) { ck::ranges::copy(x, y.begin()); };
|
||||
|
||||
// Get the reduction operation
|
||||
using ReduceOperation = typename ck::reduce_binary_operator<ReduceOpId>::opType;
|
||||
using InElementwiseOperation =
|
||||
typename ck::reduce_unary_operator<ReduceOpId, true, true>::InElementwiseOperation;
|
||||
using AccElementwiseOperation =
|
||||
typename ck::reduce_unary_operator<ReduceOpId, true, true>::AccElementwiseOperation;
|
||||
|
||||
InElementwiseOperation in_elementwise_op;
|
||||
AccElementwiseOperation acc_elementwise_op;
|
||||
std::tie(in_elementwise_op, acc_elementwise_op) =
|
||||
ck::reduce_unary_operator<ReduceOpId, true, true>::GetElementwiseOperator(
|
||||
static_cast<int32_t>(tensor_size));
|
||||
|
||||
std::array<ck::index_t, 1> reduce_out_lengths{1};
|
||||
std::array<ck::index_t, 1> reduce_out_strides{1};
|
||||
|
||||
SimpleDeviceMem partial_reduce_tensor(sizeof(OutDataType) * spatial_dim_size);
|
||||
std::array<ck::index_t, NumDimSpatial> reduce_part_lengths;
|
||||
std::copy(std::next(std::begin(lengths), NumNonSpatialDim),
|
||||
std::end(lengths),
|
||||
std::begin(reduce_part_lengths));
|
||||
std::array<ck::index_t, NumDimSpatial> reduce_part_strides;
|
||||
copy(HostTensorDescriptor(reduce_part_lengths).GetStrides(), reduce_part_strides);
|
||||
|
||||
{
|
||||
std::cout << "\nReduction of nonspatial dimensions:" << std::endl;
|
||||
using DeviceOp =
|
||||
ck::tensor_operation::device::DeviceReduce<InDataType,
|
||||
OutDataType,
|
||||
OutDataType,
|
||||
NumDimSpatial + NumNonSpatialDim,
|
||||
NumNonSpatialDim,
|
||||
ReduceOperation,
|
||||
InElementwiseOperation,
|
||||
PassThrough,
|
||||
true, // PropagateNan
|
||||
false>; // OutputIndex
|
||||
const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
||||
DeviceOp>::GetInstances();
|
||||
|
||||
std::cout << "found " << op_ptrs.size() << " instances" << std::endl;
|
||||
|
||||
std::string best_op_name;
|
||||
int best_op_id = -1;
|
||||
float best_ave_time = std::numeric_limits<float>::max();
|
||||
float best_gb_per_sec = 0;
|
||||
|
||||
std::array<int, NumNonSpatialDim> reduce_dims;
|
||||
std::iota(reduce_dims.begin(), reduce_dims.end(), 0); // 0,..., NumNonSpatialDim-1
|
||||
|
||||
ck::index_t num_in_elements = tensor_size;
|
||||
ck::index_t num_out_elements = spatial_dim_size;
|
||||
|
||||
// profile device operation instances
|
||||
std::cout << "Run partial reduction and do timing" << std::endl;
|
||||
|
||||
for(int i = 0; i < op_ptrs.size(); ++i)
|
||||
{
|
||||
auto& op_ptr = op_ptrs[i];
|
||||
|
||||
auto argument_ptr = op_ptr->MakeArgumentPointer(lengths,
|
||||
strides,
|
||||
reduce_part_lengths,
|
||||
reduce_part_strides,
|
||||
reduce_dims,
|
||||
1.0,
|
||||
0.0,
|
||||
tensor.GetDeviceBuffer(),
|
||||
nullptr,
|
||||
partial_reduce_tensor.GetDeviceBuffer(),
|
||||
nullptr,
|
||||
in_elementwise_op,
|
||||
PassThrough{});
|
||||
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
||||
std::string op_name = op_ptr->GetTypeString();
|
||||
|
||||
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
||||
{
|
||||
float ave_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, true});
|
||||
std::size_t num_bytes =
|
||||
num_in_elements * sizeof(InDataType) + num_out_elements * sizeof(OutDataType);
|
||||
|
||||
float gb_per_sec = num_bytes / 1.E6 / ave_time;
|
||||
|
||||
std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << gb_per_sec
|
||||
<< " GB/s, " << op_name << std::endl;
|
||||
|
||||
if(ave_time < best_ave_time)
|
||||
{
|
||||
best_op_id = i;
|
||||
best_op_name = op_name;
|
||||
best_ave_time = ave_time;
|
||||
best_gb_per_sec = gb_per_sec;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
std::cout << op_name << " does not support this problem" << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
if(best_op_id < 0)
|
||||
{
|
||||
std::cerr << "no suitable instance found." << std::endl;
|
||||
return false;
|
||||
}
|
||||
else
|
||||
{
|
||||
std::cout << "Best Perf: " << best_ave_time << " ms, " << best_gb_per_sec << " GB/s, "
|
||||
<< best_op_name << std::endl;
|
||||
|
||||
// run the best instance
|
||||
auto& op_ptr = op_ptrs[best_op_id];
|
||||
std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString()
|
||||
<< std::endl;
|
||||
auto argument_ptr = op_ptr->MakeArgumentPointer(lengths,
|
||||
strides,
|
||||
reduce_part_lengths,
|
||||
reduce_part_strides,
|
||||
reduce_dims,
|
||||
1.0,
|
||||
0.0,
|
||||
tensor.GetDeviceBuffer(),
|
||||
nullptr,
|
||||
partial_reduce_tensor.GetDeviceBuffer(),
|
||||
nullptr,
|
||||
in_elementwise_op,
|
||||
PassThrough{});
|
||||
|
||||
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
||||
|
||||
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
||||
{
|
||||
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, false});
|
||||
}
|
||||
|
||||
std::cout << "Done" << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
std::cout << "\nReduction of spatial dimensions:" << std::endl;
|
||||
using DeviceOp = ck::tensor_operation::device::DeviceReduce<OutDataType,
|
||||
OutDataType,
|
||||
OutDataType,
|
||||
NumDimSpatial,
|
||||
NumDimSpatial,
|
||||
ReduceOperation,
|
||||
PassThrough,
|
||||
AccElementwiseOperation,
|
||||
true, // PropagateNan
|
||||
false>; // OutputIndex
|
||||
const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
||||
DeviceOp>::GetInstances();
|
||||
|
||||
std::cout << "found " << op_ptrs.size() << " instances" << std::endl;
|
||||
|
||||
std::string best_op_name;
|
||||
int best_op_id = -1;
|
||||
float best_ave_time = std::numeric_limits<float>::max();
|
||||
float best_gb_per_sec = 0;
|
||||
|
||||
std::array<int, NumDimSpatial> reduce_dims;
|
||||
std::iota(reduce_dims.begin(), reduce_dims.end(), 0); // 0,..., NumDimSpatial-1
|
||||
|
||||
ck::index_t num_in_elements = spatial_dim_size;
|
||||
ck::index_t num_out_elements = 1;
|
||||
|
||||
// profile device operation instances
|
||||
std::cout << "Run final reduction and do timing" << std::endl;
|
||||
|
||||
for(int i = 0; i < op_ptrs.size(); ++i)
|
||||
{
|
||||
auto& op_ptr = op_ptrs[i];
|
||||
|
||||
auto argument_ptr = op_ptr->MakeArgumentPointer(reduce_part_lengths,
|
||||
reduce_part_strides,
|
||||
reduce_out_lengths,
|
||||
reduce_out_strides,
|
||||
reduce_dims,
|
||||
1.0,
|
||||
0.0,
|
||||
partial_reduce_tensor.GetDeviceBuffer(),
|
||||
nullptr,
|
||||
out_amax.GetDeviceBuffer(),
|
||||
nullptr,
|
||||
PassThrough{},
|
||||
acc_elementwise_op);
|
||||
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
||||
std::string op_name = op_ptr->GetTypeString();
|
||||
|
||||
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
||||
{
|
||||
float ave_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, true});
|
||||
|
||||
std::size_t num_bytes =
|
||||
num_in_elements * sizeof(OutDataType) + num_out_elements * sizeof(OutDataType);
|
||||
|
||||
float gb_per_sec = num_bytes / 1.E6 / ave_time;
|
||||
|
||||
std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << gb_per_sec
|
||||
<< " GB/s, " << op_name << std::endl;
|
||||
|
||||
if(ave_time < best_ave_time)
|
||||
{
|
||||
best_op_id = i;
|
||||
best_op_name = op_name;
|
||||
best_ave_time = ave_time;
|
||||
best_gb_per_sec = gb_per_sec;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
std::cout << op_name << " does not support this problem" << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
if(best_op_id < 0)
|
||||
{
|
||||
std::cerr << "no suitable instance found." << std::endl;
|
||||
return false;
|
||||
}
|
||||
else
|
||||
{
|
||||
std::cout << "Best Perf: " << best_ave_time << " ms, " << best_gb_per_sec << " GB/s, "
|
||||
<< best_op_name << std::endl;
|
||||
|
||||
// run the best instance
|
||||
auto& op_ptr = op_ptrs[best_op_id];
|
||||
std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString()
|
||||
<< std::endl;
|
||||
auto argument_ptr = op_ptr->MakeArgumentPointer(reduce_part_lengths,
|
||||
reduce_part_strides,
|
||||
reduce_out_lengths,
|
||||
reduce_out_strides,
|
||||
reduce_dims,
|
||||
1.0,
|
||||
0.0,
|
||||
partial_reduce_tensor.GetDeviceBuffer(),
|
||||
nullptr,
|
||||
out_amax.GetDeviceBuffer(),
|
||||
nullptr,
|
||||
PassThrough{},
|
||||
acc_elementwise_op);
|
||||
|
||||
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
||||
|
||||
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
||||
{
|
||||
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, false});
|
||||
}
|
||||
|
||||
std::cout << "Done" << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
@@ -0,0 +1,58 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#include "common.hpp"

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"

using InDataType = ck::f8_t;
using WeiDataType = ck::f8_t;
using CShuffleDataType = float;
using ConvOutDataType = float; // data type of convolution result
using OutDataType = ck::f8_t;  // data type of final result
using AComputeDataType = ck::f8_t;
using BComputeDataType = ck::f8_t;

using ConvElementOp = ConvScaleRelu;

using InLayout  = ck::tensor_layout::convolution::NDHWGC;
using WeiLayout = ck::tensor_layout::convolution::GKZYXC;
using OutLayout = ck::tensor_layout::convolution::NDHWGK;

constexpr auto ReduceOpId = ck::ReduceTensorOp::AMAX;

static constexpr ck::index_t NumDimSpatial = 3;
static constexpr ck::index_t G = 1;
static constexpr ck::index_t N = 64;
static constexpr ck::index_t K = 128;
static constexpr ck::index_t C = 64;
static constexpr ck::index_t Z = 3;
static constexpr ck::index_t Y = 3;
static constexpr ck::index_t X = 3;
static constexpr ck::index_t Di = 28;
static constexpr ck::index_t Hi = 28;
static constexpr ck::index_t Wi = 3;
static constexpr ck::index_t Do = 28;
static constexpr ck::index_t Ho = 28;
static constexpr ck::index_t Wo = 3;
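
// With the 3x3x3 filter and the unit strides/dilations and padding of 1 hard-coded in
// ConvolutionScale, the output spatial sizes equal the input sizes (Do = Di, Ho = Hi, Wo = Wi).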

int main()
{
    return run_grouped_conv_fwd_convscale_reduce<NumDimSpatial,
                                                 InDataType,
                                                 WeiDataType,
                                                 ConvOutDataType,
                                                 OutDataType,
                                                 ConvElementOp,
                                                 ReduceOpId,
                                                 InLayout,
                                                 WeiLayout,
                                                 OutLayout,
                                                 3,
                                                 AComputeDataType,
                                                 BComputeDataType>(
               {N, Di, Hi, Wi, G, C}, {G, K, Z, Y, X, C}, {N, Do, Ho, Wo, G, K})
               ? EXIT_SUCCESS
               : EXIT_FAILURE;
}

@@ -1,2 +1,2 @@
rocm-docs-core==1.7.0
rocm-docs-core==1.7.1
sphinxcontrib-bibtex==2.6.2

@@ -103,7 +103,7 @@ requests==2.32.3
# via
#   pygithub
#   sphinx
rocm-docs-core==1.7.0
rocm-docs-core==1.7.1
# via -r requirements.in
six==1.16.0
# via pybtex

@@ -3,6 +3,7 @@ add_subdirectory(convinvscale)
add_subdirectory(convscale)
add_subdirectory(convscale_relu)
add_subdirectory(convscale_add)
add_subdirectory(convscale_reduce)
add_subdirectory(multi_AB)
add_subdirectory(unary)

example/62_convnd_activ/convscale_reduce/CMakeLists.txt (new file, +11 lines)
@@ -0,0 +1,11 @@
list(APPEND gpu_list gfx908 gfx90a gfx940 gfx941 gfx942)
set(target 0)
foreach(gpu IN LISTS GPU_TARGETS)
    if(gpu IN_LIST gpu_list AND target EQUAL 0)
        add_custom_target(example_convnd_activ_xdl_convscale_reduce)
        add_example_executable(example_convnd_fwd_xdl_convscale_relu_amax_fp8 convnd_fwd_xdl_convscale_relu_amax_fp8.cpp)
        add_example_dependencies(example_convnd_activ_xdl_convscale_reduce example_convnd_fwd_xdl_convscale_relu_amax_fp8)

        set(target 1)
    endif()
endforeach()

@@ -0,0 +1,502 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
#include <cstdlib>
|
||||
#include <iostream>
|
||||
|
||||
#include "ck/ck.hpp"
|
||||
|
||||
#include "ck/library/utility/algorithm.hpp"
|
||||
#include "ck/library/utility/check_err.hpp"
|
||||
#include "ck/library/utility/device_memory.hpp"
|
||||
#include "ck/library/utility/host_tensor.hpp"
|
||||
#include "ck/library/utility/host_tensor_generator.hpp"
|
||||
#include "ck/library/utility/convolution_parameter.hpp"
|
||||
#include "ck/library/reference_tensor_operation/cpu/reference_conv_fwd.hpp"
|
||||
#include "ck/library/reference_tensor_operation/cpu/reference_reduce.hpp"
|
||||
#include "ck/tensor_operation/gpu/element/combined_element_wise_operation.hpp"
|
||||
#include "ck/tensor_operation/gpu/element/unary_element_wise_operation.hpp"
|
||||
#include "ck/tensor_operation/gpu/device/impl/device_elementwise_dynamic_vector_dims_impl.hpp"
|
||||
#include "ck/tensor_operation/gpu/device/impl/device_reduce_multiblock.hpp"
|
||||
#include "ck/utility/reduction_operator.hpp"
|
||||
#include "ck/utility/reduction_enums.hpp"
|
||||
#include "ck/tensor_operation/gpu/device/reduction_operator_mapping.hpp"
|
||||
#include "ck/utility/type.hpp"
|
||||
|
||||
namespace ew = ck::tensor_operation::element_wise;
|
||||
|
||||
using PassThrough = ew::PassThrough;
|
||||
using ConvScaleRelu = ew::UnaryCombinedOp<ew::Scale, ew::Scale, ew::Relu>;
|
||||
using ConvScale = ew::UnaryCombinedOp<ew::Scale, ew::Scale, PassThrough>;
|
||||
|
||||
using UnaryScaleConvert = ew::Scale;
|
||||
|
||||
void print_helper_msg()
|
||||
{
|
||||
std::cout << "arg1: verification (0=no, 1=yes)\n"
|
||||
<< "arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n"
|
||||
<< "arg3: time kernel (0=no, 1=yes)\n"
|
||||
<< ck::utils::conv::get_conv_param_parser_helper_msg() << std::endl;
|
||||
}
|
||||
|
||||
template <typename DataType>
|
||||
inline __host__ __device__ constexpr double get_rtol()
|
||||
{
|
||||
if constexpr(std::is_same_v<DataType, float>)
|
||||
{
|
||||
return 1e-3;
|
||||
}
|
||||
else if constexpr(std::is_same_v<DataType, double>)
|
||||
{
|
||||
return 1e-6;
|
||||
}
|
||||
else if constexpr(std::is_same_v<DataType, ck::half_t>)
|
||||
{
|
||||
return 1e-3;
|
||||
}
|
||||
else if constexpr(std::is_same_v<DataType, ck::bhalf_t>)
|
||||
{
|
||||
return 5e-2;
|
||||
}
|
||||
else if constexpr(std::is_same_v<DataType, int32_t>)
|
||||
{
|
||||
return 1e-1;
|
||||
}
|
||||
else if constexpr(std::is_same_v<DataType, int8_t>)
|
||||
{
|
||||
return 1e-1;
|
||||
}
|
||||
else if constexpr(std::is_same_v<DataType, ck::f8_t>)
|
||||
{
|
||||
return 1e-1; // 240 and 224 are acceptable
|
||||
}
|
||||
else if constexpr(std::is_same_v<DataType, ck::bf8_t>)
|
||||
{
|
||||
return 1.5e-1; // 57344 and 49152 are acceptable
|
||||
}
|
||||
else
|
||||
{
|
||||
return 1e-3;
|
||||
}
|
||||
}
|
||||
|
||||
template <typename DataType>
|
||||
inline __host__ __device__ constexpr double get_atol()
|
||||
{
|
||||
if constexpr(std::is_same_v<DataType, float>)
|
||||
{
|
||||
return 1e-3;
|
||||
}
|
||||
else if constexpr(std::is_same_v<DataType, double>)
|
||||
{
|
||||
return 1e-6;
|
||||
}
|
||||
else if constexpr(std::is_same_v<DataType, ck::half_t>)
|
||||
{
|
||||
return 1e-3;
|
||||
}
|
||||
else if constexpr(std::is_same_v<DataType, ck::bhalf_t>)
|
||||
{
|
||||
return 5e-2;
|
||||
}
|
||||
else if constexpr(std::is_same_v<DataType, int32_t>)
|
||||
{
|
||||
return 1e-1;
|
||||
}
|
||||
else if constexpr(std::is_same_v<DataType, int8_t>)
|
||||
{
|
||||
return 1e-1;
|
||||
}
|
||||
else if constexpr(std::is_same_v<DataType, ck::f8_t>)
|
||||
{
|
||||
return 16.1; // 240 and 224 are acceptable
|
||||
}
|
||||
else if constexpr(std::is_same_v<DataType, ck::bf8_t>)
|
||||
{
|
||||
return 8192.1; // 57344 and 49152 are acceptable
|
||||
}
|
||||
else
|
||||
{
|
||||
return 1e-3;
|
||||
}
|
||||
}
|
||||
|
||||
template <ck::index_t NDimSpatial,
|
||||
typename InDataType,
|
||||
typename WeiDataType,
|
||||
typename ConvOutDataType,
|
||||
typename OutDataType,
|
||||
typename InElementOp,
|
||||
typename WeiElementOp,
|
||||
typename ConvElementOp,
|
||||
typename DeviceConvNDFwdInstance>
|
||||
bool run_grouped_conv_fwd(bool do_verification,
|
||||
int init_method,
|
||||
bool time_kernel,
|
||||
const ck::utils::conv::ConvParam& conv_param,
|
||||
const HostTensorDescriptor& in_g_n_c_wis_desc,
|
||||
const HostTensorDescriptor& wei_g_k_c_xs_desc,
|
||||
const HostTensorDescriptor& out_g_n_k_wos_desc,
|
||||
const InElementOp& in_element_op,
|
||||
const WeiElementOp& wei_element_op)
|
||||
{
|
||||
Tensor<InDataType> in(in_g_n_c_wis_desc);
|
||||
Tensor<WeiDataType> wei(wei_g_k_c_xs_desc);
|
||||
Tensor<ConvOutDataType> host_conv(out_g_n_k_wos_desc);
|
||||
Tensor<ConvOutDataType> device_conv(out_g_n_k_wos_desc);
|
||||
Tensor<OutDataType> out_host(out_g_n_k_wos_desc);
|
||||
Tensor<OutDataType> out_device(out_g_n_k_wos_desc);
|
||||
|
||||
std::cout << "in: " << in.mDesc << std::endl;
|
||||
std::cout << "wei: " << wei.mDesc << std::endl;
|
||||
std::cout << "out: " << out_host.mDesc << std::endl;
|
||||
|
||||
switch(init_method)
|
||||
{
|
||||
case 0: break;
|
||||
case 1:
|
||||
in.GenerateTensorValue(GeneratorTensor_2<InDataType>{-5, 5});
|
||||
wei.GenerateTensorValue(GeneratorTensor_2<WeiDataType>{-5, 5});
|
||||
break;
|
||||
case 11: // used for debugging
|
||||
in.GenerateTensorValue(GeneratorTensor_1<InDataType>{1});
|
||||
wei.GenerateTensorValue(GeneratorTensor_1<WeiDataType>{1});
|
||||
break;
|
||||
default:
|
||||
in.GenerateTensorValue(GeneratorTensor_3<InDataType>{-1.0, 1.0});
|
||||
wei.GenerateTensorValue(GeneratorTensor_3<WeiDataType>{-0.5, 0.5});
|
||||
}
|
||||
|
||||
DeviceMem in_device_buf(sizeof(InDataType) * in.mDesc.GetElementSpaceSize());
|
||||
DeviceMem wei_device_buf(sizeof(WeiDataType) * wei.mDesc.GetElementSpaceSize());
|
||||
DeviceMem conv_device_buf(conv_param.GetOutputByte<ConvOutDataType>());
|
||||
DeviceMem out_device_buf(conv_param.GetOutputByte<OutDataType>());
|
||||
|
||||
in_device_buf.ToDevice(in.mData.data());
|
||||
wei_device_buf.ToDevice(wei.mData.data());
|
||||
|
||||
std::array<ck::index_t, NDimSpatial + 3> a_g_n_c_wis_lengths{};
|
||||
std::array<ck::index_t, NDimSpatial + 3> a_g_n_c_wis_strides{};
|
||||
std::array<ck::index_t, NDimSpatial + 3> b_g_k_c_xs_lengths{};
|
||||
std::array<ck::index_t, NDimSpatial + 3> b_g_k_c_xs_strides{};
|
||||
std::array<ck::index_t, NDimSpatial + 3> e_g_n_k_wos_lengths{};
|
||||
std::array<ck::index_t, NDimSpatial + 3> e_g_n_k_wos_strides{};
|
||||
std::array<ck::index_t, NDimSpatial> conv_filter_strides{};
|
||||
std::array<ck::index_t, NDimSpatial> conv_filter_dilations{};
|
||||
std::array<ck::index_t, NDimSpatial> input_left_pads{};
|
||||
std::array<ck::index_t, NDimSpatial> input_right_pads{};
|
||||
|
||||
auto copy = [](const auto& x, auto& y) { ck::ranges::copy(x, y.begin()); };
|
||||
|
||||
copy(in_g_n_c_wis_desc.GetLengths(), a_g_n_c_wis_lengths);
|
||||
copy(in_g_n_c_wis_desc.GetStrides(), a_g_n_c_wis_strides);
|
||||
copy(wei_g_k_c_xs_desc.GetLengths(), b_g_k_c_xs_lengths);
|
||||
copy(wei_g_k_c_xs_desc.GetStrides(), b_g_k_c_xs_strides);
|
||||
copy(out_g_n_k_wos_desc.GetLengths(), e_g_n_k_wos_lengths);
|
||||
copy(out_g_n_k_wos_desc.GetStrides(), e_g_n_k_wos_strides);
|
||||
copy(conv_param.conv_filter_strides_, conv_filter_strides);
|
||||
copy(conv_param.conv_filter_dilations_, conv_filter_dilations);
|
||||
copy(conv_param.input_left_pads_, input_left_pads);
|
||||
copy(conv_param.input_right_pads_, input_right_pads);
|
||||
|
||||
// random scale values
|
||||
float scale_in = float(std::rand()) / float(RAND_MAX);
|
||||
float scale_wei = float(std::rand()) / float(RAND_MAX);
|
||||
float scale_out = float(std::rand()) / float(RAND_MAX);
|
||||
|
||||
std::cout << std::endl;
|
||||
std::cout << "scale_in: " << scale_in << std::endl;
|
||||
std::cout << "scale_wei: " << scale_wei << std::endl;
|
||||
std::cout << "scale_out: " << scale_out << std::endl;
|
||||
|
||||
// convolution elementwise operation
|
||||
auto conv_element_op = ConvElementOp{ew::Scale{scale_in}, ew::Scale{scale_wei}, {}};
|
||||
auto scale_convert = UnaryScaleConvert{scale_out}; // elementwise scale and type cast
|
||||
|
||||
// do Conv
|
||||
auto conv = DeviceConvNDFwdInstance{};
|
||||
auto conv_invoker = conv.MakeInvoker();
|
||||
auto conv_argument =
|
||||
conv.MakeArgument(in_device_buf.GetDeviceBuffer(),
|
||||
wei_device_buf.GetDeviceBuffer(),
|
||||
std::array<const void*, 0>{},
|
||||
conv_device_buf.GetDeviceBuffer(),
|
||||
a_g_n_c_wis_lengths,
|
||||
a_g_n_c_wis_strides,
|
||||
b_g_k_c_xs_lengths,
|
||||
b_g_k_c_xs_strides,
|
||||
std::array<std::array<ck::index_t, NDimSpatial + 3>, 0>{},
|
||||
std::array<std::array<ck::index_t, NDimSpatial + 3>, 0>{},
|
||||
e_g_n_k_wos_lengths,
|
||||
e_g_n_k_wos_strides,
|
||||
conv_filter_strides,
|
||||
conv_filter_dilations,
|
||||
input_left_pads,
|
||||
input_right_pads,
|
||||
in_element_op,
|
||||
wei_element_op,
|
||||
conv_element_op);
|
||||
|
||||
if(!conv.IsSupportedArgument(conv_argument))
|
||||
{
|
||||
throw std::runtime_error(
|
||||
"wrong! device_conv with the specified compilation parameters does "
|
||||
"not support this Conv problem");
|
||||
}
|
||||
|
||||
std::string kernels = conv.GetTypeString();
|
||||
|
||||
float avg_time = conv_invoker.Run(conv_argument, StreamConfig{nullptr, time_kernel});
|
||||
|
||||
using DeviceElementwiseScale = ck::tensor_operation::device::DeviceElementwiseImpl<
|
||||
ck::Tuple<ConvOutDataType>, // InDataTypeTuple
|
||||
ck::Tuple<OutDataType>, // OutDataTypeTuple
|
||||
UnaryScaleConvert, // UnaryScaleConvert
|
||||
NDimSpatial + 3, // NumDim
|
||||
256, // BlockSize
|
||||
128, // M0PerBlock
|
||||
128, // M1PerBlock
|
||||
8, // M0PerThread
|
||||
8, // M1PerThread
|
||||
ck::Sequence<1, 0>, // ThreadClusterArrangeOrder
|
||||
ck::Sequence<8>, // InScalarPerVectorSeq
|
||||
ck::Sequence<8>>; // OutScalarPerVectorSeq
|
||||
|
||||
auto device_ew_scale = DeviceElementwiseScale{};
|
||||
auto scale_invoker = device_ew_scale.MakeInvoker();
|
||||
auto scale_argument = device_ew_scale.MakeArgument(e_g_n_k_wos_lengths,
|
||||
{e_g_n_k_wos_strides},
|
||||
{e_g_n_k_wos_strides},
|
||||
{conv_device_buf.GetDeviceBuffer()},
|
||||
{out_device_buf.GetDeviceBuffer()},
|
||||
scale_convert);
|
||||
|
||||
if(!device_ew_scale.IsSupportedArgument(scale_argument))
|
||||
{
|
||||
throw std::runtime_error(
|
||||
"wrong! DeviceElementwiseScale with the specified compilation parameters does "
|
||||
"not support this problem");
|
||||
}
|
||||
|
||||
kernels += std::string("\n\t\t ") + device_ew_scale.GetTypeString();
|
||||
|
||||
avg_time += scale_invoker.Run(scale_argument, StreamConfig{nullptr, time_kernel});
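
// Note: avg_time accumulates the convolution, the elementwise scale/convert, and (below)
// the AMAX reduction, so the perf numbers reported later cover the whole sequence.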
|
||||
|
||||
constexpr auto ReduceOpId = ck::ReduceTensorOp::AMAX;
|
||||
using ReduceOperation = typename ck::reduce_binary_operator<ReduceOpId>::opType;
|
||||
using InElementwiseOperation =
|
||||
typename ck::reduce_unary_operator<ReduceOpId, true, true>::InElementwiseOperation;
|
||||
using AccElementwiseOperation =
|
||||
typename ck::reduce_unary_operator<ReduceOpId, true, true>::AccElementwiseOperation;
|
||||
using DeviceReduceInstance =
|
||||
ck::tensor_operation::device::DeviceReduceMultiBlock<ConvOutDataType,
|
||||
ConvOutDataType,
|
||||
ConvOutDataType,
|
||||
NDimSpatial + 3,
|
||||
NDimSpatial + 3,
|
||||
ReduceOperation,
|
||||
InElementwiseOperation,
|
||||
AccElementwiseOperation,
|
||||
ck::InMemoryDataOperationEnum::Set,
|
||||
true, // PropagateNan
|
||||
false, // OutputIndex
|
||||
false, // HaveIndexInputIfOutputIndex
|
||||
256, // BlockSize
|
||||
4, // MThreadClusterSize
|
||||
64, // KThreadClusterSize
|
||||
1, // MThreadSliceSize
|
||||
1, // KThreadSliceSize
|
||||
1, // InSrcVectorDim
|
||||
1, // InSrcVectorSize
|
||||
1>; // OutDstVectorSize
|
||||
|
||||
std::vector<size_t> outLengths = {1};
|
||||
Tensor<ConvOutDataType> amax_host(outLengths);
|
||||
Tensor<ConvOutDataType> amax_from_device(outLengths);
|
||||
auto amax_host_strides = amax_host.mDesc.GetStrides();
|
||||
|
||||
std::array<int, NDimSpatial + 3> reduce_dims;
|
||||
std::iota(reduce_dims.begin(), reduce_dims.end(), 0); // 0,..., NDimSpatial+3-1
|
||||
|
||||
std::array<ck::index_t, 1> reduce_out_lengths{1};
|
||||
std::array<ck::index_t, 1> reduce_out_strides{static_cast<ck::index_t>(amax_host_strides[0])};
|
||||
|
||||
DeviceMem amax_device(sizeof(ConvOutDataType) * amax_host.mDesc.GetElementSpaceSize());
|
||||
DeviceMem index_device;
|
||||
|
||||
InElementwiseOperation in_elementwise_op;
|
||||
AccElementwiseOperation acc_elementwise_op;
|
||||
std::tie(in_elementwise_op, acc_elementwise_op) =
|
||||
ck::reduce_unary_operator<ReduceOpId, true, true>::GetElementwiseOperator(
|
||||
static_cast<int32_t>(host_conv.mDesc.GetElementSize()));
|
||||
|
||||
// Hack convolution output strides for reduction as kernel expects stride 1 for the last
|
||||
// dimension. It only works because the reduction is done on the whole tensor and result is
|
||||
// independent of the order of elements.
|
||||
std::array<ck::index_t, NDimSpatial + 3> reduction_strides{};
|
||||
copy(HostTensorDescriptor(e_g_n_k_wos_lengths).GetStrides(), reduction_strides);
|
||||
|
||||
auto device_reduce = DeviceReduceInstance{};
|
||||
auto reduce_invoker = device_reduce.MakeInvokerPointer();
|
||||
auto reduce_argument = device_reduce.MakeArgumentPointer(e_g_n_k_wos_lengths,
|
||||
reduction_strides,
|
||||
reduce_out_lengths,
|
||||
reduce_out_strides,
|
||||
reduce_dims,
|
||||
1.0,
|
||||
0.0,
|
||||
conv_device_buf.GetDeviceBuffer(),
|
||||
nullptr,
|
||||
amax_device.GetDeviceBuffer(),
|
||||
nullptr,
|
||||
in_elementwise_op,
|
||||
acc_elementwise_op);
|
||||
|
||||
if(!device_reduce.IsSupportedArgument(reduce_argument.get()))
|
||||
{
|
||||
throw std::runtime_error(
|
||||
"wrong! DeviceReduceInstance with the specified compilation parameters does "
|
||||
"not support this runtime parameters!");
|
||||
};
|
||||
|
||||
kernels += std::string("\n\t\t ") + device_reduce.GetTypeString();
|
||||
|
||||
float reduce_time =
|
||||
reduce_invoker->Run(reduce_argument.get(), StreamConfig{nullptr, time_kernel});
|
||||
|
||||
if(time_kernel)
|
||||
std::cout << "\nReduce time: " << reduce_time << " ms" << std::endl;
|
||||
|
||||
avg_time += reduce_time;
|
||||
|
||||
std::size_t flop = conv_param.GetFlops(); // convolution FLOPs
|
||||
auto conv_out_elems = host_conv.GetElementSize(); // number of elements in conv result tensor
|
||||
|
||||
// 3 element-wise scale multipliers + 1 AMAX
|
||||
std::size_t elementwise_ops = 3 + 1;
|
||||
if constexpr(ck::is_same_v<ConvElementOp, ConvScaleRelu>)
|
||||
{
|
||||
elementwise_ops += 1; // +1 element-wise relu
|
||||
}
|
||||
|
||||
flop += elementwise_ops * conv_out_elems;
|
||||
|
||||
// convolution + elementwise scaling (in + wei + output byte count)
|
||||
std::size_t num_btype = conv_param.GetByte<InDataType, WeiDataType, ConvOutDataType>();
|
||||
num_btype += sizeof(float) + sizeof(float); // + 2 scales
|
||||
|
||||
// elementwise scaling + F8 conversion
|
||||
num_btype += conv_param.GetOutputByte<ConvOutDataType>() + sizeof(float) +
|
||||
conv_param.GetOutputByte<OutDataType>();
|
||||
|
||||
// AMAX
|
||||
num_btype += conv_param.GetOutputByte<ConvOutDataType>() + sizeof(float);
|
||||
|
||||
if(time_kernel)
|
||||
{
|
||||
float tflops = static_cast<float>(flop) / 1.E9 / avg_time;
|
||||
float gb_per_sec = num_btype / 1.E6 / avg_time;
|
||||
std::cout << "Perf: " << avg_time << " ms, " << tflops << " TFlops, " << gb_per_sec
|
||||
<< " GB/s, " << std::endl;
|
||||
}
|
||||
|
||||
std::cout << "\nKernels: " << kernels << std::endl;
|
||||
|
||||
if(do_verification)
|
||||
{
|
||||
auto ref_conv = ck::tensor_operation::host::ReferenceConvFwd<NDimSpatial,
|
||||
InDataType,
|
||||
WeiDataType,
|
||||
ConvOutDataType,
|
||||
InElementOp,
|
||||
WeiElementOp,
|
||||
ConvElementOp>();
|
||||
|
||||
auto ref_invoker = ref_conv.MakeInvoker();
|
||||
auto ref_argument = ref_conv.MakeArgument(in,
|
||||
wei,
|
||||
host_conv,
|
||||
conv_param.conv_filter_strides_,
|
||||
conv_param.conv_filter_dilations_,
|
||||
conv_param.input_left_pads_,
|
||||
conv_param.input_right_pads_,
|
||||
in_element_op,
|
||||
wei_element_op,
|
||||
conv_element_op);
|
||||
|
||||
ref_invoker.Run(ref_argument);
|
||||
|
||||
conv_device_buf.FromDevice(device_conv.mData.data());
|
||||
|
||||
out_device_buf.FromDevice(out_device.mData.data());
|
||||
|
||||
out_host.ForEach([&](auto&, auto idx) { scale_convert(out_host(idx), host_conv(idx)); });
|
||||
|
||||
std::cout << "\nComparing output to reference: " << std::endl;
|
||||
auto tight_tol_check = ck::utils::check_err(out_device, out_host, "Error: ");
|
||||
if(!tight_tol_check)
|
||||
{
|
||||
std::cout << "\n\tRecompare applying tolerances...\n";
|
||||
std::cout << "\t\trtol = " << get_rtol<OutDataType>() << std::endl;
|
||||
std::cout << "\t\tatol = " << get_atol<OutDataType>() << std::endl;
|
||||
auto loose_tol_check = ck::utils::check_err(out_device,
|
||||
out_host,
|
||||
"Error: incorrect convolution results!",
|
||||
get_rtol<OutDataType>(),
|
||||
get_atol<OutDataType>());
|
||||
if(!loose_tol_check)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
std::cout << "Success!" << std::endl;
|
||||
|
||||
/// Verify AMAX
|
||||
|
||||
using RefReduceInstance =
|
||||
ck::tensor_operation::host::ReferenceReduce<ConvOutDataType,
|
||||
ConvOutDataType,
|
||||
ConvOutDataType,
|
||||
NDimSpatial + 3,
|
||||
NDimSpatial + 3,
|
||||
ReduceOperation,
|
||||
InElementwiseOperation,
|
||||
AccElementwiseOperation,
|
||||
true,
|
||||
false>;
|
||||
|
||||
auto ref_reduce = RefReduceInstance{};
|
||||
auto ref_reduce_invoker = ref_reduce.MakeInvokerPointer();
|
||||
auto ref_reduce_argument = ref_reduce.MakeArgumentPointer(e_g_n_k_wos_lengths,
|
||||
e_g_n_k_wos_strides,
|
||||
reduce_out_lengths,
|
||||
reduce_out_strides,
|
||||
reduce_dims,
|
||||
1.0,
|
||||
0.0,
|
||||
host_conv.mData.data(),
|
||||
nullptr,
|
||||
amax_host.mData.data(),
|
||||
nullptr,
|
||||
in_elementwise_op,
|
||||
acc_elementwise_op);
|
||||
|
||||
if(!ref_reduce.IsSupportedArgument(ref_reduce_argument.get()))
|
||||
{
|
||||
throw std::runtime_error(
|
||||
"wrong! RefReduceInstance with the specified compilation parameters does "
|
||||
"not support this runtime parameters!");
|
||||
};
|
||||
|
||||
ref_reduce_invoker->Run(ref_reduce_argument.get());
|
||||
|
||||
amax_device.FromDevice(amax_from_device.mData.data());
|
||||
|
||||
std::cout << "\namax: " << amax_from_device.mData[0] << std::endl;
|
||||
std::cout << "amax_ref: " << amax_host.mData[0] << std::endl;
|
||||
|
||||
return ck::utils::check_err(amax_from_device, amax_host, "Error: incorrect AMAX results!");
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
@@ -0,0 +1,82 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#include "convnd_fwd_convscale_reduce_common.hpp"

#include "ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_abd_xdl_cshuffle.hpp"

using InDataType = ck::f8_t;
using WeiDataType = ck::f8_t;
using AccDataType = float;
using CShuffleDataType = float;
using ConvOutDataType = float; // data type of convolution result
using OutDataType = ck::f8_t;  // data type of final result
using AComputeDataType = ck::f8_t;
using BComputeDataType = ck::f8_t;

template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using InElementOp  = PassThrough;
using WeiElementOp = PassThrough;
using OutElementOp = ConvScaleRelu;

static constexpr auto ConvSpec =
    ck::tensor_operation::device::ConvolutionForwardSpecialization::Default;

static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding;

template <ck::index_t NDimSpatial, typename InLayout, typename WeiLayout, typename OutLayout>
|
||||
using DeviceGroupedConvNDFwdInstance =
|
||||
ck::tensor_operation::device::DeviceGroupedConvFwdMultipleABD_Xdl_CShuffle<
|
||||
NDimSpatial,
|
||||
InLayout,
|
||||
WeiLayout,
|
||||
ck::Tuple<>,
|
||||
OutLayout,
|
||||
InDataType,
|
||||
WeiDataType,
|
||||
AccDataType,
|
||||
CShuffleDataType,
|
||||
ck::Tuple<>,
|
||||
ConvOutDataType,
|
||||
InElementOp,
|
||||
WeiElementOp,
|
||||
OutElementOp,
|
||||
ConvSpec, // ConvForwardSpecialization
|
||||
GemmSpec, // GemmSpecialization
|
||||
1, //
|
||||
256, // BlockSize
|
||||
128, // MPerBlock
|
||||
256, // NPerBlock
|
||||
32, // KPerBlock
|
||||
8, // AK1
|
||||
8, // BK1
|
||||
32, // MPerXdl
|
||||
32, // NPerXdl
|
||||
2, // MXdlPerWave
|
||||
4, // NXdlPerWave
|
||||
S<4, 64, 1>, // ABlockTransferThreadClusterLengths_AK0_M_AK1
|
||||
S<1, 0, 2>, // ABlockTransferThreadClusterArrangeOrder
|
||||
S<1, 0, 2>, // ABlockTransferSrcAccessOrder
|
||||
2, // ABlockTransferSrcVectorDim
|
||||
8, // ABlockTransferSrcScalarPerVector
|
||||
8, // ABlockTransferDstScalarPerVector_AK1
|
||||
1, // ABlockLdsExtraM
|
||||
S<4, 64, 1>, // BBlockTransferThreadClusterLengths_BK0_N_BK1
|
||||
S<1, 0, 2>, // BBlockTransferThreadClusterArrangeOrder
|
||||
S<1, 0, 2>, // BBlockTransferSrcAccessOrder
|
||||
2, // BBlockTransferSrcVectorDim
|
||||
8, // BBlockTransferSrcScalarPerVector
|
||||
8, // BBlockTransferDstScalarPerVector_BK1
|
||||
1, // BBlockLdsExtraN
|
||||
1,
|
||||
1,
|
||||
S<1, 32, 1, 8>,
|
||||
8,
|
||||
AComputeDataType,
|
||||
BComputeDataType>;
|
||||
|
||||
#include "run_convnd_fwd_example.inc"
|
||||
|
||||
int main(int argc, char* argv[]) { return run_convnd_fwd_example(argc, argv) ? 0 : 1; }
|
||||
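// Sketch (an assumption for illustration, not code from this change): OutElementOp =
// ConvScaleRelu is a UnaryCombinedOp of two Scale ops and a Relu, so each f32
// convolution result x is post-processed roughly as
//
//   float y = std::max(x * scale_a * scale_b, 0.f); // two scales, then ReLU
//
// where scale_a / scale_b stand for the hypothetical scaling factors carried by the
// two Scale ops. Conversion of the f32 result to the final f8 OutDataType and the
// AMAX reduction over |y| are handled by the separate elementwise/reduce passes
// added elsewhere in this change.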
@@ -0,0 +1,98 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

bool run_convnd_fwd_example(int argc, char* argv[])
{
    print_helper_msg();

    bool do_verification = true;
    int init_method      = 1;
    bool time_kernel     = false;

    ck::utils::conv::ConvParam conv_param{
        2, 1, 128, 256, 192, {3, 3}, {71, 71}, {2, 2}, {1, 1}, {1, 1}, {1, 1}};

    if(argc == 1)
    {
        // use default
    }
    else if(argc == 4)
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);
    }
    else
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);
        const ck::index_t num_dim_spatial = std::stoi(argv[4]);

        conv_param = ck::utils::conv::parse_conv_param(num_dim_spatial, 5, argv);
    }

    // Instantiate the input and weight elementwise ops here; out_element_op is
    // instantiated below, once for every iteration.
    const auto in_element_op  = InElementOp{};
    const auto wei_element_op = WeiElementOp{};

    const auto run = [&](auto ndim_spatial, auto in_layout, auto wei_layout, auto out_layout) {
        constexpr ck::index_t ndim_spatial_value = ndim_spatial.value;

        using InLayout  = decltype(in_layout);
        using WeiLayout = decltype(wei_layout);
        using OutLayout = decltype(out_layout);

        const auto in_g_n_c_wis_desc =
            ck::utils::conv::make_input_host_tensor_descriptor_g_n_c_wis_packed<InLayout>(
                conv_param);

        const auto wei_g_k_c_xs_desc =
            ck::utils::conv::make_weight_host_tensor_descriptor_g_k_c_xs_packed<WeiLayout>(
                conv_param);

        const auto out_g_n_k_wos_desc =
            ck::utils::conv::make_output_host_tensor_descriptor_g_n_k_wos_packed<OutLayout>(
                conv_param);

        return run_grouped_conv_fwd<
            ndim_spatial_value,
            InDataType,
            WeiDataType,
            ConvOutDataType,
            OutDataType,
            InElementOp,
            WeiElementOp,
            OutElementOp,
            DeviceGroupedConvNDFwdInstance<ndim_spatial_value, InLayout, WeiLayout, OutLayout>>(
            do_verification,
            init_method,
            time_kernel,
            conv_param,
            in_g_n_c_wis_desc,
            wei_g_k_c_xs_desc,
            out_g_n_k_wos_desc,
            in_element_op,
            wei_element_op);
    };

    namespace ctc = ck::tensor_layout::convolution;

    if(conv_param.num_dim_spatial_ == 1)
    {
        return run(ck::Number<1>{}, ctc::GNWC{}, ctc::GKXC{}, ctc::GNWK{});
    }
    else if(conv_param.num_dim_spatial_ == 2)
    {
        return run(ck::Number<2>{}, ctc::GNHWC{}, ctc::GKYXC{}, ctc::GNHWK{});
    }
    else if(conv_param.num_dim_spatial_ == 3)
    {
        return run(ck::Number<3>{}, ctc::GNDHWC{}, ctc::GKZYXC{}, ctc::GNDHWK{});
    }

    return true;
}
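// Illustrative usage (the binary name is a placeholder, not taken from this change):
//   ./<example_binary>            # run the built-in default 2D problem
//   ./<example_binary> 1 2 1      # do_verification, init_method, time_kernel
//   ./<example_binary> 1 2 1 3 <conv params...>
//       # additionally supplies num_dim_spatial and the problem description
//       # parsed by ck::utils::conv::parse_conv_param, as in the branches above.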
@@ -184,6 +184,43 @@ using device_grouped_conv_fwd_xdl_outelementop_bf8_f8_instances = std::tuple<
    // clang-format on
    >;

template <index_t NDimSpatial,
          typename ALayout,
          typename BLayout,
          typename DsLayout,
          typename ELayout,
          ConvolutionForwardSpecialization ConvSpec,
          typename OutElementOp>
using device_grouped_conv_fwd_xdl_outelementop_f8_f8_f32_instances = std::tuple<
    // clang-format off
//########################################| NumDim| A| B| Ds| E| AData| BData| AccData| CShuffle| Ds| EData| A| B| CDE| ConvForward| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer| Compute| Compute|
|
||||
//########################################| Spatial| Layout| Layout| Layout| Layout| Type| Type| Type| DataType| DataType| Type| Elementwise| Elementwise| Elementwise| Specialization| Specialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector| TypeA| TypeB|
|
||||
//########################################| | | | | | | | | | | | Operation| Operation| Operation| | | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl| | |
|
||||
//########################################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
|
||||
#ifdef CK_ENABLE_FP8
|
||||
// generic instance
|
||||
DeviceGroupedConvFwdMultipleABD_Xdl_CShuffle<NDimSpatial,ALayout,BLayout, DsLayout,ELayout, F8, F8, F32, F32, Tuple<>, F32, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmMNKPadding, 1, 64, 64, 64, 32, 8, 8, 32, 32, 2, 2, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 8, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 8, 1, 1, 1, S<1, 16, 1, 4>, 1, F8, F8>,
|
||||
// instances for small conv.K and conv.C
|
||||
DeviceGroupedConvFwdMultipleABD_Xdl_CShuffle<NDimSpatial,ALayout,BLayout, DsLayout,ELayout, F8, F8, F32, F32, Tuple<>, F32, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmMNKPadding, 1, 64, 64, 32, 32, 8, 8, 32, 32, 2, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 4>, 1, F8, F8>,
|
||||
DeviceGroupedConvFwdMultipleABD_Xdl_CShuffle<NDimSpatial,ALayout,BLayout, DsLayout,ELayout, F8, F8, F32, F32, Tuple<>, F32, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmMNKPadding, 1, 256, 128, 128, 32, 8, 8, 32, 32, 2, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 8, 1, 1, 1, S<1, 32, 1, 8>, 8, F8, F8>,
|
||||
|
||||
DeviceGroupedConvFwdMultipleABD_Xdl_CShuffle<NDimSpatial,ALayout,BLayout, DsLayout,ELayout, F8, F8, F32, F32, Tuple<>, F32, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmMNKPadding, 1, 256, 256, 128, 32, 8, 8, 32, 32, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8, F8, F8>,
|
||||
DeviceGroupedConvFwdMultipleABD_Xdl_CShuffle<NDimSpatial,ALayout,BLayout, DsLayout,ELayout, F8, F8, F32, F32, Tuple<>, F32, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmMNKPadding, 1, 256, 128, 256, 32, 8, 8, 32, 32, 2, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8, F8, F8>,
|
||||
DeviceGroupedConvFwdMultipleABD_Xdl_CShuffle<NDimSpatial,ALayout,BLayout, DsLayout,ELayout, F8, F8, F32, F32, Tuple<>, F32, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmMNKPadding, 1, 128, 128, 128, 32, 8, 8, 32, 32, 4, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 8>, 8, F8, F8>,
|
||||
DeviceGroupedConvFwdMultipleABD_Xdl_CShuffle<NDimSpatial,ALayout,BLayout, DsLayout,ELayout, F8, F8, F32, F32, Tuple<>, F32, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmMNKPadding, 1, 256, 128, 128, 32, 8, 8, 32, 32, 2, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8, F8, F8>,
|
||||
DeviceGroupedConvFwdMultipleABD_Xdl_CShuffle<NDimSpatial,ALayout,BLayout, DsLayout,ELayout, F8, F8, F32, F32, Tuple<>, F32, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmMNKPadding, 1, 128, 128, 64, 32, 8, 8, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 4>, 8, F8, F8>,
|
||||
DeviceGroupedConvFwdMultipleABD_Xdl_CShuffle<NDimSpatial,ALayout,BLayout, DsLayout,ELayout, F8, F8, F32, F32, Tuple<>, F32, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmMNKPadding, 1, 128, 64, 128, 32, 8, 8, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 8>, 8, F8, F8>,
|
||||
DeviceGroupedConvFwdMultipleABD_Xdl_CShuffle<NDimSpatial,ALayout,BLayout, DsLayout,ELayout, F8, F8, F32, F32, Tuple<>, F32, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmMNKPadding, 1, 64, 64, 64, 32, 8, 8, 32, 32, 2, 2, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 4>, 8, F8, F8>,
|
||||
DeviceGroupedConvFwdMultipleABD_Xdl_CShuffle<NDimSpatial,ALayout,BLayout, DsLayout,ELayout, F8, F8, F32, F32, Tuple<>, F32, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmMNKPadding, 1, 256, 128, 64, 32, 8, 8, 32, 32, 2, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8, F8, F8>,
|
||||
DeviceGroupedConvFwdMultipleABD_Xdl_CShuffle<NDimSpatial,ALayout,BLayout, DsLayout,ELayout, F8, F8, F32, F32, Tuple<>, F32, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmMNKPadding, 1, 256, 64, 128, 32, 8, 8, 32, 32, 1, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8, F8, F8>,
|
||||
DeviceGroupedConvFwdMultipleABD_Xdl_CShuffle<NDimSpatial,ALayout,BLayout, DsLayout,ELayout, F8, F8, F32, F32, Tuple<>, F32, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmMNKPadding, 1, 128, 128, 32, 32, 8, 8, 32, 32, 2, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 4>, 8, F8, F8>,
|
||||
DeviceGroupedConvFwdMultipleABD_Xdl_CShuffle<NDimSpatial,ALayout,BLayout, DsLayout,ELayout, F8, F8, F32, F32, Tuple<>, F32, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmMNKPadding, 1, 128, 32, 128, 32, 8, 8, 32, 32, 1, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 8>, 8, F8, F8>,
|
||||
DeviceGroupedConvFwdMultipleABD_Xdl_CShuffle<NDimSpatial,ALayout,BLayout, DsLayout,ELayout, F8, F8, F32, F32, Tuple<>, F32, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmMNKPadding, 1, 64, 64, 32, 32, 8, 8, 32, 32, 2, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 4>, 8, F8, F8>,
|
||||
DeviceGroupedConvFwdMultipleABD_Xdl_CShuffle<NDimSpatial,ALayout,BLayout, DsLayout,ELayout, F8, F8, F32, F32, Tuple<>, F32, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmMNKPadding, 1, 64, 32, 64, 32, 8, 8, 32, 32, 1, 2, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 4>, 8, F8, F8>
|
||||
#endif
    // clang-format on
    >;

} // namespace instance
} // namespace device
} // namespace tensor_operation
@@ -8,6 +8,7 @@

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/device_grouped_conv_fwd_multiple_abd.hpp"
#include "ck/tensor_operation/gpu/element/combined_element_wise_operation.hpp"
#include "ck/tensor_operation/gpu/element/unary_element_wise_operation.hpp"
#include "ck/library/tensor_operation_instance/device_operation_instance_factory.hpp"
@@ -99,6 +100,89 @@ struct DeviceOperationInstanceFactory<
    }
};

namespace ew = ck::tensor_operation::element_wise;
using CombConvScaleRelu = ew::UnaryCombinedOp<ew::Scale, ew::Scale, ew::Relu>;

#ifdef CK_ENABLE_FP8
void add_device_grouped_conv3d_fwd_xdl_combconvscale_relu_ndhwgc_gkzyxc_ndhwgk_f8_f8_f32_instances(
    std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleABD<3,
                                                                NDHWGC,
                                                                GKZYXC,
                                                                ck::Tuple<>,
                                                                NDHWGK,
                                                                F8,
                                                                F8,
                                                                ck::Tuple<>,
                                                                F32,
                                                                PassThrough,
                                                                PassThrough,
                                                                CombConvScaleRelu,
                                                                F8,
                                                                F8>>>& instances);
#endif

template <ck::index_t NumDimSpatial,
          typename InLayout,
          typename WeiLayout,
          typename DLayouts,
          typename OutLayout,
          typename InDataType,
          typename WeiDataType,
          typename DDataTypes,
          typename OutDataType,
          typename AComputeType,
          typename BComputeType>
struct DeviceOperationInstanceFactory<
    ck::tensor_operation::device::DeviceGroupedConvFwdMultipleABD<NumDimSpatial,
                                                                  InLayout,
                                                                  WeiLayout,
                                                                  DLayouts,
                                                                  OutLayout,
                                                                  InDataType,
                                                                  WeiDataType,
                                                                  DDataTypes,
                                                                  OutDataType,
                                                                  PassThrough,
                                                                  PassThrough,
                                                                  CombConvScaleRelu,
                                                                  AComputeType,
                                                                  BComputeType>>
{
    using DeviceOp = DeviceGroupedConvFwdMultipleABD<NumDimSpatial,
                                                     InLayout,
                                                     WeiLayout,
                                                     DLayouts,
                                                     OutLayout,
                                                     InDataType,
                                                     WeiDataType,
                                                     DDataTypes,
                                                     OutDataType,
                                                     PassThrough,
                                                     PassThrough,
                                                     CombConvScaleRelu,
                                                     AComputeType,
                                                     BComputeType>;

    static auto GetInstances()
    {
        std::vector<std::unique_ptr<DeviceOp>> op_ptrs;
        if constexpr(NumDimSpatial == 3 && is_same_v<InLayout, NDHWGC> &&
                     is_same_v<WeiLayout, GKZYXC> && is_same_v<OutLayout, NDHWGK>)
        {
#ifdef CK_ENABLE_FP8
            if constexpr(is_same_v<InDataType, f8_t> && is_same_v<WeiDataType, f8_t> &&
                         is_same_v<OutDataType, F32> && is_same_v<AComputeType, f8_t> &&
                         is_same_v<BComputeType, f8_t>)
            {
                add_device_grouped_conv3d_fwd_xdl_combconvscale_relu_ndhwgc_gkzyxc_ndhwgk_f8_f8_f32_instances(
                    op_ptrs);
            }
#endif
        }
        return op_ptrs;
    }
};

} // namespace instance
} // namespace device
} // namespace tensor_operation
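// Minimal client-side sketch (illustrative only, not part of this change) of how the
// factory specialization above is typically consumed; the problem description passed
// to MakeArgumentPointer is a placeholder the caller must provide.
//
//   using DeviceOp = ck::tensor_operation::device::DeviceGroupedConvFwdMultipleABD<
//       3, NDHWGC, GKZYXC, ck::Tuple<>, NDHWGK, F8, F8, ck::Tuple<>, F32,
//       PassThrough, PassThrough, CombConvScaleRelu, F8, F8>;
//
//   const auto op_ptrs = ck::tensor_operation::device::instance::
//       DeviceOperationInstanceFactory<DeviceOp>::GetInstances();
//
//   for(const auto& op_ptr : op_ptrs)
//   {
//       auto argument_ptr = op_ptr->MakeArgumentPointer(/* problem description ... */);
//       if(op_ptr->IsSupportedArgument(argument_ptr.get()))
//           op_ptr->MakeInvokerPointer()->Run(argument_ptr.get(), StreamConfig{});
//   }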
@@ -70,6 +70,12 @@ void add_device_permute_scale_6d_f32_instances(
        DeviceElementwise<ck::Tuple<F32>, ck::Tuple<F32>, element_wise::Scale, 6>>>&);
#endif

#ifdef CK_ENABLE_FP8
void add_device_permute_scale_6d_f32_f8_instances(
    std::vector<std::unique_ptr<
        DeviceElementwise<ck::Tuple<F32>, ck::Tuple<F8>, element_wise::Scale, 6>>>&);
#endif

template <typename InDataTypeTuple,
          typename OutDataTypeTuple,
          typename ElementwiseOperation,
@@ -184,6 +190,13 @@ struct DeviceOperationInstanceFactory<
        {
            add_device_permute_scale_6d_f16_instances(op_ptrs);
        }
#endif
#ifdef CK_ENABLE_FP8
        if constexpr(is_same_v<InDataTypeTuple, ck::Tuple<F32>> &&
                     is_same_v<OutDataTypeTuple, ck::Tuple<F8>>)
        {
            add_device_permute_scale_6d_f32_f8_instances(op_ptrs);
        }
#endif
        }
        return op_ptrs;
@@ -10,6 +10,7 @@ namespace tensor_operation {
namespace device {
namespace instance {

using F8  = ck::f8_t;
using F16 = ck::half_t;
using F32 = float;
@@ -183,6 +184,51 @@ using device_permute_scale_f32_instances = std::tuple<
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F32>, ElementwiseOp, NDims, 32, 32, 16, 4, 4, ck::Sequence<1, 0>, ck::Sequence<1>, ck::Sequence<1>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F32>, ElementwiseOp, NDims, 32, 16, 32, 4, 4, ck::Sequence<1, 0>, ck::Sequence<1>, ck::Sequence<1>>
|
||||
>;
|
||||
|
||||
#ifdef CK_ENABLE_FP8
|
||||
template <index_t NDims,
|
||||
typename ElementwiseOp>
|
||||
using device_permute_scale_f32_f8_instances = std::tuple<
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 256, 64, 64, 4, 4, ck::Sequence<1, 0>, ck::Sequence<4>, ck::Sequence<4>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 256, 128, 32, 4, 4, ck::Sequence<1, 0>, ck::Sequence<4>, ck::Sequence<4>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 256, 32, 128, 4, 4, ck::Sequence<1, 0>, ck::Sequence<4>, ck::Sequence<4>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 128, 64, 32, 4, 4, ck::Sequence<1, 0>, ck::Sequence<4>, ck::Sequence<4>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 128, 32, 64, 4, 4, ck::Sequence<1, 0>, ck::Sequence<4>, ck::Sequence<4>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 128, 16, 128, 4, 4, ck::Sequence<1, 0>, ck::Sequence<4>, ck::Sequence<4>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 128, 128, 16, 4, 4, ck::Sequence<1, 0>, ck::Sequence<4>, ck::Sequence<4>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 64, 32, 32, 4, 4, ck::Sequence<1, 0>, ck::Sequence<4>, ck::Sequence<4>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 64, 16, 64, 4, 4, ck::Sequence<1, 0>, ck::Sequence<4>, ck::Sequence<4>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 64, 64, 16, 4, 4, ck::Sequence<1, 0>, ck::Sequence<4>, ck::Sequence<4>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 32, 32, 16, 4, 4, ck::Sequence<1, 0>, ck::Sequence<4>, ck::Sequence<4>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 32, 16, 32, 4, 4, ck::Sequence<1, 0>, ck::Sequence<4>, ck::Sequence<4>>,
|
||||
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 256, 128, 128, 8, 8, ck::Sequence<1, 0>, ck::Sequence<8>, ck::Sequence<8>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 256, 256, 64, 8, 8, ck::Sequence<1, 0>, ck::Sequence<8>, ck::Sequence<8>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 256, 64, 256, 8, 8, ck::Sequence<1, 0>, ck::Sequence<8>, ck::Sequence<8>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 128, 128, 64, 8, 8, ck::Sequence<1, 0>, ck::Sequence<8>, ck::Sequence<8>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 128, 64, 128, 8, 8, ck::Sequence<1, 0>, ck::Sequence<8>, ck::Sequence<8>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 128, 32, 256, 8, 8, ck::Sequence<1, 0>, ck::Sequence<8>, ck::Sequence<8>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 128, 256, 32, 8, 8, ck::Sequence<1, 0>, ck::Sequence<8>, ck::Sequence<8>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 64, 64, 64, 8, 8, ck::Sequence<1, 0>, ck::Sequence<8>, ck::Sequence<8>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 64, 32, 128, 8, 8, ck::Sequence<1, 0>, ck::Sequence<8>, ck::Sequence<8>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 64, 128, 32, 8, 8, ck::Sequence<1, 0>, ck::Sequence<8>, ck::Sequence<8>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 32, 64, 32, 8, 8, ck::Sequence<1, 0>, ck::Sequence<8>, ck::Sequence<8>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 32, 32, 64, 8, 8, ck::Sequence<1, 0>, ck::Sequence<8>, ck::Sequence<8>>,
|
||||
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 256, 64, 64, 4, 4, ck::Sequence<1, 0>, ck::Sequence<1>, ck::Sequence<1>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 256, 128, 32, 4, 4, ck::Sequence<1, 0>, ck::Sequence<1>, ck::Sequence<1>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 256, 32, 128, 4, 4, ck::Sequence<1, 0>, ck::Sequence<1>, ck::Sequence<1>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 128, 64, 32, 4, 4, ck::Sequence<1, 0>, ck::Sequence<1>, ck::Sequence<1>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 128, 32, 64, 4, 4, ck::Sequence<1, 0>, ck::Sequence<1>, ck::Sequence<1>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 128, 16, 128, 4, 4, ck::Sequence<1, 0>, ck::Sequence<1>, ck::Sequence<1>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 128, 128, 16, 4, 4, ck::Sequence<1, 0>, ck::Sequence<1>, ck::Sequence<1>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 64, 32, 32, 4, 4, ck::Sequence<1, 0>, ck::Sequence<1>, ck::Sequence<1>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 64, 16, 64, 4, 4, ck::Sequence<1, 0>, ck::Sequence<1>, ck::Sequence<1>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 64, 64, 16, 4, 4, ck::Sequence<1, 0>, ck::Sequence<1>, ck::Sequence<1>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 32, 32, 16, 4, 4, ck::Sequence<1, 0>, ck::Sequence<1>, ck::Sequence<1>>,
|
||||
DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F8>, ElementwiseOp, NDims, 32, 16, 32, 4, 4, ck::Sequence<1, 0>, ck::Sequence<1>, ck::Sequence<1>>
|
||||
>;
|
||||
#endif
|
||||
// clang-format on
|
||||
|
||||
} // namespace instance
|
||||
|
||||
@@ -14,15 +14,24 @@ namespace device {
|
||||
namespace instance {
|
||||
|
||||
// clang-format off
|
||||
// InDataType | AccDataType | OutDataType | Rank | NumReduceDim | ReduceOperation | InElementwiseOp | AccElementwiseOp | PropagateNan | UseIndex
|
||||
extern template void add_device_reduce_instance_blockwise<F32, F32, F32, 4, 3, ReduceAMax, UnaryAbs, PassThrough, false, false>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 3, ReduceAMax, UnaryAbs, PassThrough, false, false>>&);
|
||||
extern template void add_device_reduce_instance_blockwise<F32, F32, F32, 4, 4, ReduceAMax, UnaryAbs, PassThrough, false, false>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 4, ReduceAMax, UnaryAbs, PassThrough, false, false>>&);
|
||||
extern template void add_device_reduce_instance_blockwise<F32, F32, F32, 4, 1, ReduceAMax, UnaryAbs, PassThrough, false, false>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 1, ReduceAMax, UnaryAbs, PassThrough, false, false>>&);
|
||||
extern template void add_device_reduce_instance_blockwise<F32, F32, F32, 2, 1, ReduceAMax, UnaryAbs, PassThrough, false, false>(std::vector<DeviceReducePtr<F32, F32, F32, 2, 1, ReduceAMax, UnaryAbs, PassThrough, false, false>>&);
|
||||
extern template void add_device_reduce_instance_blockwise<F32, F32, F32, 4, 3, ReduceAMax, UnaryAbs, PassThrough, false, true>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 3, ReduceAMax, UnaryAbs, PassThrough, false, true>>&);
|
||||
extern template void add_device_reduce_instance_blockwise<F32, F32, F32, 4, 4, ReduceAMax, UnaryAbs, PassThrough, false, true>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 4, ReduceAMax, UnaryAbs, PassThrough, false, true>>&);
|
||||
extern template void add_device_reduce_instance_blockwise<F32, F32, F32, 4, 1, ReduceAMax, UnaryAbs, PassThrough, false, true>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 1, ReduceAMax, UnaryAbs, PassThrough, false, true>>&);
|
||||
extern template void add_device_reduce_instance_blockwise<F32, F32, F32, 2, 1, ReduceAMax, UnaryAbs, PassThrough, false, true>(std::vector<DeviceReducePtr<F32, F32, F32, 2, 1, ReduceAMax, UnaryAbs, PassThrough, false, true>>&);
|
||||
// InDataType | AccDataType | OutDataType | Rank | NumReduceDim | ReduceOperation | InElementwiseOp | AccElementwiseOp | PropagateNan | UseIndex
|
||||
extern template void add_device_reduce_instance_blockwise< F32, F32, F32, 4, 3, ReduceAMax, UnaryAbs, PassThrough, false, false>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 3, ReduceAMax, UnaryAbs, PassThrough, false, false>>&);
|
||||
extern template void add_device_reduce_instance_blockwise< F32, F32, F32, 4, 4, ReduceAMax, UnaryAbs, PassThrough, false, false>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 4, ReduceAMax, UnaryAbs, PassThrough, false, false>>&);
|
||||
extern template void add_device_reduce_instance_blockwise< F32, F32, F32, 4, 1, ReduceAMax, UnaryAbs, PassThrough, false, false>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 1, ReduceAMax, UnaryAbs, PassThrough, false, false>>&);
|
||||
extern template void add_device_reduce_instance_blockwise< F32, F32, F32, 2, 1, ReduceAMax, UnaryAbs, PassThrough, false, false>(std::vector<DeviceReducePtr<F32, F32, F32, 2, 1, ReduceAMax, UnaryAbs, PassThrough, false, false>>&);
|
||||
extern template void add_device_reduce_instance_blockwise< F32, F32, F32, 4, 3, ReduceAMax, UnaryAbs, PassThrough, false, true>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 3, ReduceAMax, UnaryAbs, PassThrough, false, true>>&);
|
||||
extern template void add_device_reduce_instance_blockwise< F32, F32, F32, 4, 4, ReduceAMax, UnaryAbs, PassThrough, false, true>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 4, ReduceAMax, UnaryAbs, PassThrough, false, true>>&);
|
||||
extern template void add_device_reduce_instance_blockwise< F32, F32, F32, 4, 1, ReduceAMax, UnaryAbs, PassThrough, false, true>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 1, ReduceAMax, UnaryAbs, PassThrough, false, true>>&);
|
||||
extern template void add_device_reduce_instance_blockwise< F32, F32, F32, 2, 1, ReduceAMax, UnaryAbs, PassThrough, false, true>(std::vector<DeviceReducePtr<F32, F32, F32, 2, 1, ReduceAMax, UnaryAbs, PassThrough, false, true>>&);
|
||||
extern template void add_device_reduce_instance_blockwise< F32, F32, F32, 6, 6, ReduceAMax, UnaryAbs, PassThrough, true, false>(std::vector<DeviceReducePtr<F32, F32, F32, 6, 6, ReduceAMax, UnaryAbs, PassThrough, true, false>>&);
|
||||
extern template void add_device_reduce_instance_blockwise< F32, F32, F32, 5, 5, ReduceAMax, UnaryAbs, PassThrough, true, false>(std::vector<DeviceReducePtr<F32, F32, F32, 5, 5, ReduceAMax, UnaryAbs, PassThrough, true, false>>&);
|
||||
extern template void add_device_reduce_instance_blockwise< F32, F32, F32, 4, 4, ReduceAMax, UnaryAbs, PassThrough, true, false>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 4, ReduceAMax, UnaryAbs, PassThrough, true, false>>&);
|
||||
extern template void add_device_reduce_instance_blockwise< F32, F32, F32, 6, 3, ReduceAMax, UnaryAbs, PassThrough, true, false>(std::vector<DeviceReducePtr<F32, F32, F32, 6, 3, ReduceAMax, UnaryAbs, PassThrough, true, false>>&);
|
||||
extern template void add_device_reduce_instance_blockwise< F32, F32, F32, 5, 3, ReduceAMax, UnaryAbs, PassThrough, true, false>(std::vector<DeviceReducePtr<F32, F32, F32, 5, 3, ReduceAMax, UnaryAbs, PassThrough, true, false>>&);
|
||||
extern template void add_device_reduce_instance_blockwise< F32, F32, F32, 4, 3, ReduceAMax, UnaryAbs, PassThrough, true, false>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 3, ReduceAMax, UnaryAbs, PassThrough, true, false>>&);
|
||||
extern template void add_device_reduce_instance_blockwise< F32, F32, F32, 3, 3, ReduceAMax, PassThrough, PassThrough, true, false>(std::vector<DeviceReducePtr<F32, F32, F32, 3, 3, ReduceAMax, PassThrough, PassThrough, true, false>>&);
|
||||
extern template void add_device_reduce_instance_blockwise< F32, F32, F32, 2, 2, ReduceAMax, PassThrough, PassThrough, true, false>(std::vector<DeviceReducePtr<F32, F32, F32, 2, 2, ReduceAMax, PassThrough, PassThrough, true, false>>&);
|
||||
extern template void add_device_reduce_instance_blockwise< F32, F32, F32, 1, 1, ReduceAMax, PassThrough, PassThrough, true, false>(std::vector<DeviceReducePtr<F32, F32, F32, 1, 1, ReduceAMax, PassThrough, PassThrough, true, false>>&);
|
||||
// clang-format on
|
||||
|
||||
} // namespace instance
|
||||
|
||||
@@ -3,6 +3,7 @@

#include "ck/library/tensor_operation_instance/gpu/grouped_conv_fwd/device_grouped_conv_fwd_xdl_outelementop_instance.hpp"
#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp"
#include "ck/tensor_operation/gpu/element/combined_element_wise_operation.hpp"
#include "ck/tensor_operation/gpu/element/unary_element_wise_operation.hpp"

namespace ck {
@@ -57,6 +58,54 @@ void add_device_grouped_conv3d_fwd_xdl_convscale_relu_ndhwgc_gkzyxc_ndhwgk_f8_in
                                                                     ConvScaleRelu>{});
}

namespace ew = ck::tensor_operation::element_wise;
using CombConvScaleRelu = ew::UnaryCombinedOp<ew::Scale, ew::Scale, ew::Relu>;

void add_device_grouped_conv3d_fwd_xdl_combconvscale_relu_ndhwgc_gkzyxc_ndhwgk_f8_f8_f32_instances(
    std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleABD<3,
                                                                NDHWGC,
                                                                GKZYXC,
                                                                ck::Tuple<>,
                                                                NDHWGK,
                                                                F8,
                                                                F8,
                                                                ck::Tuple<>,
                                                                F32,
                                                                PassThrough,
                                                                PassThrough,
                                                                CombConvScaleRelu,
                                                                F8,
                                                                F8>>>& instances)
{
    add_device_operation_instances(
        instances,
        device_grouped_conv_fwd_xdl_outelementop_f8_f8_f32_instances<3,
                                                                     NDHWGC,
                                                                     GKZYXC,
                                                                     ck::Tuple<>,
                                                                     NDHWGK,
                                                                     ConvFwdDefault,
                                                                     CombConvScaleRelu>{});
    add_device_operation_instances(
        instances,
        device_grouped_conv_fwd_xdl_outelementop_f8_f8_f32_instances<3,
                                                                     NDHWGC,
                                                                     GKZYXC,
                                                                     ck::Tuple<>,
                                                                     NDHWGK,
                                                                     ConvFwd1x1P0,
                                                                     CombConvScaleRelu>{});
    add_device_operation_instances(
        instances,
        device_grouped_conv_fwd_xdl_outelementop_f8_f8_f32_instances<3,
                                                                     NDHWGC,
                                                                     GKZYXC,
                                                                     ck::Tuple<>,
                                                                     NDHWGK,
                                                                     ConvFwd1x1S1P0,
                                                                     CombConvScaleRelu>{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
@@ -1,4 +1,4 @@
add_instance_library(device_permute_scale_instance
add_instance_library(device_permute_scale_instance
    device_permute_scale_1d_fp16_instances.cpp
    device_permute_scale_2d_fp16_instances.cpp
    device_permute_scale_3d_fp16_instances.cpp
@@ -10,4 +10,5 @@ add_instance_library(device_permute_scale_instance
    device_permute_scale_3d_fp32_instances.cpp
    device_permute_scale_4d_fp32_instances.cpp
    device_permute_scale_5d_fp32_instances.cpp
    device_permute_scale_6d_fp32_instances.cpp)
    device_permute_scale_6d_fp32_instances.cpp
    device_permute_scale_6d_fp32_fp8_instances.cpp)
@@ -0,0 +1,28 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#include "ck/library/tensor_operation_instance/gpu/permute_scale/device_permute_scale_instances.hpp"
#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

using Scale = element_wise::Scale;

void add_device_permute_scale_6d_f32_f8_instances(
    std::vector<std::unique_ptr<DeviceElementwise<ck::Tuple<F32>, ck::Tuple<F8>, Scale, 6>>>&
        instances)
{
#ifdef CK_ENABLE_FP8
    add_device_operation_instances(instances, device_permute_scale_f32_f8_instances<6, Scale>{});
#else
    ignore = instances;
#endif
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
@@ -10,15 +10,24 @@ namespace device {
|
||||
namespace instance {
|
||||
|
||||
// clang-format off
|
||||
// InDataType | AccDataType | OutDataType | Rank | NumReduceDim | ReduceOperation | InElementwiseOp | AccElementwiseOp | PropagateNan | UseIndex
|
||||
template void add_device_reduce_instance_blockwise<F32, F32, F32, 4, 3, ReduceAMax, UnaryAbs, PassThrough, false, false>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 3, ReduceAMax, UnaryAbs, PassThrough, false, false>>&);
|
||||
template void add_device_reduce_instance_blockwise<F32, F32, F32, 4, 4, ReduceAMax, UnaryAbs, PassThrough, false, false>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 4, ReduceAMax, UnaryAbs, PassThrough, false, false>>&);
|
||||
template void add_device_reduce_instance_blockwise<F32, F32, F32, 4, 1, ReduceAMax, UnaryAbs, PassThrough, false, false>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 1, ReduceAMax, UnaryAbs, PassThrough, false, false>>&);
|
||||
template void add_device_reduce_instance_blockwise<F32, F32, F32, 2, 1, ReduceAMax, UnaryAbs, PassThrough, false, false>(std::vector<DeviceReducePtr<F32, F32, F32, 2, 1, ReduceAMax, UnaryAbs, PassThrough, false, false>>&);
|
||||
template void add_device_reduce_instance_blockwise<F32, F32, F32, 4, 3, ReduceAMax, UnaryAbs, PassThrough, false, true>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 3, ReduceAMax, UnaryAbs, PassThrough, false, true>>&);
|
||||
template void add_device_reduce_instance_blockwise<F32, F32, F32, 4, 4, ReduceAMax, UnaryAbs, PassThrough, false, true>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 4, ReduceAMax, UnaryAbs, PassThrough, false, true>>&);
|
||||
template void add_device_reduce_instance_blockwise<F32, F32, F32, 4, 1, ReduceAMax, UnaryAbs, PassThrough, false, true>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 1, ReduceAMax, UnaryAbs, PassThrough, false, true>>&);
|
||||
template void add_device_reduce_instance_blockwise<F32, F32, F32, 2, 1, ReduceAMax, UnaryAbs, PassThrough, false, true>(std::vector<DeviceReducePtr<F32, F32, F32, 2, 1, ReduceAMax, UnaryAbs, PassThrough, false, true>>&);
|
||||
// InDataType | AccDataType | OutDataType | Rank | NumReduceDim | ReduceOperation | InElementwiseOp | AccElementwiseOp | PropagateNan | UseIndex
|
||||
template void add_device_reduce_instance_blockwise< F32, F32, F32, 4, 3, ReduceAMax, UnaryAbs, PassThrough, false, false>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 3, ReduceAMax, UnaryAbs, PassThrough, false, false>>&);
|
||||
template void add_device_reduce_instance_blockwise< F32, F32, F32, 4, 4, ReduceAMax, UnaryAbs, PassThrough, false, false>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 4, ReduceAMax, UnaryAbs, PassThrough, false, false>>&);
|
||||
template void add_device_reduce_instance_blockwise< F32, F32, F32, 4, 1, ReduceAMax, UnaryAbs, PassThrough, false, false>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 1, ReduceAMax, UnaryAbs, PassThrough, false, false>>&);
|
||||
template void add_device_reduce_instance_blockwise< F32, F32, F32, 2, 1, ReduceAMax, UnaryAbs, PassThrough, false, false>(std::vector<DeviceReducePtr<F32, F32, F32, 2, 1, ReduceAMax, UnaryAbs, PassThrough, false, false>>&);
|
||||
template void add_device_reduce_instance_blockwise< F32, F32, F32, 4, 3, ReduceAMax, UnaryAbs, PassThrough, false, true>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 3, ReduceAMax, UnaryAbs, PassThrough, false, true>>&);
|
||||
template void add_device_reduce_instance_blockwise< F32, F32, F32, 4, 4, ReduceAMax, UnaryAbs, PassThrough, false, true>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 4, ReduceAMax, UnaryAbs, PassThrough, false, true>>&);
|
||||
template void add_device_reduce_instance_blockwise< F32, F32, F32, 4, 1, ReduceAMax, UnaryAbs, PassThrough, false, true>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 1, ReduceAMax, UnaryAbs, PassThrough, false, true>>&);
|
||||
template void add_device_reduce_instance_blockwise< F32, F32, F32, 2, 1, ReduceAMax, UnaryAbs, PassThrough, false, true>(std::vector<DeviceReducePtr<F32, F32, F32, 2, 1, ReduceAMax, UnaryAbs, PassThrough, false, true>>&);
|
||||
template void add_device_reduce_instance_blockwise< F32, F32, F32, 6, 6, ReduceAMax, UnaryAbs, PassThrough, true, false>(std::vector<DeviceReducePtr<F32, F32, F32, 6, 6, ReduceAMax, UnaryAbs, PassThrough, true, false>>&);
|
||||
template void add_device_reduce_instance_blockwise< F32, F32, F32, 5, 5, ReduceAMax, UnaryAbs, PassThrough, true, false>(std::vector<DeviceReducePtr<F32, F32, F32, 5, 5, ReduceAMax, UnaryAbs, PassThrough, true, false>>&);
|
||||
template void add_device_reduce_instance_blockwise< F32, F32, F32, 4, 4, ReduceAMax, UnaryAbs, PassThrough, true, false>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 4, ReduceAMax, UnaryAbs, PassThrough, true, false>>&);
|
||||
template void add_device_reduce_instance_blockwise< F32, F32, F32, 6, 3, ReduceAMax, UnaryAbs, PassThrough, true, false>(std::vector<DeviceReducePtr<F32, F32, F32, 6, 3, ReduceAMax, UnaryAbs, PassThrough, true, false>>&);
|
||||
template void add_device_reduce_instance_blockwise< F32, F32, F32, 5, 3, ReduceAMax, UnaryAbs, PassThrough, true, false>(std::vector<DeviceReducePtr<F32, F32, F32, 5, 3, ReduceAMax, UnaryAbs, PassThrough, true, false>>&);
|
||||
template void add_device_reduce_instance_blockwise< F32, F32, F32, 4, 3, ReduceAMax, UnaryAbs, PassThrough, true, false>(std::vector<DeviceReducePtr<F32, F32, F32, 4, 3, ReduceAMax, UnaryAbs, PassThrough, true, false>>&);
|
||||
template void add_device_reduce_instance_blockwise< F32, F32, F32, 3, 3, ReduceAMax, PassThrough, PassThrough, true, false>(std::vector<DeviceReducePtr<F32, F32, F32, 3, 3, ReduceAMax, PassThrough, PassThrough, true, false>>&);
|
||||
template void add_device_reduce_instance_blockwise< F32, F32, F32, 2, 2, ReduceAMax, PassThrough, PassThrough, true, false>(std::vector<DeviceReducePtr<F32, F32, F32, 2, 2, ReduceAMax, PassThrough, PassThrough, true, false>>&);
|
||||
template void add_device_reduce_instance_blockwise< F32, F32, F32, 1, 1, ReduceAMax, PassThrough, PassThrough, true, false>(std::vector<DeviceReducePtr<F32, F32, F32, 1, 1, ReduceAMax, PassThrough, PassThrough, true, false>>&);
|
||||
// clang-format on
|
||||
|
||||
} // namespace instance
|
||||
|
||||
@@ -136,9 +136,10 @@ bool profile_grouped_conv_bwd_weight_impl(int do_verification,
|
||||
std::cout << "found " << op_ptrs.size() << " instances" << std::endl;
|
||||
|
||||
std::string best_op_name;
|
||||
float best_avg_time = 0;
|
||||
float best_tflops = 0;
|
||||
float best_gb_per_sec = 0;
|
||||
float best_avg_time = 0;
|
||||
float best_tflops = 0;
|
||||
float best_gb_per_sec = 0;
|
||||
ck::index_t best_split_k = 1;
|
||||
|
||||
// profile device Conv instances
|
||||
bool all_pass = true;
|
||||
@@ -167,99 +168,115 @@ bool profile_grouped_conv_bwd_weight_impl(int do_verification,
|
||||
range_copy(conv_param.input_left_pads_, begin(input_left_pads));
|
||||
range_copy(conv_param.input_right_pads_, begin(input_right_pads));
|
||||
|
||||
std::vector<ck::index_t> split_k_list = {1, 2, 4, 8, 16, 32, 64, 128};
|
||||
|
||||
if(split_k > 0)
|
||||
{
|
||||
split_k_list = {split_k};
|
||||
}
|
||||
|
||||
for(auto& op_ptr : op_ptrs)
|
||||
{
|
||||
auto argument_ptr =
|
||||
op_ptr->MakeArgumentPointer(static_cast<InDataType*>(in_device_buf.GetDeviceBuffer()),
|
||||
static_cast<WeiDataType*>(wei_device_buf.GetDeviceBuffer()),
|
||||
static_cast<OutDataType*>(out_device_buf.GetDeviceBuffer()),
|
||||
input_lengths,
|
||||
input_strides,
|
||||
filter_lengths,
|
||||
weights_strides,
|
||||
output_lengths,
|
||||
output_strides,
|
||||
conv_filter_strides,
|
||||
conv_filter_dilations,
|
||||
input_left_pads,
|
||||
input_right_pads,
|
||||
in_element_op,
|
||||
wei_element_op,
|
||||
out_element_op,
|
||||
split_k);
|
||||
|
||||
const std::size_t workspace_sz = op_ptr->GetWorkSpaceSize(argument_ptr.get());
|
||||
DeviceMem workspace_dev(workspace_sz);
|
||||
op_ptr->SetWorkSpacePointer(argument_ptr.get(), workspace_dev.GetDeviceBuffer());
|
||||
|
||||
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
||||
for(std::size_t split_k_id = 0; split_k_id < split_k_list.size(); split_k_id++)
|
||||
{
|
||||
// using atomic add, so need to reset input
|
||||
wei_device_buf.SetZero();
|
||||
auto argument_ptr = op_ptr->MakeArgumentPointer(
|
||||
static_cast<InDataType*>(in_device_buf.GetDeviceBuffer()),
|
||||
static_cast<WeiDataType*>(wei_device_buf.GetDeviceBuffer()),
|
||||
static_cast<OutDataType*>(out_device_buf.GetDeviceBuffer()),
|
||||
input_lengths,
|
||||
input_strides,
|
||||
filter_lengths,
|
||||
weights_strides,
|
||||
output_lengths,
|
||||
output_strides,
|
||||
conv_filter_strides,
|
||||
conv_filter_dilations,
|
||||
input_left_pads,
|
||||
input_right_pads,
|
||||
in_element_op,
|
||||
wei_element_op,
|
||||
out_element_op,
|
||||
split_k_list[split_k_id]);
|
||||
|
||||
std::string op_name = op_ptr->GetTypeString();
|
||||
const std::size_t workspace_sz = op_ptr->GetWorkSpaceSize(argument_ptr.get());
|
||||
DeviceMem workspace_dev(workspace_sz);
|
||||
op_ptr->SetWorkSpacePointer(argument_ptr.get(), workspace_dev.GetDeviceBuffer());
|
||||
|
||||
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
||||
|
||||
float avg_time =
|
||||
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
|
||||
|
||||
std::size_t flop = conv_param.GetFlops();
|
||||
std::size_t num_btype = conv_param.GetByte<InDataType, WeiDataType, OutDataType>();
|
||||
|
||||
float tflops = static_cast<float>(flop) / 1.E9 / avg_time;
|
||||
float gb_per_sec = num_btype / 1.E6 / avg_time;
|
||||
|
||||
std::cout << "Perf: " << std::setw(10) << avg_time << " ms, " << tflops << " TFlops, "
|
||||
<< gb_per_sec << " GB/s, " << op_name << std::endl;
|
||||
|
||||
if(tflops > best_tflops)
|
||||
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
||||
{
|
||||
best_op_name = op_name;
|
||||
best_tflops = tflops;
|
||||
best_avg_time = avg_time;
|
||||
best_gb_per_sec = gb_per_sec;
|
||||
}
|
||||
// using atomic add, so need to reset input
|
||||
wei_device_buf.SetZero();
|
||||
|
||||
if(do_verification)
|
||||
{
|
||||
wei_device_buf.FromDevice(weight_device_result.mData.data());
|
||||
std::string op_name = op_ptr->GetTypeString();
|
||||
|
||||
bool pass = ck::utils::check_err(weight_device_result, weight_host_result);
|
||||
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
||||
|
||||
if(!pass)
|
||||
float avg_time =
|
||||
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
|
||||
|
||||
std::size_t flop = conv_param.GetFlops();
|
||||
std::size_t num_btype = conv_param.GetByte<InDataType, WeiDataType, OutDataType>();
|
||||
|
||||
float tflops = static_cast<float>(flop) / 1.E9 / avg_time;
|
||||
float gb_per_sec = num_btype / 1.E6 / avg_time;
|
||||
|
||||
std::cout << "Perf: " << std::setw(10) << avg_time << " ms, " << tflops
|
||||
<< " TFlops, " << gb_per_sec << " GB/s, " << op_name << ", SplitK "
|
||||
<< split_k_list[split_k_id] << std::endl;
|
||||
|
||||
if(tflops > best_tflops)
|
||||
{
|
||||
std::cout << "Fail info: " << op_ptr->GetTypeString() << std::endl;
|
||||
best_op_name = op_name;
|
||||
best_tflops = tflops;
|
||||
best_avg_time = avg_time;
|
||||
best_gb_per_sec = gb_per_sec;
|
||||
best_split_k = split_k_list[split_k_id];
|
||||
}
|
||||
|
||||
all_pass &= pass;
|
||||
|
||||
if(do_log)
|
||||
if(do_verification)
|
||||
{
|
||||
LogRangeAsType<float>(std::cout << "output : ", output.mData, ",") << std::endl;
|
||||
;
|
||||
LogRangeAsType<float>(
|
||||
std::cout << "weight (device): ", weight_device_result.mData, ",")
|
||||
<< std::endl;
|
||||
;
|
||||
LogRangeAsType<float>(
|
||||
std::cout << "weight (host): ", weight_host_result.mData, ",")
|
||||
<< std::endl;
|
||||
;
|
||||
LogRangeAsType<float>(std::cout << "input: ", input.mData, ",") << std::endl;
|
||||
;
|
||||
wei_device_buf.FromDevice(weight_device_result.mData.data());
|
||||
|
||||
bool pass = ck::utils::check_err(weight_device_result, weight_host_result);
|
||||
|
||||
if(!pass)
|
||||
{
|
||||
std::cout << "Fail info: " << op_ptr->GetTypeString() << std::endl;
|
||||
}
|
||||
|
||||
all_pass &= pass;
|
||||
|
||||
if(do_log)
|
||||
{
|
||||
LogRangeAsType<float>(std::cout << "output : ", output.mData, ",")
|
||||
<< std::endl;
|
||||
;
|
||||
LogRangeAsType<float>(
|
||||
std::cout << "weight (device): ", weight_device_result.mData, ",")
|
||||
<< std::endl;
|
||||
;
|
||||
LogRangeAsType<float>(
|
||||
std::cout << "weight (host): ", weight_host_result.mData, ",")
|
||||
<< std::endl;
|
||||
;
|
||||
LogRangeAsType<float>(std::cout << "input: ", input.mData, ",")
|
||||
<< std::endl;
|
||||
;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
std::cout << op_ptr->GetTypeString() << " does not support this problem" << std::endl;
|
||||
else
|
||||
{
|
||||
std::cout << op_ptr->GetTypeString() << " does not support this problem"
|
||||
<< std::endl;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::cout << "Best configuration parameters:"
|
||||
<< "\nname: " << best_op_name << "\navg_time: " << best_avg_time
|
||||
<< "\ntflops: " << best_tflops << "\nGB/s: " << best_gb_per_sec << std::endl;
|
||||
<< "\ntflops: " << best_tflops << "\nGB/s: " << best_gb_per_sec << ", SplitK "
|
||||
<< best_split_k << std::endl;
|
||||
|
||||
return all_pass;
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.

#include <cstdlib>
#include <initializer_list>
@@ -81,7 +81,6 @@ int profile_grouped_conv_bwd_weight(int argc, char* argv[])
    const auto params = ck::utils::conv::parse_conv_param(num_dim_spatial, 9, argv);

    ck::index_t split_k = std::stoi(argv[8 + 1 + 4 + 6 * num_dim_spatial]);
    split_k = std::max(1, split_k);

    using F32 = float;
    using F16 = ck::half_t;
script/convert_miopen_driver_to_profiler.py (new file, 386 lines)
@@ -0,0 +1,386 @@
# SPDX-License-Identifier: MIT
# Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
# Convert miopen driver command to ck Profiler
# Example: python3 ../script/convert_miopen_driver_to_profiler.py
# /opt/rocm/bin/MIOpenDriver conv -n 32 -c 64 -H 28 -W 28 -k 64 -y 3 -x 3
# -p 1 -q 1 -u 2 -v 2 -l 1 -j 1 -m conv -g 32 -F 1 -t 1

import argparse
import subprocess


def init_const_args(args):
    args.ck_profiler_cmd = '../build/bin/ckProfiler'
    # use decimal values
    args.init_method = 2
    # don't print tensor values
    args.log_value = 0


def run_ck_profiler_cmd(cmd):
    print("ckProfiler command:")
    print(cmd)
    subprocess.run(cmd)

def parse_data_type(args):
    if args.data_type == "fp32":
        if args.ck_profier_op == "grouped_conv_bwd_weight" or \
           args.ck_profier_op == "grouped_conv_bwd_data" or \
           args.ck_profier_op == "grouped_conv_fwd":
            args.data_type = 0
    if args.data_type == "fp16":
        if args.ck_profier_op == "grouped_conv_bwd_weight" or \
           args.ck_profier_op == "grouped_conv_bwd_data" or \
           args.ck_profier_op == "grouped_conv_fwd":
            args.data_type = 1
    if args.data_type == "int8":
        if args.ck_profier_op == "grouped_conv_bwd_weight":
            args.data_type = 4
        if args.ck_profier_op == "grouped_conv_bwd_data":
            print('Not supported data type for grouped_conv_bwd_data')
            exit(1)
        if args.ck_profier_op == "grouped_conv_fwd":
            args.data_type = 3
    if args.data_type == "bfp16":
        if args.ck_profier_op == "grouped_conv_bwd_weight" or \
           args.ck_profier_op == "grouped_conv_bwd_data" or \
           args.ck_profier_op == "grouped_conv_fwd":
            args.data_type = 2

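# Illustrative note (not part of the original script): the integer codes produced by
# parse_data_type() above correspond to ckProfiler's grouped-conv data-type argument:
#   fp32 -> 0, fp16 -> 1, bfp16 -> 2, int8 -> 3 (fwd) or 4 (bwd_weight; rejected for bwd_data)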
def add_conv_params_to_cmd(args, cmd):
    if args.spatial_dim == 1:
        cmd += [str(args.fil_w), str(args.in_w)]
        cmd += [str(args.conv_stride_w), str(args.dilation_w)]
        cmd += [str(args.pad_w), str(args.pad_w)]
    elif args.spatial_dim == 2:
        cmd += [str(args.fil_h), str(args.fil_w)]
        cmd += [str(args.in_h), str(args.in_w)]
        cmd += [str(args.conv_stride_h), str(args.conv_stride_w)]
        cmd += [str(args.dilation_h), str(args.dilation_w)]
        cmd += [str(args.pad_h), str(args.pad_w)]
        cmd += [str(args.pad_h), str(args.pad_w)]
    elif args.spatial_dim == 3:
        cmd += [str(args.fil_d), str(args.fil_h), str(args.fil_w)]
        cmd += [str(args.in_d), str(args.in_h), str(args.in_w)]
        cmd += [str(args.conv_stride_d), str(args.conv_stride_h)]
        cmd += [str(args.conv_stride_w)]
        cmd += [str(args.dilation_d),
                str(args.dilation_h),
                str(args.dilation_w)]
        cmd += [str(args.pad_d), str(args.pad_h), str(args.pad_w)]
        cmd += [str(args.pad_d), str(args.pad_h), str(args.pad_w)]
    else:
        print('Not supported spatial dim (supported: 1, 2, 3)')
        exit(1)


def run_ck_grouped_conv_fwd(args):
    args.ck_profier_op = "grouped_conv_fwd"
    parse_data_type(args)
    # default for MIOpen NHWGC
    args.layout = 1
    # use int32 by default
    args.index_type = 0

    cmd = [str(args.ck_profiler_cmd), str(args.ck_profier_op)]
    cmd += [str(args.data_type), str(args.layout), str(args.index_type)]
    cmd += [str(args.verify), str(args.init_method)]
    cmd += [str(args.log_value), str(args.time)]
    cmd += [str(args.spatial_dim), str(args.group_count)]
    cmd += [str(args.batchsize), str(args.out_channels)]
    cmd += [str(args.in_channels)]
    add_conv_params_to_cmd(args, cmd)

    run_ck_profiler_cmd(cmd)


def run_ck_grouped_conv_bwd_data(args):
    args.ck_profier_op = "grouped_conv_bwd_data"
    parse_data_type(args)
    # default for MIOpen NHWGC
    args.layout = 1

    cmd = [str(args.ck_profiler_cmd), str(args.ck_profier_op)]
    cmd += [str(args.data_type), str(args.layout)]
    cmd += [str(args.verify), str(args.init_method)]
    cmd += [str(args.log_value), str(args.time)]
    cmd += [str(args.spatial_dim), str(args.group_count)]
    cmd += [str(args.batchsize), str(args.out_channels)]
    cmd += [str(args.in_channels)]
    add_conv_params_to_cmd(args, cmd)

    run_ck_profiler_cmd(cmd)

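# Reference note (derived from the functions above, not part of the original script):
# the generated forward command has the shape
#   ckProfiler grouped_conv_fwd <data_type> <layout> <index_type> <verify> <init_method>
#       <log_value> <time> <spatial_dim> <group_count> <batchsize> <out_channels>
#       <in_channels> <filter/input/stride/dilation/pad values from add_conv_params_to_cmd>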
def run_ck_grouped_conv_bwd_weight(args):
    args.ck_profier_op = "grouped_conv_bwd_weight"
    parse_data_type(args)
    # default for MIOpen NHWGC
    args.layout = 2
    # Test all split-K values from the list {1, 2, 4, 8, 16, 32, 64, 128}
    args.split_k_value = -1

    cmd = [str(args.ck_profiler_cmd), str(args.ck_profier_op)]
    cmd += [str(args.data_type), str(args.layout)]
    cmd += [str(args.verify), str(args.init_method)]
    cmd += [str(args.log_value), str(args.time)]
    cmd += [str(args.spatial_dim), str(args.group_count)]
    cmd += [str(args.batchsize), str(args.out_channels)]
    cmd += [str(args.in_channels)]
    add_conv_params_to_cmd(args, cmd)

    cmd += [str(args.split_k_value)]
    run_ck_profiler_cmd(cmd)


# Get the name of the MIOpen driver and remove it from the unknown args
def process_miopen_driver_name(args, unknown):
    if "convint8" in unknown:
        args.data_type = 'int8'
        unknown.remove("convint8")
    elif "convbfp16" in unknown:
        args.data_type = 'bfp16'
        unknown.remove("convbfp16")
    elif "convfp16" in unknown:
        args.data_type = 'fp16'
        unknown.remove("convfp16")
    elif "conv" in unknown:
        args.data_type = 'fp32'
        unknown.remove("conv")
    else:
        print('Not supported driver (supported: conv, convfp16, convint8,'
              ' convbfp16).')
        exit(1)


def run_ck_profiler(args):
    # MIOpen takes the number of channels over all groups; the CK profiler takes
    # the number of channels per group
    args.in_channels = int(args.in_channels / args.group_count)
    args.out_channels = int(args.out_channels / args.group_count)

    if args.forw == 0 or args.forw == 1 or args.forw == 3 or args.forw == 5:
        run_ck_grouped_conv_fwd(args)
    if args.forw == 0 or args.forw == 2 or args.forw == 3 or args.forw == 6:
        run_ck_grouped_conv_bwd_data(args)
    if args.forw == 0 or args.forw == 4 or args.forw == 5 or args.forw == 6:
        run_ck_grouped_conv_bwd_weight(args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        prog="converter",
        description="Convert miopen driver command to ck Profiler"
        "\nExample: python3 "
        "../script/convert_miopen_driver_to_profiler.py "
        "/opt/rocm/bin/MIOpenDriver conv -n 32 -c 64 -H 28 -W 28 "
        "-k 64 -y 3 -x 3 -p 1 -q 1 -u 1 -v 1 -l 1 -j 1 -m conv -g "
        "32 -F 1 -t 1",
    )
    parser.add_argument(
        "-in_layout",
        "-I",
        default=-1,
        type=int,
        required=False,
        help="Input Layout (Default=NCHW for 2d conv, NCDHW for 3d conv)"
    )
    parser.add_argument(
        "-forw",
        "-F",
        default=0,
        type=int,
        required=False,
        help="Flag enables fwd, bwd, wrw convolutions"
        "\n0 fwd+bwd+wrw (default)"
        "\n1 fwd only"
        "\n2 bwd only"
        "\n4 wrw only"
        "\n3 fwd+bwd"
        "\n5 fwd+wrw"
        "\n6 bwd+wrw"
    )
    parser.add_argument(
        "-spatial_dim",
        "-_",
        default=2,
        type=int,
        required=False,
        help="convolution spatial dimension (Default=2)"
    )
    parser.add_argument(
        "-batchsize",
        "-n",
        default=100,
        type=int,
        required=False,
        help="Mini-batch size (Default=100)"
    )
    parser.add_argument(
        "-in_channels",
        "-c",
        default=3,
        type=int,
        required=False,
        help="Number of Input Channels (Default=3)"
    )
    parser.add_argument(
        "-in_d",
        "-!",
        default=32,
        type=int,
        required=False,
        help="Input Depth (Default=32)"
    )
    parser.add_argument(
        "-in_h",
        "-H",
        default=32,
        type=int,
        required=False,
        help="Input Height (Default=32)"
    )
    parser.add_argument(
        "-in_w",
        "-W",
        default=32,
        type=int,
        required=False,
        help="Input Width (Default=32)"
    )
    parser.add_argument(
        "-out_channels",
        "-k",
        default=32,
        type=int,
        required=False,
        help="Number of Output Channels (Default=32)"
    )
    parser.add_argument(
        "-fil_d",
        "-@",
        default=3,
        type=int,
        required=False,
        help="Filter Depth (Default=3)"
    )
    parser.add_argument(
        "-fil_h",
        "-y",
        default=3,
        type=int,
        required=False,
        help="Filter Height (Default=3)"
    )
    parser.add_argument(
        "-fil_w",
        "-x",
        default=3,
        type=int,
        required=False,
        help="Filter Width (Default=3)"
    )
    parser.add_argument(
        "-conv_stride_d",
        "-#",
        default=1,
        type=int,
        required=False,
        help="Convolution Stride for Depth (Default=1)"
    )
    parser.add_argument(
        "-conv_stride_h",
        "-u",
        default=1,
        type=int,
        required=False,
        help="Convolution Stride for Height (Default=1)"
    )
    parser.add_argument(
        "-conv_stride_w",
        "-v",
        default=1,
        type=int,
        required=False,
        help="Convolution Stride for Width (Default=1)"
    )
    parser.add_argument(
        "-pad_d",
        "-$",
        default=1,
        type=int,
        required=False,
        help="Zero Padding for Depth (Default=1)"
    )
    parser.add_argument(
        "-pad_h",
        "-p",
        default=1,
        type=int,
        required=False,
        help="Zero Padding for Height (Default=1)"
    )
    parser.add_argument(
        "-pad_w",
        "-q",
        default=1,
        type=int,
        required=False,
        help="Zero Padding for Width (Default=1)"
    )
    parser.add_argument(
        "-verify",
        "-V",
        default=1,
        type=int,
        required=False,
        help="Verify Each Layer (Default=1)"
    )
    parser.add_argument(
        "-time",
        "-t",
        default=0,
        type=int,
        required=False,
        help="Time Each Layer (Default=0)"
    )
    parser.add_argument(
        "-dilation_d",
        "-^",
        default=1,
        type=int,
        required=False,
        help="Dilation of Filter Depth (Default=1)"
    )
    parser.add_argument(
        "-dilation_h",
        "-l",
        default=1,
        type=int,
        required=False,
        help="Dilation of Filter Height (Default=1)"
    )
    parser.add_argument(
        "-dilation_w",
        "-j",
        default=1,
        type=int,
        required=False,
        help="Dilation of Filter Width (Default=1)"
    )
    parser.add_argument(
        "-group_count",
        "-g",
        type=int,
        default=1,
        required=False,
        help="Number of Groups (Default=1)"
    )

    args, unknown = parser.parse_known_args()
    init_const_args(args)
    process_miopen_driver_name(args, unknown)
    print("Ignored args:")
    print(unknown)
    run_ck_profiler(args)