// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#pragma once
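
// Profiler implementation for the fused GEMM + bias + add + reduce operation:
// E = C + bias + D0 (with C = A * B), followed by two per-row reductions over
// the N dimension, reduce0 = mean(E) and reduce1 = mean(E * E).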

#include <array>
#include <cstdlib>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

#include "ck/ck.hpp"
#include "ck/utility/reduction_operator.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/device/device_gemm_reduce.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/convolution_parameter.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

using F32 = float;
using F16 = ck::half_t;

using ReducePtrsGlobal = ck::Tuple<F32*, F32*>;
using Div              = ck::tensor_operation::element_wise::UnaryDivide;
using Identity         = ck::tensor_operation::element_wise::PassThrough;
using Square           = ck::tensor_operation::element_wise::UnarySquare;

using ReduceInElementOps  = ck::Tuple<Identity, Square>;
using ReduceOutElementOps = ck::Tuple<Div, Div>;

using DeviceGemmBiasAddReduceNoOpPtr =
    ck::tensor_operation::device::DeviceGemmReducePtr<1, ReducePtrsGlobal::Size()>;
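
// One instance-population function per (A, B) layout combination; the suffix
// encodes the layouts (mk = row-major A, km = column-major A; kn = row-major B,
// nk = column-major B; mn = row-major C).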
void add_device_gemm_bias_add_mean_squaremean_xdl_cshuffle_f16_f16_f16_f16_f16_f32_f32_mk_kn_mn_instances(
    std::vector<DeviceGemmBiasAddReduceNoOpPtr>&);

void add_device_gemm_bias_add_mean_squaremean_xdl_cshuffle_f16_f16_f16_f16_f16_f32_f32_mk_nk_mn_instances(
    std::vector<DeviceGemmBiasAddReduceNoOpPtr>&);

void add_device_gemm_bias_add_mean_squaremean_xdl_cshuffle_f16_f16_f16_f16_f16_f32_f32_km_kn_mn_instances(
    std::vector<DeviceGemmBiasAddReduceNoOpPtr>&);

void add_device_gemm_bias_add_mean_squaremean_xdl_cshuffle_f16_f16_f16_f16_f16_f32_f32_km_nk_mn_instances(
    std::vector<DeviceGemmBiasAddReduceNoOpPtr>&);

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck

namespace ck {
namespace profiler {

template <typename ADataType,
          typename BDataType,
          typename CDataType,
          typename BiasDataType,
          typename D0DataType,
          typename ReduceDataType,
          typename ALayout,
          typename BLayout,
          typename CLayout>
void profile_gemm_bias_add_reduce_impl(int do_verification,
                                       int init_method,
                                       bool do_log,
                                       bool time_kernel,
                                       int M,
                                       int N,
                                       int K,
                                       int StrideA,
                                       int StrideB,
                                       int StrideC,
                                       int StrideD0)
{
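    // Helpers that build HostTensorDescriptors; the 2-D variant picks strides
    // according to the requested row-/column-major layout.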
    auto f_host_tensor_descriptor1d = [](std::size_t len, std::size_t stride) {
        return HostTensorDescriptor(std::vector<std::size_t>({len}),
                                    std::vector<std::size_t>({stride}));
    };

    auto f_host_tensor_descriptor2d =
        [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
            if(is_same<decltype(layout), tensor_layout::gemm::RowMajor>::value)
            {
                return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
                                            std::vector<std::size_t>({stride, 1}));
            }
            else
            {
                return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
                                            std::vector<std::size_t>({1, stride}));
            }
        };

    Tensor<ADataType> a_m_k(f_host_tensor_descriptor2d(M, K, StrideA, ALayout{}));
    Tensor<BDataType> b_k_n(f_host_tensor_descriptor2d(K, N, StrideB, BLayout{}));

    Tensor<CDataType> c_m_n_host_result(f_host_tensor_descriptor2d(M, N, StrideC, CLayout{}));
    Tensor<BiasDataType> bias_n(f_host_tensor_descriptor1d(N, 1));
    Tensor<D0DataType> d0_m_n(f_host_tensor_descriptor2d(M, N, StrideD0, CLayout{}));
    Tensor<ReduceDataType> reduce0_m_host_result(
        HostTensorDescriptor(std::vector<std::size_t>({static_cast<std::size_t>(M)})));
    Tensor<ReduceDataType> reduce1_m_host_result(
        HostTensorDescriptor(std::vector<std::size_t>({static_cast<std::size_t>(M)})));

    Tensor<CDataType> c_m_n_device_result(f_host_tensor_descriptor2d(M, N, StrideC, CLayout{}));
    Tensor<ReduceDataType> reduce0_m_device_result(
        HostTensorDescriptor(std::vector<std::size_t>({static_cast<std::size_t>(M)})));
    Tensor<ReduceDataType> reduce1_m_device_result(
        HostTensorDescriptor(std::vector<std::size_t>({static_cast<std::size_t>(M)})));

    std::cout << "a_m_k: " << a_m_k.mDesc << std::endl;
    std::cout << "b_k_n: " << b_k_n.mDesc << std::endl;
    std::cout << "c_m_n: " << c_m_n_host_result.mDesc << std::endl;
    std::cout << "reduce0_m: " << reduce0_m_host_result.mDesc << std::endl;
    std::cout << "reduce1_m: " << reduce1_m_host_result.mDesc << std::endl;
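
    // init_method: 0 leaves the tensors uninitialized, 1 fills them with small
    // random integers, anything else with random floats.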
    std::size_t num_thread = 1;
    switch(init_method)
    {
    case 0: break;
    case 1:
        std::srand(0);
        a_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5}, num_thread);
        b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5}, num_thread);
        bias_n.GenerateTensorValue(GeneratorTensor_2<BiasDataType>{-5, 5}, num_thread);
        d0_m_n.GenerateTensorValue(GeneratorTensor_2<D0DataType>{-5, 5}, num_thread);
        break;
    default:
        std::srand(0);
        a_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0}, num_thread);
        b_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5}, num_thread);
        bias_n.GenerateTensorValue(GeneratorTensor_3<BiasDataType>{-0.5, 0.5}, num_thread);
        d0_m_n.GenerateTensorValue(GeneratorTensor_3<D0DataType>{-0.5, 0.5}, num_thread);
    }

    using PassThrough = ck::tensor_operation::element_wise::PassThrough;

    using AElementOp  = PassThrough;
    using BElementOp  = PassThrough;
    using CElementOp  = PassThrough;
    using D0ElementOp = PassThrough;
    using ReduceOp0   = ck::reduce::Add;
    using ReduceOp1   = ck::reduce::Add;

    using UnaryDivElementOp     = ck::tensor_operation::element_wise::UnaryDivide;
    using UnaryIdenticElementOp = ck::tensor_operation::element_wise::PassThrough;
    using UnarySquareElementOp  = ck::tensor_operation::element_wise::UnarySquare;

    auto a_element_op = AElementOp{};
    auto b_element_op = BElementOp{};
    auto c_element_op = CElementOp{};
    std::array<void*, 3> gemm_element_ops = {&a_element_op, &b_element_op, &c_element_op};

    auto d0_element_op    = D0ElementOp{};
    const auto reduce0_op = ReduceOp0{};
    const auto reduce1_op = ReduceOp1{};
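
    // The input ops feed E and E*E into the two Add reductions; dividing each
    // reduced sum by N turns them into the mean and the mean of squares.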
    auto passthrough = UnaryIdenticElementOp{};
    auto square      = UnarySquareElementOp{};
    auto div         = UnaryDivElementOp{N};
    std::array<void*, 2> reduce_in_element_ops  = {&passthrough, &square};
    std::array<void*, 2> reduce_out_element_ops = {&div, &div};
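
    // Host reference: plain GEMM, then bias + D0 applied per element, then the
    // per-row mean and mean-of-square computed the same way the kernel does.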
    if(do_verification)
    {
        using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataType,
                                                                                BDataType,
                                                                                CDataType,
                                                                                ReduceDataType,
                                                                                AElementOp,
                                                                                BElementOp,
                                                                                CElementOp>;

        using ReduceAccDataType = ReduceDataType;

        auto ref_gemm    = ReferenceGemmInstance{};
        auto ref_invoker = ref_gemm.MakeInvoker();

        auto ref_argument = ref_gemm.MakeArgument(
            a_m_k, b_k_n, c_m_n_host_result, a_element_op, b_element_op, PassThrough{});

        ref_invoker.Run(ref_argument);

        for(int m = 0; m < M; ++m)
            for(int n = 0; n < N; ++n)
            {
                ReduceAccDataType acc = static_cast<ReduceAccDataType>(c_m_n_host_result(m, n)) +
                                        static_cast<ReduceAccDataType>(bias_n(n));

                ReduceAccDataType d0 = static_cast<ReduceAccDataType>(d0_m_n(m, n));
                c_element_op(acc, acc);
                d0_element_op(d0, d0);
                acc += d0;
                c_m_n_host_result(m, n) = static_cast<CDataType>(acc);
            }

        for(int m = 0; m < M; ++m)
        {
            auto reduce0_acc = reduce0_op.GetIdentityValue<ReduceAccDataType>();
            auto reduce1_acc = reduce1_op.GetIdentityValue<ReduceAccDataType>();

            for(int n = 0; n < N; ++n)
            {
                ReduceAccDataType d0_val =
                    ck::type_convert<ReduceAccDataType>(c_m_n_host_result(m, n));
                ReduceAccDataType d1_val;

                square(d1_val, d0_val);
                reduce0_op(reduce0_acc, d0_val);
                reduce1_op(reduce1_acc, d1_val);
            }

            div(reduce0_acc, reduce0_acc);
            div(reduce1_acc, reduce1_acc);
            reduce0_m_host_result(m) = ck::type_convert<ReduceDataType>(reduce0_acc);
            reduce1_m_host_result(m) = ck::type_convert<ReduceDataType>(reduce1_acc);
        }
    }
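
    // allocate device buffers and upload the input tensors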
    DeviceMem a_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpaceSize());
    DeviceMem b_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpaceSize());
    DeviceMem c_device_buf(sizeof(CDataType) * c_m_n_device_result.mDesc.GetElementSpaceSize());
    DeviceMem bias_device_buf(sizeof(BiasDataType) * bias_n.mDesc.GetElementSpaceSize());
    DeviceMem d0_device_buf(sizeof(D0DataType) * d0_m_n.mDesc.GetElementSpaceSize());
    DeviceMem reduce0_device_buf(sizeof(ReduceDataType) *
                                 reduce0_m_device_result.mDesc.GetElementSpaceSize());
    DeviceMem reduce1_device_buf(sizeof(ReduceDataType) *
                                 reduce1_m_device_result.mDesc.GetElementSpaceSize());

    std::array<void*, 2> p_reduces = {reduce0_device_buf.GetDeviceBuffer(),
                                      reduce1_device_buf.GetDeviceBuffer()};

    a_device_buf.ToDevice(a_m_k.mData.data());
    b_device_buf.ToDevice(b_k_n.mData.data());
    bias_device_buf.ToDevice(bias_n.mData.data());
    d0_device_buf.ToDevice(d0_m_n.mData.data());
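
    // NOTE: instances below exist only for f16 A/B/C with row-major C; any
    // other type/layout combination leaves gemm_ptrs empty and throws below.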
    // add device GEMM instances
    std::vector<ck::tensor_operation::device::instance::DeviceGemmBiasAddReduceNoOpPtr> gemm_ptrs;

    if constexpr(is_same<ADataType, half_t>::value && is_same<BDataType, half_t>::value &&
                 is_same<CDataType, half_t>::value)
    {
        if constexpr(is_same<ALayout, tensor_layout::gemm::RowMajor>::value &&
                     is_same<BLayout, tensor_layout::gemm::RowMajor>::value &&
                     is_same<CLayout, tensor_layout::gemm::RowMajor>::value)
        {
            ck::tensor_operation::device::instance::
                add_device_gemm_bias_add_mean_squaremean_xdl_cshuffle_f16_f16_f16_f16_f16_f32_f32_mk_kn_mn_instances(
                    gemm_ptrs);
        }
        else if constexpr(is_same<ALayout, tensor_layout::gemm::RowMajor>::value &&
                          is_same<BLayout, tensor_layout::gemm::ColumnMajor>::value &&
                          is_same<CLayout, tensor_layout::gemm::RowMajor>::value)
        {
            ck::tensor_operation::device::instance::
                add_device_gemm_bias_add_mean_squaremean_xdl_cshuffle_f16_f16_f16_f16_f16_f32_f32_mk_nk_mn_instances(
                    gemm_ptrs);
        }
        else if constexpr(is_same<ALayout, tensor_layout::gemm::ColumnMajor>::value &&
                          is_same<BLayout, tensor_layout::gemm::RowMajor>::value &&
                          is_same<CLayout, tensor_layout::gemm::RowMajor>::value)
        {
            ck::tensor_operation::device::instance::
                add_device_gemm_bias_add_mean_squaremean_xdl_cshuffle_f16_f16_f16_f16_f16_f32_f32_km_kn_mn_instances(
                    gemm_ptrs);
        }
        else if constexpr(is_same<ALayout, tensor_layout::gemm::ColumnMajor>::value &&
                          is_same<BLayout, tensor_layout::gemm::ColumnMajor>::value &&
                          is_same<CLayout, tensor_layout::gemm::RowMajor>::value)
        {
            ck::tensor_operation::device::instance::
                add_device_gemm_bias_add_mean_squaremean_xdl_cshuffle_f16_f16_f16_f16_f16_f32_f32_km_nk_mn_instances(
                    gemm_ptrs);
        }
    }

    if(gemm_ptrs.empty())
    {
        throw std::runtime_error("wrong! no device GEMM instance found");
    }

    std::string best_gemm_name;
    float best_ave_time   = 0;
    float best_tflops     = 0;
    float best_gb_per_sec = 0;

    // profile device GEMM instances
    for(auto& gemm_ptr : gemm_ptrs)
    {
        auto argument_ptr = gemm_ptr->MakeArgumentPointer(a_device_buf.GetDeviceBuffer(),
                                                          b_device_buf.GetDeviceBuffer(),
                                                          bias_device_buf.GetDeviceBuffer(),
                                                          {d0_device_buf.GetDeviceBuffer()},
                                                          c_device_buf.GetDeviceBuffer(),
                                                          p_reduces,
                                                          M,
                                                          N,
                                                          K,
                                                          StrideA,
                                                          StrideB,
                                                          StrideC,
                                                          {StrideD0},
                                                          gemm_element_ops,
                                                          {&d0_element_op},
                                                          reduce_in_element_ops,
                                                          reduce_out_element_ops);

        auto invoker_ptr = gemm_ptr->MakeInvokerPointer();

        if(gemm_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            // initialize the reduce0/reduce1 output buffers to 0
            reduce0_device_buf.SetZero();
            reduce1_device_buf.SetZero();

            float ave_time =
                invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});

            std::string gemm_name = gemm_ptr->GetTypeString();
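
            // FLOP count: 2*M*N*K for the GEMM plus 2*M*N for the bias and D0 adds.
            // ave_time is in ms, so flop / 1e9 / ms yields TFLOPS and
            // bytes / 1e6 / ms yields GB/s.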
            std::size_t flop = std::size_t(2) * M * N * K + std::size_t(2) * M * N;

            std::size_t num_byte = sizeof(ADataType) * M * K + sizeof(BDataType) * K * N +
                                   sizeof(CDataType) * M * N + sizeof(BiasDataType) * N +
                                   sizeof(D0DataType) * M * N + sizeof(ReduceDataType) * M +
                                   sizeof(ReduceDataType) * M;

            float tflops = static_cast<float>(flop) / 1.E9 / ave_time;

            float gb_per_sec = num_byte / 1.E6 / ave_time;

            std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec
                      << " GB/s, " << gemm_name << std::endl;

            if(tflops > best_tflops)
            {
                best_gemm_name  = gemm_name;
                best_tflops     = tflops;
                best_ave_time   = ave_time;
                best_gb_per_sec = gb_per_sec;
            }
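
            // pull results back and compare each instance against the host reference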
            if(do_verification)
            {
                c_device_buf.FromDevice(c_m_n_device_result.mData.data());
                reduce0_device_buf.FromDevice(reduce0_m_device_result.mData.data());
                reduce1_device_buf.FromDevice(reduce1_m_device_result.mData.data());

                ck::utils::check_err(c_m_n_device_result.mData, c_m_n_host_result.mData);
                ck::utils::check_err(reduce0_m_device_result.mData, reduce0_m_host_result.mData);
                ck::utils::check_err(reduce1_m_device_result.mData, reduce1_m_host_result.mData);

                if(do_log)
                {
                    LogRangeAsType<float>(std::cout << "a: ", a_m_k.mData, ",") << std::endl;
                    LogRangeAsType<float>(std::cout << "b: ", b_k_n.mData, ",") << std::endl;
                    LogRangeAsType<float>(std::cout << "c_host: ", c_m_n_host_result.mData, ",")
                        << std::endl;
                    LogRangeAsType<float>(std::cout << "c_device: ", c_m_n_device_result.mData, ",")
                        << std::endl;
                    LogRangeAsType<float>(
                        std::cout << "reduce0_host: ", reduce0_m_host_result.mData, ",")
                        << std::endl;
                    LogRangeAsType<float>(
                        std::cout << "reduce0_device: ", reduce0_m_device_result.mData, ",")
                        << std::endl;
                    LogRangeAsType<float>(
                        std::cout << "reduce1_host: ", reduce1_m_host_result.mData, ",")
                        << std::endl;
                    LogRangeAsType<float>(
                        std::cout << "reduce1_device: ", reduce1_m_device_result.mData, ",")
                        << std::endl;
                }
            }
        }
        else
        {
            std::cout << "does not support this GEMM problem" << std::endl;
        }
    }

    std::cout << "Best Perf: " << best_ave_time << " ms, " << best_tflops << " TFlops, "
              << best_gb_per_sec << " GB/s, " << best_gemm_name << std::endl;
}
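
// Hypothetical usage sketch (illustrative values; the Row alias is an
// assumption for brevity and is not defined in this header):
//
//   using Row = ck::tensor_layout::gemm::RowMajor;
//   ck::profiler::profile_gemm_bias_add_reduce_impl<ck::half_t, ck::half_t, ck::half_t,
//                                                   ck::half_t, ck::half_t, float,
//                                                   Row, Row, Row>(
//       1,     // do_verification
//       1,     // init_method: random integers
//       false, // do_log
//       true,  // time_kernel
//       1024, 1024, 1024,  // M, N, K
//       1024, 1024, 1024,  // StrideA, StrideB, StrideC
//       1024);             // StrideD0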

} // namespace profiler
} // namespace ck