mirror of
https://github.com/ROCm/composable_kernel.git
synced 2026-05-14 02:02:46 +00:00
MaxPool & AvgPool bwd instances, test, ckProfiler, client example (#861)
* Add maxpool instances
* Rename index pool to max pool.
* Add maxpool bwd bf16 instances
* Add avg pool bwd instances
* Rename avgpool and maxpool to avg_pool3d and max_pool
* Add bf16 pool fwd instances
* Add max pool bwd to ckProfiler
* Add avg pool3d bwd to ckProfiler
* Add avg pool bwd test
* Fix bug of reference pool fwd (dilation)
* Fix bug of max pool bwd (dilation and initZero)
* Support bf16 compute data type
* Force compute type be f32. Because atomicAdd only support f32
* Add max pool bwd test
* Rename folder
* Rename pool
* Add max pool bwd client example
* Add avg pool bwd client example
* Add missing workspace
* clang format
* Rename macro
* remove useless header
* remove useless layout
[ROCm/composable_kernel commit: 866377de18]
This commit is contained in:
@@ -100,6 +100,10 @@ int main(int argc, char* argv[])
|
||||
|
||||
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
||||
{
|
||||
size_t workspace_sz = op_ptr->GetWorkSpaceSize(argument_ptr.get());
|
||||
SimpleDeviceMem workspace(workspace_sz);
|
||||
op_ptr->SetWorkSpacePointer(argument_ptr.get(), workspace.GetDeviceBuffer());
|
||||
|
||||
float ave_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, true});
|
||||
|
||||
std::size_t num_byte = sizeof(XDataType) * M * N + sizeof(GammaDataType) * N +
|
||||
@@ -153,6 +157,10 @@ int main(int argc, char* argv[])
|
||||
|
||||
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
||||
{
|
||||
size_t workspace_sz = op_ptr->GetWorkSpaceSize(argument_ptr.get());
|
||||
SimpleDeviceMem workspace(workspace_sz);
|
||||
op_ptr->SetWorkSpacePointer(argument_ptr.get(), workspace.GetDeviceBuffer());
|
||||
|
||||
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, false});
|
||||
}
|
||||
|
||||
|
||||
@@ -129,6 +129,10 @@ int main(int argc, char* argv[])
|
||||
|
||||
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
||||
{
|
||||
size_t workspace_sz = op_ptr->GetWorkSpaceSize(argument_ptr.get());
|
||||
SimpleDeviceMem workspace(workspace_sz);
|
||||
op_ptr->SetWorkSpacePointer(argument_ptr.get(), workspace.GetDeviceBuffer());
|
||||
|
||||
float ave_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, true});
|
||||
|
||||
std::size_t num_byte =
|
||||
@@ -184,6 +188,10 @@ int main(int argc, char* argv[])
|
||||
|
||||
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
||||
{
|
||||
size_t workspace_sz = op_ptr->GetWorkSpaceSize(argument_ptr.get());
|
||||
SimpleDeviceMem workspace(workspace_sz);
|
||||
op_ptr->SetWorkSpacePointer(argument_ptr.get(), workspace.GetDeviceBuffer());
|
||||
|
||||
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, false});
|
||||
}
|
||||
|
||||
|
||||
11
client_example/19_pool/CMakeLists.txt
Normal file
11
client_example/19_pool/CMakeLists.txt
Normal file
@@ -0,0 +1,11 @@
|
||||
add_executable(client_max_pool2d_fwd max_pool2d_fwd.cpp)
|
||||
target_link_libraries(client_max_pool2d_fwd PRIVATE composable_kernel::device_operations)
|
||||
|
||||
add_executable(client_max_pool2d_bwd max_pool2d_bwd.cpp)
|
||||
target_link_libraries(client_max_pool2d_bwd PRIVATE composable_kernel::device_operations)
|
||||
|
||||
add_executable(client_avg_pool3d_fwd avg_pool3d_fwd.cpp)
|
||||
target_link_libraries(client_avg_pool3d_fwd PRIVATE composable_kernel::device_operations)
|
||||
|
||||
add_executable(client_avg_pool3d_bwd avg_pool3d_bwd.cpp)
|
||||
target_link_libraries(client_avg_pool3d_bwd PRIVATE composable_kernel::device_operations)
|
||||
191
client_example/19_pool/avg_pool3d_bwd.cpp
Normal file
191
client_example/19_pool/avg_pool3d_bwd.cpp
Normal file
@@ -0,0 +1,191 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
#include <iomanip>
|
||||
#include <vector>
|
||||
#include <iostream>
|
||||
|
||||
#include "ck/ck.hpp"
|
||||
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
||||
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
||||
|
||||
#include "ck/library/tensor_operation_instance/gpu/avg_pool3d_bwd.hpp"
|
||||
|
||||
using DOutDataType = ck::half_t;
|
||||
using DInDataType = ck::half_t;
|
||||
|
||||
using DOutLayout = ck::tensor_layout::convolution::NDHWC;
|
||||
using DInLayout = ck::tensor_layout::convolution::NDHWC;
|
||||
|
||||
struct SimpleDeviceMem
|
||||
{
|
||||
SimpleDeviceMem() = delete;
|
||||
|
||||
SimpleDeviceMem(std::size_t mem_size) : p_mem_{}, mMemSize_(mem_size)
|
||||
{
|
||||
(void)hipMalloc(static_cast<void**>(&p_mem_), mem_size);
|
||||
}
|
||||
|
||||
void* GetDeviceBuffer() { return p_mem_; }
|
||||
|
||||
void SetZero() const { (void)hipMemset(p_mem_, 0, mMemSize_); }
|
||||
|
||||
~SimpleDeviceMem() { (void)hipFree(p_mem_); }
|
||||
|
||||
void* p_mem_;
|
||||
std::size_t mMemSize_;
|
||||
};
|
||||
|
||||
int main(int argc, char* argv[])
|
||||
{
|
||||
ck::index_t N = 2;
|
||||
ck::index_t C = 32;
|
||||
ck::index_t Z = 2;
|
||||
ck::index_t Y = 2;
|
||||
ck::index_t X = 2;
|
||||
ck::index_t Di = 30;
|
||||
ck::index_t Hi = 30;
|
||||
ck::index_t Wi = 30;
|
||||
ck::index_t window_stride_d = 2;
|
||||
ck::index_t window_stride_h = 2;
|
||||
ck::index_t window_stride_w = 2;
|
||||
ck::index_t window_dilation_d = 1;
|
||||
ck::index_t window_dilation_h = 1;
|
||||
ck::index_t window_dilation_w = 1;
|
||||
ck::index_t in_left_pad_d = 1;
|
||||
ck::index_t in_left_pad_h = 1;
|
||||
ck::index_t in_left_pad_w = 1;
|
||||
ck::index_t in_right_pad_d = 1;
|
||||
ck::index_t in_right_pad_h = 1;
|
||||
ck::index_t in_right_pad_w = 1;
|
||||
|
||||
const ck::index_t Zs = (Z - 1) * window_dilation_d + 1;
|
||||
const ck::index_t Ys = (Y - 1) * window_dilation_h + 1;
|
||||
const ck::index_t Xs = (X - 1) * window_dilation_w + 1;
|
||||
ck::index_t Do = (Di + in_left_pad_d + in_right_pad_d - Zs) / window_stride_d + 1;
|
||||
ck::index_t Ho = (Hi + in_left_pad_h + in_right_pad_h - Ys) / window_stride_h + 1;
|
||||
ck::index_t Wo = (Wi + in_left_pad_w + in_right_pad_w - Xs) / window_stride_w + 1;
|
||||
|
||||
// Pool API only support the order of NCDHW
|
||||
std::vector<ck::index_t> in_length = {N, C, Di, Hi, Wi};
|
||||
std::vector<ck::index_t> out_length = {N, C, Do, Ho, Wo};
|
||||
std::vector<ck::index_t> window_spatial_lengths = {Z, Y, X};
|
||||
std::vector<ck::index_t> window_strides = {window_stride_d, window_stride_h, window_stride_w};
|
||||
std::vector<ck::index_t> window_dilations{
|
||||
window_dilation_d, window_dilation_h, window_dilation_w};
|
||||
std::vector<ck::index_t> input_left_pads = {in_left_pad_d, in_left_pad_h, in_left_pad_w};
|
||||
std::vector<ck::index_t> input_right_pads = {in_right_pad_d, in_right_pad_h, in_right_pad_w};
|
||||
|
||||
std::size_t in_tensor_size = N * C * Di * Hi * Wi;
|
||||
std::size_t out_tensor_size = N * C * Do * Ho * Wo;
|
||||
|
||||
// tensor layout = NDHWC
|
||||
std::vector<ck::index_t> in_tensor_stride = {Di * C * Hi * Wi, 1, C * Hi * Wi, Wi * C, C};
|
||||
std::vector<ck::index_t> out_tensor_stride = {Do * C * Ho * Wo, 1, C * Ho * Wo, Wo * C, C};
|
||||
|
||||
SimpleDeviceMem dout_device_buf(sizeof(DOutDataType) * out_tensor_size);
|
||||
SimpleDeviceMem din_device_buf(sizeof(DInDataType) * in_tensor_size);
|
||||
|
||||
using DeviceOp = ck::tensor_operation::device::
|
||||
DeviceAvgPoolBwd<3, DOutDataType, DInDataType, DOutLayout, DInLayout>;
|
||||
|
||||
// get device op instances
|
||||
const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
||||
DeviceOp>::GetInstances();
|
||||
|
||||
std::cout << "found " << op_ptrs.size() << " instances" << std::endl;
|
||||
|
||||
std::string best_op_name;
|
||||
bool found = false;
|
||||
int best_op_id = -1;
|
||||
float best_ave_time = std::numeric_limits<float>::max();
|
||||
float best_gb_per_sec = 0;
|
||||
|
||||
// profile device operation instances
|
||||
std::cout << "Run all instances and do timing" << std::endl;
|
||||
|
||||
for(int i = 0; i < op_ptrs.size(); ++i)
|
||||
{
|
||||
auto& op_ptr = op_ptrs[i];
|
||||
auto argument_ptr = op_ptr->MakeArgumentPointer(
|
||||
static_cast<DOutDataType*>(dout_device_buf.GetDeviceBuffer()),
|
||||
static_cast<DInDataType*>(din_device_buf.GetDeviceBuffer()),
|
||||
out_length,
|
||||
in_length,
|
||||
out_tensor_stride,
|
||||
in_tensor_stride,
|
||||
window_spatial_lengths,
|
||||
window_strides,
|
||||
window_dilations,
|
||||
input_left_pads,
|
||||
input_right_pads);
|
||||
|
||||
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
||||
|
||||
std::string op_name = op_ptr->GetTypeString();
|
||||
|
||||
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
||||
{
|
||||
din_device_buf.SetZero();
|
||||
|
||||
float ave_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, true});
|
||||
|
||||
std::size_t num_bytes =
|
||||
in_tensor_size * sizeof(DInDataType) + out_tensor_size * sizeof(DOutDataType);
|
||||
|
||||
float gb_per_sec = num_bytes / 1.E6 / ave_time;
|
||||
|
||||
std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << gb_per_sec << " GB/s, "
|
||||
<< op_name << std::endl;
|
||||
|
||||
if(ave_time < best_ave_time)
|
||||
{
|
||||
found = true;
|
||||
best_op_id = i;
|
||||
best_op_name = op_name;
|
||||
best_ave_time = ave_time;
|
||||
best_gb_per_sec = gb_per_sec;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
std::cout << op_name << " does not support this problem" << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
// run the best intance
|
||||
if(found)
|
||||
{
|
||||
std::cout << "Best Perf: " << best_ave_time << " ms, " << best_gb_per_sec << " GB/s, "
|
||||
<< best_op_name << std::endl;
|
||||
|
||||
auto& op_ptr = op_ptrs[best_op_id];
|
||||
std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString()
|
||||
<< std::endl;
|
||||
|
||||
auto argument_ptr = op_ptr->MakeArgumentPointer(
|
||||
static_cast<DOutDataType*>(dout_device_buf.GetDeviceBuffer()),
|
||||
static_cast<DInDataType*>(din_device_buf.GetDeviceBuffer()),
|
||||
out_length,
|
||||
in_length,
|
||||
out_tensor_stride,
|
||||
in_tensor_stride,
|
||||
window_spatial_lengths,
|
||||
window_strides,
|
||||
window_dilations,
|
||||
input_left_pads,
|
||||
input_right_pads);
|
||||
|
||||
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
||||
|
||||
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
||||
{
|
||||
din_device_buf.SetZero();
|
||||
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, false});
|
||||
}
|
||||
|
||||
std::cout << "Done" << std::endl;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
280
client_example/19_pool/max_pool2d_bwd.cpp
Normal file
280
client_example/19_pool/max_pool2d_bwd.cpp
Normal file
@@ -0,0 +1,280 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
#include <iomanip>
|
||||
#include <vector>
|
||||
#include <iostream>
|
||||
|
||||
#include "ck/ck.hpp"
|
||||
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
||||
#include "ck/tensor_operation/gpu/device/device_pool_fwd.hpp"
|
||||
#include "ck/tensor_operation/gpu/device/device_max_pool_bwd.hpp"
|
||||
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
||||
|
||||
#include "ck/library/tensor_operation_instance/gpu/pool3d_fwd.hpp"
|
||||
#include "ck/library/tensor_operation_instance/gpu/max_pool_bwd.hpp"
|
||||
|
||||
using InDataType = ck::half_t;
|
||||
using OutDataType = ck::half_t;
|
||||
using DOutDataType = ck::half_t;
|
||||
using DInDataType = ck::half_t;
|
||||
using IndexDataType = int32_t;
|
||||
|
||||
// We use pool3d to implement pool2d in this example
|
||||
using InLayout = ck::tensor_layout::convolution::NDHWC;
|
||||
using OutLayout = ck::tensor_layout::convolution::NDHWC;
|
||||
|
||||
constexpr ck::index_t InOutRank = 5;
|
||||
constexpr ck::index_t WindowRank = 3;
|
||||
|
||||
struct SimpleDeviceMem
|
||||
{
|
||||
SimpleDeviceMem() = delete;
|
||||
|
||||
SimpleDeviceMem(std::size_t mem_size) : p_mem_{}
|
||||
{
|
||||
(void)hipMalloc(static_cast<void**>(&p_mem_), mem_size);
|
||||
}
|
||||
|
||||
void* GetDeviceBuffer() { return p_mem_; }
|
||||
|
||||
~SimpleDeviceMem() { (void)hipFree(p_mem_); }
|
||||
|
||||
void* p_mem_;
|
||||
};
|
||||
|
||||
void TransformPool2dparamToPool3d(std::vector<ck::index_t>& input_lengths,
|
||||
std::vector<ck::index_t>& window_lengths,
|
||||
std::vector<ck::index_t>& output_lengths,
|
||||
std::vector<ck::index_t>& input_stride,
|
||||
std::vector<ck::index_t>& output_stride,
|
||||
std::vector<ck::index_t>& indices_stride,
|
||||
std::vector<ck::index_t>& window_strides,
|
||||
std::vector<ck::index_t>& window_dilations,
|
||||
std::vector<ck::index_t>& input_left_pads,
|
||||
std::vector<ck::index_t>& input_right_pads,
|
||||
std::vector<ck::index_t>& pooling_dims)
|
||||
{
|
||||
// NCHW to NCDHW
|
||||
input_lengths.insert(input_lengths.begin() + 2, 1);
|
||||
output_lengths.insert(output_lengths.begin() + 2, 1);
|
||||
input_stride.insert(input_stride.begin() + 2, 0);
|
||||
output_stride.insert(output_stride.begin() + 2, 0);
|
||||
indices_stride.insert(indices_stride.begin() + 2, 0);
|
||||
|
||||
// YX to ZYX
|
||||
window_lengths.insert(window_lengths.begin(), 1);
|
||||
window_strides.insert(window_strides.begin(), 0);
|
||||
window_dilations.insert(window_dilations.begin(), 0);
|
||||
input_left_pads.insert(input_left_pads.begin(), 0);
|
||||
input_right_pads.insert(input_right_pads.begin(), 0);
|
||||
|
||||
pooling_dims = {2, 3, 4};
|
||||
}
|
||||
|
||||
int main(int argc, char* argv[])
|
||||
{
|
||||
ck::index_t N = 2;
|
||||
ck::index_t C = 32;
|
||||
ck::index_t Y = 2;
|
||||
ck::index_t X = 2;
|
||||
ck::index_t Hi = 30;
|
||||
ck::index_t Wi = 30;
|
||||
ck::index_t window_stride_h = 2;
|
||||
ck::index_t window_stride_w = 2;
|
||||
ck::index_t window_dilation_h = 1;
|
||||
ck::index_t window_dilation_w = 1;
|
||||
ck::index_t in_left_pad_h = 1;
|
||||
ck::index_t in_left_pad_w = 1;
|
||||
ck::index_t in_right_pad_h = 1;
|
||||
ck::index_t in_right_pad_w = 1;
|
||||
|
||||
const ck::index_t Ys = (Y - 1) * window_dilation_h + 1;
|
||||
const ck::index_t Xs = (X - 1) * window_dilation_w + 1;
|
||||
ck::index_t Ho = (Hi + in_left_pad_h + in_right_pad_h - Ys) / window_stride_h + 1;
|
||||
ck::index_t Wo = (Wi + in_left_pad_w + in_right_pad_w - Xs) / window_stride_w + 1;
|
||||
|
||||
// Pool API only support the order of NCHW
|
||||
std::vector<ck::index_t> in_length = {N, C, Hi, Wi};
|
||||
std::vector<ck::index_t> out_length = {N, C, Ho, Wo};
|
||||
std::vector<ck::index_t> window_spatial_lengths = {Y, X};
|
||||
std::vector<ck::index_t> window_strides = {window_stride_h, window_stride_w};
|
||||
std::vector<ck::index_t> window_dilations = {window_dilation_h, window_dilation_w};
|
||||
std::vector<ck::index_t> input_left_pads = {in_left_pad_h, in_left_pad_w};
|
||||
std::vector<ck::index_t> input_right_pads = {in_right_pad_h, in_right_pad_w};
|
||||
std::vector<ck::index_t> pooling_dims = {2, 3};
|
||||
|
||||
std::size_t in_tensor_size = N * C * Hi * Wi;
|
||||
std::size_t out_tensor_size = N * C * Ho * Wo;
|
||||
|
||||
// tensor layout = NHWC
|
||||
std::vector<ck::index_t> in_tensor_stride = {C * Hi * Wi, 1, Wi * C, C};
|
||||
std::vector<ck::index_t> out_tensor_stride = {C * Ho * Wo, 1, Wo * C, C};
|
||||
|
||||
TransformPool2dparamToPool3d(in_length,
|
||||
window_spatial_lengths,
|
||||
out_length,
|
||||
in_tensor_stride,
|
||||
out_tensor_stride,
|
||||
out_tensor_stride,
|
||||
window_strides,
|
||||
window_dilations,
|
||||
input_left_pads,
|
||||
input_right_pads,
|
||||
pooling_dims);
|
||||
|
||||
SimpleDeviceMem in_device_buf(sizeof(InDataType) * in_tensor_size);
|
||||
SimpleDeviceMem out_device_buf(sizeof(OutDataType) * out_tensor_size);
|
||||
SimpleDeviceMem indices_device_buf(sizeof(IndexDataType) * out_tensor_size);
|
||||
SimpleDeviceMem dout_device_buf(sizeof(DOutDataType) * out_tensor_size);
|
||||
SimpleDeviceMem din_device_buf(sizeof(DInDataType) * in_tensor_size);
|
||||
|
||||
// Generate index data from max pool forward
|
||||
{
|
||||
using MaxPoolFwdDeviceOp =
|
||||
ck::tensor_operation::device::DevicePoolFwd<InOutRank,
|
||||
WindowRank,
|
||||
InDataType,
|
||||
OutDataType,
|
||||
IndexDataType,
|
||||
InLayout,
|
||||
OutLayout,
|
||||
ck::ReduceTensorOp::MAX,
|
||||
true>;
|
||||
|
||||
const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
||||
MaxPoolFwdDeviceOp>::GetInstances();
|
||||
|
||||
auto& op_ptr = op_ptrs[0];
|
||||
|
||||
auto argument_ptr = op_ptr->MakeArgumentPointer(
|
||||
static_cast<InDataType*>(in_device_buf.GetDeviceBuffer()),
|
||||
static_cast<OutDataType*>(out_device_buf.GetDeviceBuffer()),
|
||||
static_cast<IndexDataType*>(indices_device_buf.GetDeviceBuffer()),
|
||||
in_length,
|
||||
window_spatial_lengths,
|
||||
out_length,
|
||||
in_tensor_stride,
|
||||
out_tensor_stride,
|
||||
out_tensor_stride,
|
||||
window_strides,
|
||||
window_dilations,
|
||||
input_left_pads,
|
||||
input_right_pads,
|
||||
pooling_dims);
|
||||
|
||||
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
||||
|
||||
std::string op_name = op_ptr->GetTypeString();
|
||||
|
||||
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
||||
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, true});
|
||||
}
|
||||
|
||||
// Run MaxPool bwd
|
||||
using MaxPoolBwdDeviceOp =
|
||||
ck::tensor_operation::device::DeviceMaxPoolBwd<DOutDataType, IndexDataType, DInDataType>;
|
||||
|
||||
const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
||||
MaxPoolBwdDeviceOp>::GetInstances();
|
||||
|
||||
std::cout << "found " << op_ptrs.size() << " instances" << std::endl;
|
||||
|
||||
std::string best_op_name;
|
||||
bool found = false;
|
||||
int best_op_id = -1;
|
||||
float best_ave_time = std::numeric_limits<float>::max();
|
||||
float best_gb_per_sec = 0;
|
||||
|
||||
// profile device operation instances
|
||||
std::cout << "Run all instances and do timing" << std::endl;
|
||||
|
||||
for(int i = 0; i < op_ptrs.size(); ++i)
|
||||
{
|
||||
auto& op_ptr = op_ptrs[i];
|
||||
auto argument_ptr = op_ptr->MakeArgumentPointer(
|
||||
static_cast<InDataType*>(dout_device_buf.GetDeviceBuffer()),
|
||||
static_cast<IndexDataType*>(indices_device_buf.GetDeviceBuffer()),
|
||||
static_cast<DInDataType*>(din_device_buf.GetDeviceBuffer()),
|
||||
out_tensor_size,
|
||||
in_tensor_size,
|
||||
window_spatial_lengths,
|
||||
window_strides,
|
||||
window_dilations);
|
||||
|
||||
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
||||
|
||||
std::string op_name = op_ptr->GetTypeString();
|
||||
|
||||
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
||||
{
|
||||
size_t workspace_sz = op_ptr->GetWorkSpaceSize(argument_ptr.get());
|
||||
|
||||
SimpleDeviceMem workspace(workspace_sz);
|
||||
|
||||
op_ptr->SetWorkSpacePointer(argument_ptr.get(), workspace.GetDeviceBuffer());
|
||||
|
||||
float ave_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, true});
|
||||
|
||||
std::size_t num_bytes = in_tensor_size * sizeof(DInDataType) +
|
||||
out_tensor_size * sizeof(IndexDataType) +
|
||||
out_tensor_size * sizeof(DOutDataType);
|
||||
|
||||
float gb_per_sec = num_bytes / 1.E6 / ave_time;
|
||||
|
||||
std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << gb_per_sec << "GB / s,"
|
||||
<< op_name << std::endl;
|
||||
|
||||
if(ave_time < best_ave_time)
|
||||
{
|
||||
found = true;
|
||||
best_op_id = i;
|
||||
best_op_name = op_name;
|
||||
best_ave_time = ave_time;
|
||||
best_gb_per_sec = gb_per_sec;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
std::cout << op_name << " does not support this problem" << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
// run the best intance
|
||||
if(found)
|
||||
{
|
||||
std::cout << "Best Perf: " << best_ave_time << " ms, " << best_gb_per_sec << " GB/s, "
|
||||
<< best_op_name << std::endl;
|
||||
|
||||
auto& op_ptr = op_ptrs[best_op_id];
|
||||
std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString()
|
||||
<< std::endl;
|
||||
|
||||
auto argument_ptr = op_ptr->MakeArgumentPointer(
|
||||
static_cast<InDataType*>(dout_device_buf.GetDeviceBuffer()),
|
||||
static_cast<IndexDataType*>(indices_device_buf.GetDeviceBuffer()),
|
||||
static_cast<DInDataType*>(din_device_buf.GetDeviceBuffer()),
|
||||
out_tensor_size,
|
||||
in_tensor_size,
|
||||
window_spatial_lengths,
|
||||
window_strides,
|
||||
window_dilations);
|
||||
|
||||
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
||||
|
||||
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
||||
{
|
||||
size_t workspace_sz = op_ptr->GetWorkSpaceSize(argument_ptr.get());
|
||||
|
||||
SimpleDeviceMem workspace(workspace_sz);
|
||||
|
||||
op_ptr->SetWorkSpacePointer(argument_ptr.get(), workspace.GetDeviceBuffer());
|
||||
|
||||
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, false});
|
||||
}
|
||||
|
||||
std::cout << "Done" << std::endl;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -1,5 +0,0 @@
|
||||
add_executable(client_max_pool2d_fwd max_pool2d_fwd.cpp)
|
||||
target_link_libraries(client_max_pool2d_fwd PRIVATE composable_kernel::device_operations)
|
||||
|
||||
add_executable(client_avg_pool3d_fwd avg_pool3d_fwd.cpp)
|
||||
target_link_libraries(client_avg_pool3d_fwd PRIVATE composable_kernel::device_operations)
|
||||
@@ -8,7 +8,7 @@
|
||||
#include "ck/ck.hpp"
|
||||
#include "ck/utility/reduction_enums.hpp"
|
||||
#include "ck/tensor_operation/gpu/device/impl/device_pool2d_fwd_nhwc_nhwc.hpp"
|
||||
#include "ck/tensor_operation/gpu/device/impl/device_index_pool_bwd_impl.hpp"
|
||||
#include "ck/tensor_operation/gpu/device/impl/device_max_pool_bwd_impl.hpp"
|
||||
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
||||
|
||||
#include "ck/library/utility/check_err.hpp"
|
||||
@@ -60,7 +60,7 @@ bool maxpool_bwd_test(bool do_verification,
|
||||
1>; // InSrcOutDstVectorSize
|
||||
|
||||
using DeviceMaxPoolBwdInstance = ck::tensor_operation::device::
|
||||
DeviceIndexPoolBwdImpl<DOutDataType, IndexDataType, DInDataType, 4>;
|
||||
DeviceMaxPoolBwdImpl<DOutDataType, IndexDataType, DInDataType, 4>;
|
||||
|
||||
const ck::index_t Ys = (Y - 1) * window_dilation_h + 1;
|
||||
const ck::index_t Xs = (X - 1) * window_dilation_w + 1;
|
||||
@@ -155,7 +155,8 @@ bool maxpool_bwd_test(bool do_verification,
|
||||
dout_n_c_ho_wo.mDesc.GetElementSpaceSize(),
|
||||
din_n_c_hi_wi_device.mDesc.GetElementSpaceSize(),
|
||||
window_spatial_lengths,
|
||||
window_strides);
|
||||
window_strides,
|
||||
window_dilations);
|
||||
|
||||
if(!pool_bwd.IsSupportedArgument(pool_bwd_argument_ptr.get()))
|
||||
{
|
||||
|
||||
@@ -13,7 +13,7 @@ namespace device {
|
||||
|
||||
// For pooling which used indexable operation, such as MaxPool, MinPool...etc
|
||||
template <typename DOutDataType, typename IndexDataType, typename DInDataType>
|
||||
struct DeviceIndexPoolBwd : public BaseOperator
|
||||
struct DeviceMaxPoolBwd : public BaseOperator
|
||||
{
|
||||
virtual std::unique_ptr<BaseArgument>
|
||||
MakeArgumentPointer(const void* p_dout,
|
||||
@@ -22,7 +22,8 @@ struct DeviceIndexPoolBwd : public BaseOperator
|
||||
index_t dout_length,
|
||||
index_t din_length,
|
||||
std::vector<ck::index_t> window_lengths,
|
||||
std::vector<ck::index_t> window_strides) = 0;
|
||||
std::vector<ck::index_t> window_strides,
|
||||
std::vector<ck::index_t> window_dilations) = 0;
|
||||
|
||||
virtual std::unique_ptr<BaseInvoker> MakeInvokerPointer() = 0;
|
||||
};
|
||||
@@ -8,7 +8,7 @@
|
||||
|
||||
#include "ck/tensor_description/tensor_descriptor.hpp"
|
||||
#include "ck/tensor_description/tensor_descriptor_helper.hpp"
|
||||
#include "ck/tensor_operation/gpu/device/device_index_pool_bwd.hpp"
|
||||
#include "ck/tensor_operation/gpu/device/device_max_pool_bwd.hpp"
|
||||
#include "ck/tensor_operation/gpu/grid/gridwise_put_element_1d.hpp"
|
||||
#include "ck/tensor_operation/gpu/grid/gridwise_elementwise_1d.hpp"
|
||||
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
||||
@@ -25,7 +25,7 @@ template <typename DOutDataType,
|
||||
typename IndexDataType,
|
||||
typename DInDataType,
|
||||
ck::index_t InOutVectorSize>
|
||||
struct DeviceIndexPoolBwdImpl : public DeviceIndexPoolBwd<DOutDataType, IndexDataType, DInDataType>
|
||||
struct DeviceMaxPoolBwdImpl : public DeviceMaxPoolBwd<DOutDataType, IndexDataType, DInDataType>
|
||||
{
|
||||
using DInDataType_AutomicAddPreCast =
|
||||
conditional_t<is_same_v<DInDataType, float> || is_same_v<DInDataType, double>,
|
||||
@@ -91,7 +91,8 @@ struct DeviceIndexPoolBwdImpl : public DeviceIndexPoolBwd<DOutDataType, IndexDat
|
||||
index_t dout_length,
|
||||
index_t din_length,
|
||||
const std::vector<ck::index_t>& window_lengths,
|
||||
const std::vector<ck::index_t>& window_strides)
|
||||
const std::vector<ck::index_t>& window_strides,
|
||||
const std::vector<ck::index_t>& window_dilations)
|
||||
: p_dout_{p_dout},
|
||||
p_indices_{p_indices},
|
||||
p_din_{p_din},
|
||||
@@ -102,7 +103,8 @@ struct DeviceIndexPoolBwdImpl : public DeviceIndexPoolBwd<DOutDataType, IndexDat
|
||||
{
|
||||
for(size_t i = 0; i < window_lengths.size(); ++i)
|
||||
{
|
||||
windowOverlap_ |= window_lengths.at(i) > window_strides.at(i);
|
||||
auto eff = (window_lengths.at(i) - 1) * window_dilations.at(i) + 1;
|
||||
windowOverlap_ |= eff > window_strides.at(i);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -228,6 +230,11 @@ struct DeviceIndexPoolBwdImpl : public DeviceIndexPoolBwd<DOutDataType, IndexDat
|
||||
}
|
||||
else
|
||||
{
|
||||
hip_check_error(hipMemsetAsync(arg.p_din_,
|
||||
0,
|
||||
arg.din_length_raw_ * sizeof(DInDataType),
|
||||
stream_config.stream_id_));
|
||||
|
||||
const auto put_kernel = kernel_put_element_1d<GridwisePutElementSet,
|
||||
InOutGrid1dDesc,
|
||||
DOutDataType,
|
||||
@@ -292,7 +299,8 @@ struct DeviceIndexPoolBwdImpl : public DeviceIndexPoolBwd<DOutDataType, IndexDat
|
||||
index_t dout_length,
|
||||
index_t din_length,
|
||||
std::vector<ck::index_t> window_lengths,
|
||||
std::vector<ck::index_t> window_strides) override
|
||||
std::vector<ck::index_t> window_strides,
|
||||
std::vector<ck::index_t> window_dilations) override
|
||||
{
|
||||
// Assume p_dout, p_indices, p_din are packed memory space, dout_length and din_length are
|
||||
// physical size of the packed tensor
|
||||
@@ -302,7 +310,8 @@ struct DeviceIndexPoolBwdImpl : public DeviceIndexPoolBwd<DOutDataType, IndexDat
|
||||
dout_length,
|
||||
din_length,
|
||||
window_lengths,
|
||||
window_strides);
|
||||
window_strides,
|
||||
window_dilations);
|
||||
}
|
||||
|
||||
std::unique_ptr<BaseInvoker> MakeInvokerPointer() override
|
||||
@@ -116,7 +116,15 @@ struct Max
|
||||
template <typename T>
|
||||
__host__ __device__ static constexpr T GetIdentityValue()
|
||||
{
|
||||
return NumericLimits<T>::Lowest();
|
||||
if constexpr(is_same_v<T, bhalf_t>)
|
||||
{
|
||||
float val = NumericLimits<float>::Lowest();
|
||||
return type_convert<bhalf_t>(val);
|
||||
}
|
||||
else
|
||||
{
|
||||
return NumericLimits<T>::Lowest();
|
||||
}
|
||||
};
|
||||
|
||||
__host__ __device__ static constexpr bool
|
||||
@@ -138,6 +146,15 @@ struct Max
|
||||
a = b;
|
||||
}
|
||||
|
||||
__host__ __device__ inline constexpr void operator()(bhalf_t& a, bhalf_t b) const
|
||||
{
|
||||
float a_ = type_convert<float>(a);
|
||||
float b_ = type_convert<float>(b);
|
||||
|
||||
if(a_ < b_)
|
||||
a = b;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
__host__ __device__ inline constexpr void operator()(T& a, T b, bool& changed) const
|
||||
{
|
||||
@@ -152,6 +169,18 @@ struct Max
|
||||
changed = true;
|
||||
}
|
||||
}
|
||||
|
||||
__host__ __device__ inline constexpr void operator()(bhalf_t& a, bhalf_t b, bool& changed) const
|
||||
{
|
||||
float a_ = type_convert<float>(a);
|
||||
float b_ = type_convert<float>(b);
|
||||
|
||||
if(a_ < b_)
|
||||
{
|
||||
a = b;
|
||||
changed = true;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
struct Min
|
||||
@@ -159,6 +188,15 @@ struct Min
|
||||
template <typename T>
|
||||
__host__ __device__ static constexpr T GetIdentityValue()
|
||||
{
|
||||
if constexpr(is_same_v<T, bhalf_t>)
|
||||
{
|
||||
float val = NumericLimits<float>::Max();
|
||||
return type_convert<bhalf_t>(val);
|
||||
}
|
||||
else
|
||||
{
|
||||
return NumericLimits<T>::Max();
|
||||
}
|
||||
return NumericLimits<T>::Max();
|
||||
};
|
||||
|
||||
@@ -181,6 +219,15 @@ struct Min
|
||||
a = b;
|
||||
}
|
||||
|
||||
__host__ __device__ inline constexpr void operator()(bhalf_t& a, bhalf_t b) const
|
||||
{
|
||||
float a_ = type_convert<float>(a);
|
||||
float b_ = type_convert<float>(b);
|
||||
|
||||
if(a_ > b_)
|
||||
a = b;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
__host__ __device__ inline constexpr void operator()(T& a, T b, bool& changed) const
|
||||
{
|
||||
@@ -195,6 +242,18 @@ struct Min
|
||||
changed = true;
|
||||
}
|
||||
}
|
||||
|
||||
__host__ __device__ inline constexpr void operator()(bhalf_t& a, bhalf_t b, bool& changed) const
|
||||
{
|
||||
float a_ = type_convert<float>(a);
|
||||
float b_ = type_convert<float>(b);
|
||||
|
||||
if(a_ > b_)
|
||||
{
|
||||
a = b;
|
||||
changed = true;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
struct AMax
|
||||
|
||||
@@ -53,7 +53,16 @@ struct ReferenceMaxPoolBwd : public device::BaseOperator
|
||||
{
|
||||
int index = arg.indices_.mData[i];
|
||||
if(index >= 0 && index < din_length)
|
||||
buf[index] += ck::type_convert<ConputeDataType>(arg.dout_.mData[i]);
|
||||
{
|
||||
if constexpr(is_same_v<ConputeDataType, bhalf_t>)
|
||||
{
|
||||
float buf_val = ck::type_convert<float>(buf[index]);
|
||||
buf_val += ck::type_convert<float>(arg.dout_.mData[i]);
|
||||
buf[index] = ck::type_convert<ConputeDataType>(buf_val);
|
||||
}
|
||||
else
|
||||
buf[index] += ck::type_convert<ConputeDataType>(arg.dout_.mData[i]);
|
||||
}
|
||||
}
|
||||
|
||||
for(int i = 0; i < din_length; ++i)
|
||||
|
||||
@@ -256,10 +256,12 @@ struct ReferencePoolingFwd : public device::BaseOperator
|
||||
|
||||
for(ck::index_t y = 0; y < arg.window_spatial_lengths_[0]; ++y)
|
||||
{
|
||||
ck::index_t hi = ho * arg.window_strides_[0] + y - arg.in_left_pads_[0];
|
||||
ck::index_t hi = ho * arg.window_strides_[0] +
|
||||
y * arg.window_dilations_[0] - arg.in_left_pads_[0];
|
||||
for(ck::index_t x = 0; x < arg.window_spatial_lengths_[1]; ++x)
|
||||
{
|
||||
ck::index_t wi = wo * arg.window_strides_[1] + x - arg.in_left_pads_[1];
|
||||
ck::index_t wi = wo * arg.window_strides_[1] +
|
||||
x * arg.window_dilations_[1] - arg.in_left_pads_[1];
|
||||
if(hi >= 0 &&
|
||||
hi < static_cast<ck::index_t>(arg.in_.mDesc.GetLengths()[2]) &&
|
||||
wi >= 0 &&
|
||||
|
||||
@@ -0,0 +1,59 @@
|
||||
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck/tensor_operation/gpu/device/device_avgpool_bwd.hpp"
#include "ck/library/tensor_operation_instance/device_operation_instance_factory.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

#ifdef CK_ENABLE_FP16
void add_device_avgpool_bwd_ndhwc_f16_instances(
    std::vector<std::unique_ptr<DeviceAvgPoolBwd<3, F16, F16, NDHWC, NDHWC>>>&);
#endif
#ifdef CK_ENABLE_BF16
void add_device_avgpool_bwd_ndhwc_bf16_instances(
    std::vector<std::unique_ptr<DeviceAvgPoolBwd<3, BF16, BF16, NDHWC, NDHWC>>>&);
#endif
#ifdef CK_ENABLE_FP32
void add_device_avgpool_bwd_ndhwc_f32_instances(
    std::vector<std::unique_ptr<DeviceAvgPoolBwd<3, F32, F32, NDHWC, NDHWC>>>&);
#endif

/// Factory specialization returning every registered 3D average-pool backward
/// instance matching the requested dout/din data types and (NDHWC) layouts.
/// An empty vector is returned for unsupported type/layout combinations.
template <typename DOutDataType, typename DInDataType, typename InLayout, typename OutLayout>
struct DeviceOperationInstanceFactory<
    ck::tensor_operation::device::
        DeviceAvgPoolBwd<3, DOutDataType, DInDataType, InLayout, OutLayout>>
{
    using DeviceOp = DeviceAvgPoolBwd<3, DOutDataType, DInDataType, InLayout, OutLayout>;

    static auto GetInstances()
    {
        std::vector<std::unique_ptr<DeviceOp>> op_ptrs;

        if constexpr(is_same_v<InLayout, NDHWC> && is_same_v<OutLayout, NDHWC>)
        {
            // Each data type uses an independent `if constexpr` rather than an
            // `else if` chain: the branches live in separate #ifdef regions, so
            // an `else` would dangle (compile error) whenever the preceding
            // CK_ENABLE_* macro is disabled. The conditions are mutually
            // exclusive, so at most one branch is instantiated either way.
#ifdef CK_ENABLE_FP16
            if constexpr(is_same_v<DOutDataType, F16> && is_same_v<DInDataType, F16>)
                add_device_avgpool_bwd_ndhwc_f16_instances(op_ptrs);
#endif
#ifdef CK_ENABLE_BF16
            if constexpr(is_same_v<DOutDataType, BF16> && is_same_v<DInDataType, BF16>)
                add_device_avgpool_bwd_ndhwc_bf16_instances(op_ptrs);
#endif
#ifdef CK_ENABLE_FP32
            if constexpr(is_same_v<DOutDataType, F32> && is_same_v<DInDataType, F32>)
                add_device_avgpool_bwd_ndhwc_f32_instances(op_ptrs);
#endif
        }

        return op_ptrs;
    }
};

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
|
||||
@@ -0,0 +1,58 @@
|
||||
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck/tensor_operation/gpu/device/device_max_pool_bwd.hpp"
#include "ck/library/tensor_operation_instance/device_operation_instance_factory.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

#ifdef CK_ENABLE_FP16
void add_device_maxpool_bwd_f16_instances(
    std::vector<std::unique_ptr<DeviceMaxPoolBwd<F16, I32, F16>>>&);
#endif
#ifdef CK_ENABLE_BF16
void add_device_maxpool_bwd_bf16_instances(
    std::vector<std::unique_ptr<DeviceMaxPoolBwd<BF16, I32, BF16>>>&);
#endif
#ifdef CK_ENABLE_FP32
void add_device_maxpool_bwd_f32_instances(
    std::vector<std::unique_ptr<DeviceMaxPoolBwd<F32, I32, F32>>>&);
#endif

/// Factory specialization returning every registered max-pool backward
/// instance for the requested dout/index/din data types (I32 indices only).
/// An empty vector is returned for unsupported combinations.
template <typename DOutDataType, typename IndexDataType, typename DInDataType>
struct DeviceOperationInstanceFactory<
    ck::tensor_operation::device::DeviceMaxPoolBwd<DOutDataType, IndexDataType, DInDataType>>
{
    using DeviceOp = DeviceMaxPoolBwd<DOutDataType, IndexDataType, DInDataType>;

    static auto GetInstances()
    {
        std::vector<std::unique_ptr<DeviceOp>> op_ptrs;

        // Independent `if constexpr` per type (not an `else if` chain): the
        // branches sit in separate #ifdef regions, so an `else` would dangle
        // and break the build whenever the preceding CK_ENABLE_* macro is off.
        // The conditions are mutually exclusive, so behavior is unchanged.
#ifdef CK_ENABLE_FP16
        if constexpr(is_same_v<DOutDataType, F16> && is_same_v<DInDataType, F16> &&
                     is_same_v<IndexDataType, I32>)
            add_device_maxpool_bwd_f16_instances(op_ptrs);
#endif
#ifdef CK_ENABLE_BF16
        if constexpr(is_same_v<DOutDataType, BF16> && is_same_v<DInDataType, BF16> &&
                     is_same_v<IndexDataType, I32>)
            add_device_maxpool_bwd_bf16_instances(op_ptrs);
#endif
#ifdef CK_ENABLE_FP32
        if constexpr(is_same_v<DOutDataType, F32> && is_same_v<DInDataType, F32> &&
                     is_same_v<IndexDataType, I32>)
            add_device_maxpool_bwd_f32_instances(op_ptrs);
#endif

        return op_ptrs;
    }
};

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
|
||||
@@ -37,6 +37,21 @@ void add_device_pool3d_fwd_ndhwc_index_f16_instances(
|
||||
std::vector<std::unique_ptr<
|
||||
DevicePoolFwd<InOutRank, WindowRank, F16, F16, I32, NDHWC, NDHWC, MaxOp, true>>>&);
|
||||
#endif
|
||||
#ifdef CK_ENABLE_BF16
|
||||
// BF16
|
||||
void add_device_pool3d_fwd_ndhwc_bf16_instances(
|
||||
std::vector<std::unique_ptr<
|
||||
DevicePoolFwd<InOutRank, WindowRank, BF16, BF16, I32, NDHWC, NDHWC, MaxOp, false>>>&);
|
||||
|
||||
void add_device_pool3d_fwd_ndhwc_bf16_instances(
|
||||
std::vector<std::unique_ptr<
|
||||
DevicePoolFwd<InOutRank, WindowRank, BF16, BF16, I32, NDHWC, NDHWC, AvgOp, false>>>&);
|
||||
|
||||
// BF16 - return index
|
||||
void add_device_pool3d_fwd_ndhwc_index_bf16_instances(
|
||||
std::vector<std::unique_ptr<
|
||||
DevicePoolFwd<InOutRank, WindowRank, BF16, BF16, I32, NDHWC, NDHWC, MaxOp, true>>>&);
|
||||
#endif
|
||||
#ifdef CK_ENABLE_FP32
|
||||
// FP32
|
||||
void add_device_pool3d_fwd_ndhwc_f32_instances(
|
||||
@@ -98,9 +113,23 @@ struct DeviceOperationInstanceFactory<ck::tensor_operation::device::DevicePoolFw
|
||||
}
|
||||
}
|
||||
#endif
|
||||
#ifdef CK_ENABLE_BF16
|
||||
else if constexpr(is_same_v<InDataType, BF16> && is_same_v<OutDataType, BF16> &&
|
||||
is_same_v<IndexDataType, I32>)
|
||||
{
|
||||
if constexpr(OutputIndex && ReduceOpId == MaxOp)
|
||||
{
|
||||
add_device_pool3d_fwd_ndhwc_index_bf16_instances(op_ptrs);
|
||||
}
|
||||
else
|
||||
{
|
||||
add_device_pool3d_fwd_ndhwc_bf16_instances(op_ptrs);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
#ifdef CK_ENABLE_FP32
|
||||
if constexpr(is_same_v<InDataType, F32> && is_same_v<OutDataType, F32> &&
|
||||
is_same_v<IndexDataType, I32>)
|
||||
else if constexpr(is_same_v<InDataType, F32> && is_same_v<OutDataType, F32> &&
|
||||
is_same_v<IndexDataType, I32>)
|
||||
{
|
||||
if constexpr(OutputIndex && ReduceOpId == MaxOp)
|
||||
{
|
||||
|
||||
@@ -0,0 +1,11 @@
|
||||
# Collect one avg-pool-3d backward instance source per data type enabled
# through DTYPES (all types when DTYPES is undefined).
set(DEVICE_AVGPOOL_BWD_INSTANCES)
foreach(dtype IN ITEMS fp16 bf16 fp32)
    if(DTYPES MATCHES "${dtype}" OR NOT DEFINED DTYPES)
        # File names spell the types "f16"/"bf16"/"f32".
        string(REPLACE "fp" "f" file_dtype "${dtype}")
        list(APPEND DEVICE_AVGPOOL_BWD_INSTANCES
             device_avg_pool3d_bwd_ndhwc_${file_dtype}_instance.cpp)
    endif()
endforeach()
add_instance_library(device_avg_pool3d_bwd_instance ${DEVICE_AVGPOOL_BWD_INSTANCES})
|
||||
@@ -0,0 +1,59 @@
|
||||
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_avgpool3d_bwd_ndhwc_ndhwc.hpp"
#include "ck/utility/data_type.hpp"

#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

using I32   = int32_t;
using F16   = ck::half_t;
using BF16  = ck::bhalf_t;
using F32   = float;
using NDHWC = ck::tensor_layout::convolution::NDHWC;

// All data types share the same list of kernel configurations; only the
// dout/din element types differ, and the compute type is always F32.
// NOTE(review): the trailing integers are kernel tuning parameters — see
// DeviceAvgPool3dBwd_NDHWC_NDHWC for their exact meaning.
template <typename DOutDataType, typename DInDataType>
using device_avgpool_bwd_ndhwc_instances_common =
    // clang-format off
    std::tuple <
        DeviceAvgPool3dBwd_NDHWC_NDHWC<DOutDataType, DInDataType, F32, 256, 256, 1, 1, 1, 1>,
        DeviceAvgPool3dBwd_NDHWC_NDHWC<DOutDataType, DInDataType, F32, 256, 256, 1, 2, 2, 2>,
        DeviceAvgPool3dBwd_NDHWC_NDHWC<DOutDataType, DInDataType, F32, 256, 256, 1, 4, 4, 4>,
        DeviceAvgPool3dBwd_NDHWC_NDHWC<DOutDataType, DInDataType, F32, 256, 256, 1, 8, 8, 8>,
        DeviceAvgPool3dBwd_NDHWC_NDHWC<DOutDataType, DInDataType, F32, 256,  32, 8, 8, 8, 8>
    // clang-format on
    >;

using device_avgpool_bwd_ndhwc_f16_instances = device_avgpool_bwd_ndhwc_instances_common<F16, F16>;

using device_avgpool_bwd_ndhwc_bf16_instances =
    device_avgpool_bwd_ndhwc_instances_common<BF16, BF16>;

using device_avgpool_bwd_ndhwc_f32_instances = device_avgpool_bwd_ndhwc_instances_common<F32, F32>;

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
|
||||
@@ -0,0 +1,20 @@
|
||||
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include "avg_pool3d_bwd_ndhwc_instance_common.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

// Register every bf16 NDHWC avg-pool-3d backward instance with the factory.
void add_device_avgpool_bwd_ndhwc_bf16_instances(
    std::vector<std::unique_ptr<DeviceAvgPoolBwd<3, BF16, BF16, NDHWC, NDHWC>>>& instance_list)
{
    add_device_operation_instances(instance_list,
                                   device_avgpool_bwd_ndhwc_bf16_instances{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
|
||||
@@ -0,0 +1,20 @@
|
||||
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include "avg_pool3d_bwd_ndhwc_instance_common.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

// Register every f16 NDHWC avg-pool-3d backward instance with the factory.
void add_device_avgpool_bwd_ndhwc_f16_instances(
    std::vector<std::unique_ptr<DeviceAvgPoolBwd<3, F16, F16, NDHWC, NDHWC>>>& instance_list)
{
    add_device_operation_instances(instance_list,
                                   device_avgpool_bwd_ndhwc_f16_instances{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
|
||||
@@ -0,0 +1,20 @@
|
||||
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include "avg_pool3d_bwd_ndhwc_instance_common.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

// Register every f32 NDHWC avg-pool-3d backward instance with the factory.
void add_device_avgpool_bwd_ndhwc_f32_instances(
    std::vector<std::unique_ptr<DeviceAvgPoolBwd<3, F32, F32, NDHWC, NDHWC>>>& instance_list)
{
    add_device_operation_instances(instance_list,
                                   device_avgpool_bwd_ndhwc_f32_instances{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
|
||||
@@ -0,0 +1,11 @@
|
||||
# Collect one max-pool backward instance source per data type enabled through
# DTYPES (all types when DTYPES is undefined).
set(DEVICE_MAXPOOL_BWD_INSTANCES)
foreach(dtype IN ITEMS fp16 bf16 fp32)
    if(DTYPES MATCHES "${dtype}" OR NOT DEFINED DTYPES)
        # File names spell the types "f16"/"bf16"/"f32".
        string(REPLACE "fp" "f" file_dtype "${dtype}")
        list(APPEND DEVICE_MAXPOOL_BWD_INSTANCES
             device_max_pool_bwd_${file_dtype}_instance.cpp)
    endif()
endforeach()
add_instance_library(device_max_pool_bwd_instance ${DEVICE_MAXPOOL_BWD_INSTANCES})
|
||||
@@ -0,0 +1,20 @@
|
||||
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include "max_pool_bwd_instance_common.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

// Register every bf16 max-pool backward instance (I32 indices) with the factory.
void add_device_maxpool_bwd_bf16_instances(
    std::vector<std::unique_ptr<DeviceMaxPoolBwd<BF16, I32, BF16>>>& instance_list)
{
    add_device_operation_instances(instance_list,
                                   device_maxpool_bwd_instances<BF16, I32, BF16>{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
|
||||
@@ -0,0 +1,20 @@
|
||||
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include "max_pool_bwd_instance_common.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

// Register every f16 max-pool backward instance (I32 indices) with the factory.
void add_device_maxpool_bwd_f16_instances(
    std::vector<std::unique_ptr<DeviceMaxPoolBwd<F16, I32, F16>>>& instance_list)
{
    add_device_operation_instances(instance_list,
                                   device_maxpool_bwd_instances<F16, I32, F16>{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
|
||||
@@ -0,0 +1,20 @@
|
||||
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include "max_pool_bwd_instance_common.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

// Register every f32 max-pool backward instance (I32 indices) with the factory.
void add_device_maxpool_bwd_f32_instances(
    std::vector<std::unique_ptr<DeviceMaxPoolBwd<F32, I32, F32>>>& instance_list)
{
    add_device_operation_instances(instance_list,
                                   device_maxpool_bwd_instances<F32, I32, F32>{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
|
||||
@@ -0,0 +1,35 @@
|
||||
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_max_pool_bwd_impl.hpp"
#include "ck/utility/data_type.hpp"

#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

using I32  = int32_t;
using F16  = ck::half_t;
using BF16 = ck::bhalf_t;
using F32  = float;

// Instance list shared by every max-pool backward data-type combination.
// NOTE(review): the trailing 1/2/4 is a tuning parameter of
// DeviceMaxPoolBwdImpl (presumably a vector access size) — see that class
// for its exact meaning.
template <typename DOut, typename Index, typename DIn>
using device_maxpool_bwd_instances =
    // clang-format off
    std::tuple <
        DeviceMaxPoolBwdImpl<DOut, Index, DIn, 1>,
        DeviceMaxPoolBwdImpl<DOut, Index, DIn, 2>,
        DeviceMaxPoolBwdImpl<DOut, Index, DIn, 4>
    // clang-format on
    >;

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
|
||||
@@ -3,6 +3,10 @@ if(DTYPES MATCHES "fp16" OR NOT DEFINED DTYPES)
|
||||
list(APPEND DEVICE_POOL3D_FWD_INSTANCES device_avg_pool3d_fwd_ndhwc_f16_instance.cpp
|
||||
device_max_pool3d_fwd_ndhwc_f16_instance.cpp)
|
||||
endif()
|
||||
if(DTYPES MATCHES "bf16" OR NOT DEFINED DTYPES)
|
||||
list(APPEND DEVICE_POOL3D_FWD_INSTANCES device_avg_pool3d_fwd_ndhwc_bf16_instance.cpp
|
||||
device_max_pool3d_fwd_ndhwc_bf16_instance.cpp)
|
||||
endif()
|
||||
if(DTYPES MATCHES "fp32" OR NOT DEFINED DTYPES)
|
||||
list(APPEND DEVICE_POOL3D_FWD_INSTANCES device_avg_pool3d_fwd_ndhwc_f32_instance.cpp
|
||||
device_max_pool3d_fwd_ndhwc_f32_instance.cpp)
|
||||
|
||||
@@ -0,0 +1,25 @@
|
||||
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include "pool_fwd_instance_common.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

static constexpr auto ReduceOpId = ck::ReduceTensorOp::AVG;

// Register every bf16 NDHWC 3D average-pool forward instance. Note the
// instances accumulate in F32 (the F32 compute-type template argument).
void add_device_pool3d_fwd_ndhwc_bf16_instances(
    std::vector<
        std::unique_ptr<DevicePoolFwd<5, 3, BF16, BF16, I32, NDHWC, NDHWC, ReduceOpId, false>>>&
        instance_list)
{
    add_device_operation_instances(
        instance_list,
        device_pool3d_fwd_ndhwc_instances<BF16, BF16, I32, F32, ReduceOpId, false>{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
|
||||
@@ -0,0 +1,34 @@
|
||||
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include "pool_fwd_instance_common.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

static constexpr auto ReduceOpId = ck::ReduceTensorOp::MAX;

// Register every bf16 NDHWC 3D max-pool forward instance (no index output).
void add_device_pool3d_fwd_ndhwc_bf16_instances(
    std::vector<
        std::unique_ptr<DevicePoolFwd<5, 3, BF16, BF16, I32, NDHWC, NDHWC, ReduceOpId, false>>>&
        instance_list)
{
    add_device_operation_instances(
        instance_list,
        device_pool3d_fwd_ndhwc_instances<BF16, BF16, I32, BF16, ReduceOpId, false>{});
}

// Register every bf16 NDHWC 3D max-pool forward instance that also emits the
// argmax indices (needed to drive max-pool backward).
void add_device_pool3d_fwd_ndhwc_index_bf16_instances(
    std::vector<
        std::unique_ptr<DevicePoolFwd<5, 3, BF16, BF16, I32, NDHWC, NDHWC, ReduceOpId, true>>>&
        instance_list)
{
    add_device_operation_instances(
        instance_list,
        device_pool3d_fwd_ndhwc_instances<BF16, BF16, I32, BF16, ReduceOpId, true>{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
|
||||
@@ -17,6 +17,7 @@ namespace instance {
|
||||
|
||||
using I32 = int32_t;
|
||||
using F16 = ck::half_t;
|
||||
using BF16 = ck::bhalf_t;
|
||||
using F32 = float;
|
||||
using NDHWC = ck::tensor_layout::convolution::NDHWC;
|
||||
|
||||
|
||||
253
profiler/include/profiler/profile_avg_pool3d_bwd_impl.hpp
Normal file
253
profiler/include/profiler/profile_avg_pool3d_bwd_impl.hpp
Normal file
@@ -0,0 +1,253 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <iomanip>
|
||||
|
||||
#include "ck/ck.hpp"
|
||||
#include "ck/library/tensor_operation_instance/gpu/pool3d_fwd.hpp"
|
||||
#include "ck/library/tensor_operation_instance/gpu/avg_pool3d_bwd.hpp"
|
||||
#include "ck/library/utility/check_err.hpp"
|
||||
#include "ck/library/utility/device_memory.hpp"
|
||||
#include "ck/library/utility/host_tensor.hpp"
|
||||
#include "ck/library/utility/host_tensor_generator.hpp"
|
||||
#include "ck/library/utility/literals.hpp"
|
||||
#include "ck/library/reference_tensor_operation/cpu/reference_avgpool_bwd.hpp"
|
||||
|
||||
namespace ck {
|
||||
namespace profiler {
|
||||
|
||||
template <typename TensorLayout>
|
||||
std::vector<ck::index_t> f_tensor_strides_ncdhw(ck::index_t N_,
|
||||
ck::index_t C_,
|
||||
ck::index_t D,
|
||||
ck::index_t H,
|
||||
ck::index_t W,
|
||||
TensorLayout layout)
|
||||
{
|
||||
using namespace ck::literals;
|
||||
(void)N_;
|
||||
if constexpr(ck::is_same<decltype(layout), ck::tensor_layout::convolution::NDHWC>::value)
|
||||
return {D * C_ * H * W, 1_uz, C_ * H * W, W * C_, C_};
|
||||
else
|
||||
throw std::runtime_error("not supported yet");
|
||||
};
|
||||
|
||||
/// Profile all registered 3D average-pool backward device instances for the
/// given problem size, optionally verifying each against a CPU reference.
///
/// @param do_verification        run the CPU reference and compare each kernel's output
/// @param init_method            0: constant, 1: integer, otherwise: small-float dout init
/// @param do_log                 dump device/host result tensors when verifying
/// @param time_kernel            measure and print per-instance timing
/// @param in_length              input (din) lengths in NCDHW order, rank 5
/// @param window_spatial_lengths / window_strides / window_dilations /
///        input_left_pads / input_right_pads  rank-3 (D,H,W) pooling parameters
/// @return true when at least one instance ran and all verified instances passed
///
/// NOTE(review): the ComputeDataType template parameter is not referenced in
/// this body — presumably kept for signature symmetry with other profilers.
template <typename DOutDataType,
          typename DInDataType,
          typename ComputeDataType,
          typename DOutLayout,
          typename DInLayout>
bool profile_avg_pool3d_bwd_impl(int do_verification,
                                 int init_method,
                                 bool do_log,
                                 bool time_kernel,
                                 std::vector<index_t> in_length, // NCDHW
                                 std::vector<index_t> window_spatial_lengths,
                                 std::vector<index_t> window_strides,
                                 std::vector<index_t> window_dilations,
                                 std::vector<index_t> input_left_pads,
                                 std::vector<index_t> input_right_pads)
{
    constexpr index_t InOutRank  = 5;
    constexpr index_t WindowRank = 3;

    // Reject malformed parameter vectors up front.
    if(in_length.size() != InOutRank || window_spatial_lengths.size() != WindowRank ||
       window_strides.size() != WindowRank || window_dilations.size() != WindowRank ||
       input_left_pads.size() != WindowRank || input_right_pads.size() != WindowRank)
    {
        std::cout << "Parameter is incorrect" << std::endl;
        return false;
    }

    std::vector<index_t> out_length(InOutRank);

    int N = in_length[0];
    int C = in_length[1];

    out_length[0] = N;
    out_length[1] = C;

    // Calculate Do, Ho, Wo from the standard pooling output-size formula,
    // using the dilated effective window size.
    for(int i = 2; i < InOutRank; ++i)
    {
        auto pad1             = input_left_pads[i - 2];
        auto pad2             = input_right_pads[i - 2];
        auto windows_size     = window_spatial_lengths[i - 2];
        auto windows_stride   = window_strides[i - 2];
        auto windows_dilation = window_dilations[i - 2];
        // effective window extent once dilation is applied
        auto eff      = (windows_size - 1) * windows_dilation + 1;
        out_length[i] = (in_length[i] + pad1 + pad2 - eff) / windows_stride + 1;
    }

    int Di = in_length[2];
    int Hi = in_length[3];
    int Wi = in_length[4];
    int Do = out_length[2];
    int Ho = out_length[3];
    int Wo = out_length[4];

    // Host tensors are described with NCDHW logical order but NDHWC-packed
    // strides (matching f_tensor_strides_ncdhw for the NDHWC layout).
    auto f_host_tensor_descriptor =
        [](std::size_t N_, std::size_t C_, std::size_t D, std::size_t H, std::size_t W) {
            using namespace ck::literals;

            return HostTensorDescriptor({N_, C_, D, H, W},
                                        {D * C_ * H * W, 1_uz, C_ * H * W, W * C_, C_});
        };

    Tensor<DOutDataType> dout_n_c_do_ho_wo(f_host_tensor_descriptor(N, C, Do, Ho, Wo));
    Tensor<DInDataType> din_n_c_di_hi_wi_device(f_host_tensor_descriptor(N, C, Di, Hi, Wi));
    Tensor<DInDataType> din_n_c_di_hi_wi_host(f_host_tensor_descriptor(N, C, Di, Hi, Wi));

    // Only the output gradient needs initialization; din is produced.
    switch(init_method)
    {
    case 0: dout_n_c_do_ho_wo.GenerateTensorValue(GeneratorTensor_1<DOutDataType>{}); break;
    case 1: dout_n_c_do_ho_wo.GenerateTensorValue(GeneratorTensor_2<DOutDataType>{-5, 5}); break;
    default: dout_n_c_do_ho_wo.GenerateTensorValue(GeneratorTensor_3<DOutDataType>{-0.5, 0.5});
    }

    DeviceMem dout_device_buf(sizeof(DOutDataType) * dout_n_c_do_ho_wo.mDesc.GetElementSpaceSize());
    DeviceMem din_device_buf(sizeof(DInDataType) *
                             din_n_c_di_hi_wi_device.mDesc.GetElementSpaceSize());

    dout_device_buf.ToDevice(dout_n_c_do_ho_wo.mData.data());

    using DeviceOp = ck::tensor_operation::device::
        DeviceAvgPoolBwd<3, DOutDataType, DInDataType, DOutLayout, DInLayout>;

    // get device op instances
    const auto instance_ptrs =
        ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
            DeviceOp>::GetInstances();

    std::cout << "found " << instance_ptrs.size() << " instances" << std::endl;

    std::string best_instance_name;
    float best_avg_time   = std::numeric_limits<float>::max();
    float best_gb_per_sec = 0;

    // Compute the CPU reference once; every instance is compared against it.
    if(do_verification)
    {
        using ReferencePoolingBwdInstance =
            ck::tensor_operation::host::ReferenceAvgPoolBwd<3, DInDataType, DOutDataType>;

        ReferencePoolingBwdInstance ref_pooling_bwd;
        auto ref_pooling_bwd_argument = ref_pooling_bwd.MakeArgument(din_n_c_di_hi_wi_host,
                                                                    dout_n_c_do_ho_wo,
                                                                    window_spatial_lengths,
                                                                    window_strides,
                                                                    window_dilations,
                                                                    input_left_pads,
                                                                    input_right_pads);

        auto ref_invoker = ref_pooling_bwd.MakeInvoker();
        ref_invoker.Run(ref_pooling_bwd_argument);
    }

    // Count of instances that actually support (and thus ran) this problem.
    int num_kernel = 0;

    for(auto& inst_ptr : instance_ptrs)
    {
        auto argument_ptr = inst_ptr->MakeArgumentPointer(
            static_cast<DOutDataType*>(dout_device_buf.GetDeviceBuffer()),
            static_cast<DInDataType*>(din_device_buf.GetDeviceBuffer()),
            {N, C, Do, Ho, Wo},
            {N, C, Di, Hi, Wi},
            f_tensor_strides_ncdhw(N, C, Do, Ho, Wo, DOutLayout{}),
            f_tensor_strides_ncdhw(N, C, Di, Hi, Wi, DInLayout{}),
            window_spatial_lengths,
            window_strides,
            window_dilations,
            input_left_pads,
            input_right_pads);

        if(inst_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            ++num_kernel;
        }
        else
        {
            if(time_kernel)
            {
                std::cout << inst_ptr->GetTypeString() << " skipped due to unsupported argument: ";
                LogRange(std::cout << "doutput lengths = ", out_length, ", ") << std::endl;
            }

            continue;
        }

        // din must start from zero before each run since the kernel
        // accumulates into it.
        din_device_buf.SetZero();

        auto invoker_ptr = inst_ptr->MakeInvokerPointer();
        float avg_time   = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});

        // Effective bandwidth: read dout once + write din once.
        std::size_t num_bytes =
            dout_n_c_do_ho_wo.mDesc.GetElementSize() * sizeof(DOutDataType) +
            din_n_c_di_hi_wi_device.mDesc.GetElementSize() * sizeof(DInDataType);

        float gb_per_sec = num_bytes / 1.E6 / avg_time;

        if(time_kernel)
            std::cout << "Perf: " << std::setw(10) << avg_time << " ms, " << gb_per_sec << " GB/s, "
                      << inst_ptr->GetTypeString() << std::endl;

        if(avg_time < best_avg_time)
        {
            best_instance_name = inst_ptr->GetTypeString();
            best_avg_time      = avg_time;
            best_gb_per_sec    = gb_per_sec;
        }

        if(do_verification)
        {
            din_device_buf.FromDevice(din_n_c_di_hi_wi_device.mData.data());
            bool pass = ck::utils::check_err(din_n_c_di_hi_wi_device.mData,
                                             din_n_c_di_hi_wi_host.mData,
                                             "Error: Incorrect results",
                                             1e-3,
                                             1e-3);

            if(do_log)
            {
                LogRangeAsType<float>(
                    std::cout << "din_n_c_di_hi_wi_device: ", din_n_c_di_hi_wi_device.mData, ",")
                    << std::endl;

                LogRangeAsType<float>(
                    std::cout << "din_n_c_di_hi_wi_host: ", din_n_c_di_hi_wi_host.mData, ",")
                    << std::endl;
            }

            // Fail fast on the first mismatching instance.
            if(!pass)
            {
                std::cout << inst_ptr->GetTypeString() << " failed verification: ";
                LogRange(std::cout << "doutput lengths = [", out_length, ", ") << "]." << std::endl;
                return false;
            }
            else
            {
                if(time_kernel)
                    std::cout << "pass" << std::endl;
            }
        }
    }

    if(time_kernel)
    {
        LogRange(std::cout << "length = ", out_length, ",") << std::endl;
        std::cout << "best perf = " << best_avg_time << " ms, " << best_gb_per_sec << " GB/s, "
                  << best_instance_name << std::endl;
    }

    if(num_kernel == 0)
    {
        std::cout << "Error: No kernel is applicable" << std::endl;
        return false;
    }

    return true;
}
|
||||
|
||||
} // namespace profiler
|
||||
} // namespace ck
|
||||
288
profiler/include/profiler/profile_max_pool3d_bwd_impl.hpp
Normal file
288
profiler/include/profiler/profile_max_pool3d_bwd_impl.hpp
Normal file
@@ -0,0 +1,288 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <iomanip>
|
||||
|
||||
#include "ck/ck.hpp"
|
||||
#include "ck/library/tensor_operation_instance/gpu/pool3d_fwd.hpp"
|
||||
#include "ck/library/tensor_operation_instance/gpu/max_pool_bwd.hpp"
|
||||
#include "ck/library/utility/check_err.hpp"
|
||||
#include "ck/library/utility/device_memory.hpp"
|
||||
#include "ck/library/utility/host_tensor.hpp"
|
||||
#include "ck/library/utility/host_tensor_generator.hpp"
|
||||
#include "ck/library/utility/literals.hpp"
|
||||
#include "ck/library/reference_tensor_operation/cpu/reference_pool_fwd.hpp"
|
||||
#include "ck/library/reference_tensor_operation/cpu/reference_maxpool_bwd.hpp"
|
||||
|
||||
namespace ck {
|
||||
namespace profiler {
|
||||
|
||||
template <typename InDataType,
|
||||
typename OutDataType,
|
||||
typename IndexDataType,
|
||||
typename DOutDataType,
|
||||
typename DInDataType,
|
||||
bool PropagateNan>
|
||||
bool profile_max_pool3d_bwd_impl(int do_verification,
|
||||
int init_method,
|
||||
bool do_log,
|
||||
bool time_kernel,
|
||||
std::vector<index_t> in_length, // NCDHW
|
||||
std::vector<index_t> window_spatial_lengths,
|
||||
std::vector<index_t> window_strides,
|
||||
std::vector<index_t> window_dilations,
|
||||
std::vector<index_t> input_left_pads,
|
||||
std::vector<index_t> input_right_pads)
|
||||
{
|
||||
// AtomicAdd only support f32 for now. ComputeDataType must be float32
|
||||
using ComputeDataType = float;
|
||||
|
||||
constexpr index_t InOutRank = 5;
|
||||
constexpr index_t WindowRank = 3;
|
||||
|
||||
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
|
||||
|
||||
if(in_length.size() != InOutRank || window_spatial_lengths.size() != WindowRank ||
|
||||
window_strides.size() != WindowRank || window_dilations.size() != WindowRank ||
|
||||
input_left_pads.size() != WindowRank || input_right_pads.size() != WindowRank)
|
||||
{
|
||||
std::cout << "Parameter is incorrect" << std::endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
std::vector<index_t> out_length(InOutRank);
|
||||
|
||||
int N = in_length[0];
|
||||
int C = in_length[1];
|
||||
|
||||
out_length[0] = N;
|
||||
out_length[1] = C;
|
||||
|
||||
// Calculate Do, Ho, Wo
|
||||
for(int i = 2; i < InOutRank; ++i)
|
||||
{
|
||||
auto pad1 = input_left_pads[i - 2];
|
||||
auto pad2 = input_right_pads[i - 2];
|
||||
auto windows_size = window_spatial_lengths[i - 2];
|
||||
auto windows_stride = window_strides[i - 2];
|
||||
auto windows_dilation = window_dilations[i - 2];
|
||||
auto eff = (windows_size - 1) * windows_dilation + 1;
|
||||
out_length[i] = (in_length[i] + pad1 + pad2 - eff) / windows_stride + 1;
|
||||
}
|
||||
|
||||
int Di = in_length[2];
|
||||
int Hi = in_length[3];
|
||||
int Wi = in_length[4];
|
||||
int Do = out_length[2];
|
||||
int Ho = out_length[3];
|
||||
int Wo = out_length[4];
|
||||
|
||||
auto f_host_tensor_descriptor =
|
||||
[](std::size_t N_, std::size_t C_, std::size_t D, std::size_t H, std::size_t W) {
|
||||
using namespace ck::literals;
|
||||
|
||||
return HostTensorDescriptor({N_, C_, D, H, W},
|
||||
{D * C_ * H * W, 1_uz, C_ * H * W, W * C_, C_});
|
||||
};
|
||||
|
||||
Tensor<InDataType> in_n_c_di_hi_wi(f_host_tensor_descriptor(N, C, Di, Hi, Wi));
|
||||
Tensor<OutDataType> out_n_c_do_ho_wo(f_host_tensor_descriptor(N, C, Do, Ho, Wo));
|
||||
Tensor<IndexDataType> out_indices_n_c_do_ho_wo(f_host_tensor_descriptor(N, C, Do, Ho, Wo));
|
||||
Tensor<DOutDataType> dout_n_c_do_ho_wo(f_host_tensor_descriptor(N, C, Do, Ho, Wo));
|
||||
Tensor<DInDataType> din_n_c_di_hi_wi_host(f_host_tensor_descriptor(N, C, Di, Hi, Wi));
|
||||
|
||||
Tensor<DInDataType> din_n_c_di_hi_wi_device(f_host_tensor_descriptor(N, C, Di, Hi, Wi));
|
||||
|
||||
switch(init_method)
|
||||
{
|
||||
case 0:
|
||||
in_n_c_di_hi_wi.GenerateTensorValue(GeneratorTensor_1<InDataType>{});
|
||||
dout_n_c_do_ho_wo.GenerateTensorValue(GeneratorTensor_1<DOutDataType>{});
|
||||
break;
|
||||
case 1:
|
||||
in_n_c_di_hi_wi.GenerateTensorValue(GeneratorTensor_2<InDataType>{-5, 5});
|
||||
dout_n_c_do_ho_wo.GenerateTensorValue(GeneratorTensor_2<DOutDataType>{-5, 5});
|
||||
break;
|
||||
default:
|
||||
in_n_c_di_hi_wi.GenerateTensorValue(GeneratorTensor_3<InDataType>{-0.5, 0.5});
|
||||
dout_n_c_do_ho_wo.GenerateTensorValue(GeneratorTensor_3<DOutDataType>{-0.5, 0.5});
|
||||
}
|
||||
|
||||
DeviceMem indices_device_buf(sizeof(IndexDataType) *
|
||||
out_indices_n_c_do_ho_wo.mDesc.GetElementSpaceSize());
|
||||
DeviceMem dout_device_buf(sizeof(DOutDataType) * dout_n_c_do_ho_wo.mDesc.GetElementSpaceSize());
|
||||
DeviceMem din_device_buf(sizeof(DInDataType) *
|
||||
din_n_c_di_hi_wi_device.mDesc.GetElementSpaceSize());
|
||||
|
||||
// Generate index data from forwarding
|
||||
{
|
||||
using ReferencePoolingFwdInstance =
|
||||
ck::tensor_operation::host::ReferencePoolingFwd<InOutRank,
|
||||
WindowRank,
|
||||
InDataType,
|
||||
OutDataType,
|
||||
ComputeDataType,
|
||||
IndexDataType,
|
||||
ck::ReduceTensorOp::MAX,
|
||||
false,
|
||||
true>;
|
||||
|
||||
ReferencePoolingFwdInstance ref_pooling_fwd;
|
||||
auto ref_pooling_fwd_argument = ref_pooling_fwd.MakeArgument(in_n_c_di_hi_wi,
|
||||
out_n_c_do_ho_wo,
|
||||
out_indices_n_c_do_ho_wo,
|
||||
window_spatial_lengths,
|
||||
window_strides,
|
||||
window_dilations,
|
||||
input_left_pads,
|
||||
input_right_pads);
|
||||
auto ref_pooling_fwd_invoker = ref_pooling_fwd.MakeInvoker();
|
||||
ref_pooling_fwd_invoker.Run(ref_pooling_fwd_argument);
|
||||
}
|
||||
|
||||
indices_device_buf.ToDevice(out_indices_n_c_do_ho_wo.mData.data());
|
||||
dout_device_buf.ToDevice(dout_n_c_do_ho_wo.mData.data());
|
||||
|
||||
using DeviceOp =
|
||||
ck::tensor_operation::device::DeviceMaxPoolBwd<DOutDataType, IndexDataType, DInDataType>;
|
||||
|
||||
// get device op instances
|
||||
const auto instance_ptrs =
|
||||
ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
||||
DeviceOp>::GetInstances();
|
||||
|
||||
std::cout << "found " << instance_ptrs.size() << " instances" << std::endl;
|
||||
|
||||
std::string best_instance_name;
|
||||
float best_avg_time = std::numeric_limits<float>::max();
|
||||
float best_gb_per_sec = 0;
|
||||
|
||||
if(do_verification)
|
||||
{
|
||||
using ReferencePoolingBwdInstance =
|
||||
ck::tensor_operation::host::ReferenceMaxPoolBwd<DOutDataType,
|
||||
IndexDataType,
|
||||
ComputeDataType,
|
||||
DInDataType,
|
||||
PassThrough>;
|
||||
|
||||
ReferencePoolingBwdInstance ref_pooling_bwd;
|
||||
auto ref_pooling_bwd_argument = ref_pooling_bwd.MakeArgument(
|
||||
dout_n_c_do_ho_wo, out_indices_n_c_do_ho_wo, din_n_c_di_hi_wi_host, PassThrough{});
|
||||
auto ref_invoker = ref_pooling_bwd.MakeInvoker();
|
||||
ref_invoker.Run(ref_pooling_bwd_argument);
|
||||
}
|
||||
|
||||
int num_kernel = 0;
|
||||
|
||||
for(auto& inst_ptr : instance_ptrs)
|
||||
{
|
||||
auto argument_ptr = inst_ptr->MakeArgumentPointer(
|
||||
static_cast<DOutDataType*>(dout_device_buf.GetDeviceBuffer()),
|
||||
static_cast<IndexDataType*>(indices_device_buf.GetDeviceBuffer()),
|
||||
static_cast<DInDataType*>(din_device_buf.GetDeviceBuffer()),
|
||||
dout_n_c_do_ho_wo.mDesc.GetElementSpaceSize(),
|
||||
din_n_c_di_hi_wi_device.mDesc.GetElementSpaceSize(),
|
||||
window_spatial_lengths,
|
||||
window_strides,
|
||||
window_dilations);
|
||||
|
||||
if(inst_ptr->IsSupportedArgument(argument_ptr.get()))
|
||||
{
|
||||
++num_kernel;
|
||||
}
|
||||
else
|
||||
{
|
||||
if(time_kernel)
|
||||
{
|
||||
std::cout << inst_ptr->GetTypeString() << " skipped due to unsupported argument: ";
|
||||
LogRange(std::cout << "doutput lengths = ", out_length, ", ") << std::endl;
|
||||
}
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
size_t workspace_sz = inst_ptr->GetWorkSpaceSize(argument_ptr.get());
|
||||
DeviceMem workspace_device_buf(workspace_sz);
|
||||
inst_ptr->SetWorkSpacePointer(argument_ptr.get(), workspace_device_buf.GetDeviceBuffer());
|
||||
|
||||
auto invoker_ptr = inst_ptr->MakeInvokerPointer();
|
||||
float avg_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
|
||||
|
||||
std::size_t num_bytes =
|
||||
dout_n_c_do_ho_wo.mDesc.GetElementSize() * sizeof(DOutDataType) +
|
||||
out_indices_n_c_do_ho_wo.mDesc.GetElementSize() * sizeof(IndexDataType) +
|
||||
din_n_c_di_hi_wi_device.mDesc.GetElementSize() * sizeof(DInDataType);
|
||||
|
||||
float gb_per_sec = num_bytes / 1.E6 / avg_time;
|
||||
|
||||
if(time_kernel)
|
||||
std::cout << "Perf: " << std::setw(10) << avg_time << " ms, " << gb_per_sec << " GB/s, "
|
||||
<< inst_ptr->GetTypeString() << std::endl;
|
||||
|
||||
if(avg_time < best_avg_time)
|
||||
{
|
||||
best_instance_name = inst_ptr->GetTypeString();
|
||||
best_avg_time = avg_time;
|
||||
best_gb_per_sec = gb_per_sec;
|
||||
}
|
||||
|
||||
if(do_verification)
|
||||
{
|
||||
din_device_buf.FromDevice(din_n_c_di_hi_wi_device.mData.data());
|
||||
|
||||
bool pass = ck::utils::check_err(din_n_c_di_hi_wi_device.mData,
|
||||
din_n_c_di_hi_wi_host.mData,
|
||||
"Error: Incorrect results",
|
||||
1e-3,
|
||||
1e-3);
|
||||
|
||||
if(do_log)
|
||||
{
|
||||
LogRangeAsType<float>(
|
||||
std::cout << "out_indices_n_c_do_ho_wo: ", out_indices_n_c_do_ho_wo.mData, ",")
|
||||
<< std::endl;
|
||||
|
||||
LogRangeAsType<float>(
|
||||
std::cout << "din_n_c_di_hi_wi_device: ", din_n_c_di_hi_wi_device.mData, ",")
|
||||
<< std::endl;
|
||||
|
||||
LogRangeAsType<float>(
|
||||
std::cout << "din_n_c_di_hi_wi_host: ", din_n_c_di_hi_wi_host.mData, ",")
|
||||
<< std::endl;
|
||||
}
|
||||
|
||||
if(!pass)
|
||||
{
|
||||
std::cout << inst_ptr->GetTypeString() << " failed verification: ";
|
||||
LogRange(std::cout << "doutput lengths = [", out_length, ", ") << "]." << std::endl;
|
||||
return false;
|
||||
}
|
||||
else
|
||||
{
|
||||
if(time_kernel)
|
||||
std::cout << "pass" << std::endl;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if(time_kernel)
|
||||
{
|
||||
LogRange(std::cout << "length = ", out_length, ",") << std::endl;
|
||||
std::cout << "best perf = " << best_avg_time << " ms, " << best_gb_per_sec << " GB/s, "
|
||||
<< best_instance_name << std::endl;
|
||||
}
|
||||
|
||||
if(num_kernel == 0)
|
||||
{
|
||||
std::cout << "Error: No kernel is applicable" << std::endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace profiler
|
||||
} // namespace ck
|
||||
@@ -19,6 +19,8 @@ set(PROFILER_SOURCES
|
||||
profile_groupnorm.cpp
|
||||
profile_layernorm.cpp
|
||||
profile_max_pool3d_fwd.cpp
|
||||
profile_avg_pool3d_bwd.cpp
|
||||
profile_max_pool3d_bwd.cpp
|
||||
profile_softmax.cpp
|
||||
profile_batchnorm_fwd.cpp
|
||||
profile_batchnorm_bwd.cpp
|
||||
@@ -76,6 +78,8 @@ target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_batchnorm_instance)
|
||||
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_contraction_bilinear_instance)
|
||||
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_contraction_scale_instance)
|
||||
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_pool3d_fwd_instance)
|
||||
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_avg_pool3d_bwd_instance)
|
||||
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_max_pool_bwd_instance)
|
||||
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv2d_bwd_data_instance)
|
||||
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv3d_bwd_data_instance)
|
||||
if(DL_KERNELS)
|
||||
|
||||
175
profiler/src/profile_avg_pool3d_bwd.cpp
Normal file
175
profiler/src/profile_avg_pool3d_bwd.cpp
Normal file
@@ -0,0 +1,175 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
#include <iostream>
|
||||
#include <vector>
|
||||
#include <unordered_map>
|
||||
|
||||
#include "profiler/data_type_enum.hpp"
|
||||
#include "profiler/profile_avg_pool3d_bwd_impl.hpp"
|
||||
#include "profiler_operation_registry.hpp"
|
||||
|
||||
using ck::index_t;
|
||||
|
||||
// Collects the pooling-geometry long options (--length, --wsize, --wstride,
// --wdilation, --pad1, --pad2) from the command line into integer vectors.
// NOTE(review): the name says "maxPool" but this translation unit profiles
// avg pool bwd — looks copy-pasted from the max-pool profiler; confirm
// before renaming (the struct is referenced below in this file).
struct maxPoolbwdArgParser
{
    std::unordered_map<std::string, std::vector<int>> long_opts = {{"length", {}},
                                                                   {"wsize", {}},
                                                                   {"wstride", {}},
                                                                   {"wdilation", {}},
                                                                   {"pad1", {}},
                                                                   {"pad2", {}}};

    // If argv[i] equals "--<key>", consume every following token up to (but
    // not including) the next "-"-prefixed one as an integer value for that
    // key. Returns true exactly when argv[i] matched the key.
    bool parse_opt(int argc, char* argv[], const std::string& key, int i)
    {
        if(std::string("--") + key != argv[i])
            return false;

        for(int j = i + 1; j < argc && argv[j][0] != '-'; ++j)
            long_opts[key].push_back(std::stoi(argv[j]));

        return true;
    }

    // Scan argv once per known option key; stop at the first match for each.
    void operator()(int argc, char* argv[])
    {
        for(auto& kv : long_opts)
            for(int i = 1; i < argc; ++i)
                if(parse_opt(argc, argv, kv.first, i))
                    break;
    }
};
|
||||
|
||||
// Print the command-line usage text for the avg_pool3d_bwd profiler op.
void print_help_avg_pool3d_bwd()
{
    // Adjacent string literals concatenate; the emitted bytes are unchanged.
    // clang-format off
    std::cout << "arg1: data type (0: fp16; 1: fp32; 5: bf16)\n"
                 "arg2: verification (0: no; 1: yes)\n"
                 "arg3: initialization (0: no init; 1: integer value; 2: decimal value)\n"
                 "arg4: print tensor value (0: no; 1: yes)\n"
                 "arg5: time kernel (0=no, 1=yes)\n"
                 "--length: input tensor length for NCDHW(e.g, --length 2 32 30 30 30) \n"
                 "--wsize: window size for ZYX (e.g, --wsize 2 2 2) \n"
                 "--wstride: window stride for DHW (e.g, --wstride 2 2 2) \n"
                 "--wdilation: window dilation for DHW (e.g, --wdilation 1 1 1) \n"
                 "--pad1: left side of padding in DHW (e.g, --pad1 1 1 1) \n"
                 "--pad2: right side of padding in DHW (e.g, --pad2 1 1 1) \n"
                 "eg: ckProfiler avg_pool3d_bwd 0 1 2 0 1 --length 2 32 30 30 30 --wsize 2 2 2 "
                 "--wstride 2 2 2 --wdilation 1 1 1 --pad1 1 1 1 --pad2 1 1 1"
              << std::endl;
    // clang-format on
}
|
||||
|
||||
int profile_avg_pool3d_bwd(int argc, char* argv[])
|
||||
{
|
||||
ck::DataTypeEnum data_type = ck::DataTypeEnum::Half;
|
||||
bool do_verification = true;
|
||||
int init_method = 0;
|
||||
bool do_log = false;
|
||||
bool time_kernel = true;
|
||||
|
||||
std::vector<index_t> in_length = {2, 32, 30, 30, 30};
|
||||
std::vector<index_t> wsize = {2, 2, 2};
|
||||
std::vector<index_t> wstride = {2, 2, 2};
|
||||
std::vector<index_t> wdilation = {1, 1, 1};
|
||||
std::vector<index_t> pad1 = {1, 1, 1};
|
||||
std::vector<index_t> pad2 = {1, 1, 1};
|
||||
|
||||
if(argc != 2 && argc != 33)
|
||||
{
|
||||
print_help_avg_pool3d_bwd();
|
||||
return 0;
|
||||
}
|
||||
else if(argc == 33)
|
||||
{
|
||||
data_type = static_cast<ck::DataTypeEnum>(std::stoi(argv[2]));
|
||||
do_verification = std::stoi(argv[3]);
|
||||
init_method = std::stoi(argv[4]);
|
||||
do_log = std::stoi(argv[5]);
|
||||
time_kernel = std::stoi(argv[6]);
|
||||
|
||||
// parse the long options
|
||||
maxPoolbwdArgParser arg_parser;
|
||||
arg_parser(argc, argv);
|
||||
in_length = arg_parser.long_opts["length"];
|
||||
wsize = arg_parser.long_opts["wsize"];
|
||||
wstride = arg_parser.long_opts["wstride"];
|
||||
wdilation = arg_parser.long_opts["wdilation"];
|
||||
pad1 = arg_parser.long_opts["pad1"];
|
||||
pad2 = arg_parser.long_opts["pad2"];
|
||||
}
|
||||
|
||||
#ifdef CK_ENABLE_FP16
|
||||
using F16 = ck::half_t;
|
||||
#endif
|
||||
#ifdef CK_ENABLE_BF16
|
||||
using BF16 = ck::bhalf_t;
|
||||
#endif
|
||||
#ifdef CK_ENABLE_FP32
|
||||
using F32 = float;
|
||||
#endif
|
||||
using NDHWC = ck::tensor_layout::convolution::NDHWC;
|
||||
|
||||
if(false)
|
||||
;
|
||||
#ifdef CK_ENABLE_FP16
|
||||
else if(data_type == ck::DataTypeEnum::Half)
|
||||
{
|
||||
ck::profiler::profile_avg_pool3d_bwd_impl<F16, F16, F16, NDHWC, NDHWC>(do_verification,
|
||||
init_method,
|
||||
do_log,
|
||||
time_kernel,
|
||||
in_length,
|
||||
wsize,
|
||||
wstride,
|
||||
wdilation,
|
||||
pad1,
|
||||
pad2);
|
||||
}
|
||||
#endif
|
||||
#ifdef CK_ENABLE_BF16
|
||||
else if(data_type == ck::DataTypeEnum::BFloat16)
|
||||
{
|
||||
ck::profiler::profile_avg_pool3d_bwd_impl<BF16, BF16, BF16, NDHWC, NDHWC>(do_verification,
|
||||
init_method,
|
||||
do_log,
|
||||
time_kernel,
|
||||
in_length,
|
||||
wsize,
|
||||
wstride,
|
||||
wdilation,
|
||||
pad1,
|
||||
pad2);
|
||||
}
|
||||
#endif
|
||||
#ifdef CK_ENABLE_FP32
|
||||
else if(data_type == ck::DataTypeEnum::Float)
|
||||
{
|
||||
ck::profiler::profile_avg_pool3d_bwd_impl<F32, F32, F32, NDHWC, NDHWC>(do_verification,
|
||||
init_method,
|
||||
do_log,
|
||||
time_kernel,
|
||||
in_length,
|
||||
wsize,
|
||||
wstride,
|
||||
wdilation,
|
||||
pad1,
|
||||
pad2);
|
||||
}
|
||||
#endif
|
||||
else
|
||||
{
|
||||
throw std::runtime_error("not implemented yet");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Register with ckProfiler under the "avg_pool3d_bwd" op name.
// Fix: the human-readable description previously said "max_pool bwd"
// (copy-paste from the max-pool profiler) although this entry profiles
// avg pool 3d backward.
REGISTER_PROFILER_OPERATION("avg_pool3d_bwd", "avg_pool3d bwd", profile_avg_pool3d_bwd);
|
||||
177
profiler/src/profile_max_pool3d_bwd.cpp
Normal file
177
profiler/src/profile_max_pool3d_bwd.cpp
Normal file
@@ -0,0 +1,177 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
#include <iostream>
|
||||
#include <vector>
|
||||
#include <unordered_map>
|
||||
|
||||
#include "profiler/data_type_enum.hpp"
|
||||
#include "profiler/profile_max_pool3d_bwd_impl.hpp"
|
||||
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
||||
#include "profiler_operation_registry.hpp"
|
||||
|
||||
using ck::index_t;
|
||||
|
||||
// Collects the pooling-geometry long options (--length, --wsize, --wstride,
// --wdilation, --pad1, --pad2) from the command line into integer vectors.
struct maxPoolbwdArgParser
{
    std::unordered_map<std::string, std::vector<int>> long_opts = {{"length", {}},
                                                                   {"wsize", {}},
                                                                   {"wstride", {}},
                                                                   {"wdilation", {}},
                                                                   {"pad1", {}},
                                                                   {"pad2", {}}};

    // If argv[i] equals "--<key>", consume every following token up to (but
    // not including) the next "-"-prefixed one as an integer value for that
    // key. Returns true exactly when argv[i] matched the key.
    bool parse_opt(int argc, char* argv[], const std::string& key, int i)
    {
        if(std::string("--") + key != argv[i])
            return false;

        for(int j = i + 1; j < argc && argv[j][0] != '-'; ++j)
            long_opts[key].push_back(std::stoi(argv[j]));

        return true;
    }

    // Scan argv once per known option key; stop at the first match for each.
    void operator()(int argc, char* argv[])
    {
        for(auto& kv : long_opts)
            for(int i = 1; i < argc; ++i)
                if(parse_opt(argc, argv, kv.first, i))
                    break;
    }
};
|
||||
|
||||
// Print the command-line usage text for the max_pool3d_bwd profiler op.
void print_help_max_pool3d_bwd()
{
    // Adjacent string literals concatenate; the emitted bytes are unchanged.
    // clang-format off
    std::cout << "arg1: data type (0: fp16; 1: fp32; 5: bf16)\n"
                 "arg2: verification (0: no; 1: yes)\n"
                 "arg3: initialization (0: no init; 1: integer value; 2: decimal value)\n"
                 "arg4: print tensor value (0: no; 1: yes)\n"
                 "arg5: time kernel (0=no, 1=yes)\n"
                 "--length: input tensor length for NCDHW(e.g, --length 2 32 30 30 30) \n"
                 "--wsize: window size for ZYX (e.g, --wsize 2 2 2) \n"
                 "--wstride: window stride for DHW (e.g, --wstride 2 2 2) \n"
                 "--wdilation: window dilation for DHW (e.g, --wdilation 1 1 1) \n"
                 "--pad1: left side of padding in DHW (e.g, --pad1 1 1 1) \n"
                 "--pad2: right side of padding in DHW (e.g, --pad2 1 1 1) \n"
                 "eg: ckProfiler max_pool3d_bwd 0 1 2 0 1 --length 2 32 30 30 30 --wsize 2 2 2 "
                 "--wstride 2 2 2 --wdilation 1 1 1 --pad1 1 1 1 --pad2 1 1 1"
              << std::endl;
    // clang-format on
}
|
||||
|
||||
int profile_max_pool3d_bwd(int argc, char* argv[])
|
||||
{
|
||||
ck::DataTypeEnum data_type = ck::DataTypeEnum::Half;
|
||||
bool do_verification = true;
|
||||
int init_method = 0;
|
||||
bool do_log = false;
|
||||
bool time_kernel = true;
|
||||
|
||||
std::vector<index_t> in_length = {2, 32, 30, 30, 30};
|
||||
std::vector<index_t> wsize = {2, 2, 2};
|
||||
std::vector<index_t> wstride = {2, 2, 2};
|
||||
std::vector<index_t> wdilation = {1, 1, 1};
|
||||
std::vector<index_t> pad1 = {1, 1, 1};
|
||||
std::vector<index_t> pad2 = {1, 1, 1};
|
||||
|
||||
if(argc != 2 && argc != 33)
|
||||
{
|
||||
print_help_max_pool3d_bwd();
|
||||
return 0;
|
||||
}
|
||||
else if(argc == 33)
|
||||
{
|
||||
data_type = static_cast<ck::DataTypeEnum>(std::stoi(argv[2]));
|
||||
do_verification = std::stoi(argv[3]);
|
||||
init_method = std::stoi(argv[4]);
|
||||
do_log = std::stoi(argv[5]);
|
||||
time_kernel = std::stoi(argv[6]);
|
||||
|
||||
// parse the long options
|
||||
maxPoolbwdArgParser arg_parser;
|
||||
arg_parser(argc, argv);
|
||||
in_length = arg_parser.long_opts["length"];
|
||||
wsize = arg_parser.long_opts["wsize"];
|
||||
wstride = arg_parser.long_opts["wstride"];
|
||||
wdilation = arg_parser.long_opts["wdilation"];
|
||||
pad1 = arg_parser.long_opts["pad1"];
|
||||
pad2 = arg_parser.long_opts["pad2"];
|
||||
}
|
||||
|
||||
#ifdef CK_ENABLE_FP16
|
||||
using F16 = ck::half_t;
|
||||
#endif
|
||||
#ifdef CK_ENABLE_BF16
|
||||
using BF16 = ck::bhalf_t;
|
||||
#endif
|
||||
#ifdef CK_ENABLE_FP32
|
||||
using F32 = float;
|
||||
#endif
|
||||
using I32 = int32_t;
|
||||
|
||||
if(false)
|
||||
;
|
||||
#ifdef CK_ENABLE_FP16
|
||||
else if(data_type == ck::DataTypeEnum::Half)
|
||||
{
|
||||
ck::profiler::profile_max_pool3d_bwd_impl<F16, F16, I32, F16, F16, false>(do_verification,
|
||||
init_method,
|
||||
do_log,
|
||||
time_kernel,
|
||||
in_length,
|
||||
wsize,
|
||||
wstride,
|
||||
wdilation,
|
||||
pad1,
|
||||
pad2);
|
||||
}
|
||||
#endif
|
||||
#ifdef CK_ENABLE_BF16
|
||||
else if(data_type == ck::DataTypeEnum::BFloat16)
|
||||
{
|
||||
ck::profiler::profile_max_pool3d_bwd_impl<BF16, BF16, I32, BF16, BF16, false>(
|
||||
do_verification,
|
||||
init_method,
|
||||
do_log,
|
||||
time_kernel,
|
||||
in_length,
|
||||
wsize,
|
||||
wstride,
|
||||
wdilation,
|
||||
pad1,
|
||||
pad2);
|
||||
}
|
||||
#endif
|
||||
#ifdef CK_ENABLE_FP32
|
||||
else if(data_type == ck::DataTypeEnum::Float)
|
||||
{
|
||||
ck::profiler::profile_max_pool3d_bwd_impl<F32, F32, I32, F32, F32, false>(do_verification,
|
||||
init_method,
|
||||
do_log,
|
||||
time_kernel,
|
||||
in_length,
|
||||
wsize,
|
||||
wstride,
|
||||
wdilation,
|
||||
pad1,
|
||||
pad2);
|
||||
}
|
||||
#endif
|
||||
else
|
||||
{
|
||||
throw std::runtime_error("not implemented yet");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
REGISTER_PROFILER_OPERATION("max_pool3d_bwd", "max_pool3d bwd", profile_max_pool3d_bwd);
|
||||
@@ -51,7 +51,7 @@ struct maxPoolFwdArgParser
|
||||
|
||||
void print_help_max_pool3d_fwd()
|
||||
{
|
||||
std::cout << "arg1: data type (0: fp16; 1: fp32)\n"
|
||||
std::cout << "arg1: data type (0: fp16; 1: fp32; 5: bf16)\n"
|
||||
<< "arg2: verification (0: no; 1: yes)\n"
|
||||
<< "arg3: initialization (0: no init; 1: integer value; 2: decimal value)\n"
|
||||
<< "arg4: print tensor value (0: no; 1: yes)\n"
|
||||
@@ -109,8 +109,15 @@ int profile_max_pool3d_fwd(int argc, char* argv[])
|
||||
pad2 = arg_parser.long_opts["pad2"];
|
||||
}
|
||||
|
||||
using F16 = ck::half_t;
|
||||
using F32 = float;
|
||||
#ifdef CK_ENABLE_FP16
|
||||
using F16 = ck::half_t;
|
||||
#endif
|
||||
#ifdef CK_ENABLE_BF16
|
||||
using BF16 = ck::bhalf_t;
|
||||
#endif
|
||||
#ifdef CK_ENABLE_FP32
|
||||
using F32 = float;
|
||||
#endif
|
||||
using I32 = int32_t;
|
||||
using NDHWC = ck::tensor_layout::convolution::NDHWC;
|
||||
|
||||
@@ -120,7 +127,10 @@ int profile_max_pool3d_fwd(int argc, char* argv[])
|
||||
constexpr auto ReduceOpId = ck::ReduceTensorOp::AVG;
|
||||
#endif
|
||||
|
||||
if(data_type == ck::DataTypeEnum::Half)
|
||||
if(false)
|
||||
;
|
||||
#ifdef CK_ENABLE_FP16
|
||||
else if(data_type == ck::DataTypeEnum::Half)
|
||||
{
|
||||
if(return_index)
|
||||
ck::profiler::
|
||||
@@ -149,6 +159,51 @@ int profile_max_pool3d_fwd(int argc, char* argv[])
|
||||
pad1,
|
||||
pad2);
|
||||
}
|
||||
#endif
|
||||
#ifdef CK_ENABLE_BF16
|
||||
else if(data_type == ck::DataTypeEnum::BFloat16)
|
||||
{
|
||||
if(return_index)
|
||||
ck::profiler::profile_pool3d_fwd_impl<BF16,
|
||||
BF16,
|
||||
BF16,
|
||||
I32,
|
||||
NDHWC,
|
||||
NDHWC,
|
||||
ReduceOpId,
|
||||
false,
|
||||
true>(do_verification,
|
||||
init_method,
|
||||
do_log,
|
||||
time_kernel,
|
||||
in_length,
|
||||
wsize,
|
||||
wstride,
|
||||
wdilation,
|
||||
pad1,
|
||||
pad2);
|
||||
else
|
||||
ck::profiler::profile_pool3d_fwd_impl<BF16,
|
||||
BF16,
|
||||
BF16,
|
||||
I32,
|
||||
NDHWC,
|
||||
NDHWC,
|
||||
ReduceOpId,
|
||||
false,
|
||||
false>(do_verification,
|
||||
init_method,
|
||||
do_log,
|
||||
time_kernel,
|
||||
in_length,
|
||||
wsize,
|
||||
wstride,
|
||||
wdilation,
|
||||
pad1,
|
||||
pad2);
|
||||
}
|
||||
#endif
|
||||
#ifdef CK_ENABLE_FP32
|
||||
else if(data_type == ck::DataTypeEnum::Float)
|
||||
{
|
||||
if(return_index)
|
||||
@@ -178,6 +233,7 @@ int profile_max_pool3d_fwd(int argc, char* argv[])
|
||||
pad1,
|
||||
pad2);
|
||||
}
|
||||
#endif
|
||||
else
|
||||
{
|
||||
throw std::runtime_error("not implemented yet");
|
||||
|
||||
@@ -57,7 +57,7 @@ add_subdirectory(data_type)
|
||||
add_subdirectory(elementwise_normalization)
|
||||
add_subdirectory(batchnorm)
|
||||
add_subdirectory(contraction)
|
||||
add_subdirectory(pool_fwd)
|
||||
add_subdirectory(pool)
|
||||
add_subdirectory(batched_gemm_multi_d)
|
||||
add_subdirectory(grouped_convnd_bwd_data)
|
||||
if(GPU_TARGETS MATCHES "gfx11")
|
||||
|
||||
16
test/pool/CMakeLists.txt
Normal file
16
test/pool/CMakeLists.txt
Normal file
@@ -0,0 +1,16 @@
|
||||
add_custom_target(test_pool)

# Backward pooling tests
add_gtest_executable(test_avg_pool3d_bwd test_avg_pool3d_bwd.cpp)
target_link_libraries(test_avg_pool3d_bwd PRIVATE utility device_avg_pool3d_bwd_instance)
add_dependencies(test_pool test_avg_pool3d_bwd)

add_gtest_executable(test_max_pool3d_bwd test_max_pool3d_bwd.cpp)
target_link_libraries(test_max_pool3d_bwd PRIVATE utility device_max_pool_bwd_instance)
add_dependencies(test_pool test_max_pool3d_bwd)

# Forward pooling tests
add_gtest_executable(test_avg_pool3d_fwd test_avg_pool3d_fwd.cpp)
target_link_libraries(test_avg_pool3d_fwd PRIVATE utility device_pool3d_fwd_instance)
add_dependencies(test_pool test_avg_pool3d_fwd)

add_gtest_executable(test_max_pool3d_fwd test_max_pool3d_fwd.cpp)
target_link_libraries(test_max_pool3d_fwd PRIVATE utility device_pool3d_fwd_instance)
add_dependencies(test_pool test_max_pool3d_fwd)
|
||||
74
test/pool/test_avg_pool3d_bwd.cpp
Normal file
74
test/pool/test_avg_pool3d_bwd.cpp
Normal file
@@ -0,0 +1,74 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
#include "profiler/profile_avg_pool3d_bwd_impl.hpp"
|
||||
#include "test_pool_fwd_common.hpp"
|
||||
|
||||
template <typename Tuple>
|
||||
class TestAvgPool3dBwd : public ::testing::Test
|
||||
{
|
||||
protected:
|
||||
using DOutDataType = std::tuple_element_t<0, Tuple>;
|
||||
using DInDataType = std::tuple_element_t<1, Tuple>;
|
||||
using ComputeDataType = std::tuple_element_t<2, Tuple>;
|
||||
using DOutLayout = std::tuple_element_t<3, Tuple>;
|
||||
using DInLayout = std::tuple_element_t<4, Tuple>;
|
||||
|
||||
std::vector<PoolingParam> params;
|
||||
|
||||
void Run()
|
||||
{
|
||||
for(auto param : params)
|
||||
{
|
||||
bool success =
|
||||
ck::profiler::profile_avg_pool3d_bwd_impl<DOutDataType,
|
||||
DInDataType,
|
||||
ComputeDataType,
|
||||
DOutLayout,
|
||||
DInLayout>(true,
|
||||
2,
|
||||
false,
|
||||
false,
|
||||
param.length_,
|
||||
param.window_spatial_lengths_,
|
||||
param.window_strides_,
|
||||
param.window_dilations_,
|
||||
param.input_left_pads_,
|
||||
param.input_right_pads_);
|
||||
EXPECT_TRUE(success);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
#if defined(CK_ENABLE_FP16) && defined(CK_ENABLE_BF16) && defined(CK_ENABLE_FP32)
|
||||
using KernelTypes = ::testing::Types<std::tuple<F16, F16, F32, NDHWC, NDHWC>,
|
||||
std::tuple<BF16, BF16, F32, NDHWC, NDHWC>,
|
||||
std::tuple<F32, F32, F32, NDHWC, NDHWC>>;
|
||||
#elif defined(CK_ENABLE_FP16) && defined(CK_ENABLE_FP32)
|
||||
using KernelTypes = ::testing::Types<std::tuple<F16, F16, F32, NDHWC, NDHWC>,
|
||||
std::tuple<F32, F32, F32, NDHWC, NDHWC>>;
|
||||
#elif defined(CK_ENABLE_BF16) && defined(CK_ENABLE_FP32)
|
||||
using KernelTypes = ::testing::Types<std::tuple<BF16, BF16, F32, NDHWC, NDHWC>,
|
||||
std::tuple<F32, F32, F32, NDHWC, NDHWC>>;
|
||||
#elif defined(CK_ENABLE_FP16) && defined(CK_ENABLE_BF16)
|
||||
using KernelTypes = ::testing::Types<std::tuple<F16, F16, F32, NDHWC, NDHWC>,
|
||||
std::tuple<BF16, BF16, F32, NDHWC, NDHWC>>;
|
||||
#elif defined(CK_ENABLE_FP16)
|
||||
using KernelTypes = ::testing::Types<std::tuple<F16, F16, F32, NDHWC, NDHWC>>;
|
||||
#elif defined(CK_ENABLE_BF16)
|
||||
using KernelTypes = ::testing::Types<std::tuple<BF16, BF16, F32, NDHWC, NDHWC>>;
|
||||
#elif defined(CK_ENABLE_FP32)
|
||||
using KernelTypes = ::testing::Types<std::tuple<F32, F32, F32, NDHWC, NDHWC>>;
|
||||
#endif
|
||||
|
||||
TYPED_TEST_SUITE(TestAvgPool3dBwd, KernelTypes);
|
||||
TYPED_TEST(TestAvgPool3dBwd, Test_Pool)
|
||||
{
|
||||
// length, window_length, window_stride, window_dilation, left_pad, right_pad
|
||||
this->params = {{{1, 1, 1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}},
|
||||
{{2, 16, 64, 64, 64}, {4, 4, 4}, {4, 4, 4}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}},
|
||||
{{2, 32, 30, 30, 30}, {2, 2, 2}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}}};
|
||||
|
||||
this->Run();
|
||||
}
|
||||
79
test/pool/test_max_pool3d_bwd.cpp
Normal file
79
test/pool/test_max_pool3d_bwd.cpp
Normal file
@@ -0,0 +1,79 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
#include "profiler/profile_max_pool3d_bwd_impl.hpp"
|
||||
#include "test_pool_fwd_common.hpp"
|
||||
|
||||
template <typename Tuple>
|
||||
class TestMaxPool3dBwd : public ::testing::Test
|
||||
{
|
||||
protected:
|
||||
using DOutDataType = std::tuple_element_t<0, Tuple>;
|
||||
using DInDataType = std::tuple_element_t<1, Tuple>;
|
||||
using IndexDataType = std::tuple_element_t<2, Tuple>;
|
||||
|
||||
using InDataType = DInDataType;
|
||||
using OutDataType = DOutDataType;
|
||||
|
||||
std::vector<PoolingParam> params;
|
||||
|
||||
void Run()
|
||||
{
|
||||
for(auto param : params)
|
||||
{
|
||||
bool success =
|
||||
ck::profiler::profile_max_pool3d_bwd_impl<InDataType,
|
||||
OutDataType,
|
||||
IndexDataType,
|
||||
DOutDataType,
|
||||
DInDataType,
|
||||
false>(true,
|
||||
2,
|
||||
false,
|
||||
false,
|
||||
param.length_,
|
||||
param.window_spatial_lengths_,
|
||||
param.window_strides_,
|
||||
param.window_dilations_,
|
||||
param.input_left_pads_,
|
||||
param.input_right_pads_);
|
||||
EXPECT_TRUE(success);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
#if defined(CK_ENABLE_FP16) && defined(CK_ENABLE_BF16) && defined(CK_ENABLE_FP32)
|
||||
using KernelTypes = ::testing::Types<std::tuple<F16, F16, I32, NDHWC, NDHWC>,
|
||||
std::tuple<BF16, BF16, I32, NDHWC, NDHWC>,
|
||||
std::tuple<F32, F32, I32, NDHWC, NDHWC>>;
|
||||
#elif defined(CK_ENABLE_FP16) && defined(CK_ENABLE_FP32)
|
||||
using KernelTypes = ::testing::Types<std::tuple<F16, F16, I32, NDHWC, NDHWC>,
|
||||
std::tuple<F32, F32, I32, NDHWC, NDHWC>>;
|
||||
#elif defined(CK_ENABLE_BF16) && defined(CK_ENABLE_FP32)
|
||||
using KernelTypes = ::testing::Types<std::tuple<BF16, BF16, I32, NDHWC, NDHWC>,
|
||||
std::tuple<F32, F32, I32, NDHWC, NDHWC>>;
|
||||
#elif defined(CK_ENABLE_FP16) && defined(CK_ENABLE_BF16)
|
||||
using KernelTypes = ::testing::Types<std::tuple<F16, F16, I32, NDHWC, NDHWC>,
|
||||
std::tuple<BF16, BF16, I32, NDHWC, NDHWC>>;
|
||||
#elif defined(CK_ENABLE_FP16)
|
||||
using KernelTypes = ::testing::Types<std::tuple<F16, F16, I32, NDHWC, NDHWC>>;
|
||||
#elif defined(CK_ENABLE_BF16)
|
||||
using KernelTypes = ::testing::Types<std::tuple<BF16, BF16, I32, NDHWC, NDHWC>>;
|
||||
#elif defined(CK_ENABLE_FP32)
|
||||
using KernelTypes = ::testing::Types<std::tuple<F32, F32, I32, NDHWC, NDHWC>>;
|
||||
#endif
|
||||
|
||||
TYPED_TEST_SUITE(TestMaxPool3dBwd, KernelTypes);
|
||||
TYPED_TEST(TestMaxPool3dBwd, Test_Pool)
|
||||
{
|
||||
// length, window_length, window_stride, window_dilation, left_pad, right_pad
|
||||
this->params = {{{1, 1, 1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}},
|
||||
{{2, 16, 64, 64, 64}, {4, 4, 4}, {4, 4, 4}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}},
|
||||
{{2, 32, 30, 30, 30}, {2, 2, 2}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}}};
|
||||
|
||||
// this->params = {{{2, 32, 30, 30, 30}, {2, 2, 2}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1,
|
||||
// 1}}};
|
||||
|
||||
this->Run();
|
||||
}
|
||||
@@ -4,10 +4,12 @@
|
||||
#include "gtest/gtest.h"
|
||||
#include "ck/ck.hpp"
|
||||
|
||||
using F16 = ck::half_t;
|
||||
using F32 = float;
|
||||
using I32 = int32_t;
|
||||
using F16 = ck::half_t;
|
||||
using BF16 = ck::bhalf_t;
|
||||
using F32 = float;
|
||||
using I32 = int32_t;
|
||||
using ck::index_t;
|
||||
using NDHWC = ck::tensor_layout::convolution::NDHWC;
|
||||
|
||||
struct PoolingParam
|
||||
{
|
||||
@@ -1,10 +0,0 @@
|
||||
add_custom_target(test_pool_fwd)
|
||||
|
||||
add_gtest_executable(test_avg_pool3d_fwd test_avg_pool3d_fwd.cpp)
|
||||
add_gtest_executable(test_max_pool3d_fwd test_max_pool3d_fwd.cpp)
|
||||
|
||||
target_link_libraries(test_avg_pool3d_fwd PRIVATE utility device_pool3d_fwd_instance)
|
||||
target_link_libraries(test_max_pool3d_fwd PRIVATE utility device_pool3d_fwd_instance)
|
||||
|
||||
add_dependencies(test_pool_fwd test_avg_pool3d_fwd)
|
||||
add_dependencies(test_pool_fwd test_max_pool3d_fwd)
|
||||
Reference in New Issue
Block a user