From 7cdb665a50b32a7da7a6a74cd1f988c1225b04fa Mon Sep 17 00:00:00 2001
From: Chao Liu
Date: Sat, 1 Jun 2019 10:48:48 -0500
Subject: [PATCH] refactor

---
 src/include/conv_common.hip.hpp               |  29 +++-
 ...on_implicit_gemm_v2_chwn_cyxk_khwn.hip.hpp |   5 +-
 ...2_chwn_cyxk_khwn_lds_double_buffer.hip.hpp |   5 +-
 src/include/threadwise_2d_tensor_op.hip.hpp   | 137 -----------------
 src/include/threadwise_4d_tensor_op.hip.hpp   | 138 ------------------
 5 files changed, 30 insertions(+), 284 deletions(-)
 delete mode 100644 src/include/threadwise_2d_tensor_op.hip.hpp

diff --git a/src/include/conv_common.hip.hpp b/src/include/conv_common.hip.hpp
index 6fe7104be3..32f448a7f0 100644
--- a/src/include/conv_common.hip.hpp
+++ b/src/include/conv_common.hip.hpp
@@ -3,7 +3,7 @@
 
 // this is ugly, only for 4d
 template <class InDesc, class WeiDesc>
-__host__ __device__ constexpr auto get_convolution_output_default_4d_tensor_descriptor(InDesc,
+constexpr auto get_convolution_output_default_4d_tensor_descriptor(InDesc,
                                                                    WeiDesc)
 {
     constexpr auto in_desc = InDesc{};
@@ -34,7 +34,7 @@ __host__ __device__ constexpr auto get_convolution_output_default_4d_tensor_desc
 }
 
 template <class InDesc, class WeiDesc, class LowerPads, class UpperPads>
-__host__ __device__ constexpr auto get_convolution_with_padding_output_default_4d_tensor_descriptor(
+constexpr auto get_convolution_with_padding_output_default_4d_tensor_descriptor(
     InDesc, WeiDesc, LowerPads, UpperPads)
 {
     constexpr auto in_desc = InDesc{};
@@ -71,7 +71,7 @@ __host__ __device__ constexpr auto get_convolution_with_padding_output_default_4
 }
 
 template <class InDesc, class WeiDesc, class OutDesc>
-__host__ __device__ constexpr std::size_t calculate_convolution_flops(InDesc, WeiDesc, OutDesc)
+constexpr std::size_t calculate_convolution_flops(InDesc, WeiDesc, OutDesc)
 {
     constexpr auto wei_desc = WeiDesc{};
     constexpr auto out_desc = OutDesc{};
@@ -92,3 +92,26 @@ __host__ __device__ constexpr std::size_t calculate_convolution_flops(InDesc, We
 
     return std::size_t(2) * N * K * Ho * Wo * C * Y * X;
 }
+
+template <class Float, class InDesc, class WeiDesc, class OutDesc>
+constexpr std::size_t calculate_convolution_memory_size(Float, InDesc, WeiDesc, OutDesc)
+{
+    constexpr auto wei_desc = WeiDesc{};
+    constexpr auto out_desc = OutDesc{};
+
+    constexpr auto I0 = Number<0>{};
+    constexpr auto I1 = Number<1>{};
+    constexpr auto I2 = Number<2>{};
+    constexpr auto I3 = Number<3>{};
+
+    constexpr index_t N = out_desc.GetLength(I0);
+    constexpr index_t K = out_desc.GetLength(I1);
+    constexpr index_t Ho = out_desc.GetLength(I2);
+    constexpr index_t Wo = out_desc.GetLength(I3);
+
+    constexpr index_t C = wei_desc.GetLength(I1);
+    constexpr index_t Y = wei_desc.GetLength(I2);
+    constexpr index_t X = wei_desc.GetLength(I3);
+
+    return sizeof(Float) * (InDesc::GetElementSpace() + WeiDesc::GetElementSpace() + OutDesc::GetElementSpace());
+}
diff --git a/src/include/gridwise_convolution_implicit_gemm_v2_chwn_cyxk_khwn.hip.hpp b/src/include/gridwise_convolution_implicit_gemm_v2_chwn_cyxk_khwn.hip.hpp
index c78a16713b..231fbbe448 100644
--- a/src/include/gridwise_convolution_implicit_gemm_v2_chwn_cyxk_khwn.hip.hpp
+++ b/src/include/gridwise_convolution_implicit_gemm_v2_chwn_cyxk_khwn.hip.hpp
@@ -4,7 +4,6 @@
 #include "ConstantMatrixDescriptor.hip.hpp"
 #include "blockwise_4d_tensor_op.hip.hpp"
 #include "blockwise_2d_tensor_op.hip.hpp"
-#include "threadwise_2d_tensor_op.hip.hpp"
 #include "blockwise_gemm.hip.hpp"
 
 // define B = flatten(N, Hi, Wi)
@@ -202,8 +201,8 @@ struct GridwiseConvolutionImplicitGemm_v2_chwn_cyxk_khwn
         // register
         Float p_out_thread[out_kb_thread_desc.GetElementSpace()];
 
-        // set threadwise output tensor to 0
-        threadwise_2d_tensor_set_zero(out_kb_thread_desc, p_out_thread);
+        // set threadwise output to 0
+        threadwise_matrix_set_zero(c_kxb_thread_mtx_desc, p_out_thread);
         for(index_t c_block_data_begin = 0; c_block_data_begin < C;
             c_block_data_begin += CPerBlock,
                     p_in_global_block_offset += CPerBlock * in_cb_global_desc.GetStride(I0),
diff --git a/src/include/gridwise_convolution_implicit_gemm_v2_chwn_cyxk_khwn_lds_double_buffer.hip.hpp b/src/include/gridwise_convolution_implicit_gemm_v2_chwn_cyxk_khwn_lds_double_buffer.hip.hpp
index 60baf437cf..f87caf3816 100644
--- a/src/include/gridwise_convolution_implicit_gemm_v2_chwn_cyxk_khwn_lds_double_buffer.hip.hpp
+++ b/src/include/gridwise_convolution_implicit_gemm_v2_chwn_cyxk_khwn_lds_double_buffer.hip.hpp
@@ -4,7 +4,6 @@
 #include "ConstantMatrixDescriptor.hip.hpp"
 #include "blockwise_4d_tensor_op.hip.hpp"
 #include "blockwise_2d_tensor_op.hip.hpp"
-#include "threadwise_2d_tensor_op.hip.hpp"
 #include "threadwise_tensor_slice_op.hip.hpp"
 #include "blockwise_gemm.hip.hpp"
 
@@ -222,8 +221,8 @@ struct GridwiseConvolutionImplicitGemm_v2_chwn_cyxk_khwn_lds_double_buffer
         // register
         Float p_out_thread[out_kb_thread_desc.GetElementSpace()];
 
-        // set threadwise output tensor to 0
-        threadwise_2d_tensor_set_zero(out_kb_thread_desc, p_out_thread);
+        // set threadwise output to 0
+        threadwise_matrix_set_zero(c_kxb_thread_mtx_desc, p_out_thread);
 
         for(index_t c_block_data_begin = 0; c_block_data_begin + 2 * CPerBlock < C;
             c_block_data_begin += 2 * CPerBlock)
diff --git a/src/include/threadwise_2d_tensor_op.hip.hpp b/src/include/threadwise_2d_tensor_op.hip.hpp
deleted file mode 100644
index 1bee7e801d..0000000000
--- a/src/include/threadwise_2d_tensor_op.hip.hpp
+++ /dev/null
@@ -1,137 +0,0 @@
-#pragma once
-#include "ConstantTensorDescriptor.hip.hpp"
-
-template <class Float, class Desc, class F>
-__device__ void threadwise_2d_tensor_pointwise_operation_unary(Desc, Float* __restrict__ p, F f)
-{
-    constexpr auto I0 = Number<0>{};
-    constexpr auto I1 = Number<1>{};
-
-    constexpr auto desc = Desc{};
-
-#if 0
-    if(get_thread_local_1d_id() == 0)
-    {
-        print_ConstantTensorDescriptor(desc, "threadwise_4d_tensor_op_unary: ");
-    }
-#endif
-
-    for(index_t did0 = 0; did0 < desc.GetLength(I0); ++did0)
-    {
-        for(index_t did1 = 0; did1 < desc.GetLength(I1); ++did1)
-        {
-            const index_t dindex = desc.GetOffsetFromMultiIndex(did0, did1);
-
-            f(p[dindex]);
-        }
-    }
-}
-
-// TODO: in order to optimize mem access for different mem type,
-// need to write specialized version
-template <class Float, class SrcDesc, class DstDesc, class SrcOpLengths, class MapDst2Src, class F>
-__device__ void threadwise_2d_tensor_pointwise_operation_binary_reorder_by_get_dst_from_src(
-    SrcDesc,
-    Float* const __restrict__ p_src,
-    DstDesc,
-    Float* __restrict__ p_dst,
-    SrcOpLengths,
-    MapDst2Src,
-    F f)
-{
-    constexpr auto I0 = Number<0>{};
-    constexpr auto I1 = Number<1>{};
-
-    constexpr index_t IR0 = MapDst2Src{}.Get(I0);
-    constexpr index_t IR1 = MapDst2Src{}.Get(I1);
-
-    constexpr auto src_desc = SrcDesc{};
-    constexpr auto dst_desc = DstDesc{};
-    constexpr auto ref_desc = make_ConstantTensorDescriptor(SrcOpLengths{});
-
-    for(index_t did0 = 0; did0 < ref_desc.GetLength(I0); ++did0)
-    {
-        for(index_t did1 = 0; did1 < ref_desc.GetLength(I1); ++did1)
-        {
-            const index_t aindex = src_desc.GetOffsetFromMultiIndex(did0, did1);
-
-            const index_t did[2] = {did0, did1};
-
-            const index_t bindex = dst_desc.GetOffsetFromMultiIndex(did[IR0], did[IR1]);
-
-            f(p_src[aindex], p_dst[bindex]);
-        }
-    }
-}
-
-template <class Float, class Desc>
-__device__ void threadwise_2d_tensor_set_zero(Desc, Float* __restrict__ p)
-{
-    auto f_set_zero = [](Float& v) { v = Float(0); };
-
-    threadwise_2d_tensor_pointwise_operation_unary(
-        Desc{}, p, f_set_zero);
-}
-
-template <class Float, class SrcDesc, class DstDesc, class SrcOpLengths, class MapDst2Src>
-__device__ void
-threadwise_2d_tensor_copy_reorder_by_get_dst_from_src(SrcDesc,
-                                                      Float* const __restrict__ p_src,
-                                                      DstDesc,
-                                                      Float* __restrict__ p_dst,
-                                                      SrcOpLengths,
-                                                      MapDst2Src)
-{
-    auto f_copy = [](const Float& src, Float& dst) { dst = src; };
-
-    threadwise_2d_tensor_pointwise_operation_binary_reorder_by_get_dst_from_src(
-        SrcDesc{}, p_src, DstDesc{}, p_dst, SrcOpLengths{}, MapDst2Src{}, f_copy);
-}
-
-#if 0 // replaced by threadwise_tensor_slice_copy
-template <class Float, class SrcDesc, class DstDesc, class SrcOpLengths>
-__device__ void threadwise_2d_tensor_copy(
-    SrcDesc, Float* const __restrict__ p_src, DstDesc, Float* __restrict__ p_dst, SrcOpLengths)
-{
-    auto dst_from_src_reorder = Sequence<0, 1>{};
-
-    threadwise_2d_tensor_copy_reorder_by_get_dst_from_src(
-        SrcDesc{}, p_src, DstDesc{}, p_dst, SrcOpLengths{}, dst_from_src_reorder);
-}
-#endif
-
-template <class Float, class Desc, class IDim, class NShift>
-__device__ void threadwise_2d_tensor_shift_down(Desc, Float* __restrict__ p, IDim, NShift)
-{
-    constexpr auto I0 = Number<0>{};
-    constexpr auto I1 = Number<1>{};
-
-    constexpr auto desc = Desc{};
-
-#if 0
-    if(get_thread_local_1d_id() == 0)
-    {
-        print_ConstantTensorDescriptor(desc, "threadwise_4d_tensor_shift_down: ");
-    }
-#endif
-
-    constexpr index_t nshift = NShift::mValue;
-
-    constexpr index_t did0_end =
-        is_same<decltype(I0), IDim>::value ? desc.GetLength(I0) - nshift : desc.GetLength(I0);
-
-    constexpr index_t did1_end =
-        is_same<decltype(I1), IDim>::value ? desc.GetLength(I1) - nshift : desc.GetLength(I1);
-
-    for(index_t did0 = 0; did0 < did0_end; ++did0)
-    {
-        for(index_t did1 = 0; did1 < did1_end; ++did1)
-        {
-            const index_t dindex = desc.GetOffsetFromMultiIndex(did0, did1);
-
-            const index_t sindex = dindex + nshift * desc.GetStride(IDim{});
-
-            p[dindex] = p[sindex];
-        }
-    }
-}
diff --git a/src/include/threadwise_4d_tensor_op.hip.hpp b/src/include/threadwise_4d_tensor_op.hip.hpp
index b8a2c59a26..6620a28c24 100644
--- a/src/include/threadwise_4d_tensor_op.hip.hpp
+++ b/src/include/threadwise_4d_tensor_op.hip.hpp
@@ -1,144 +1,6 @@
 #pragma once
 #include "ConstantTensorDescriptor.hip.hpp"
 
-template <class Float, class Desc, class F>
-__device__ void threadwise_4d_tensor_pointwise_operation_unary(Desc, Float* __restrict__ p, F f)
-{
-    constexpr auto I0 = Number<0>{};
-    constexpr auto I1 = Number<1>{};
-    constexpr auto I2 = Number<2>{};
-    constexpr auto I3 = Number<3>{};
-
-    constexpr auto desc = Desc{};
-
-#if 0
-    if(get_thread_local_1d_id() == 0)
-    {
-        print_ConstantTensorDescriptor(desc, "threadwise_4d_tensor_op_unary: ");
-    }
-#endif
-
-    for(index_t did0 = 0; did0 < desc.GetLength(I0); ++did0)
-    {
-        for(index_t did1 = 0; did1 < desc.GetLength(I1); ++did1)
-        {
-            for(index_t did2 = 0; did2 < desc.GetLength(I2); ++did2)
-            {
-                for(index_t did3 = 0; did3 < desc.GetLength(I3); ++did3)
-                {
-                    const index_t dindex = desc.GetOffsetFromMultiIndex(did0, did1, did2, did3);
-
-                    f(p[dindex]);
-                }
-            }
-        }
-    }
-}
-
-// TODO: in order to optimize mem access for different mem type,
-// need to write specialized version
-template <class SrcData,
-          class DstData,
-          class SrcDesc,
-          class DstDesc,
-          class SrcOpLengths,
-          class MapDst2Src,
-          class F>
-__device__ void threadwise_4d_tensor_pointwise_operation_binary_reorder_given_dst2src(
-    SrcDesc,
-    const SrcData* __restrict__ p_src,
-    DstDesc,
-    DstData* __restrict__ p_dst,
-    SrcOpLengths,
-    MapDst2Src,
-    F f)
-{
-    constexpr auto I0 = Number<0>{};
-    constexpr auto I1 = Number<1>{};
-    constexpr auto I2 = Number<2>{};
-    constexpr auto I3 = Number<3>{};
-
-    constexpr index_t IR0 = MapDst2Src{}.Get(I0);
-    constexpr index_t IR1 = MapDst2Src{}.Get(I1);
-    constexpr index_t IR2 = MapDst2Src{}.Get(I2);
-    constexpr index_t IR3 = MapDst2Src{}.Get(I3);
-
-    constexpr auto src_desc = SrcDesc{};
-    constexpr auto dst_desc = DstDesc{};
-    constexpr auto ref_desc = make_ConstantTensorDescriptor(SrcOpLengths{});
-
-    for(index_t did0 = 0; did0 < ref_desc.GetLength(I0); ++did0)
-    {
-        for(index_t did1 = 0; did1 < ref_desc.GetLength(I1); ++did1)
-        {
-            for(index_t did2 = 0; did2 < ref_desc.GetLength(I2); ++did2)
-            {
-                for(index_t did3 = 0; did3 < ref_desc.GetLength(I3); ++did3)
-                {
-                    const index_t aindex = src_desc.GetOffsetFromMultiIndex(did0, did1, did2, did3);
-
-                    const index_t did[4] = {did0, did1, did2, did3};
-
-                    const index_t bindex =
-                        dst_desc.GetOffsetFromMultiIndex(did[IR0], did[IR1], did[IR2], did[IR3]);
-
-                    f(p_src[aindex], p_dst[bindex]);
-
-#if 0
-                    if(get_block_1d_id() == 0)
-                    {
-                        printf("tid %5u, "
-                               "src did %u %u %u %u, "
-                               "dst did %u %u %u %u, "
-                               "aindex %5u, "
-                               "bindex %5u\n",
-                               get_thread_local_1d_id(),
-                               did0,
-                               did1,
-                               did2,
-                               did3,
-                               did[IR0],
-                               did[IR1],
-                               did[IR2],
-                               did[IR3],
-                               aindex,
-                               bindex);
-                    }
-#endif
-                }
-            }
-        }
-    }
-}
-
-template <class Data, class Desc>
-__device__ void threadwise_4d_tensor_set_zero(Desc, Data* __restrict__ p)
-{
-    auto f_set_zero = [](Data& v) { v = Data(0); };
-
-    threadwise_4d_tensor_pointwise_operation_unary(
-        Desc{}, p, f_set_zero);
-}
-
-template <class SrcData,
-          class DstData,
-          class SrcDesc,
-          class DstDesc,
-          class SrcOpLengths,
-          class MapDst2Src>
-__device__ void threadwise_4d_tensor_copy_reorder_given_dst2src(SrcDesc,
-                                                                const SrcData* __restrict__ p_src,
-                                                                DstDesc,
-                                                                DstData* __restrict__ p_dst,
-                                                                SrcOpLengths,
-                                                                MapDst2Src)
-{
-    auto f_copy = [](const SrcData& src, DstData& dst) { dst = static_cast<DstData>(src); };
-
-    threadwise_4d_tensor_pointwise_operation_binary_reorder_given_dst2src(
-        SrcDesc{}, p_src, DstDesc{}, p_dst, SrcOpLengths{}, MapDst2Src{}, f_copy);
-}
-
 template <class Float, class Desc, class IDim, class NShift>
 __device__ void threadwise_4d_tensor_shift_down(Desc, Float* __restrict__ p, IDim, NShift)
 {
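
Usage note: the new calculate_convolution_memory_size mirrors calculate_convolution_flops, but returns the bytes spanned by the input, weight and output element spaces instead of the FLOP count. A minimal host-side sketch of how the two might be combined for performance reporting, assuming conv_common.hip.hpp is included; the descriptor names and the illustrative NCHW input / KCYX weight sizes below are assumptions, not part of this patch:

    // illustrative problem size; any descriptors built the same way would work
    constexpr auto in_desc  = make_ConstantTensorDescriptor(Sequence<128, 256, 14, 14>{}); // N, C, Hi, Wi
    constexpr auto wei_desc = make_ConstantTensorDescriptor(Sequence<256, 256, 3, 3>{});   // K, C, Y, X
    constexpr auto out_desc = get_convolution_output_default_4d_tensor_descriptor(in_desc, wei_desc);

    // 2*N*K*Ho*Wo*C*Y*X FLOPs vs. bytes touched across the three tensors
    constexpr std::size_t flop  = calculate_convolution_flops(in_desc, wei_desc, out_desc);
    constexpr std::size_t bytes = calculate_convolution_memory_size(float{}, in_desc, wei_desc, out_desc);

    // after timing a kernel: TFLOPS = flop / time, effective GB/s = bytes / time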
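With threadwise_2d_tensor_op.hip.hpp removed, both gridwise kernels now zero their per-thread accumulator via threadwise_matrix_set_zero on the ConstantMatrixDescriptor c_kxb_thread_mtx_desc rather than threadwise_2d_tensor_set_zero on a 2d tensor descriptor. The replacement's definition is not shown in this patch; the following is only a sketch of what such a helper presumably looks like, assuming the matrix descriptor exposes NRow(), NCol() and GetOffsetFromMultiIndex(row, col) (the actual member names in the matrix-op headers may differ):

    // sketch only: the real threadwise_matrix_set_zero lives outside this patch
    template <class MatrixDesc, class Float>
    __device__ void threadwise_matrix_set_zero(MatrixDesc, Float* __restrict__ p)
    {
        constexpr auto desc = MatrixDesc{};

        for(index_t i = 0; i < desc.NRow(); ++i)
        {
            for(index_t j = 0; j < desc.NCol(); ++j)
            {
                // offset of element (i, j) within the thread's register buffer
                p[desc.GetOffsetFromMultiIndex(i, j)] = Float(0);
            }
        }
    }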