diff --git a/composable_kernel/include/kernel_algorithm/gridwise_convolution_implicit_gemm_v1r3_chwn_cyxk_khwn_padded.hpp b/composable_kernel/include/kernel_algorithm/gridwise_convolution_implicit_gemm_v1r3_chwn_cyxk_khwn_padded.hpp
index 23a7f5b05e..2d794033c1 100644
--- a/composable_kernel/include/kernel_algorithm/gridwise_convolution_implicit_gemm_v1r3_chwn_cyxk_khwn_padded.hpp
+++ b/composable_kernel/include/kernel_algorithm/gridwise_convolution_implicit_gemm_v1r3_chwn_cyxk_khwn_padded.hpp
@@ -51,10 +51,10 @@ struct GridwiseConvolutionImplicitGemm_v1r3_chwn_cyxk_khwn_padded
                         const Float* const __restrict__ p_wei_global,
                         Float* const __restrict__ p_out_global) const
     {
-        static constexpr auto I0 = Number<0>{};
-        static constexpr auto I1 = Number<1>{};
-        static constexpr auto I2 = Number<2>{};
-        static constexpr auto I3 = Number<3>{};
+        static constexpr auto I0 = Number<0>{};
+        static constexpr auto I1 = Number<1>{};
+        static constexpr auto I2 = Number<2>{};
+        static constexpr auto I3 = Number<3>{};
 
         static constexpr auto True  = integral_constant<bool, true>{};
         static constexpr auto False = integral_constant<bool, false>{};
diff --git a/composable_kernel/include/kernel_algorithm/gridwise_convolution_implicit_gemm_v4r1_nchw_kcyx_nkhw.hpp b/composable_kernel/include/kernel_algorithm/gridwise_convolution_implicit_gemm_v4r1_nchw_kcyx_nkhw.hpp
index 5fb465c519..04785a4a06 100644
--- a/composable_kernel/include/kernel_algorithm/gridwise_convolution_implicit_gemm_v4r1_nchw_kcyx_nkhw.hpp
+++ b/composable_kernel/include/kernel_algorithm/gridwise_convolution_implicit_gemm_v4r1_nchw_kcyx_nkhw.hpp
@@ -1,5 +1,5 @@
-#ifndef CK_GRIDWISE_CONVOLUTION_IMPLICIT_GEMM_V4R1_NCHW_KCYX_NKHW
-#define CK_GRIDWISE_CONVOLUTION_IMPLICIT_GEMM_V4R1_NCHW_KCYX_NKHW
+#ifndef CK_GRIDWISE_CONVOLUTION_IMPLICIT_GEMM_V4R1_NCHW_KCYX_NKHW_HPP
+#define CK_GRIDWISE_CONVOLUTION_IMPLICIT_GEMM_V4R1_NCHW_KCYX_NKHW_HPP
 
 #include "common_header.hpp"
 #include "ConstantTensorDescriptor.hpp"
@@ -14,17 +14,16 @@ namespace ck {
 
 // define B = merge(N0, Ho, Wo)
 template
 struct GridwiseConvolutionImplicitGemm_v4r1_nchw_kcyx_nkhw
@@ -56,7 +55,9 @@ struct GridwiseConvolutionImplicitGemm_v4r1_nchw_kcyx_nkhw
     {
         // this is a mess
        // TODO: find more elegent way of specifying (or calculating) performance parameters
-        static_assert(N2 == GemmNPerThreadSubC, "wrong!");
+        constexpr index_t N1 = GemmNRepeat;
+        constexpr index_t N2 = GemmNPerThreadSubC;
+
         static_assert((N1 * N2 * BPerBlock) %
                               (GemmNPerThreadSubC * GemmNLevel0Cluster * GemmNLevel1Cluster) ==
                           0,
@@ -155,13 +156,11 @@
         static_assert(in_e_n1_b_n2_block_desc.GetStride(I1) % GemmDataPerReadB == 0,
                       "GemmDataPerReadB alignment requirement is not satisfied");
 
-#if 0 // debug
         // input blockwise copy
         // slice a merged tensor, reorder and copy to a normal tensor
         // this copy operator already has blockwise offset built-in
         auto blockwise_in_copy =
-            BlockwiseGenericTensorSliceCopy_v1(
+            BlockwiseGenericTensorSliceCopy_v2(
                 {0, 0, b_block_data_on_global, 0}, {0, 0, 0, 0});
-#else
-        auto blockwise_in_copy = BlockwiseGenericTensorSliceCopy_v2<
-            BlockSize,
-            decltype(in_e_n1_b_n2_global_merged_desc),
-            decltype(in_e_n1_b_n2_block_desc),
-            MergedTensorCoordinate,
-            NormalTensorCoordinate,
-            decltype(in_e_n1_b_n2_block_desc.GetLengths()),
-            InBlockCopySubLengths_E_N1_B_N2,
-            InBlockCopyClusterLengths_E_N1_B_N2,
-            InBlockCopyThreadClusterArrangeOrder>({0, 0, b_block_data_on_global, 0}, {0, 0, 0, 0});
-#endif
 
         // weight tensor
         // tensor descriptor in device memory, src of blockwise copy
@@ -197,13 +186,11 @@ struct GridwiseConvolutionImplicitGemm_v4r1_nchw_kcyx_nkhw
             Sequence<EPerBlock, KPerBlock>{},
             Number<WeiBlockCopyDstDataPerWrite_K>{});
 
-#if 0 // debug
         // operator for blockwise copy of weight into LDS
         // slice a tensor, and copy it into another tensor
         // this copy operator already have blockwise offset built-in
         auto blockwise_wei_copy =
-            BlockwiseGenericTensorSliceCopy_v1(
+            BlockwiseGenericTensorSliceCopy_v2(
                 {0, k_block_data_on_global}, {0, 0});
-#else
-        auto blockwise_wei_copy = BlockwiseGenericTensorSliceCopy_v2<
-            BlockSize,
-            decltype(wei_e_k_global_desc),
-            decltype(wei_e_k_block_desc),
-            NormalTensorCoordinate,
-            NormalTensorCoordinate,
-            decltype(wei_e_k_block_desc.GetLengths()),
-            WeiBlockCopySubLengths_E_K,
-            WeiBlockCopyClusterLengths_E_K,
-            WeiBlockCopyThreadClusterArrangeOrder>({0, k_block_data_on_global}, {0, 0});
-#endif
 
         // GEMM definition
         // c_mtx += transpose(a_mtx) * b_mtx
@@ -300,13 +277,8 @@
 
             __syncthreads();
 
-#if 0
-            blockwise_in_copy.MoveSlicingWindowOnSourceTensor(I0, Number<EPerBlock>{}, True);
-            blockwise_wei_copy.MoveSlicingWindowOnSourceTensor(I0, Number<EPerBlock>{}, True);
-#else
-            blockwise_in_copy.MoveSrcSliceWindow({EPerBlock, 0, 0, 0}, true);
-            blockwise_wei_copy.MoveSrcSliceWindow({EPerBlock, 0}, true);
-#endif
+            blockwise_in_copy.MoveSrcSliceWindow(make_multi_index(EPerBlock, 0, 0, 0), True);
+            blockwise_wei_copy.MoveSrcSliceWindow(make_multi_index(EPerBlock, 0), True);
         }
 
         // copy output: register to global memory
@@ -356,27 +328,17 @@
                 out_k_n1_b_n2_global_merged_desc.GetOffsetFromMultiIndex(
                     k_thread_data_on_global, 0, b_thread_data_on_global, 0);
 
-#if 0 // debug
-            threadwise_generic_tensor_slice_copy_v1(
-                out_n0_n1_n2_k0_k1_k2_h_w_thread_desc,
-                p_out_thread,
-                {0, 0, 0, 0, 0, 0, 0, 0},
-                out_n0_n1_n2_k0_k1_k2_h_w_global_mem_desc,
-                p_out_thread_on_global,
-                {0, 0, 0, 0, 0, 0, 0, 0},
-                out_n0_n1_n2_k0_k1_k2_h_w_thread_desc.GetLengths(),
-                arithmetic_sequence_gen<0, 8, 1>::type{},
-                Number<1>{});
-#else
-            ThreadwiseGenericTensorSliceCopy_v2<
+            ThreadwiseGenericTensorSliceCopy_v2r1<
                 decltype(out_n0_n1_n2_k0_k1_k2_h_w_thread_desc),
                 decltype(out_n0_n1_n2_k0_k1_k2_h_w_global_mem_desc),
-                NormalTensorCoordinate,
-                MergedTensorCoordinate,
-                decltype(out_n0_n1_n2_k0_k1_k2_h_w_thread_desc.GetLengths())>(
-                {0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0})
+                decltype(out_n0_n1_n2_k0_k1_k2_h_w_thread_desc.GetLengths()),
+                arithmetic_sequence_gen<0, 8, 1>::type,
+                arithmetic_sequence_gen<0, 8, 1>::type,
+                7,
+                7,
+                1,
+                1>({0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0})
                 .Run(p_out_thread, p_out_thread_on_global);
-#endif
         }
     }
 };
diff --git a/composable_kernel/include/kernel_algorithm/gridwise_convolution_implicit_gemm_v4r1_nchw_kcyx_nkhw_padded.hpp b/composable_kernel/include/kernel_algorithm/gridwise_convolution_implicit_gemm_v4r1_nchw_kcyx_nkhw_padded.hpp
new file mode 100644
index 0000000000..e987161395
--- /dev/null
+++ b/composable_kernel/include/kernel_algorithm/gridwise_convolution_implicit_gemm_v4r1_nchw_kcyx_nkhw_padded.hpp
@@ -0,0 +1,349 @@
+#ifndef CK_GRIDWISE_CONVOLUTION_IMPLICIT_GEMM_V4R1_NCHW_KCYX_NKHW_PADDED_HPP
+#define CK_GRIDWISE_CONVOLUTION_IMPLICIT_GEMM_V4R1_NCHW_KCYX_NKHW_PADDED_HPP
+
+#include "common_header.hpp"
+#include "ConstantTensorDescriptor.hpp"
+#include "ConstantMergedTensorDescriptor.hpp"
+#include "ConstantMatrixDescriptor.hpp"
+#include "blockwise_generic_tensor_slice_copy.hpp"
+#include "blockwise_gemm.hpp"
+#include "threadwise_generic_tensor_slice_copy.hpp"
+
+namespace ck {
+
+// define B = merge(N0, Ho, Wo)
+template <index_t GridSize,
+          index_t BlockSize,
+          class Float,
+          class InGlobalDesc,
+          class WeiGlobalDesc,
+          class OutGlobalDesc,
+          class ConvStrides,
+          class ConvDilations,
+          class LeftPads,
+          class RightPads,
+          index_t BPerBlock,
+          index_t KPerBlock,
+          index_t EPerBlock,
+          index_t GemmNRepeat,
+          index_t GemmMPerThreadSubC,
+          index_t GemmNPerThreadSubC,
+          index_t GemmMLevel0Cluster,
+          index_t GemmNLevel0Cluster,
+          index_t GemmMLevel1Cluster,
+          index_t GemmNLevel1Cluster,
+          index_t GemmKPerThreadLoop,
+          index_t GemmDataPerReadA,
+          index_t GemmDataPerReadB,
+          class InBlockCopySubLengths_E_N1_B_N2,
+          class InBlockCopyClusterLengths_E_N1_B_N2,
+          class InBlockCopyThreadClusterArrangeOrder,
+          class InBlockCopySrcAccessOrder,
+          class InBlockCopyDstAccessOrder,
+          index_t InBlockCopySrcDataPerRead_B,
+          index_t InBlockCopyDstDataPerWrite_N2,
+          class WeiBlockCopySubLengths_E_K,
+          class WeiBlockCopyClusterLengths_E_K,
+          class WeiBlockCopyThreadClusterArrangeOrder,
+          class WeiBlockCopySrcAccessOrder,
+          class WeiBlockCopyDstAccessOrder,
+          index_t WeiBlockCopySrcDataPerRead_E,
+          index_t WeiBlockCopyDstDataPerWrite_K>
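+// note (reading aid, not part of the interface): this kernel treats the
+// convolution as an implicit GEMM
+//     c_mtx[K, N1 * B * N2] += transpose(a_mtx[E, K]) * b_mtx[E, N1 * B * N2]
+// with E = C * Y * X and B = N0 * Ho * Wo, where N = N0 * N1 * N2.
+// Illustrative numbers for the 3x3, 34x34 configuration selected in
+// driver.cpp (N = 64, C = 256, K = 128, Ho = Wo = 32, N1 = 2, N2 = 4):
+// E = 256 * 3 * 3 = 2304, N0 = 64 / 8 = 8, B = 8 * 32 * 32 = 8192.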
+struct GridwiseConvolutionImplicitGemm_v4r1_nchw_kcyx_nkhw_padded
+{
+    __device__ void Run(const Float* const __restrict__ p_in_global,
+                        const Float* const __restrict__ p_wei_global,
+                        Float* const __restrict__ p_out_global) const
+    {
+        // this is a mess
+        // TODO: find more elegant way of specifying (or calculating) performance parameters
+        constexpr index_t N1 = GemmNRepeat;
+        constexpr index_t N2 = GemmNPerThreadSubC;
+
+        static_assert((N1 * N2 * BPerBlock) %
+                              (GemmNPerThreadSubC * GemmNLevel0Cluster * GemmNLevel1Cluster) ==
+                          0,
+                      "wrong!");
+
+        constexpr auto I0 = Number<0>{};
+        constexpr auto I1 = Number<1>{};
+        constexpr auto I2 = Number<2>{};
+        constexpr auto I3 = Number<3>{};
+        constexpr auto I5 = Number<5>{};
+
+        constexpr auto True = integral_constant<bool, true>{};
+
+        constexpr auto in_n_c_h_w_global_desc  = InGlobalDesc{};
+        constexpr auto wei_k_c_y_x_global_desc = WeiGlobalDesc{};
+        constexpr auto out_n_k_h_w_global_desc = OutGlobalDesc{};
+
+        constexpr index_t N = in_n_c_h_w_global_desc.GetLength(I0);
+        constexpr index_t C = in_n_c_h_w_global_desc.GetLength(I1);
+
+        constexpr index_t K  = out_n_k_h_w_global_desc.GetLength(I1);
+        constexpr index_t Ho = out_n_k_h_w_global_desc.GetLength(I2);
+        constexpr index_t Wo = out_n_k_h_w_global_desc.GetLength(I3);
+
+        constexpr index_t Y = wei_k_c_y_x_global_desc.GetLength(I2);
+        constexpr index_t X = wei_k_c_y_x_global_desc.GetLength(I3);
+
+        constexpr index_t ConvStrideH = ConvStrides{}[0];
+        constexpr index_t ConvStrideW = ConvStrides{}[1];
+
+        constexpr index_t ConvDilationH = ConvDilations{}[0];
+        constexpr index_t ConvDilationW = ConvDilations{}[1];
+
+        static_assert(N % (N1 * N2) == 0, "wrong! cannot divide N evenly among threads");
+
+        constexpr index_t N0 = N / (N1 * N2);
+
+        constexpr index_t B = N0 * Ho * Wo;
+
+        constexpr index_t E = C * Y * X;
+
+        // sanity-check for vectorized memory load
+        static_assert(ConvStrideW == 1 || InBlockCopySrcDataPerRead_B == 1,
+                      "wrong! global vector load of input tensor is wrong");
+
+        static_assert((X == 1 || ConvDilationW % InBlockCopySrcDataPerRead_B == 0),
+                      "wrong! alignment requirement for vectorized global load of input tensor "
+                      "will be violated");
+
+        // divide block work by [K, B]
+        static_assert(K % KPerBlock == 0 && B % BPerBlock == 0 && E % EPerBlock == 0,
+                      "wrong! cannot divide work evenly among blocks");
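+
+        // e.g. the driver configuration yields K / KPerBlock = 128 / 128 = 1
+        // and B / BPerBlock = 8192 / 16 = 512, i.e. a grid of 512 blocks,
+        // matching the GridSize computed in the driver (illustrative only)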
cannot divide work evenly among block"); + + constexpr index_t KBlockWork = K / KPerBlock; + constexpr index_t BBlockWork = B / BPerBlock; + + constexpr auto block_work_desc = + make_ConstantTensorDescriptor_packed(Sequence{}); + + const auto block_work_multi_id = + block_work_desc.GetMultiIndexFrom1dIndex(get_block_1d_id()); + + const index_t k_block_data_on_global = block_work_multi_id[0] * KPerBlock; + const index_t b_block_data_on_global = block_work_multi_id[1] * BPerBlock; + + // input tensor + // tensor descriptor in device memory [N0, N1, N2, Ho, Wo] + constexpr auto in_n0_n1_n2_h_w_global_desc = + in_n_c_h_w_global_desc.StridedSlice(I2, Number{}, Number{}) + .StridedSlice(I3, Number{}, Number{}) + .Fold(I0, Number{}, Number{}) + .Extract(Sequence<0, 1, 2, 4, 5>{}); + + // batch descritpor for device memory + constexpr auto in_c_y_x_global_desc = + in_n_c_h_w_global_desc.StridedSlice(I2, Number{}, Number{}) + .StridedSlice(I3, Number{}, Number{}) + .Extract(Sequence<1, 2, 3>{}); + + // merged tensor descriptor in device memory [E, N1, B, N2], src of blockwise copy + constexpr auto in_e_n1_b_n2_global_merged_desc = make_ConstantMergedTensorDescriptor( + in_c_y_x_global_desc.Embed(in_n0_n1_n2_h_w_global_desc), + Sequence<0, 1, 2>{}, + Sequence<4>{}, + Sequence<3, 6, 7>{}, + Sequence<5>{}); + + // memory layout descriptor in LDS [E, N1, B, N2], dst of blockwise copy + // be careful of LDS alignment + constexpr auto in_e_n1_b_n2_block_desc = make_ConstantTensorDescriptor_aligned( + Sequence{}, Number{}); + + // this check is ad-hoc + // TODO: need to properly implement tensor descriptor with multiple alignment + // requirements + static_assert(in_e_n1_b_n2_block_desc.GetStride(I1) % GemmDataPerReadB == 0, + "GemmDataPerReadB alignment requirement is not satisfied"); + + // input blockwise copy + // slice a merged tensor, reorder and copy to a normal tensor + // this copy operator already has blockwise offset built-in + auto blockwise_in_copy = + BlockwiseGenericTensorSliceCopy_v2( + {0, 0, b_block_data_on_global, 0}, {0, 0, 0, 0}); + + // weight tensor + // tensor descriptor in device memory, src of blockwise copy + constexpr auto wei_e_k_global_desc = + wei_k_c_y_x_global_desc.Unfold(I1, I3).ReorderGivenNew2Old(Sequence<1, 0>{}); + + // tensor descriptor in LDS, dst of blockwise copy + // be careful of LDS alignment + constexpr auto wei_e_k_block_desc = make_ConstantTensorDescriptor_aligned( + Sequence{}, + Number{}); + + // operator for blockwise copy of weight into LDS + // slice a tensor, and copy it into another tensor + // this copy operator already have blockwise offset built-in + auto blockwise_wei_copy = + BlockwiseGenericTensorSliceCopy_v2( + {0, k_block_data_on_global}, {0, 0}); + + // GEMM definition + // c_mtx += transpose(a_mtx) * b_mtx + // a_mtx[EPerBlock, KPerBlock] is in LDS + // b_mtx[EPerBlocl, N1 * BPerBlock * N2] is in LDS + // c_mtx[KPerBlock, N1 * BPerBlock * N2] is distributed among threads, and saved in + // register + constexpr auto a_e_k_block_mtx_desc = make_ConstantMatrixDescriptor(wei_e_k_block_desc); + + constexpr auto b_e_n1bn2_block_mtx_desc = + make_ConstantMatrixDescriptor(in_e_n1_b_n2_block_desc.Unfold(I1, I3)); + + // sanity check + static_assert(KPerBlock % (GemmMPerThreadSubC * GemmMLevel0Cluster * GemmMLevel1Cluster) == + 0, + "wrong!"); + + constexpr index_t GemmMRepeat = + KPerBlock / (GemmMPerThreadSubC * GemmMLevel0Cluster * GemmMLevel1Cluster); + + // c_thread_mtx definition: this is a mess + // TODO:: more elegent way of defining 
+        // c_thread_mtx definition: this is a mess
+        // TODO: more elegant way of defining c_thread_mtx
+        constexpr auto c_k0k2_n1n2_thread_mtx_desc = make_ConstantMatrixDescriptor_packed(
+            Number<GemmMRepeat * GemmMPerThreadSubC>{}, Number<N1 * N2>{});
+
+        const auto blockwise_gemm = BlockwiseGemmBlockABlockBThreadCTransANormalBNormalC_v2<
+            BlockSize,
+            decltype(a_e_k_block_mtx_desc),
+            decltype(b_e_n1bn2_block_mtx_desc),
+            decltype(c_k0k2_n1n2_thread_mtx_desc),
+            GemmMPerThreadSubC,
+            GemmNPerThreadSubC,
+            GemmMLevel0Cluster,
+            GemmNLevel0Cluster,
+            GemmMLevel1Cluster,
+            GemmNLevel1Cluster,
+            GemmKPerThreadLoop,
+            GemmDataPerReadA,
+            GemmDataPerReadB>{};
+
+        // LDS allocation for input and weight: be careful of alignment
+        constexpr index_t max_align = math::lcm(InBlockCopyDstDataPerWrite_N2,
+                                                WeiBlockCopyDstDataPerWrite_K,
+                                                GemmDataPerReadA,
+                                                GemmDataPerReadB);
+
+        constexpr index_t in_block_space =
+            math::integer_least_multiple(in_e_n1_b_n2_block_desc.GetElementSpace(), max_align);
+
+        constexpr index_t wei_block_space =
+            math::integer_least_multiple(wei_e_k_block_desc.GetElementSpace(), max_align);
+
+        __shared__ Float p_in_block[in_block_space];
+        __shared__ Float p_wei_block[wei_block_space];
+
+        // register allocation for output
+        Float p_out_thread[c_k0k2_n1n2_thread_mtx_desc.GetElementSpace()];
+
+        // zero out threadwise output
+        threadwise_matrix_set_zero(c_k0k2_n1n2_thread_mtx_desc, p_out_thread);
+
+        // do work
+        for(index_t e = 0; e < E; e += EPerBlock)
+        {
+            blockwise_in_copy.Run(p_in_global, p_in_block);
+            blockwise_wei_copy.Run(p_wei_global, p_wei_block);
+
+            __syncthreads();
+
+            blockwise_gemm.Run(p_wei_block, p_in_block, p_out_thread);
+
+            __syncthreads();
+
+            blockwise_in_copy.MoveSrcSliceWindow(make_multi_index(EPerBlock, 0, 0, 0), True);
+            blockwise_wei_copy.MoveSrcSliceWindow(make_multi_index(EPerBlock, 0), True);
+        }
+
+        // copy output: register to global memory
+        {
+            constexpr index_t K2 = GemmMPerThreadSubC;
+            constexpr index_t K1 = GemmMLevel0Cluster * GemmMLevel1Cluster;
+
+            // define tensor descriptor for threadwise copy
+            // output memory layout descriptor in register
+            constexpr auto out_k0_k1_k2_n1_n0_h_w_n2_thread_mem_desc =
+                make_ConstantTensorDescriptor_packed(
+                    Sequence<GemmMRepeat, 1, GemmMPerThreadSubC, N1, 1, 1, 1, N2>{});
+
+            // output tensor descriptor in register, src of threadwise copy
+            constexpr auto out_n0_n1_n2_k0_k1_k2_h_w_thread_desc =
+                out_k0_k1_k2_n1_n0_h_w_n2_thread_mem_desc.ReorderGivenNew2Old(
+                    Sequence<4, 3, 7, 0, 1, 2, 5, 6>{});
+
+            // output memory layout descriptor in device memory, dst of threadwise copy
+            constexpr auto out_n0_n1_n2_k0_k1_k2_h_w_global_mem_desc =
+                out_n_k_h_w_global_desc.Fold(I1, Number<K1>{}, Number<K2>{})
+                    .Fold(I0, Number<N1>{}, Number<N2>{});
+
+            // calculate origin of thread output tensor on global memory
+            // blockwise GEMM c matrix starting index
+            const auto c_thread_mtx_on_block =
+                blockwise_gemm.GetBeginOfThreadMatrixC(get_thread_local_1d_id());
+
+            const index_t k_thread_data_on_global =
+                k_block_data_on_global + c_thread_mtx_on_block.row;
+
+            const index_t b_thread_data_on_global =
+                b_block_data_on_global + c_thread_mtx_on_block.col / N2;
+
+            // output merged global tensor descriptor, for calculating origin of thread tensor
+            // in global memory
+            constexpr auto out_k_n1_b_n2_global_merged_desc = make_ConstantMergedTensorDescriptor(
+                out_n0_n1_n2_k0_k1_k2_h_w_global_mem_desc.Unfold(I3, I5),
+                Sequence<3>{},
+                Sequence<1>{},
+                Sequence<0, 4, 5>{},
+                Sequence<2>{});
+
+            // origin of dst in device memory
+            Float* p_out_thread_on_global =
+                p_out_global +
+                out_k_n1_b_n2_global_merged_desc.GetOffsetFromMultiIndex(
+                    k_thread_data_on_global, 0, b_thread_data_on_global, 0);
+
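+            // Reading aid (illustrative gloss, not normative): the statement
+            // below copies the per-thread tile [N0, N1, N2, K0, K1, K2, Ho, Wo]
+            // out to the folded global descriptor. Here K is folded as
+            // K = K0 * K1 * K2 with K2 = GemmMPerThreadSubC and
+            // K1 = GemmMLevel0Cluster * GemmMLevel1Cluster (e.g. K2 = 4,
+            // K1 = 16, K0 = GemmMRepeat = 2 for the driver configuration);
+            // both access orders are identity, and dim 7 with one element per
+            // access makes the final store a plain scalar write.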
+            ThreadwiseGenericTensorSliceCopy_v2r1<
+                decltype(out_n0_n1_n2_k0_k1_k2_h_w_thread_desc),
+                decltype(out_n0_n1_n2_k0_k1_k2_h_w_global_mem_desc),
+                decltype(out_n0_n1_n2_k0_k1_k2_h_w_thread_desc.GetLengths()),
+                arithmetic_sequence_gen<0, 8, 1>::type,
+                arithmetic_sequence_gen<0, 8, 1>::type,
+                7,
+                7,
+                1,
+                1>({0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0})
+                .Run(p_out_thread, p_out_thread_on_global);
+        }
+    }
+};
+
+} // namespace ck
+#endif
diff --git a/composable_kernel/include/tensor_description/ConstantMergedTensorDescriptor.hpp b/composable_kernel/include/tensor_description/ConstantMergedTensorDescriptor.hpp
index 01653ffb1f..b00552e07c 100644
--- a/composable_kernel/include/tensor_description/ConstantMergedTensorDescriptor.hpp
+++ b/composable_kernel/include/tensor_description/ConstantMergedTensorDescriptor.hpp
@@ -111,7 +111,7 @@ struct ConstantMergedTensorDescriptor
 
             index_t itmp = original_multi_id_partial[I];
 
-            original_multi_id.Set(Number<idim_original>{}, itmp);
+            original_multi_id(idim_original) = itmp;
         }
     };
diff --git a/composable_kernel/include/tensor_operation/blockwise_generic_tensor_slice_copy.hpp b/composable_kernel/include/tensor_operation/blockwise_generic_tensor_slice_copy.hpp
index 071996fa60..cec6f08c4e 100644
--- a/composable_kernel/include/tensor_operation/blockwise_generic_tensor_slice_copy.hpp
+++ b/composable_kernel/include/tensor_operation/blockwise_generic_tensor_slice_copy.hpp
@@ -518,7 +518,7 @@ struct BlockwiseGenericTensorSliceCopy_v2
     }
 
     private:
-    using RegisterBufferDesc = decltype(make_native_tensor_descriptor_packed(SubLengths{}));
+    using RegisterBufferDesc = decltype(make_ConstantTensorDescriptor_packed(SubLengths{}));
 
     using ThreadwiseLoad = ThreadwiseGenericTensorSliceCopy_v2r1
diff --git a/driver/include/device_convolution_implicit_gemm_v1_chwn_cyxk_khwn_padded.hpp b/driver/include/device_convolution_implicit_gemm_v1_chwn_cyxk_khwn_padded.hpp
--- a/driver/include/device_convolution_implicit_gemm_v1_chwn_cyxk_khwn_padded.hpp
+++ b/driver/include/device_convolution_implicit_gemm_v1_chwn_cyxk_khwn_padded.hpp
-template
+template
 void device_convolution_implicit_gemm_v1_chwn_cyxk_khwn_padded(InDesc,
                                                                const Tensor<T>& in_nchw,
                                                                WeiDesc,
                                                                const Tensor<T>& wei_kcyx,
                                                                OutDesc,
                                                                Tensor<T>& out_nkhw,
-                                                               LowerPads,
-                                                               UpperPads,
+                                                               LeftPads,
+                                                               RightPads,
                                                                index_t nrepeat)
 {
     constexpr auto I0 = Number<0>{};
@@ -131,8 +131,8 @@ void device_convolution_implicit_gemm_v1_chwn_cyxk_khwn_padded(InDesc,
         decltype(in_chwn_desc),
         decltype(wei_cyxk_desc),
         decltype(out_khwn_desc),
-        LowerPads,
-        UpperPads,
+        LeftPads,
+        RightPads,
         NPerBlock,
         KPerBlock,
         CPerBlock,
diff --git a/driver/include/device_convolution_implicit_gemm_v4r1_nchw_kcyx_nkhw.hpp b/driver/include/device_convolution_implicit_gemm_v4r1_nchw_kcyx_nkhw.hpp
index 69a82c1268..fc8d0e7adc 100644
--- a/driver/include/device_convolution_implicit_gemm_v4r1_nchw_kcyx_nkhw.hpp
+++ b/driver/include/device_convolution_implicit_gemm_v4r1_nchw_kcyx_nkhw.hpp
@@ -3,7 +3,7 @@
 #include "device.hpp"
 #include "tensor.hpp"
 #include "gridwise_convolution_kernel_wrapper.hpp"
-//#include "gridwise_convolution_implicit_gemm_v4r1_nchw_kcyx_nkhw.hpp"
+#include "gridwise_convolution_implicit_gemm_v4r1_nchw_kcyx_nkhw.hpp"
 #include "gridwise_convolution_implicit_gemm_v4r1_nchw_kcyx_nkhw_lds_double_buffer.hpp"
 
 template
diff --git a/driver/include/device_convolution_implicit_gemm_v4r1_nchw_kcyx_nkhw_padded.hpp b/driver/include/device_convolution_implicit_gemm_v4r1_nchw_kcyx_nkhw_padded.hpp
new file mode 100644
--- /dev/null
+++ b/driver/include/device_convolution_implicit_gemm_v4r1_nchw_kcyx_nkhw_padded.hpp
+#pragma once
+#include <unistd.h>
+#include "device.hpp"
+#include "tensor.hpp"
+#include "gridwise_convolution_kernel_wrapper.hpp"
+#include "gridwise_convolution_implicit_gemm_v4r1_nchw_kcyx_nkhw_padded.hpp"
+
+template <class T,
+          class InDesc,
+          class WeiDesc,
+          class OutDesc,
+          class ConvStrides,
+          class ConvDilations,
+          class LeftPads,
+          class RightPads>
+void device_convolution_implicit_gemm_v4r1_nchw_kcyx_nkhw_padded(InDesc,
+                                                                 const Tensor<T>& in_nchw,
+                                                                 WeiDesc,
+                                                                 const Tensor<T>& wei_kcyx,
+                                                                 OutDesc,
+                                                                 Tensor<T>& out_nkhw,
+                                                                 ConvStrides,
+                                                                 ConvDilations,
+                                                                 LeftPads,
+                                                                 RightPads,
+                                                                 index_t nrepeat)
+{
+    using namespace ck;
+
+    constexpr auto I0 = Number<0>{};
+    constexpr auto I1 = Number<1>{};
+    constexpr auto I2 = Number<2>{};
+    constexpr auto I3 = Number<3>{};
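+
+    // note (illustrative): OutDesc is assumed to already encode the padded
+    // output sizes, i.e.
+    //   Ho = (HI + LeftPads[0] + RightPads[0] - (Y - 1) * ConvDilations[0] - 1) / ConvStrides[0] + 1
+    // and likewise for Wo; the lengths are only read back from it below.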
+
+    constexpr auto in_nchw_desc  = InDesc{};
+    constexpr auto wei_kcyx_desc = WeiDesc{};
+    constexpr auto out_nkhw_desc = OutDesc{};
+
+    constexpr index_t N  = out_nkhw_desc.GetLength(I0);
+    constexpr index_t K  = out_nkhw_desc.GetLength(I1);
+    constexpr index_t Ho = out_nkhw_desc.GetLength(I2);
+    constexpr index_t Wo = out_nkhw_desc.GetLength(I3);
+
+    std::size_t data_sz = sizeof(T);
+    DeviceMem in_nchw_device_buf(data_sz * in_nchw.mDesc.GetElementSpace());
+    DeviceMem wei_kcyx_device_buf(data_sz * wei_kcyx.mDesc.GetElementSpace());
+    DeviceMem out_nkhw_device_buf(data_sz * out_nkhw.mDesc.GetElementSpace());
+
+    in_nchw_device_buf.ToDevice(in_nchw.mData.data());
+    wei_kcyx_device_buf.ToDevice(wei_kcyx.mData.data());
+    out_nkhw_device_buf.ToDevice(out_nkhw.mData.data());
+
+#if 1
+    // each thread holds 64 data
+    constexpr index_t BlockSize = 256;
+
+    constexpr index_t BPerBlock = 16;
+    constexpr index_t KPerBlock = 128;
+    constexpr index_t EPerBlock = 8;
+
+    constexpr index_t GemmNRepeat = 2;
+
+    constexpr index_t GemmMPerThreadSubC = 4;
+    constexpr index_t GemmNPerThreadSubC = 4;
+    constexpr index_t GemmMLevel0Cluster = 4;
+    constexpr index_t GemmNLevel0Cluster = 4;
+    constexpr index_t GemmMLevel1Cluster = 4;
+    constexpr index_t GemmNLevel1Cluster = 4;
+    constexpr index_t GemmKPerThreadLoop = 1;
+    constexpr index_t GemmDataPerReadA   = 4;
+    constexpr index_t GemmDataPerReadB   = 4;
+
+    using InBlockCopySubLengths_E_N1_B_N2      = Sequence<1, 1, 1, 4>;
+    using InBlockCopyClusterLengths_E_N1_B_N2  = Sequence<8, 2, 16, 1>;
+    using InBlockCopyThreadClusterArrangeOrder = Sequence<0, 1, 3, 2>; // [E, N1, N2, B]
+    using InBlockCopySrcAccessOrder            = Sequence<0, 1, 3, 2>; // [E, N1, N2, B]
+    using InBlockCopyDstAccessOrder            = Sequence<0, 1, 2, 3>; // [E, N1, B, N2]
+
+    constexpr index_t InBlockCopySrcDataPerRead_B   = 1;
+    constexpr index_t InBlockCopyDstDataPerWrite_N2 = 4;
+
+    using WeiBlockCopySubLengths_E_K            = Sequence<4, 1>;
+    using WeiBlockCopyClusterLengths_E_K        = Sequence<2, 128>;
+    using WeiBlockCopyThreadClusterArrangeOrder = Sequence<1, 0>; // [K, E]
+    using WeiBlockCopySrcAccessOrder            = Sequence<1, 0>; // [K, E]
+    using WeiBlockCopyDstAccessOrder            = Sequence<0, 1>; // [E, K]
+
+    constexpr index_t WeiBlockCopySrcDataPerRead_E  = 4;
+    constexpr index_t WeiBlockCopyDstDataPerWrite_K = 1;
+#endif
+
+    constexpr index_t N1 = GemmNRepeat;
+    constexpr index_t N2 = GemmNPerThreadSubC;
+
+    constexpr index_t B = (N * Ho * Wo) / (N1 * N2);
+
+    constexpr index_t GridSize =
+        ((B + BPerBlock - 1) / BPerBlock) * ((K + KPerBlock - 1) / KPerBlock);
+
+    printf("%s: BlockSize %u, GridSize %u \n", __func__, BlockSize, GridSize);
+
+    for(index_t i = 0; i < nrepeat; ++i)
+    {
+        constexpr auto gridwise_conv = GridwiseConvolutionImplicitGemm_v4r1_nchw_kcyx_nkhw_padded<
+            GridSize,
+            BlockSize,
+            T,
+            decltype(in_nchw_desc),
+            decltype(wei_kcyx_desc),
+            decltype(out_nkhw_desc),
+            ConvStrides,
+            ConvDilations,
+            LeftPads,
+            RightPads,
+            BPerBlock,
+            KPerBlock,
+            EPerBlock,
+            GemmNRepeat,
+            GemmMPerThreadSubC,
+            GemmNPerThreadSubC,
+            GemmMLevel0Cluster,
+            GemmNLevel0Cluster,
+            GemmMLevel1Cluster,
+            GemmNLevel1Cluster,
+            GemmKPerThreadLoop,
+            GemmDataPerReadA,
+            GemmDataPerReadB,
+            InBlockCopySubLengths_E_N1_B_N2,
+            InBlockCopyClusterLengths_E_N1_B_N2,
+            InBlockCopyThreadClusterArrangeOrder,
+            InBlockCopySrcAccessOrder,
+            InBlockCopyDstAccessOrder,
+            InBlockCopySrcDataPerRead_B,
+            InBlockCopyDstDataPerWrite_N2,
+            WeiBlockCopySubLengths_E_K,
+            WeiBlockCopyClusterLengths_E_K,
+            WeiBlockCopyThreadClusterArrangeOrder,
+            WeiBlockCopySrcAccessOrder,
+            WeiBlockCopyDstAccessOrder,
+            WeiBlockCopySrcDataPerRead_E,
+            WeiBlockCopyDstDataPerWrite_K>{};
+
+        float time =
+            launch_kernel(run_gridwise_convolution_kernel<decltype(gridwise_conv), T>,
+                          dim3(GridSize),
+                          dim3(BlockSize),
+                          0,
+                          static_cast<T*>(in_nchw_device_buf.GetDeviceBuffer()),
+                          static_cast<T*>(wei_kcyx_device_buf.GetDeviceBuffer()),
+                          static_cast<T*>(out_nkhw_device_buf.GetDeviceBuffer()));
+
+        printf("Elapsed time : %f ms, %f TFlop/s\n",
+               time,
+               (float)calculate_convolution_flops(InDesc{}, WeiDesc{}, OutDesc{}) /
+                   (std::size_t(1000) * 1000 * 1000) / time);
+
+        usleep(std::min(time * 1000, float(10000)));
+    }
+
+    out_nkhw_device_buf.FromDevice(out_nkhw.mData.data());
+}
diff --git a/driver/src/CMakeLists.txt b/driver/src/CMakeLists.txt
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/driver/src/driver.cpp b/driver/src/driver.cpp
index 177386ab8b..558abb207e 100644
--- a/driver/src/driver.cpp
+++ b/driver/src/driver.cpp
@@ -15,6 +15,7 @@
 //#include "device_convolution_implicit_gemm_v2_chwn_cyxk_khwn.hpp"
 //#include "device_convolution_implicit_gemm_v3_nchw_cyxk_nkhw.hpp"
 #include "device_convolution_implicit_gemm_v4r1_nchw_kcyx_nkhw.hpp"
+#include "device_convolution_implicit_gemm_v4r1_nchw_kcyx_nkhw_padded.hpp"
 //#include "device_convolution_implicit_gemm_v4r2_nchw_kcyx_nkhw.hpp"
 //#include "device_convolution_implicit_gemm_v4r3_nchw_kcyx_nkhw.hpp"
 #include "device_convolution_implicit_gemm_v4r4_nchw_kcyx_nkhw.hpp"
@@ -90,8 +91,8 @@ int main(int argc, char* argv[])
     // 3x3, 34x34
     constexpr index_t N  = 64;
     constexpr index_t C  = 256;
-    constexpr index_t HI = 32;
-    constexpr index_t WI = 32;
+    constexpr index_t HI = 34;
+    constexpr index_t WI = 34;
     constexpr index_t K  = 128;
     constexpr index_t Y  = 3;
     constexpr index_t X  = 3;
@@ -99,8 +100,8 @@ int main(int argc, char* argv[])
     using ConvStrides   = Sequence<1, 1>;
     using ConvDilations = Sequence<1, 1>;
 
-    using LeftPads  = Sequence<1, 1>;
-    using RightPads = Sequence<1, 1>;
+    using LeftPads  = Sequence<0, 0>;
+    using RightPads = Sequence<0, 0>;
 #elif 0
     // 1x1 filter, 8x8 image
     // cudnn@V100 68%, ck@V100 72%, ck@P100 52%, ck@VII 42%
@@ -368,7 +369,7 @@ int main(int argc, char* argv[])
 #elif 0
     device_convolution_implicit_gemm_v1_chwn_cyxk_khwn(
         in_nchw_desc, in_nchw, wei_kcyx_desc, wei_kcyx, out_nkhw_desc, out_nkhw_device, nrepeat);
-#elif 1
+#elif 0
     device_convolution_implicit_gemm_v1_chwn_cyxk_khwn_padded(in_nchw_desc,
                                                               in_nchw,
                                                               wei_kcyx_desc,
@@ -397,6 +398,18 @@ int main(int argc, char* argv[])
                                                          ConvStrides{},
                                                          ConvDilations{},
                                                          nrepeat);
+#elif 1
+    device_convolution_implicit_gemm_v4r1_nchw_kcyx_nkhw_padded(in_nchw_desc,
+                                                                in_nchw,
+                                                                wei_kcyx_desc,
+                                                                wei_kcyx,
+                                                                out_nkhw_desc,
+                                                                out_nkhw_device,
+                                                                ConvStrides{},
+                                                                ConvDilations{},
+                                                                LeftPads{},
+                                                                RightPads{},
+                                                                nrepeat);
 #elif 0
     device_convolution_implicit_gemm_v4r2_nchw_kcyx_nkhw(in_nchw_desc,
                                                          in_nchw,