diff --git a/composable_kernel/include/kernel_algorithm/gridwise_convolution_implicit_gemm_v4r1_nchw_kcyx_nkhw_padded_lds_double_buffer.hpp b/composable_kernel/include/kernel_algorithm/gridwise_convolution_implicit_gemm_v4r1_nchw_kcyx_nkhw_padded_lds_double_buffer.hpp
new file mode 100644
index 0000000000..803f6b1b60
--- /dev/null
+++ b/composable_kernel/include/kernel_algorithm/gridwise_convolution_implicit_gemm_v4r1_nchw_kcyx_nkhw_padded_lds_double_buffer.hpp
@@ -0,0 +1,441 @@
+#ifndef CK_GRIDWISE_CONVOLUTION_IMPLICIT_GEMM_V4R1_NCHW_KCYX_NKHW_PADDED_LDS_DOUBLE_BUFFER_HPP
+#define CK_GRIDWISE_CONVOLUTION_IMPLICIT_GEMM_V4R1_NCHW_KCYX_NKHW_PADDED_LDS_DOUBLE_BUFFER_HPP
+
+#include "common_header.hpp"
+#include "ConstantTensorDescriptor.hpp"
+#include "ConstantMergedTensorDescriptor.hpp"
+#include "ConstantMatrixDescriptor.hpp"
+#include "blockwise_generic_tensor_slice_copy.hpp"
+#include "blockwise_gemm.hpp"
+#include "threadwise_generic_tensor_slice_copy.hpp"
+
+namespace ck {
+
+// define B = merge(N0, Ho, Wo)
+template
+struct GridwiseConvolutionImplicitGemm_v4r1_nchw_kcyx_nkhw_padded_lds_double_buffer
+{
+    __device__ void Run(const Float* const __restrict__ p_in_global,
+                        const Float* const __restrict__ p_wei_global,
+                        Float* const __restrict__ p_out_global) const
+    {
+        // this is a mess
+        // TODO: find a more elegant way of specifying (or calculating) performance parameters
+        constexpr index_t N1 = GemmNRepeat;
+        constexpr index_t N2 = GemmNPerThreadSubC;
+
+        static_assert((N1 * N2 * BPerBlock) %
+                              (GemmNPerThreadSubC * GemmNLevel0Cluster * GemmNLevel1Cluster) ==
+                          0,
+                      "wrong!");
+
+        constexpr auto I0 = Number<0>{};
+        constexpr auto I1 = Number<1>{};
+        constexpr auto I2 = Number<2>{};
+        constexpr auto I3 = Number<3>{};
+
+        constexpr auto True = integral_constant{};
+
+        constexpr auto in_n_c_hi_wi_global_desc =
+            make_native_tensor_descriptor(InGlobalDesc::GetLengths(), InGlobalDesc::GetStrides());
+        constexpr auto wei_k_c_y_x_global_desc =
+            make_native_tensor_descriptor(WeiGlobalDesc::GetLengths(), WeiGlobalDesc::GetStrides());
+        constexpr auto out_n_k_ho_wo_global_desc =
+            make_native_tensor_descriptor(OutGlobalDesc::GetLengths(), OutGlobalDesc::GetStrides());
+
+        constexpr index_t N  = in_n_c_hi_wi_global_desc.GetLength(I0);
+        constexpr index_t C  = in_n_c_hi_wi_global_desc.GetLength(I1);
+        constexpr index_t Hi = in_n_c_hi_wi_global_desc.GetLength(I2);
+        constexpr index_t Wi = in_n_c_hi_wi_global_desc.GetLength(I3);
+
+        constexpr index_t K  = out_n_k_ho_wo_global_desc.GetLength(I1);
+        constexpr index_t Ho = out_n_k_ho_wo_global_desc.GetLength(I2);
+        constexpr index_t Wo = out_n_k_ho_wo_global_desc.GetLength(I3);
+
+        constexpr index_t Y = wei_k_c_y_x_global_desc.GetLength(I2);
+        constexpr index_t X = wei_k_c_y_x_global_desc.GetLength(I3);
+
+        constexpr index_t ConvStrideH = ConvStrides{}[0];
+        constexpr index_t ConvStrideW = ConvStrides{}[1];
+
+        constexpr index_t ConvDilationH = ConvDilations{}[0];
+        constexpr index_t ConvDilationW = ConvDilations{}[1];
+
+        static_assert(N % (N1 * N2) == 0, "wrong! cannot divide N evenly among threads");
+
+        constexpr index_t N0 = N / (N1 * N2);
+
+        constexpr index_t B = N0 * Ho * Wo;
+
+        constexpr index_t E = C * Y * X;
+
+        // sanity-check for vectorized memory load
+        static_assert((Ho == 1 || ConvStrideW % InBlockCopySrcDataPerRead_B == 0) &&
+                          (X == 1 || ConvDilationW % InBlockCopySrcDataPerRead_B == 0),
+                      "wrong! alignment requirement for vectorized global load of input tensor "
+                      "will be violated");
+
+        // divide block work by [K, B]
+        static_assert(K % KPerBlock == 0 && B % BPerBlock == 0 && E % EPerBlock == 0,
+                      "wrong! cannot divide work evenly among blocks");
+
+        constexpr index_t KBlockWork = K / KPerBlock;
+        constexpr index_t BBlockWork = B / BPerBlock;
+
+        constexpr auto block_work_desc =
+            make_ConstantTensorDescriptor_packed(Sequence{});
+
+        const auto block_work_multi_id =
+            block_work_desc.GetMultiIndexFrom1dIndex(get_block_1d_id());
+
+        const index_t k_block_data_on_global = block_work_multi_id[0] * KPerBlock;
+        const index_t b_block_data_on_global = block_work_multi_id[1] * BPerBlock;
+
+        // input tensor
+        // global memory
+        constexpr auto in_n_c_hip_wip_global_desc = transform_tensor_descriptor(
+            in_n_c_hi_wi_global_desc,
+            make_tuple(
+                PassThrough{}, PassThrough{}, Pad, LeftPads, RightPads>{}),
+            make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2, 3>{}),
+            make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2, 3>{}));
+
+        constexpr auto in_n0_n1_n2_c_y_ho_x_wo_global_desc = transform_tensor_descriptor(
+            in_n_c_hip_wip_global_desc,
+            make_tuple(Unmerge>{},
+                       PassThrough{},
+                       Embed, Sequence>{},
+                       Embed, Sequence>{}),
+            make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2>{}, Sequence<3>{}),
+            make_tuple(Sequence<0, 1, 2>{}, Sequence<3>{}, Sequence<4, 5>{}, Sequence<6, 7>{}));
+
+        constexpr auto in_e_n1_b_n2_global_desc = transform_tensor_descriptor(
+            in_n0_n1_n2_c_y_ho_x_wo_global_desc,
+            make_tuple(Merge>{},
+                       PassThrough{},
+                       Merge>{},
+                       PassThrough{}),
+            make_tuple(Sequence<3, 4, 6>{}, Sequence<1>{}, Sequence<0, 5, 7>{}, Sequence<2>{}),
+            make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2>{}, Sequence<3>{}));
+
+        // memory layout descriptor in LDS [E, N1, B, N2], dst of blockwise copy
+        // be careful of LDS alignment
+        constexpr auto in_e_n1_b_n2_block_desc = make_native_tensor_descriptor_aligned(
+            Sequence{}, Number{});
+
+        // this check is ad-hoc
+        // TODO: need to properly implement tensor descriptor with multiple alignment
+        // requirements
+        static_assert(in_e_n1_b_n2_block_desc.GetStride(I1) % GemmDataPerReadB == 0,
+                      "GemmDataPerReadB alignment requirement is not satisfied");
+
+        // input blockwise copy
+        // slice a merged tensor, reorder and copy to a normal tensor
+        // this copy operator already has blockwise offset built-in
+        auto blockwise_in_copy =
+            BlockwiseGenericTensorSliceCopy_v4(
+                {0, 0, b_block_data_on_global, 0}, {0, 0, 0, 0});
+
+        // weight tensor
+        // tensor descriptor in device memory, src of blockwise copy
+        constexpr auto wei_e_k_global_desc =
+            transform_tensor_descriptor(wei_k_c_y_x_global_desc,
+                                        make_tuple(Merge>{}, PassThrough{}),
+                                        make_tuple(Sequence<1, 2, 3>{}, Sequence<0>{}),
+                                        make_tuple(Sequence<0>{}, Sequence<1>{}));
+
+        // tensor descriptor in LDS, dst of blockwise copy
+        // be careful of LDS alignment
+        constexpr auto wei_e_k_block_desc = make_native_tensor_descriptor_aligned(
+            Sequence{},
+            Number{});
+
+        // operator for blockwise copy of weight into LDS
+        // slice a tensor, and copy it into another tensor
+        // this copy operator already has blockwise offset built-in
+        auto blockwise_wei_copy =
+            BlockwiseGenericTensorSliceCopy_v4(
+                {0, k_block_data_on_global}, {0, 0});
+
+        // GEMM definition
+        //   c_mtx += transpose(a_mtx) * b_mtx
+        //   a_mtx[EPerBlock, KPerBlock] is in LDS
+        //   b_mtx[EPerBlock, N1 * BPerBlock * N2] is in LDS
+        //   c_mtx[KPerBlock, N1 * BPerBlock * N2] is distributed among threads, and saved in
+        //   register
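+        //
+        // Illustrative sketch only (assumed semantics, not the actual implementation): with the
+        // A/B/C layouts named above, the blockwise GEMM invoked below effectively computes
+        //
+        //     for(index_t e = 0; e < EPerBlock; ++e)
+        //         for(index_t k = 0; k < KPerBlock; ++k)
+        //             for(index_t b = 0; b < N1 * BPerBlock * N2; ++b)
+        //                 c_mtx[k][b] += a_mtx[e][k] * b_mtx[e][b];
+        //
+        // i.e. the implicit GEMM reduces over E = C * Y * X, one EPerBlock chunk per iteration
+        // of the double-buffered loop further down.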
+        constexpr auto a_e_k_block_mtx_desc = make_ConstantMatrixDescriptor(wei_e_k_block_desc);
+
+        constexpr auto b_e_n1bn2_block_mtx_desc = make_ConstantMatrixDescriptor(
+            in_e_n1_b_n2_block_desc.GetLength(I0),
+            in_e_n1_b_n2_block_desc.GetLength(I1) * in_e_n1_b_n2_block_desc.GetLength(I2) *
+                in_e_n1_b_n2_block_desc.GetLength(I3),
+            in_e_n1_b_n2_block_desc.GetStride(I0));
+
+        // sanity check
+        static_assert(KPerBlock % (GemmMPerThreadSubC * GemmMLevel0Cluster * GemmMLevel1Cluster) ==
+                          0,
+                      "wrong!");
+
+        constexpr index_t GemmMRepeat =
+            KPerBlock / (GemmMPerThreadSubC * GemmMLevel0Cluster * GemmMLevel1Cluster);
+
+        // c_thread_mtx definition: this is a mess
+        // TODO: find a more elegant way of defining c_thread_mtx
+        constexpr auto c_k0k2_n1n2_thread_mtx_desc = make_ConstantMatrixDescriptor_packed(
+            Number{}, Number{});
+
+        const auto blockwise_gemm = BlockwiseGemmBlockABlockBThreadCTransANormalBNormalC_v2<
+            BlockSize,
+            decltype(a_e_k_block_mtx_desc),
+            decltype(b_e_n1bn2_block_mtx_desc),
+            decltype(c_k0k2_n1n2_thread_mtx_desc),
+            GemmMPerThreadSubC,
+            GemmNPerThreadSubC,
+            GemmMLevel0Cluster,
+            GemmNLevel0Cluster,
+            GemmMLevel1Cluster,
+            GemmNLevel1Cluster,
+            GemmKPerThreadLoop,
+            GemmDataPerReadA,
+            GemmDataPerReadB>{};
+
+        // LDS allocation for input and weight: be careful of alignment
+        constexpr index_t max_align = math::lcm(InBlockCopyDstDataPerWrite_N2,
+                                                WeiBlockCopyDstDataPerWrite_K,
+                                                GemmDataPerReadA,
+                                                GemmDataPerReadB);
+
+        constexpr index_t in_block_space =
+            math::integer_least_multiple(in_e_n1_b_n2_block_desc.GetElementSpace(), max_align);
+
+        constexpr index_t wei_block_space =
+            math::integer_least_multiple(wei_e_k_block_desc.GetElementSpace(), max_align);
+
+        __shared__ Float p_in_block_double[2 * in_block_space];
+        __shared__ Float p_wei_block_double[2 * wei_block_space];
+
+        // register allocation for output
+        Float p_out_thread[c_k0k2_n1n2_thread_mtx_desc.GetElementSpace()];
+
+        // zero out threadwise output
+        threadwise_matrix_set_zero(c_k0k2_n1n2_thread_mtx_desc, p_out_thread);
+
+        // LDS double buffer: preload data into LDS
+        {
+            blockwise_in_copy.Run(p_in_global, p_in_block_double);
+            blockwise_wei_copy.Run(p_wei_global, p_wei_block_double);
+        }
+
+        // LDS double buffer: main body
+        for(index_t e_block_data_begin = 0; e_block_data_begin + 2 * EPerBlock < E;
+            e_block_data_begin += 2 * EPerBlock)
+        {
+#pragma unroll
+            for(index_t iloop = 0; iloop < 2; ++iloop)
+            {
+                const bool even_loop = (iloop % 2 == 0);
+
+                Float* p_in_block_now =
+                    even_loop ? p_in_block_double : p_in_block_double + in_block_space;
+                Float* p_wei_block_now =
+                    even_loop ? p_wei_block_double : p_wei_block_double + wei_block_space;
+
+                Float* p_in_block_next =
+                    even_loop ? p_in_block_double + in_block_space : p_in_block_double;
+                Float* p_wei_block_next =
+                    even_loop ? p_wei_block_double + wei_block_space : p_wei_block_double;
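+
+                // Note: the intended schedule of the loop body below appears to be
+                //   1) advance the source slice windows by EPerBlock along E,
+                //   2) load the next E-chunk from global memory into registers,
+                //   3) run the blockwise GEMM on the current LDS buffer (filled last iteration),
+                //   4) write the registers into the other LDS buffer for the next iteration.
+                // The __syncthreads() makes the previous iteration's LDS writes visible before
+                // step 3) reads them.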
+
+                Float p_in_register_buffer[blockwise_in_copy.GetRegisterBufferSize()];
+                Float p_wei_register_buffer[blockwise_wei_copy.GetRegisterBufferSize()];
+
+                blockwise_in_copy.MoveSrcSliceWindow(Sequence{}, True);
+                blockwise_wei_copy.MoveSrcSliceWindow(Sequence{}, True);
+
+                __syncthreads();
+
+                // LDS double buffer: load next data from device mem
+                blockwise_in_copy.RunLoadRegisterBuffer(p_in_global, p_in_register_buffer);
+                blockwise_wei_copy.RunLoadRegisterBuffer(p_wei_global, p_wei_register_buffer);
+
+                // LDS double buffer: GEMM on current data
+                blockwise_gemm.Run(p_wei_block_now, p_in_block_now, p_out_thread);
+
+                // LDS double buffer: store next data to LDS
+                blockwise_in_copy.RunStoreRegisterBuffer(p_in_register_buffer, p_in_block_next);
+                blockwise_wei_copy.RunStoreRegisterBuffer(p_wei_register_buffer, p_wei_block_next);
+            }
+        }
+
+        // LDS double buffer: tail
+        {
+            // even iteration
+            Float p_in_register_buffer[blockwise_in_copy.GetRegisterBufferSize()];
+            Float p_wei_register_buffer[blockwise_wei_copy.GetRegisterBufferSize()];
+
+            blockwise_in_copy.MoveSrcSliceWindow(Sequence{}, True);
+            blockwise_wei_copy.MoveSrcSliceWindow(Sequence{}, True);
+
+            __syncthreads();
+
+            // LDS double buffer: load next data from device mem
+            blockwise_in_copy.RunLoadRegisterBuffer(p_in_global, p_in_register_buffer);
+            blockwise_wei_copy.RunLoadRegisterBuffer(p_wei_global, p_wei_register_buffer);
+
+            // LDS double buffer: GEMM on current data
+            blockwise_gemm.Run(p_wei_block_double, p_in_block_double, p_out_thread);
+
+            // LDS double buffer: store next data to LDS
+            blockwise_in_copy.RunStoreRegisterBuffer(p_in_register_buffer,
+                                                     p_in_block_double + in_block_space);
+            blockwise_wei_copy.RunStoreRegisterBuffer(p_wei_register_buffer,
+                                                      p_wei_block_double + wei_block_space);
+
+            // odd iteration
+            __syncthreads();
+
+            // LDS double buffer: GEMM on current data
+            blockwise_gemm.Run(p_wei_block_double + wei_block_space,
+                               p_in_block_double + in_block_space,
+                               p_out_thread);
+        }
+
+        // copy output: register to global memory
+        {
+            constexpr index_t K2 = GemmMPerThreadSubC;
+            constexpr index_t K1 = GemmMLevel0Cluster * GemmMLevel1Cluster;
+
+            static_assert(K % (K1 * K2) == 0, "wrong!");
+
+            // define tensor descriptor for threadwise copy
+            // output memory layout descriptor in register
+            constexpr auto out_k0_k1_k2_n1_n0_ho_wo_n2_thread_desc =
+                make_native_tensor_descriptor_packed(
+                    Sequence{});
+
+            // output tensor descriptor in register, src of threadwise copy
+            constexpr auto out_n0_n1_n2_k0_k1_k2_ho_wo_thread_desc =
+                reorder_tensor_descriptor_given_upper2lower(out_k0_k1_k2_n1_n0_ho_wo_n2_thread_desc,
+                                                            Sequence<4, 3, 7, 0, 1, 2, 5, 6>{});
+
+            // output memory layout descriptor in device memory, dst of threadwise copy
+            constexpr auto out_n0_n1_n2_k0_k1_k2_ho_wo_global_desc = transform_tensor_descriptor(
+                out_n_k_ho_wo_global_desc,
+                make_tuple(Unmerge>{},
+                           Unmerge>{},
+                           PassThrough{},
+                           PassThrough{}),
+                make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2>{}, Sequence<3>{}),
+                make_tuple(Sequence<0, 1, 2>{}, Sequence<3, 4, 5>{}, Sequence<6>{}, Sequence<7>{}));
+
+            // calculate origin of thread output tensor on global memory
+            // blockwise GEMM c matrix starting index
+            const auto c_thread_mtx_on_block =
+                blockwise_gemm.GetBeginOfThreadMatrixC(get_thread_local_1d_id());
+
+            const index_t k_thread_data_on_global =
+                k_block_data_on_global + c_thread_mtx_on_block.row;
+
+            const index_t b_thread_data_on_global =
+                b_block_data_on_global + c_thread_mtx_on_block.col / N2;
+
+            // output merged global tensor descriptor, for calculating origin of thread tensor
+            // in global memory
+            constexpr auto out_n0_n1_n2_k_ho_wo_global_desc = transform_tensor_descriptor(
+                out_n_k_ho_wo_global_desc,
+                make_tuple(Unmerge>{},
+                           PassThrough{},
+                           PassThrough{},
+                           PassThrough{}),
+                make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2>{}, Sequence<3>{}),
+                make_tuple(Sequence<0, 1, 2>{}, Sequence<3>{}, Sequence<4>{}, Sequence<5>{}));
+
+            constexpr auto out_k_n1_b_n2_global_desc = transform_tensor_descriptor(
+                out_n0_n1_n2_k_ho_wo_global_desc,
+                make_tuple(PassThrough{},
+                           PassThrough{},
+                           Merge>{},
+                           PassThrough{}),
+                make_tuple(Sequence<3>{}, Sequence<1>{}, Sequence<0, 4, 5>{}, Sequence<2>{}),
+                make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2>{}, Sequence<3>{}));
+
+            // origin of dst in device memory
+            Float* p_out_thread_on_global =
+                p_out_global +
+                out_k_n1_b_n2_global_desc.CalculateOffset(
+                    {k_thread_data_on_global, 0, b_thread_data_on_global, 0});
+
+            ThreadwiseGenericTensorSliceCopy_v4r2<
+                decltype(out_n0_n1_n2_k0_k1_k2_ho_wo_thread_desc),
+                decltype(out_n0_n1_n2_k0_k1_k2_ho_wo_global_desc),
+                decltype(out_n0_n1_n2_k0_k1_k2_ho_wo_thread_desc.GetLengths()),
+                arithmetic_sequence_gen<0, 8, 1>::type,
+                7,
+                1,
+                1>({0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0})
+                .Run(p_out_thread, p_out_thread_on_global);
+        }
+    }
+};
+
+} // namespace ck
+#endif
diff --git a/composable_kernel/include/kernel_algorithm/gridwise_convolution_implicit_gemm_v4r4_nchw_kcyx_nkhw_padded_lds_double_buffer.hpp b/composable_kernel/include/kernel_algorithm/gridwise_convolution_implicit_gemm_v4r4_nchw_kcyx_nkhw_padded_lds_double_buffer.hpp
new file mode 100644
index 0000000000..43145950e3
--- /dev/null
+++ b/composable_kernel/include/kernel_algorithm/gridwise_convolution_implicit_gemm_v4r4_nchw_kcyx_nkhw_padded_lds_double_buffer.hpp
@@ -0,0 +1,389 @@
+#ifndef CK_GRIDWISE_CONVOLUTION_IMPLICIT_GEMM_V4R4_NCHW_KCYX_NKHW_PADDED_LDS_DOUBLE_BUFFER_HPP
+#define CK_GRIDWISE_CONVOLUTION_IMPLICIT_GEMM_V4R4_NCHW_KCYX_NKHW_PADDED_LDS_DOUBLE_BUFFER_HPP
+
+#include "common_header.hpp"
+#include "ConstantTensorDescriptor.hpp"
+#include "ConstantMergedTensorDescriptor.hpp"
+#include "ConstantMatrixDescriptor.hpp"
+#include "blockwise_generic_tensor_slice_copy.hpp"
+#include "blockwise_gemm.hpp"
+#include "threadwise_generic_tensor_slice_copy.hpp"
+
+namespace ck {
+
+// B = merge(N, Ho, Wo)
+template
+struct GridwiseConvolutionImplicitGemm_v4r4_nchw_kcyx_nkhw_padded_lds_double_buffer
+{
+    __device__ void Run(const Float* const __restrict__ p_in_global,
+                        const Float* const __restrict__ p_wei_global,
+                        Float* const __restrict__ p_out_global) const
+    {
+        constexpr auto I0 = Number<0>{};
+        constexpr auto I1 = Number<1>{};
+        constexpr auto I2 = Number<2>{};
+        constexpr auto I3 = Number<3>{};
+
+        constexpr auto True = integral_constant{};
+
+        constexpr auto in_n_c_hi_wi_global_desc =
+            make_native_tensor_descriptor(InGlobalDesc::GetLengths(), InGlobalDesc::GetStrides());
+        constexpr auto wei_k_c_y_x_global_desc =
+            make_native_tensor_descriptor(WeiGlobalDesc::GetLengths(), WeiGlobalDesc::GetStrides());
+        constexpr auto out_n_k_ho_wo_global_desc =
+            make_native_tensor_descriptor(OutGlobalDesc::GetLengths(), OutGlobalDesc::GetStrides());
+
+        constexpr index_t N  = in_n_c_hi_wi_global_desc.GetLength(I0);
+        constexpr index_t C  = in_n_c_hi_wi_global_desc.GetLength(I1);
+        constexpr index_t Hi = in_n_c_hi_wi_global_desc.GetLength(I2);
+        constexpr index_t Wi = in_n_c_hi_wi_global_desc.GetLength(I3);
+
+        constexpr index_t K  = out_n_k_ho_wo_global_desc.GetLength(I1);
+        constexpr index_t Ho = out_n_k_ho_wo_global_desc.GetLength(I2);
+        constexpr index_t Wo = out_n_k_ho_wo_global_desc.GetLength(I3);
+
+        constexpr index_t Y = wei_k_c_y_x_global_desc.GetLength(I2);
+        constexpr index_t X = wei_k_c_y_x_global_desc.GetLength(I3);
+
+        constexpr index_t ConvStrideH = ConvStrides{}[0];
+        constexpr index_t ConvStrideW = ConvStrides{}[1];
+
+        constexpr index_t ConvDilationH = ConvDilations{}[0];
+        constexpr index_t ConvDilationW = ConvDilations{}[1];
+
+        constexpr index_t E = C * Y * X;
+        constexpr index_t B = N * Ho * Wo;
+
+        // sanity-check for vectorized memory load
+        static_assert((Ho == 1 || ConvStrideW % InBlockCopyDataPerAccess_B == 0) &&
+                          (X == 1 || ConvDilationW % InBlockCopyDataPerAccess_B == 0),
+                      "wrong! alignment requirement for vectorized global load of input tensor "
+                      "will be violated");
+
+        // divide block work by [K, B]
+        static_assert(K % KPerBlock == 0 && B % BPerBlock == 0 && E % EPerBlock == 0,
+                      "wrong! cannot divide work evenly among blocks");
+
+        constexpr index_t KBlockWork = K / KPerBlock;
+        constexpr index_t BBlockWork = B / BPerBlock;
+
+        constexpr auto block_work_desc =
+            make_ConstantTensorDescriptor_packed(Sequence{});
+
+        const auto block_work_multi_id =
+            block_work_desc.GetMultiIndexFrom1dIndex(get_block_1d_id());
+
+        const index_t k_block_data_on_global = block_work_multi_id[0] * KPerBlock;
+        const index_t b_block_data_on_global = block_work_multi_id[1] * BPerBlock;
+
+        // input tensor
+        // global mem
+        constexpr auto in_n_c_hip_wip_global_desc = transform_tensor_descriptor(
+            in_n_c_hi_wi_global_desc,
+            make_tuple(
+                PassThrough{}, PassThrough{}, Pad, LeftPads, RightPads>{}),
+            make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2, 3>{}),
+            make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2, 3>{}));
+
+        constexpr auto in_n_c_y_ho_x_wo_global_desc = transform_tensor_descriptor(
+            in_n_c_hip_wip_global_desc,
+            make_tuple(PassThrough{},
+                       PassThrough{},
+                       Embed, Sequence>{},
+                       Embed, Sequence>{}),
+            make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2>{}, Sequence<3>{}),
+            make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2, 3>{}, Sequence<4, 5>{}));
+
+        constexpr auto in_e_b_global_desc = transform_tensor_descriptor(
+            in_n_c_y_ho_x_wo_global_desc,
+            make_tuple(Merge>{}, Merge>{}),
+            make_tuple(Sequence<1, 2, 4>{}, Sequence<0, 3, 5>{}),
+            make_tuple(Sequence<0>{}, Sequence<1>{}));
+
+        // LDS mem
+        // be careful of LDS alignment
+        constexpr auto in_e_b_block_desc =
+            make_native_tensor_descriptor_packed(Sequence{});
+
+        // input blockwise copy
+        auto blockwise_in_copy =
+            BlockwiseGenericTensorSliceCopy_v4(
+                {0, b_block_data_on_global}, {0, 0});
+
+        // weight tensor
+        // global mem
+        constexpr auto wei_e_k_global_desc =
+            transform_tensor_descriptor(wei_k_c_y_x_global_desc,
+                                        make_tuple(Merge>{}, PassThrough{}),
+                                        make_tuple(Sequence<1, 2, 3>{}, Sequence<0>{}),
+                                        make_tuple(Sequence<0>{}, Sequence<1>{}));
+
+        // LDS
+        // be careful of LDS alignment
+        constexpr auto wei_e_k_block_desc = make_native_tensor_descriptor_aligned(
+            Sequence{},
+            Number{});
+
+        // weight blockwise copy
+        auto blockwise_wei_copy =
+            BlockwiseGenericTensorSliceCopy_v4(
+                {0, k_block_data_on_global}, {0, 0});
+
+        // GEMM definition
+        //   c_mtx += transpose(a_mtx) * b_mtx
+        //   a_mtx[EPerBlock, KPerBlock] is in LDS
+        //   b_mtx[EPerBlock, BPerBlock] is in LDS
+        //   c_mtx[KPerBlock, BPerBlock] is distributed among threads, and saved in
+        //   register
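+        //
+        // Note: in this v4r4 variant the GEMM N dimension is simply B = N * Ho * Wo, so the
+        // per-thread C tile is presumably GemmMRepeat * GemmMPerThreadSubC rows by
+        // GemmNRepeat * GemmNPerThreadSubC columns; unlike v4r1 above, there is no N1/N2
+        // sub-splitting of the batch dimension.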
+        constexpr auto a_e_k_block_mtx_desc = make_ConstantMatrixDescriptor(wei_e_k_block_desc);
+
+        constexpr auto b_e_b_block_mtx_desc = make_ConstantMatrixDescriptor(in_e_b_block_desc);
+
+        // sanity check
+        static_assert(
+            KPerBlock % (GemmMPerThreadSubC * GemmMLevel0Cluster * GemmMLevel1Cluster) == 0 &&
+                BPerBlock % (GemmNPerThreadSubC * GemmNLevel0Cluster * GemmNLevel1Cluster) == 0,
+            "wrong!");
+
+        constexpr index_t GemmMRepeat =
+            KPerBlock / (GemmMPerThreadSubC * GemmMLevel0Cluster * GemmMLevel1Cluster);
+
+        constexpr index_t GemmNRepeat =
+            BPerBlock / (GemmNPerThreadSubC * GemmNLevel0Cluster * GemmNLevel1Cluster);
+
+        // c_thread_mtx definition: this is a mess
+        // TODO: find a more elegant way of defining c_thread_mtx
+        constexpr auto c_k0k1_b0b1_thread_mtx_desc = make_ConstantMatrixDescriptor_packed(
+            Number{}, Number{});
+
+        const auto blockwise_gemm = BlockwiseGemmBlockABlockBThreadCTransANormalBNormalC_v2<
+            BlockSize,
+            decltype(a_e_k_block_mtx_desc),
+            decltype(b_e_b_block_mtx_desc),
+            decltype(c_k0k1_b0b1_thread_mtx_desc),
+            GemmMPerThreadSubC,
+            GemmNPerThreadSubC,
+            GemmMLevel0Cluster,
+            GemmNLevel0Cluster,
+            GemmMLevel1Cluster,
+            GemmNLevel1Cluster,
+            GemmKPerThreadLoop,
+            GemmDataPerReadA,
+            GemmDataPerReadB>{};
+
+        // LDS allocation for input and weight: be careful of alignment
+        constexpr index_t max_align = math::lcm(InBlockCopyDataPerAccess_B,
+                                                WeiBlockCopyDstDataPerWrite_K,
+                                                GemmDataPerReadA,
+                                                GemmDataPerReadB);
+
+        constexpr index_t in_block_space =
+            math::integer_least_multiple(in_e_b_block_desc.GetElementSpace(), max_align);
+
+        constexpr index_t wei_block_space =
+            math::integer_least_multiple(wei_e_k_block_desc.GetElementSpace(), max_align);
+
+        __shared__ Float p_in_block_double[2 * in_block_space];
+        __shared__ Float p_wei_block_double[2 * wei_block_space];
+
+        // register allocation for output
+        Float p_out_thread[c_k0k1_b0b1_thread_mtx_desc.GetElementSpace()];
+
+        // zero out threadwise output
+        threadwise_matrix_set_zero(c_k0k1_b0b1_thread_mtx_desc, p_out_thread);
+
+        // LDS double buffer: preload data into LDS
+        {
+            blockwise_in_copy.Run(p_in_global, p_in_block_double);
+            blockwise_wei_copy.Run(p_wei_global, p_wei_block_double);
+        }
+
+        // LDS double buffer: main body
+        for(index_t e_block_data_begin = 0; e_block_data_begin + 2 * EPerBlock < E;
+            e_block_data_begin += 2 * EPerBlock)
+        {
+#pragma unroll
+            for(index_t iloop = 0; iloop < 2; ++iloop)
+            {
+                const bool even_loop = (iloop % 2 == 0);
+
+                Float* p_in_block_now =
+                    even_loop ? p_in_block_double : p_in_block_double + in_block_space;
+                Float* p_wei_block_now =
+                    even_loop ? p_wei_block_double : p_wei_block_double + wei_block_space;
+
+                Float* p_in_block_next =
+                    even_loop ? p_in_block_double + in_block_space : p_in_block_double;
+                Float* p_wei_block_next =
+                    even_loop ? p_wei_block_double + wei_block_space : p_wei_block_double;
+
+                Float p_in_register_buffer[blockwise_in_copy.GetRegisterBufferSize()];
+                Float p_wei_register_buffer[blockwise_wei_copy.GetRegisterBufferSize()];
+
+                blockwise_in_copy.MoveSrcSliceWindow(Sequence{}, True);
+                blockwise_wei_copy.MoveSrcSliceWindow(Sequence{}, True);
+
+                __syncthreads();
+
+                // LDS double buffer: load next data from device mem
+                blockwise_in_copy.RunLoadRegisterBuffer(p_in_global, p_in_register_buffer);
+                blockwise_wei_copy.RunLoadRegisterBuffer(p_wei_global, p_wei_register_buffer);
+
+                // LDS double buffer: GEMM on current data
+                blockwise_gemm.Run(p_wei_block_now, p_in_block_now, p_out_thread);
+
+                // LDS double buffer: store next data to LDS
+                blockwise_in_copy.RunStoreRegisterBuffer(p_in_register_buffer, p_in_block_next);
+                blockwise_wei_copy.RunStoreRegisterBuffer(p_wei_register_buffer, p_wei_block_next);
+            }
+        }
+
+        // LDS double buffer: tail
+        {
+            Float p_in_register_buffer[blockwise_in_copy.GetRegisterBufferSize()];
+            Float p_wei_register_buffer[blockwise_wei_copy.GetRegisterBufferSize()];
+
+            // even iteration
+            blockwise_in_copy.MoveSrcSliceWindow(Sequence{}, True);
+            blockwise_wei_copy.MoveSrcSliceWindow(Sequence{}, True);
+
+            __syncthreads();
+
+            // LDS double buffer: load next data from device mem
+            blockwise_in_copy.RunLoadRegisterBuffer(p_in_global, p_in_register_buffer);
+            blockwise_wei_copy.RunLoadRegisterBuffer(p_wei_global, p_wei_register_buffer);
+
+            // LDS double buffer: GEMM on current data
+            blockwise_gemm.Run(p_wei_block_double, p_in_block_double, p_out_thread);
+
+            // LDS double buffer: store next data to LDS
+            blockwise_in_copy.RunStoreRegisterBuffer(p_in_register_buffer,
+                                                     p_in_block_double + in_block_space);
+            blockwise_wei_copy.RunStoreRegisterBuffer(p_wei_register_buffer,
+                                                      p_wei_block_double + wei_block_space);
+
+            // odd iteration
+            __syncthreads();
+
+            // LDS double buffer: GEMM on current data
+            blockwise_gemm.Run(p_wei_block_double + wei_block_space,
+                               p_in_block_double + in_block_space,
+                               p_out_thread);
+        }
+
+        // copy output: register to global memory
+        {
+            // calculate origin of thread output tensor on global memory
+            // blockwise GEMM c matrix starting index
+            const auto c_thread_mtx_on_block =
+                blockwise_gemm.GetBeginOfThreadMatrixC(get_thread_local_1d_id());
+
+            const index_t k_thread_data_on_global =
+                k_block_data_on_global + c_thread_mtx_on_block.row;
+
+            const index_t b_thread_data_on_global =
+                b_block_data_on_global + c_thread_mtx_on_block.col;
+
+            // src descriptor
+            constexpr auto out_k0_k1_b0_b1_thread_desc = make_native_tensor_descriptor_packed(
+                Sequence{});
+
+            // dst descriptor
+            constexpr index_t K1 = GemmMPerThreadSubC * GemmMLevel0Cluster * GemmMLevel1Cluster;
+            constexpr index_t B1 = GemmNPerThreadSubC * GemmNLevel0Cluster * GemmNLevel1Cluster;
+
+            constexpr index_t K0 = K / K1;
+            constexpr index_t B0 = B / B1;
+
+            constexpr auto out_k_b_global_desc = transform_tensor_descriptor(
+                out_n_k_ho_wo_global_desc,
+                make_tuple(PassThrough{}, Merge>{}),
+                make_tuple(Sequence<1>{}, Sequence<0, 2, 3>{}),
+                make_tuple(Sequence<0>{}, Sequence<1>{}));
+
+            constexpr auto out_k0_k1_b0_b1_global_desc = transform_tensor_descriptor(
+                out_k_b_global_desc,
+                make_tuple(Unmerge>{}, Unmerge>{}),
+                make_tuple(Sequence<0>{}, Sequence<1>{}),
+                make_tuple(Sequence<0, 1>{}, Sequence<2, 3>{}));
+
+            // output threadwise copy
+            auto threadwise_out_copy = ThreadwiseGenericTensorSliceCopy_v4r2<
+                decltype(out_k0_k1_b0_b1_thread_desc),
+                decltype(out_k0_k1_b0_b1_global_desc),
+                decltype(out_k0_k1_b0_b1_thread_desc.GetLengths()),
+                arithmetic_sequence_gen<0, 4, 1>::type,
+                3,
+                OutThreadCopyDataPerAccess_B,
+                OutThreadCopyDataPerAccess_B>({0, 0, 0, 0},
+                                              {k_thread_data_on_global / K1,
+                                               k_thread_data_on_global % K1,
+                                               b_thread_data_on_global / B1,
+                                               b_thread_data_on_global % B1});
+
+            threadwise_out_copy.Run(p_out_thread, p_out_global);
+        }
+    }
+};
+
+} // namespace ck
+#endif
diff --git a/driver/src/driver.cpp b/driver/src/driver.cpp
index e383f8e06f..a47073c5e7 100644
--- a/driver/src/driver.cpp
+++ b/driver/src/driver.cpp
@@ -399,7 +399,7 @@ int main(int argc, char* argv[])
                                                          ConvStrides{},
                                                          ConvDilations{},
                                                          nrepeat);
-#elif 0
+#elif 1
     device_convolution_implicit_gemm_v4r1_nchw_kcyx_nkhw_padded(in_nchw_desc,
                                                                 in_nchw,
                                                                 wei_kcyx_desc,