diff --git a/driver/device_convolution_implicit_gemm_v3_nchw_cyxk_nkhw.hpp b/driver/device_convolution_implicit_gemm_v3_nchw_cyxk_nkhw.hpp new file mode 100644 index 0000000000..2960471635 --- /dev/null +++ b/driver/device_convolution_implicit_gemm_v3_nchw_cyxk_nkhw.hpp @@ -0,0 +1,148 @@ +#pragma once +#include +#include "device.hpp" +#include "gridwise_convolution_wrapper.hip.hpp" +#include "gridwise_convolution_implicit_gemm_v3_nchw_cyxk_nkhw.hip.hpp" + +template +void device_convolution_implicit_gemm_v3_nchw_cyxk_nkhw(InDesc, + const Tensor& in_nchw, + WeiDesc, + const Tensor& wei_kcyx, + OutDesc, + Tensor& out_nkhw, + index_t nrepeat) +{ + constexpr auto I0 = Number<0>{}; + constexpr auto I1 = Number<1>{}; + constexpr auto I2 = Number<2>{}; + constexpr auto I3 = Number<3>{}; + + constexpr auto in_nchw_desc = InDesc{}; + constexpr auto wei_kcyx_desc = WeiDesc{}; + constexpr auto out_nkhw_desc = OutDesc{}; + + constexpr index_t Hi = in_nchw_desc.GetLength(I2); + constexpr index_t Wi = in_nchw_desc.GetLength(I3); + + constexpr index_t N = out_nkhw_desc.GetLength(I0); + constexpr index_t Ho = out_nkhw_desc.GetLength(I2); + constexpr index_t Wo = out_nkhw_desc.GetLength(I3); + + constexpr index_t K = wei_kcyx_desc.GetLength(I0); + constexpr index_t C = wei_kcyx_desc.GetLength(I1); + constexpr index_t Y = wei_kcyx_desc.GetLength(I2); + constexpr index_t X = wei_kcyx_desc.GetLength(I3); + + // reorder weight + auto wei_cyxk_desc = make_ConstantTensorDescriptor(Sequence{}); + ostream_ConstantTensorDescriptor(wei_cyxk_desc, std::cout << "wei_cyxk_desc: "); + + Tensor wei_cyxk(make_TensorDescriptor(wei_cyxk_desc)); + + auto f_reorder_kcyx2cyxk = [&](auto k, auto c, auto y, auto x) { + wei_cyxk(c, y, x, k) = wei_kcyx(k, c, y, x); + }; + + make_ParallelTensorFunctor(f_reorder_kcyx2cyxk, K, C, Y, X)( + std::thread::hardware_concurrency()); + + std::size_t data_sz = sizeof(T); + DeviceMem in_nchw_device_buf(data_sz * in_nchw.mDesc.GetElementSpace()); + DeviceMem 
wei_cyxk_device_buf(data_sz * wei_cyxk.mDesc.GetElementSpace()); + DeviceMem out_nkhw_device_buf(data_sz * out_nkhw.mDesc.GetElementSpace()); + + in_nchw_device_buf.ToDevice(in_nchw.mData.data()); + wei_cyxk_device_buf.ToDevice(wei_cyxk.mData.data()); + out_nkhw_device_buf.ToDevice(out_nkhw.mData.data()); + +#if 1 + // for 3x3, 28x28, v3, Pascal + constexpr index_t BlockSize = 128; + + constexpr index_t BPerBlock = 16; + constexpr index_t KPerBlock = 128; + constexpr index_t CPerBlock = 8; + + constexpr index_t BPerThread = 1; + constexpr index_t KPerThread = 8; + + constexpr index_t GemmMPerThreadSubC = 4; + constexpr index_t GemmNPerThreadSubC = 4; + constexpr index_t GemmMLevel0Cluster = 4; + constexpr index_t GemmNLevel0Cluster = 2; + constexpr index_t GemmMLevel1Cluster = 4; + constexpr index_t GemmNLevel1Cluster = 2; + constexpr index_t GemmKPerThreadLoop = 1; + constexpr index_t GemmDataPerReadA = 4; + constexpr index_t GemmDataPerReadB = 4; + + using InBlockReorderSrcSubLengths_NCHW = Sequence<4, 1, 1, 1>; + using InBlockReorderSrcClusterLengths_NCHW = Sequence<4, 8, 2, 2>; + using InBlockReorderMapThreadCluster2SrcCluster_CHNW2NCHW = Sequence<1, 2, 0, 3>; + + constexpr index_t WeiBlockCopyDataPerRead_K = 4; +#endif + + constexpr index_t GridSize = + ((N + NPerBlock - 1) / NPerBlock) * ((K + KPerBlock - 1) / KPerBlock) * + ((Ho + HoPerBlock - 1) / HoPerBlock) * ((Wo + WoPerBlock - 1) / WoPerBlock); + + printf("%s: BlockSize %u, GridSize %u \n", __func__, BlockSize, GridSize); + + for(index_t i = 0; i < nrepeat; ++i) + { + constexpr auto gridwise_conv = +#if 1 + GridwiseConvolutionImplicitGemm_v3_nchw_cyxk_nkhw +#endif + {}; + + float time = launch_kernel(run_gridwise_convolution, + dim3(GridSize), + dim3(BlockSize), + 0, + static_cast(in_nchw_device_buf.GetDeviceBuffer()), + static_cast(wei_cyxk_device_buf.GetDeviceBuffer()), + static_cast(out_nkhw_device_buf.GetDeviceBuffer())); + + printf("Elapsed time : %f ms, %f TFlop/s\n", + time, + 
(float)calculate_convolution_flops(InDesc{}, WeiDesc{}, OutDesc{}) / + (std::size_t(1000) * 1000 * 1000) / time); + usleep(std::min(time * 1000, float(10000))); + } + + out_nkhw_device_buf.FromDevice(out_nkhw.mData.data()); +} diff --git a/driver/driver.hip.cpp b/driver/driver.hip.cpp index c72e1eab3b..0b75e0083a 100644 --- a/driver/driver.hip.cpp +++ b/driver/driver.hip.cpp @@ -13,6 +13,7 @@ #include "device_convolution_implicit_gemm_v1_nchw_cyxk_khwn.hpp" #include "device_convolution_implicit_gemm_v1_nchw_cyxk_nkhw.hpp" #include "device_convolution_implicit_gemm_v2_chwn_cyxk_khwn.hpp" +#include "device_convolution_implicit_gemm_v3_nchw_cyxk_nkhw.hpp" struct GeneratorTensor_1 { @@ -410,7 +411,7 @@ void check_error(const Tensor& ref, const Tensor& result) int main(int argc, char* argv[]) { -#if 1 +#if 0 // 3x3, 34x34 constexpr index_t N = 64; constexpr index_t C = 256; @@ -434,7 +435,7 @@ int main(int argc, char* argv[]) constexpr index_t HPad = 0; constexpr index_t WPad = 0; -#elif 0 +#elif 1 // 3x3 filter, 28x28 image constexpr index_t N = 128; constexpr index_t C = 256; @@ -603,7 +604,7 @@ int main(int argc, char* argv[]) #if 1 #if 0 device_direct_convolution_1 -#elif 1 +#elif 0 device_convolution_direct_v2_nchw_kcyx_nkhw #elif 0 device_direct_convolution_2_vectorized_nchw_kcyx_nkhw @@ -615,6 +616,8 @@ int main(int argc, char* argv[]) device_convolution_implicit_gemm_v1_nchw_cyxk_nkhw #elif 0 device_convolution_implicit_gemm_v2_chwn_cyxk_khwn +#elif 1 + device_convolution_implicit_gemm_v3_nchw_cyxk_nkhw #endif (in_nchw_desc, in_nchw, wei_kcyx_desc, wei_kcyx, out_nkhw_desc, out_nkhw_device, nrepeat); diff --git a/src/include/ConstantMergedTensorDescriptor.hip.hpp b/src/include/ConstantMergedTensorDescriptor.hip.hpp new file mode 100644 index 0000000000..ab73c8a49d --- /dev/null +++ b/src/include/ConstantMergedTensorDescriptor.hip.hpp @@ -0,0 +1,95 @@ +#pragma once +#include "common.hip.hpp" +#include "ConstantTensorDescriptor.hip.hpp" + +// TensorDesc: 
ConstantTensorDescriptor<...> +// MergedDimRanges: Sequence +template +struct ConstantMergedTensorDescriptor +{ + static constexpr index_t nOriginalDim = GetNumOfOriginalDimension(); + static constexpr index_t nDim = GetNumOfDimension(); + + template + __host__ __device__ constexpr ConstantMergedTensorDescriptor() + { + constexpr auto merged_dim_ranges = std::make_tuple(MergedDimRanges{}...); + + static_for<0, sizeof...(MergedDimRanges), 1>{}([&](auto I) { + constexpr index_t i = I.Get(); + constexpr auto merged_dim_range = std::get(merged_dim_ranges); + + static_assert(merged_dim_range.GetSize() == 2, + "wrong! should specify first and last dimension to be merged"); + static_assert(merged_dim_range.Get(Number<0>{}) < GetNumOfUnmergedDimension(), + "wrong!"); + static_assert(merged_dim_range.Get(Number<1>{}) < GetNumOfUnmergedDimension(), + "wrong!"); + static_assert(merged_dim_range.Get(Number<0>{}) <= merged_dim_range.Get(Number<1>{}), + "wrong!"); + }); + } + + __host__ __device__ static constexpr index_t GetNumOfOriginalDimension() + { + return TensorDesc::GetNumOfDimension(); + } + + __host__ __device__ static constexpr index_t GetNumOfDimension() + { + constexpr auto merged_dim_ranges = std::make_tuple(MergedDimRanges...); + + struct f_calculate_num_of_lost_dim + { + __host__ __device__ constexpr index_t operator()(auto I) const + { + constexpr index_t i = I.Get(); + constexpr auto merged_dim_range = std::get(merged_dim_ranges); + + return merged_dim_range.Get(Number<1>{}) - merged_dim_range.Get(Number<0>{}); + } + }; + + constexpr index_t num_lost_dim = static_const_reduce_n{}( + f_calculate_num_of_lost_dim, mod_conv::plus{}); + + return TensorDesc::GetNumOfDimension() - num_lost_dim; + } + + template + __host__ __device__ static constexpr bool IsMergedDimension(Number) + { + // not implemented + } + + template + __host__ __device__ static constexpr bool GetLength(Number) + { + // not implemented + } + + template + __host__ __device__ static constexpr bool 
GetStride(Number) + { + static_assert(!IsMergedDimension(Number{}, "wrong! A merged dimension does not have uniform stride") + // not implemented + } + + template + __host__ __device__ auto MultiIndex2OriginalMultiIndex(Is... is) const + { + // not implemented + } + + template + __host__ __device__ auto OriginalMultiIndex2MultiIndex(Is... is) const + { + // not implemented + } +}; + +template +constexpr auto make_ConstantMergedTensorDescriptor(TensorDesc, MergedDimRanges...) +{ + return ConstantMergedTensorDescriptor{}; +} diff --git a/src/include/ConstantTensorDescriptor.hip.hpp b/src/include/ConstantTensorDescriptor.hip.hpp index aad038020a..6dcd16e167 100644 --- a/src/include/ConstantTensorDescriptor.hip.hpp +++ b/src/include/ConstantTensorDescriptor.hip.hpp @@ -65,7 +65,7 @@ struct ConstantTensorDescriptor static_assert(Lengths::GetSize() == Strides::GetSize(), "nDim not consistent"); } - __host__ __device__ static constexpr index_t GetDimension() { return nDim; } + __host__ __device__ static constexpr index_t GetNumOfDimension() { return nDim; } __host__ __device__ static constexpr Lengths GetLengths() { return Lengths{}; } @@ -160,11 +160,51 @@ struct ConstantTensorDescriptor return multi_id; } - __host__ __device__ static constexpr auto Condense() + __host__ __device__ static constexpr auto Pack() { constexpr auto default_strides = calculate_default_strides(Lengths{}); return ConstantTensorDescriptor{}; } + + template + __host__ __device__ static constexpr auto Extract(Number... 
/*extracted_dims...*/) + { + static_assert(sizeof...(IDims) <= GetNumOfDimension(), "wrong!"); + + constexpr auto extracted_lengths = Sequence{})...>{}; + constexpr auto extracted_strides = Sequence{})...>{}; + + return make_ConstantTensorDescriptor(extracted_lenghts, extracted_strides); + } + + template + __host__ __device__ static constexpr auto Slice(Number, Number) + { + // not implemented + } + + template + __host__ device__ static constexpr auto Fold(Number, Sequence) + { + // not implemented + // need to check the Length dimension to be folded is dividable by FoldLengths + } + + template + __host__ __device__ static constexpr auto Unfold(Number, Number) + { + // not implemented + // need to check the dimensions to be unfold are packed, otherwise, Unfold is not permitted + } + + template + __host__ __device__ static constexpr auto ReorderGivenNew2Old(Sequence /*new2old*/) + { + static_assert(sizeof...(IRs) == GetNumberOfDimension(), "wrong! dimension is wrong"); + constexpr auto map_new2old = Sequence{}; + return make_ConstantTensorDescriptor(Lengths{}.ReorderGivenNew2Old(map_new2old), + Strides{}.ReorderGivenNew2Old(map_new2old)); + } }; template @@ -191,7 +231,7 @@ template __host__ __device__ void print_ConstantTensorDescriptor(TDesc, const char* s) { constexpr auto desc = TDesc{}; - constexpr index_t ndim = desc.GetDimension(); + constexpr index_t ndim = desc.GetNumOfDimension(); static_assert(ndim >= 2 && ndim <= 10, "wrong!"); @@ -202,7 +242,7 @@ __host__ __device__ void print_ConstantTensorDescriptor(TDesc, const char* s) printf("%s dim %u, lengths {%u %u}, strides {%u %u}\n", s, - desc.GetDimension(), + desc.GetNumOfDimension(), desc.GetLength(I0), desc.GetLength(I1), desc.GetStride(I0), @@ -216,7 +256,7 @@ __host__ __device__ void print_ConstantTensorDescriptor(TDesc, const char* s) printf("%s dim %u, lengths {%u %u %u}, strides {%u %u %u}\n", s, - desc.GetDimension(), + desc.GetNumOfDimension(), desc.GetLength(I0), desc.GetLength(I1), 
desc.GetLength(I2), @@ -233,7 +273,7 @@ __host__ __device__ void print_ConstantTensorDescriptor(TDesc, const char* s) printf("%s dim %u, lengths {%u %u %u %u}, strides {%u %u %u %u}\n", s, - desc.GetDimension(), + desc.GetNumOfDimension(), desc.GetLength(I0), desc.GetLength(I1), desc.GetLength(I2), @@ -253,7 +293,7 @@ __host__ __device__ void print_ConstantTensorDescriptor(TDesc, const char* s) printf("%s dim %u, lengths {%u %u %u %u %u}, strides {%u %u %u %u %u}\n", s, - desc.GetDimension(), + desc.GetNumOfDimension(), desc.GetLength(I0), desc.GetLength(I1), desc.GetLength(I2), @@ -276,7 +316,7 @@ __host__ __device__ void print_ConstantTensorDescriptor(TDesc, const char* s) printf("%s dim %u, lengths {%u %u %u %u %u %u}, strides {%u %u %u %u %u %u}\n", s, - desc.GetDimension(), + desc.GetNumOfDimension(), desc.GetLength(I0), desc.GetLength(I1), desc.GetLength(I2), @@ -302,7 +342,7 @@ __host__ __device__ void print_ConstantTensorDescriptor(TDesc, const char* s) printf("%s dim %u, lengths {%u %u %u %u %u %u %u}, strides {%u %u %u %u %u %u %u}\n", s, - desc.GetDimension(), + desc.GetNumOfDimension(), desc.GetLength(I0), desc.GetLength(I1), desc.GetLength(I2), @@ -331,7 +371,7 @@ __host__ __device__ void print_ConstantTensorDescriptor(TDesc, const char* s) printf("%s dim %u, lengths {%u %u %u %u %u %u %u %u}, strides {%u %u %u %u %u %u %u %u}\n", s, - desc.GetDimension(), + desc.GetNumOfDimension(), desc.GetLength(I0), desc.GetLength(I1), desc.GetLength(I2), @@ -364,7 +404,7 @@ __host__ __device__ void print_ConstantTensorDescriptor(TDesc, const char* s) printf("%s dim %u, lengths {%u %u %u %u %u %u %u %u %u}, strides {%u %u %u %u %u %u %u %u " "%u}\n", s, - desc.GetDimension(), + desc.GetNumOfDimension(), desc.GetLength(I0), desc.GetLength(I1), desc.GetLength(I2), @@ -400,7 +440,7 @@ __host__ __device__ void print_ConstantTensorDescriptor(TDesc, const char* s) printf("%s dim %u, lengths {%u %u %u %u %u %u %u %u %u %u}, strides {%u %u %u %u %u %u %u " "%u %u %u}\n", 
s, - desc.GetDimension(), + desc.GetNumOfDimension(), desc.GetLength(I0), desc.GetLength(I1), desc.GetLength(I2), diff --git a/src/include/Sequence.hip.hpp b/src/include/Sequence.hip.hpp index ba3116ce0b..3826e4df97 100644 --- a/src/include/Sequence.hip.hpp +++ b/src/include/Sequence.hip.hpp @@ -59,10 +59,22 @@ struct Sequence __host__ __device__ constexpr auto PopBack() const; - template - __host__ __device__ constexpr auto Transform(F f) const + template + __host__ __device__ constexpr auto Insert(Number, Number) const { - return Sequence{}; + index_t data[mSize + 1]; + + static_for<0, I, 1>{}([&](auto Iter) { + constexpr index_t iter = Iter.Get(); + data[iter] = mData[iter]; + }); + + data[I] = X; + + static_for{}([&](auto Iter) { + constexpr index_t iter = Iter.Get(); + data[iter + 1] = mData[iter]; + }); } }; diff --git a/src/include/blockwise_4d_tensor_op.hip.hpp b/src/include/blockwise_4d_tensor_op.hip.hpp index 14a8c3d9de..8235575a2f 100644 --- a/src/include/blockwise_4d_tensor_op.hip.hpp +++ b/src/include/blockwise_4d_tensor_op.hip.hpp @@ -1,6 +1,6 @@ #pragma once #include "ConstantTensorDescriptor.hip.hpp" -#include "threadwise_nd_tensor_op.hip.hpp" +#include "threadwise_tensor_slice_op.hip.hpp" template __device__ void diff --git a/src/include/blockwise_gemm.hip.hpp b/src/include/blockwise_gemm.hip.hpp index 582f45f7ea..722c1ae9bb 100644 --- a/src/include/blockwise_gemm.hip.hpp +++ b/src/include/blockwise_gemm.hip.hpp @@ -7,7 +7,6 @@ template {}; } __device__ static MatrixIndex GetBeginOfThreadMatrixC(index_t thread_id) @@ -101,7 +84,6 @@ struct BlockwiseGemmBlockABlockBThreadCTransANormalBNormalC_v2 level1_n_id * NPerLevel0Cluster + level0_n_id * NPerThreadSubC}; } - // this should be optimized away if input is known __device__ static MatrixIndex GetDistanceFromBeginOfThreadMatrixC(index_t m_in_c, index_t n_in_c) { diff --git a/src/include/blockwise_merged_tensor_slice_op.hip.hpp b/src/include/blockwise_merged_tensor_slice_op.hip.hpp new file mode 
100644 index 0000000000..b67a239455 --- /dev/null +++ b/src/include/blockwise_merged_tensor_slice_op.hip.hpp @@ -0,0 +1,55 @@ +#pragma once +#include "threadwise_tensor_slice_op.hip.hpp" + +// slice a merged tensor, reorder and copy it into a normal tensor +// src: a merged tensor, +// dst: a normal tensor +template +struct BlockwiseTensorSliceCopy_generic_v1 +{ + static constexpr index_t nDim = SrcDesc::GetNumOfDimension(); + + index_t mSrcMyThreadOffset; + index_t mDstMyThreadOffset; + + __device__ BlockwiseTensorSliceCopy_generic_v1(Array src_block_multi_id_offset, + Array dst_block_multi_id_offset) + { + // only support SrcSubLengths.GetLength() == 1 on merged dimension, for now + // check SrcDataPerRead should be 1, if last dimension is a merged dimension + + // check NDim consistent + + // calculate mSrcMyThreadOffset + // calculate mDstMyThreadOffset + } + + __device__ static constexpr index_t GetRegisterClipboardSize() {} + + __device__ void RunLoadRegisterClipboard(const Float* __restrict__ p_src, + Float* __restrict__ p_clipboard) const + { + } + + __device__ void RunStoreRegisterClipboard(const Float* __restrict__ p_clipboard, + Float* __restrict__ p_dst) const + { + } + + __device__ void Run(const Float* __restrict__ p_src, Float* __restrict__ p_dst) const + { + Float p_clipboard[GetRegisterClipboardSize()]; + + RunLoadRegisterClipboard(p_src, p_clipboard); + RunStoreRegisterClipboard(p_clipboard, p_dst); + } +}; diff --git a/src/include/blockwise_nd_tensor_op.hip.hpp b/src/include/blockwise_tensor_slice_op.hip.hpp similarity index 81% rename from src/include/blockwise_nd_tensor_op.hip.hpp rename to src/include/blockwise_tensor_slice_op.hip.hpp index 0eeacfe940..5f7284dc2a 100644 --- a/src/include/blockwise_nd_tensor_op.hip.hpp +++ b/src/include/blockwise_tensor_slice_op.hip.hpp @@ -1,5 +1,5 @@ #pragma once -#include "threadwise_nd_tensor_op.hip.hpp" +#include "threadwise_tensor_slice_op.hip.hpp" template -struct BlockwiseNdTensorCopyReorder_v3 +struct 
BlockwiseTensorSliceReorderCopy_v3 { static constexpr index_t nDim = SrcLengths::GetSize(); index_t mSrcMyThreadOffset; index_t mDstMyThreadOffset; - __device__ BlockwiseNdTensorCopyReorder_v3() + __device__ + BlockwiseTensorSliceReorderCopy_v3(Array src_block_data_multi_id_begin, + Array dst_block_data_multi_id_begin) { constexpr auto src_desc = SrcDesc{}; constexpr auto dst_desc = DstDesc{}; @@ -43,8 +45,9 @@ struct BlockwiseNdTensorCopyReorder_v3 static_assert(is_same::value, "wrong! only support float for now!\n"); // sanity check: nDim - static_assert(SrcDesc::GetDimension() == nDim && DstDesc::GetDimension() == nDim && - SrcLengths::GetSize() == nDim && SrcSubLengths::GetSize() == nDim && + static_assert(SrcDesc::GetNumOfDimension() == nDim && + DstDesc::GetNumOfDimension() == nDim && SrcLengths::GetSize() == nDim && + SrcSubLengths::GetSize() == nDim && SrcClusterLengths::GetSize() == nDim && MapDst2Src::GetSize() == nDim && MapThreadCluster2SrcCluster::GetSize() == nDim, "wrong! nDim is not consistent\n"); @@ -112,17 +115,17 @@ struct BlockwiseNdTensorCopyReorder_v3 static_for<0, nDim, 1>{}([&](auto IDim) { constexpr auto I = decltype(IDim){}; constexpr index_t i = I.Get(); - // compiler: will it really compute index here, or be associated with Get1dIndex and + // compiler: will it really compute index here, or be merged with Get1dIndex and // optimized away??? src_data_multi_id[i] *= src_sub_lengths.Get(I); }); - // compiler: will it really compute index here, or be associated with Get1dIndex and + // compiler: will it really compute index here, or be merged with Get1dIndex and // optimized away??? 
const auto dst_data_multi_id = reorder_array_given_new2old(src_data_multi_id, map_dst2src); - mSrcMyThreadOffset = src_desc.Get1dIndex(src_data_multi_id); - mDstMyThreadOffset = dst_desc.Get1dIndex(dst_data_multi_id); + mSrcMyThreadOffset = src_desc.Get1dIndex(src_data_multi_id + src_block_data_multi_id_begin); + mDstMyThreadOffset = dst_desc.Get1dIndex(dst_data_multi_id + dst_block_data_multi_id_begin); } __device__ static constexpr index_t GetRegisterClipboardSize() @@ -176,12 +179,12 @@ struct BlockwiseNdTensorCopyReorder_v3 constexpr index_t clipboard_offset = thread_tensor_desc.Get1dIndex(clipboard_data_multi_id); - threadwise_nd_tensor_copy(SrcDesc{}, - p_src + src_offset + mSrcMyThreadOffset, - thread_tensor_desc, - p_clipboard + clipboard_offset, - thread_sub_tensor_lengths, - Number{}); + threadwise_tensor_slice_copy(SrcDesc{}, + p_src + src_offset + mSrcMyThreadOffset, + thread_tensor_desc, + p_clipboard + clipboard_offset, + thread_sub_tensor_lengths, + Number{}); }); } @@ -222,22 +225,22 @@ struct BlockwiseNdTensorCopyReorder_v3 // write in the order of dst #if 1 - threadwise_nd_tensor_copy_reorder_given_dst2src_v2(thread_tensor_desc, - p_clipboard + clipboard_offset, - DstDesc{}, - p_dst + dst_offset + - mDstMyThreadOffset, - thread_sub_tensor_lengths, - MapDst2Src{}); + threadwise_tensor_slice_copy_reorder_given_dst2src_v2(thread_tensor_desc, + p_clipboard + clipboard_offset, + DstDesc{}, + p_dst + dst_offset + + mDstMyThreadOffset, + thread_sub_tensor_lengths, + MapDst2Src{}); #else - threadwise_nd_tensor_copy_reorder_given_dst2src_v3(thread_tensor_desc, - p_clipboard + clipboard_offset, - DstDesc{}, - p_dst + dst_offset + - mDstMyThreadOffset, - thread_sub_tensor_lengths, - MapDst2Src{}, - Number{}); + threadwise_tensor_slice_copy_reorder_given_dst2src_v3(thread_tensor_desc, + p_clipboard + clipboard_offset, + DstDesc{}, + p_dst + dst_offset + + mDstMyThreadOffset, + thread_sub_tensor_lengths, + MapDst2Src{}, + Number{}); #endif }); } diff --git 
a/src/include/conv_common.hip.hpp b/src/include/conv_common.hip.hpp index b1d248d847..d9bf22b9c8 100644 --- a/src/include/conv_common.hip.hpp +++ b/src/include/conv_common.hip.hpp @@ -14,8 +14,8 @@ __host__ __device__ constexpr auto get_convolution_output_default_4d_tensor_desc constexpr auto I2 = Number<2>{}; constexpr auto I3 = Number<3>{}; - static_assert(in_desc.GetDimension() == 4, "input nDim is not 4"); - static_assert(wei_desc.GetDimension() == 4, "weight nDim is not 4"); + static_assert(in_desc.GetNumOfDimension() == 4, "input nDim is not 4"); + static_assert(wei_desc.GetNumOfDimension() == 4, "weight nDim is not 4"); static_assert(in_desc.GetLength(I1) == wei_desc.GetLength(I1), "input & weight dimension not consistent"); @@ -45,8 +45,8 @@ __host__ __device__ constexpr auto get_convolution_with_padding_output_default_4 constexpr auto I2 = Number<2>{}; constexpr auto I3 = Number<3>{}; - static_assert(in_desc.GetDimension() == 4, "input nDim is not 4"); - static_assert(wei_desc.GetDimension() == 4, "weight nDim is not 4"); + static_assert(in_desc.GetNumOfDimension() == 4, "input nDim is not 4"); + static_assert(wei_desc.GetNumOfDimension() == 4, "weight nDim is not 4"); static_assert(in_desc.GetLength(I1) == wei_desc.GetLength(I1), "input & weight dimension not consistent"); diff --git a/src/include/functional.hip.hpp b/src/include/functional.hip.hpp index 90c976b2a1..177270367f 100644 --- a/src/include/functional.hip.hpp +++ b/src/include/functional.hip.hpp @@ -1,76 +1,6 @@ #pragma once #include "constant_integral.hip.hpp" -template -struct static_for_impl -{ - template - __host__ __device__ void operator()(F f) const - { - static_assert(Remaining % Increment == 0, "wrong! 
Remaining % Increment != 0"); - static_assert(Increment <= Remaining, "will go out-of-range"); - - f(Number{}); - static_for_impl{}(f); - } -}; - -template -struct static_for_impl -{ - template - __host__ __device__ void operator()(F) const - { - // no work left, just return - return; - } -}; - -template -struct static_for -{ - template - __host__ __device__ void operator()(F f) const - { - static_assert(NBegin < NEnd, "Wrong! we should have NBegin < NEnd"); - static_assert((NEnd - NBegin) % Increment == 0, - "Wrong! should satisfy (NEnd - NBegin) % Increment == 0"); - static_for_impl{}(f); - } -}; - -template -struct static_const_reduce_n -{ - template - __host__ __device__ constexpr auto operator()(F f, Reduce r) const - { - static_assert(NLoop > 1, "out-of-range"); - - constexpr auto a = f(Number{}); - auto b = static_const_reduce_n{}(f, r); // TODO: cannot use constexpr here, weird - return r(a, b); - } -}; - -template <> -struct static_const_reduce_n<1> -{ - template - __host__ __device__ constexpr auto operator()(F f, Reduce) const - { - return f(Number<0>{}); - } -}; - -#if 0 -template -__host__ __device__ constexpr auto unpacker(F f) -{ - return [=](auto xs_array){ f(xs...); }; -} -#endif - struct forwarder { template @@ -132,3 +62,76 @@ struct static_if return Type{}; } }; +template +struct static_for_impl +{ + template + __host__ __device__ void operator()(F f) const + { + static_assert(Remaining % Increment == 0, "wrong! Remaining % Increment != 0"); + static_assert(Increment <= Remaining, "will go out-of-range"); + + f(Number{}); + static_for_impl{}(f); + } +}; + +template +struct static_for_impl +{ + template + __host__ __device__ void operator()(F) const + { + // no work left, just return + return; + } +}; + +// F signature: F(Number) +template +struct static_for +{ + template + __host__ __device__ void operator()(F f) const + { + static_assert((NEnd - NBegin) % Increment == 0, + "Wrong! 
should satisfy (NEnd - NBegin) % Increment == 0"); + + static_if < NBegin{}([&](auto forwarder) { + static_for_impl{}(f); + }); + } +}; + +template +struct static_const_reduce_n +{ + // signature of F: F(Number) + template + __host__ __device__ constexpr auto operator()(F f, Reduce r) const + { + static_assert(NLoop > 1, "out-of-range"); + + constexpr auto a = f(Number{}); + auto b = static_const_reduce_n{}(f, r); // TODO: cannot use constexpr here, weird + return r(a, b); + } +}; + +template <> +struct static_const_reduce_n<1> +{ + template + __host__ __device__ constexpr auto operator()(F f, Reduce) const + { + return f(Number<0>{}); + } +}; + +#if 0 +template +__host__ __device__ constexpr auto unpacker(F f) +{ + return [=](auto xs_array){ f(xs...); }; +} +#endif diff --git a/src/include/gridwise_convolution_direct_v2_nchw_kcyx_nkhw.hip.hpp b/src/include/gridwise_convolution_direct_v2_nchw_kcyx_nkhw.hip.hpp index 92bd32376f..3590e63f1e 100644 --- a/src/include/gridwise_convolution_direct_v2_nchw_kcyx_nkhw.hip.hpp +++ b/src/include/gridwise_convolution_direct_v2_nchw_kcyx_nkhw.hip.hpp @@ -3,7 +3,7 @@ #include "ConstantTensorDescriptor.hip.hpp" #include "blockwise_2d_tensor_op.hip.hpp" #include "blockwise_4d_tensor_op.hip.hpp" -#include "threadwise_nd_tensor_op.hip.hpp" +#include "threadwise_tensor_slice_op.hip.hpp" #include "threadwise_direct_convolution.hip.hpp" template {}; - const auto blockwise_in_copy_reorder = - BlockwiseNdTensorCopyReorder_v3, - InBlockReorderSrcSubLengths_NCHW, - InBlockReorderSrcClusterLengths_NCHW, - decltype(map_chwn2nchw), - InBlockReorderMapThreadCluster2SrcCluster_CHNW2NCHW, - InBlockReorderDataPerRead_W, - InBlockReorderDataPerWrite_N>{}; + const auto blockwise_in_copy_reorder = BlockwiseTensorSliceReorderCopy_v3< + BlockSize, + Float, + decltype(in_n_c_h_w_global_desc), + decltype(in_c_h_w_n_block_desc), + Sequence, + InBlockReorderSrcSubLengths_NCHW, + InBlockReorderSrcClusterLengths_NCHW, + decltype(map_chwn2nchw), + 
InBlockReorderMapThreadCluster2SrcCluster_CHNW2NCHW, + InBlockReorderDataPerRead_W, + InBlockReorderDataPerWrite_N>{}; // blockwise wei copy // format is [CPerBlock, X * KPerBlock] diff --git a/src/include/gridwise_convolution_implicit_gemm_v1r3_chwn_cyxk_khwn.hip.hpp b/src/include/gridwise_convolution_implicit_gemm_v1r3_chwn_cyxk_khwn.hip.hpp index 5f9c7a75bc..35b5d87a84 100644 --- a/src/include/gridwise_convolution_implicit_gemm_v1r3_chwn_cyxk_khwn.hip.hpp +++ b/src/include/gridwise_convolution_implicit_gemm_v1r3_chwn_cyxk_khwn.hip.hpp @@ -4,7 +4,7 @@ #include "ConstantMatrixDescriptor.hip.hpp" #include "blockwise_2d_tensor_op.hip.hpp" #include "blockwise_4d_tensor_op.hip.hpp" -#include "threadwise_nd_tensor_op.hip.hpp" +#include "threadwise_tensor_slice_op.hip.hpp" #include "threadwise_4d_tensor_op.hip.hpp" #include "blockwise_batched_gemm.hip.hpp" @@ -347,7 +347,7 @@ struct GridwiseConvolutionImplicitGemm_v1r3_chwn_cyxk_khwn } #endif - threadwise_nd_tensor_copy( + threadwise_tensor_slice_copy( out_10d_thread_desc, p_out_thread, out_10d_global_desc, @@ -397,7 +397,7 @@ struct GridwiseConvolutionImplicitGemm_v1r3_chwn_cyxk_khwn } #endif - threadwise_nd_tensor_copy( + threadwise_tensor_slice_copy( out_10d_thread_desc, p_out_thread, out_10d_global_desc, diff --git a/src/include/gridwise_convolution_implicit_gemm_v1r3_lds_double_buffer_chwn_cyxk_khwn.hip.hpp b/src/include/gridwise_convolution_implicit_gemm_v1r3_lds_double_buffer_chwn_cyxk_khwn.hip.hpp index a266b2de1e..3e2ea73bd9 100644 --- a/src/include/gridwise_convolution_implicit_gemm_v1r3_lds_double_buffer_chwn_cyxk_khwn.hip.hpp +++ b/src/include/gridwise_convolution_implicit_gemm_v1r3_lds_double_buffer_chwn_cyxk_khwn.hip.hpp @@ -4,7 +4,7 @@ #include "ConstantMatrixDescriptor.hip.hpp" #include "blockwise_2d_tensor_op.hip.hpp" #include "blockwise_4d_tensor_op.hip.hpp" -#include "threadwise_nd_tensor_op.hip.hpp" +#include "threadwise_tensor_slice_op.hip.hpp" #include "threadwise_4d_tensor_op.hip.hpp" #include 
"blockwise_batched_gemm.hip.hpp" @@ -408,7 +408,7 @@ struct GridwiseConvolutionImplicitGemm_v1r3_lds_double_buffer_chwn_cyxk_khwn } #endif - threadwise_nd_tensor_copy( + threadwise_tensor_slice_copy( out_10d_thread_desc, p_out_thread, out_10d_global_desc, @@ -458,7 +458,7 @@ struct GridwiseConvolutionImplicitGemm_v1r3_lds_double_buffer_chwn_cyxk_khwn } #endif - threadwise_nd_tensor_copy( + threadwise_tensor_slice_copy( out_10d_thread_desc, p_out_thread, out_10d_global_desc, diff --git a/src/include/gridwise_convolution_implicit_gemm_v1r3_lds_double_buffer_nchw_cyxk_khwn.hip.hpp b/src/include/gridwise_convolution_implicit_gemm_v1r3_lds_double_buffer_nchw_cyxk_khwn.hip.hpp index 732443adf3..e33b25d429 100644 --- a/src/include/gridwise_convolution_implicit_gemm_v1r3_lds_double_buffer_nchw_cyxk_khwn.hip.hpp +++ b/src/include/gridwise_convolution_implicit_gemm_v1r3_lds_double_buffer_nchw_cyxk_khwn.hip.hpp @@ -3,8 +3,8 @@ #include "ConstantTensorDescriptor.hip.hpp" #include "ConstantMatrixDescriptor.hip.hpp" #include "blockwise_2d_tensor_op.hip.hpp" -#include "blockwise_nd_tensor_op.hip.hpp" -#include "threadwise_nd_tensor_op.hip.hpp" +#include "blockwise_tensor_slice_op.hip.hpp" +#include "threadwise_tensor_slice_op.hip.hpp" #include "threadwise_4d_tensor_op.hip.hpp" #include "blockwise_batched_gemm.hip.hpp" @@ -131,18 +131,18 @@ struct GridwiseConvolutionImplicitGemm_v1r3_lds_double_buffer_nchw_cyxk_khwn // input: format is [N, C, Hi, Wi] to [C, Hi, Wi, N] constexpr auto map_chwn2nchw = Sequence<1, 2, 3, 0>{}; - const auto blockwise_in_copy_reorder = - BlockwiseNdTensorCopyReorder_v3, - InBlockReorderSrcSubLengths_NCHW, - InBlockReorderSrcClusterLengths_NCHW, - decltype(map_chwn2nchw), - InBlockReorderMapThreadCluster2SrcCluster_CHNW2NCHW, - InBlockReorderDataPerRead_W, - InBlockReorderDataPerWrite_N>{}; + const auto blockwise_in_copy_reorder = BlockwiseTensorSliceReorderCopy_v3< + BlockSize, + Float, + decltype(in_n_c_h_w_global_desc), + 
decltype(in_c_h_w_n_block_desc), + Sequence, + InBlockReorderSrcSubLengths_NCHW, + InBlockReorderSrcClusterLengths_NCHW, + decltype(map_chwn2nchw), + InBlockReorderMapThreadCluster2SrcCluster_CHNW2NCHW, + InBlockReorderDataPerRead_W, + InBlockReorderDataPerWrite_N>{}; // blockwise wei copy // format is [CPerBlock, KPerBlock] @@ -407,7 +407,7 @@ struct GridwiseConvolutionImplicitGemm_v1r3_lds_double_buffer_nchw_cyxk_khwn } #endif - threadwise_nd_tensor_copy( + threadwise_tensor_slice_copy( out_10d_thread_desc, p_out_thread, out_10d_global_desc, @@ -457,7 +457,7 @@ struct GridwiseConvolutionImplicitGemm_v1r3_lds_double_buffer_nchw_cyxk_khwn } #endif - threadwise_nd_tensor_copy( + threadwise_tensor_slice_copy( out_10d_thread_desc, p_out_thread, out_10d_global_desc, diff --git a/src/include/gridwise_convolution_implicit_gemm_v1r3_lds_double_buffer_nchw_cyxk_nkhw.hip.hpp b/src/include/gridwise_convolution_implicit_gemm_v1r3_lds_double_buffer_nchw_cyxk_nkhw.hip.hpp index a2dee6a01f..605496d1c8 100644 --- a/src/include/gridwise_convolution_implicit_gemm_v1r3_lds_double_buffer_nchw_cyxk_nkhw.hip.hpp +++ b/src/include/gridwise_convolution_implicit_gemm_v1r3_lds_double_buffer_nchw_cyxk_nkhw.hip.hpp @@ -3,8 +3,8 @@ #include "ConstantTensorDescriptor.hip.hpp" #include "ConstantMatrixDescriptor.hip.hpp" #include "blockwise_2d_tensor_op.hip.hpp" -#include "blockwise_nd_tensor_op.hip.hpp" -#include "threadwise_nd_tensor_op.hip.hpp" +#include "blockwise_tensor_slice_op.hip.hpp" +#include "threadwise_tensor_slice_op.hip.hpp" #include "threadwise_4d_tensor_op.hip.hpp" #include "blockwise_batched_gemm.hip.hpp" @@ -131,18 +131,18 @@ struct GridwiseConvolutionImplicitGemm_v1r3_lds_double_buffer_nchw_cyxk_nkhw // input: format is [N, C, Hi, Wi] to [C, Hi, Wi, N] constexpr auto map_chwn2nchw = Sequence<1, 2, 3, 0>{}; - const auto blockwise_in_copy_reorder = - BlockwiseNdTensorCopyReorder_v3, - InBlockReorderSrcSubLengths_NCHW, - InBlockReorderSrcClusterLengths_NCHW, - 
decltype(map_chwn2nchw), - InBlockReorderMapThreadCluster2SrcCluster_CHNW2NCHW, - InBlockReorderDataPerRead_W, - InBlockReorderDataPerWrite_N>{}; + const auto blockwise_in_copy_reorder = BlockwiseTensorSliceReorderCopy_v3< + BlockSize, + Float, + decltype(in_n_c_h_w_global_desc), + decltype(in_c_h_w_n_block_desc), + Sequence, + InBlockReorderSrcSubLengths_NCHW, + InBlockReorderSrcClusterLengths_NCHW, + decltype(map_chwn2nchw), + InBlockReorderMapThreadCluster2SrcCluster_CHNW2NCHW, + InBlockReorderDataPerRead_W, + InBlockReorderDataPerWrite_N>{}; // blockwise wei copy // format is [CPerBlock, KPerBlock] @@ -409,7 +409,7 @@ struct GridwiseConvolutionImplicitGemm_v1r3_lds_double_buffer_nchw_cyxk_nkhw constexpr auto map_out_global2thread = Sequence<7, 8, 9, 0, 1, 2, 3, 4, 5, 6>{}; - threadwise_nd_tensor_copy_reorder_given_dst2src_v2( + threadwise_tensor_slice_copy_reorder_given_dst2src_v2( out_10d_thread_desc, p_out_thread, out_10d_global_desc, @@ -458,7 +458,7 @@ struct GridwiseConvolutionImplicitGemm_v1r3_lds_double_buffer_nchw_cyxk_nkhw constexpr auto map_out_global2thread = Sequence<8, 9, 0, 1, 2, 3, 4, 5, 6, 7>{}; - threadwise_nd_tensor_copy_reorder_given_dst2src_v2( + threadwise_tensor_slice_copy_reorder_given_dst2src_v2( out_10d_thread_desc, p_out_thread, out_10d_global_desc, diff --git a/src/include/gridwise_convolution_implicit_gemm_v1r3_nchw_cyxk_khwn.hip.hpp b/src/include/gridwise_convolution_implicit_gemm_v1r3_nchw_cyxk_khwn.hip.hpp index 5e6fde6ef2..3b3107ff70 100644 --- a/src/include/gridwise_convolution_implicit_gemm_v1r3_nchw_cyxk_khwn.hip.hpp +++ b/src/include/gridwise_convolution_implicit_gemm_v1r3_nchw_cyxk_khwn.hip.hpp @@ -3,8 +3,8 @@ #include "ConstantTensorDescriptor.hip.hpp" #include "ConstantMatrixDescriptor.hip.hpp" #include "blockwise_2d_tensor_op.hip.hpp" -#include "blockwise_nd_tensor_op.hip.hpp" -#include "threadwise_nd_tensor_op.hip.hpp" +#include "blockwise_tensor_slice_op.hip.hpp" +#include "threadwise_tensor_slice_op.hip.hpp" #include 
"threadwise_4d_tensor_op.hip.hpp" #include "blockwise_batched_gemm.hip.hpp" @@ -130,18 +130,18 @@ struct GridwiseConvolutionImplicitGemm_v1r3_nchw_cyxk_khwn // input: format is [N, C, Hi, Wi] to [C, Hi, Wi, N] constexpr auto map_chwn2nchw = Sequence<1, 2, 3, 0>{}; - const auto blockwise_in_copy_reorder = - BlockwiseNdTensorCopyReorder_v3, - InBlockReorderSrcSubLengths_NCHW, - InBlockReorderSrcClusterLengths_NCHW, - decltype(map_chwn2nchw), - InBlockReorderMapThreadCluster2SrcCluster_CHNW2NCHW, - InBlockReorderDataPerRead_W, - InBlockReorderDataPerWrite_N>{}; + const auto blockwise_in_copy_reorder = BlockwiseTensorSliceReorderCopy_v3< + BlockSize, + Float, + decltype(in_n_c_h_w_global_desc), + decltype(in_c_h_w_n_block_desc), + Sequence, + InBlockReorderSrcSubLengths_NCHW, + InBlockReorderSrcClusterLengths_NCHW, + decltype(map_chwn2nchw), + InBlockReorderMapThreadCluster2SrcCluster_CHNW2NCHW, + InBlockReorderDataPerRead_W, + InBlockReorderDataPerWrite_N>{}; // blockwise wei copy // format is [CPerBlock, KPerBlock] @@ -390,7 +390,7 @@ struct GridwiseConvolutionImplicitGemm_v1r3_nchw_cyxk_khwn } #endif - threadwise_nd_tensor_copy( + threadwise_tensor_slice_copy( out_10d_thread_desc, p_out_thread, out_10d_global_desc, @@ -440,7 +440,7 @@ struct GridwiseConvolutionImplicitGemm_v1r3_nchw_cyxk_khwn } #endif - threadwise_nd_tensor_copy( + threadwise_tensor_slice_copy( out_10d_thread_desc, p_out_thread, out_10d_global_desc, diff --git a/src/include/gridwise_convolution_implicit_gemm_v1r3_nchw_cyxk_nkhw.hip.hpp b/src/include/gridwise_convolution_implicit_gemm_v1r3_nchw_cyxk_nkhw.hip.hpp index ffda830d67..ab66902f65 100644 --- a/src/include/gridwise_convolution_implicit_gemm_v1r3_nchw_cyxk_nkhw.hip.hpp +++ b/src/include/gridwise_convolution_implicit_gemm_v1r3_nchw_cyxk_nkhw.hip.hpp @@ -3,8 +3,8 @@ #include "ConstantTensorDescriptor.hip.hpp" #include "ConstantMatrixDescriptor.hip.hpp" #include "blockwise_2d_tensor_op.hip.hpp" -#include "blockwise_nd_tensor_op.hip.hpp" 
-#include "threadwise_nd_tensor_op.hip.hpp" +#include "blockwise_tensor_slice_op.hip.hpp" +#include "threadwise_tensor_slice_op.hip.hpp" #include "threadwise_4d_tensor_op.hip.hpp" #include "blockwise_batched_gemm.hip.hpp" @@ -73,7 +73,7 @@ struct GridwiseConvolutionImplicitGemm_v1r3_nchw_cyxk_nkhw constexpr index_t Y = wei_c_y_x_k_global_desc.GetLength(I1); constexpr index_t X = wei_c_y_x_k_global_desc.GetLength(I2); - // divide block work: [K, Ho, Wo, N] + // divide block work: [N, K, Ho, Wo] static_assert(N % NPerBlock == 0 && K % KPerBlock == 0 && C % CPerBlock == 0 && Ho % HoPerBlock == 0 && Wo % WoPerBlock == 0, "wrong! cannot evenly divide work for workgroup "); @@ -128,18 +128,18 @@ struct GridwiseConvolutionImplicitGemm_v1r3_nchw_cyxk_nkhw // input: format is [N, C, Hi, Wi] to [C, Hi, Wi, N] constexpr auto map_chwn2nchw = Sequence<1, 2, 3, 0>{}; - const auto blockwise_in_copy_reorder = - BlockwiseNdTensorCopyReorder_v3, - InBlockReorderSrcSubLengths_NCHW, - InBlockReorderSrcClusterLengths_NCHW, - decltype(map_chwn2nchw), - InBlockReorderMapThreadCluster2SrcCluster_CHNW2NCHW, - InBlockReorderDataPerRead_W, - InBlockReorderDataPerWrite_N>{}; + const auto blockwise_in_copy_reorder = BlockwiseTensorSliceReorderCopy_v3< + BlockSize, + Float, + decltype(in_n_c_h_w_global_desc), + decltype(in_c_h_w_n_block_desc), + Sequence, + InBlockReorderSrcSubLengths_NCHW, + InBlockReorderSrcClusterLengths_NCHW, + decltype(map_chwn2nchw), + InBlockReorderMapThreadCluster2SrcCluster_CHNW2NCHW, + InBlockReorderDataPerRead_W, + InBlockReorderDataPerWrite_N>{}; // blockwise wei copy // format is [CPerBlock, KPerBlock] @@ -390,7 +390,7 @@ struct GridwiseConvolutionImplicitGemm_v1r3_nchw_cyxk_nkhw constexpr auto map_out_global2thread = Sequence<7, 8, 9, 0, 1, 2, 3, 4, 5, 6>{}; - threadwise_nd_tensor_copy_reorder_given_dst2src_v2( + threadwise_tensor_slice_copy_reorder_given_dst2src_v2( out_10d_thread_desc, p_out_thread, out_10d_global_desc, @@ -439,7 +439,7 @@ struct 
GridwiseConvolutionImplicitGemm_v1r3_nchw_cyxk_nkhw constexpr auto map_out_global2thread = Sequence<8, 9, 0, 1, 2, 3, 4, 5, 6, 7>{}; - threadwise_nd_tensor_copy_reorder_given_dst2src_v2( + threadwise_tensor_slice_copy_reorder_given_dst2src_v2( out_10d_thread_desc, p_out_thread, out_10d_global_desc, diff --git a/src/include/gridwise_convolution_implicit_gemm_v2_chwn_cyxk_khwn_lds_double_buffer.hip.hpp b/src/include/gridwise_convolution_implicit_gemm_v2_chwn_cyxk_khwn_lds_double_buffer.hip.hpp index 90370b7b4f..fefed3a3e6 100644 --- a/src/include/gridwise_convolution_implicit_gemm_v2_chwn_cyxk_khwn_lds_double_buffer.hip.hpp +++ b/src/include/gridwise_convolution_implicit_gemm_v2_chwn_cyxk_khwn_lds_double_buffer.hip.hpp @@ -5,7 +5,7 @@ #include "blockwise_4d_tensor_op.hip.hpp" #include "blockwise_2d_tensor_op.hip.hpp" #include "threadwise_2d_tensor_op.hip.hpp" -#include "threadwise_nd_tensor_op.hip.hpp" +#include "threadwise_tensor_slice_op.hip.hpp" #include "blockwise_gemm.hip.hpp" // define B = flatten(N, Hi, Wi) diff --git a/src/include/gridwise_convolution_implicit_gemm_v3_nchw_cyxk_nkhw.hip.hpp b/src/include/gridwise_convolution_implicit_gemm_v3_nchw_cyxk_nkhw.hip.hpp new file mode 100644 index 0000000000..be0804d508 --- /dev/null +++ b/src/include/gridwise_convolution_implicit_gemm_v3_nchw_cyxk_nkhw.hip.hpp @@ -0,0 +1,309 @@ +#pragma once +#include "common.hip.hpp" +#include "ConstantTensorDescriptor.hip.hpp" +#include "ConstantMatrixDescriptor.hip.hpp" +#include "blockwise_gemm.hip.hpp" + +// define B = merge(N, Ho, Wo) +template +struct GridwiseConvolutionImplicitGemm_v3_nchw_cyxk_nkhw +{ + __device__ void Run(const Float* const __restrict__ p_in_global, + const Float* const __restrict__ p_wei_global, + Float* const __restrict__ p_out_global) const + { + // this is a mess + // TODO: more elegent way of specifying (or calculating) performance variables + static_assert(N2 == GemmNPerThreadSubC, "wrong!"); + static_assert(KPerBlock == + N1 * GemmNPerThreadSubC * 
GemmNLevel0Cluster * GemmNLevel1Cluster, + "wrong!"); + static_assert( + KPerBlock % (N1 * GemmNPerThreadSubC * GemmNLevel0Cluster * GemmNLevel1Cluster) == 0, + "wrong!"); + + constexpr auto I0 = Number<0>{}; + constexpr auto I1 = Number<1>{}; + constexpr auto I2 = Number<2>{}; + constexpr auto I3 = Number<3>{}; + constexpr auto I4 = Number<4>{}; + constexpr auto I5 = Number<5>{}; + constexpr auto I6 = Number<6>{}; + constexpr auto I7 = Number<7>{}; + + constexpr auto in_n_c_h_w_global_desc = InGlobalDesc{}; + constexpr auto wei_c_y_x_k_global_desc = WeiGlobalDesc{}; + constexpr auto out_n_k_h_w_global_desc = OutGlobalDesc{}; + + constexpr index_t N = in_n_c_h_w_global_desc.GetLength(I0); + constexpr index_t C = in_n_c_h_w_global_desc.GetLength(I1); + constexpr index_t Hi = in_n_c_h_w_global_desc.GetLength(I2); + constexpr index_t Wi = in_n_c_h_w_global_desc.GetLength(I3); + + constexpr index_t K = out_n_k_h_w_global_desc.GetLength(I1); + constexpr index_t Ho = out_n_k_h_w_global_desc.GetLength(I2); + constexpr index_t Wo = out_n_k_h_w_global_desc.GetLength(I3); + + constexpr index_t Y = wei_c_y_x_k_global_desc.GetLength(I1); + constexpr index_t X = wei_c_y_x_k_global_desc.GetLength(I2); + + static_assert(N % (N1 * N2) == 0, "wrong! cannot divide N evenly among thread"); + + constexpr index_t N0 = N / (N1 * N2); + + constexpr index_t B = N0 * Ho * Wo; + + // divide block work by [K, B] + static_assert(K % KPerBlock == 0 && B % BPerBlock == 0 && + C % CPerBlock == 0, + "wrong!
cannot divide work evenly among block"); + + constexpr index_t KBlockWork = K / KPerBlock; + constexpr index_t BBlockWork = B / BPerBlock; + + constexpr auto block_work_desc = + make_ConstantTensorDescriptor(Sequence{}); + + const auto block_work_multi_id = block_work_desc.GetMultiIndex(get_block_1d_id()); + + const index_t k_block_data_on_global = block_work_multi_id[0] * KPerBlock; + const index_t b_block_data_on_global = block_work_multi_id[1] * BPerBlock; + + // input tensor + // memory layout descriptor in device memory [N0, N1, N2, C, H, W] + constexpr auto in_n0_n1_n2_c_h_w_global_mem_desc = + in_n_c_h_w_global_desc.Fold(I0, Sequence{}); + + // merged tensor descriptor in device memory [N1, N2, C, B], src of blockwise copy + constexpr auto in_n1_n2_c_b_global_merged_desc = + in_n0_n1_n2_c_h_w_global_mem_desc.ReorderGivenNew2Old(Sequence<1, 2, 3, 0, 4, 5>{}) + .Slice(I4, Number{}) + .Slice(I5, Number{}) + .Merge(I3, I5); + + // memory layout descriptor in LDS [C, N1, B, N2] + // be careful of LDS alignment + constexpr auto in_c_n1_b_n2_block_mem_desc = make_ConstantTensorDescriptor_aligned( + Sequence{}, Number{}); + + // tensor descriptor in LDS [N1, N2, C, B], dst of blockwise copy + constexpr auto in_n1_n2_c_b_block_desc = + in_c_n1_b_n2_block_mem_desc.ReorderGivenNew2Old(Sequence<1, 3, 0, 2>{}); + + // this check is ad-hoc + // TODO: need to properly implement tensor descriptor with alignment + static_assert(in_c_n1_b_n2_block_desc.GetStride(I1) % GemmDataPerReadB == 0, + "GemmDataPerReadB alignment requirement is not satisfied"); + + // input blockwise copy + // slice a merged tensor, reorder and copy to a normal tensor + // this copy operator already has blockwise offset built-in + const auto blockwise_in_copy = BlockwiseTensorSliceCopy_generic_v1< + BlockSize, + Float, + decltype(in_n1_n2_c_b_global_merged_desc), + decltype(in_n1_n2_c_b_block_desc), + Sequence, + InBlockCopySubLengths_N1_N2_C_B, + InBlockCopyClusterLengths_N1_N2_C_B, + Sequence<2, 0, 1, 
3>, // thread_arrange_order [C, N1, N2, B] + Sequence<0, 1, 2, 3>, // src_access_order [N1, N2, C, B] + Sequence<2, 0, 3, 1> // dst_access_order [C, N1, B, N2] + >({0, 0, 0, b_block_data_on_global}, {0, 0, 0, 0}); + + // weight tensor + // tensor descriptor in device memory, src of blockwise copy + constexpr auto wei_c_k_global_desc = wei_c_y_x_k_global_desc.Extract(Sequence<0, 3>{}); + + // tensor descriptor in LDS, dst of blockwise copy + // be careful of LDS alignment + constexpr auto wei_c_k_block_desc = make_ConstantTensorDescriptor_aligned( + Sequence{}, + Number{}); + + // operator for blockwise copy of weight into LDS + // slicing a tensor + // this copy operator already has tensor offset built-in + const auto blockwise_wei_copy = + Blockwise2dTensorCopy3({0, k_block_data_on_global}, {0, 0}); + + // GEMM definition + // c_mtx += transpose(a_mtx) * b_mtx + // a_mtx[CPerBlock, KPerBlock] is in LDS + // b_mtx[CPerBlock, N1 * BPerBlock * N2] is in LDS + // c_mtx[KPerBlock, N1 * BPerBlock * N2] is distributed among threads, and saved in + // register + constexpr auto a_c_k_block_mtx_desc = make_ConstantMatrixDescriptor( + Number{}, Number{}, Number{}); + + constexpr auto b_c_n1bn2_block_mtx_desc = + make_ConstantMatrixDescriptor(Number{}, + Number{}, + Number{}); + + // sanity check + static_assert(KPerBlock % (GemmMPerThreadSubC * GemmMLevel0Cluster * GemmMLevel1Cluster) == 0, + "wrong!"); + + constexpr index_t GemmMRepeat = + KPerBlock / (GemmMPerThreadSubC * GemmMLevel0Cluster * GemmMLevel1Cluster); + + // c_thread_mtx definition: this is a mess + // TODO: more elegant way of defining c_thread_mtx + constexpr auto c_k0k2_n1n2_thread_mtx_desc = make_ConstantMatrixDescriptor( + Number{}, Number{}); + + const auto blockwise_gemm = BlockwiseGemmBlockABlockBThreadCTransANormalBNormalC_v2< + BlockSize, + decltype(a_c_k_block_mtx_desc), + decltype(b_c_n1bn2_block_mtx_desc), + decltype(c_k0k2_n1n2_thread_mtx_desc), + GemmMPerThreadSubC, + GemmNPerThreadSubC, + 
GemmMLevel0Cluster, + GemmNLevel0Cluster, + GemmMLevel1Cluster, + GemmNLevel1Cluster, + GemmKPerThreadLoop, + GemmDataPerReadA, + GemmDataPerReadB>{}; + + // LDS allocation for input and weight: be careful of alignment + constexpr index_t max_align = mod_conv::max(InBlockReorderDataPerWrite_N, + WeiBlockCopyDataPerRead_K, + GemmDataPerReadA, + GemmDataPerReadB); + + constexpr index_t in_block_space = + in_c_n1_b_n2_block_mem_desc.GetElementSpace(Number{}); + + constexpr index_t wei_block_space = wei_c_k_block_desc.GetElementSpace(Number{}); + + __shared__ Float p_in_block[in_block_space]; + __shared__ Float p_wei_block[wei_block_space]; + + // register allocation for output + Float p_out_thread[c_k0k2_n1n2_thread_mtx_desc.GetElementSpace()]; + + // zero out threadwise output + threadwise_matrix_set_zero(c_k0k2_n1n2_thread_mtx_desc, p_out_thread); + + // do work + for(index_t y = 0; y < Y; ++y) + { + for(index_t x = 0; x < X; ++x) + { + // calculate origin of block input and weight tensor on global memory + const Float* p_in_block_on_global = + p_in_global + in_n_c_h_w_global_desc.Get1dIndex(0, 0, y, x); + + const Float* p_wei_block_on_global = + p_wei_global + wei_c_y_x_k_global_desc.Get1dIndex(0, y, x, 0); + + for(index_t + c_block_data_on_global = 0; + c_block_data_on_global < C; + c_block_data_on_global += CPerBlock, + p_in_block_on_global += CPerBlock * in_n_c_h_w_global_desc.GetStride(I1), + p_wei_block_on_global += CPerBlock * wei_c_y_x_k_global_desc.GetStride(I0)) + { + blockwise_in_copy.run(p_in_block_on_global, p_in_block); + blockwise_wei_copy.run(p_wei_block_on_global, p_wei_block); + + __syncthreads(); + + blockwise_gemm.run(p_wei_block, p_in_block, p_out_thread); + + __syncthreads(); + } + } + } + + // copy output: register to global memory + { + constexpr index_t K2 = GemmMPerThreadSubC; + constexpr index_t K1 = GemmMLevel0Cluster * GemmMLevel1Cluster; + constexpr index_t K0 = K / (K1 * K2); + + // define tensor descriptor for threadwise 
copy + // output tensor (also, memory layout) descriptor in register, src of threadwise + // copy + constexpr auto out_k0_k1_k2_n1_b_n2_thread_mem_desc = make_ConstantTensorDescriptor( + Sequence{}); + + // output memory layout descriptor in device memory + constexpr auto out_n0_n1_n2_k0_k1_k2_h_w_global_mem_desc = + out_n_k_h_w_global_desc.Fold(I1, Sequence{}).Fold(I0, Sequence{}); + + // output merged tensor descriptor in device memory, dst of threadwise copy + constexpr auto out_k0_k1_k2_n1_b_n2_global_merged_desc = + out_n0_n1_n2_k0_k1_k2_h_w_global_mem_desc + .ReorderGivenNew2Old(Sequence<3, 4, 5, 1, 0, 6, 7, 2>{}) + .Merge(I4, I6); + + // calculate origin of thread output tensor on global memory + // blockwise GEMM c matrix starting index + const auto c_thread_mtx_on_block = + blockwise_gemm.GetBeginOfThreadMatrixC(get_thread_local_1d_id()); + + // origin of thread tensor on global + const index_t k_thread_data_on_global = k_block_data_on_global + + c_thread_mtx_on_block.row; + const index_t b_thread_data_on_global = + b_block_data_on_global + c_thread_mtx_on_block.col; + + // output merged global tensor descriptor, for calculating origin of thread tensor + // in global memory + constexpr auto out_k_n1_b_n2_global_merged_desc = + out_k0_k1_k2_n1_b_n2_global_merged_desc.Unfold(I1, I2); + + // origin of thread tensor in global memory + const Float* p_out_thread_on_global = + p_out_global + + out_k_n1_b_n2_global_merged_desc.Get1dIndex( + k_thread_data_on_global, 0, 0, 0); // dst origin on merged global tensor + + // copy + threadwise_tensor_slice_copy_generic( + out_k0_k1_k2_n1_b_n2_thread_mem_desc, // src thread tensor (in register) descriptor + p_out_thread, // origin of src + {0, 0, 0, 0, 0, 0}, // starting point of slice, w.r.t. 
origin of src + out_k0_k1_k2_n1_b_n2_global_merged_desc, // dst global merged tensor (in device mem) + // descriptor + p_out_thread_on_global, // origin of dst + {0, + 0, + 0, + 0, + b_thread_data_on_global, + 0}, // starting point of slice w.r.t. origin of dst + out_k0_k1_k2_n1_b_n2_thread_mem_desc.GetLengths(), // slice lengths + Sequence<2, 3, 4, 0, 5, 1>{} // order of dimension access + ); + } + } +}; diff --git a/src/include/tensor.hpp b/src/include/tensor.hpp index 1ebfef0c5d..c91d132eec 100644 --- a/src/include/tensor.hpp +++ b/src/include/tensor.hpp @@ -85,7 +85,7 @@ struct TensorDescriptor { } - std::size_t GetDimension() const; + std::size_t GetNumOfDimension() const; std::size_t GetElementSize() const; std::size_t GetElementSpace() const; @@ -95,7 +95,7 @@ struct TensorDescriptor template std::size_t Get1dIndex(Is... is) const { - assert(sizeof...(Is) == this->GetDimension()); + assert(sizeof...(Is) == this->GetNumOfDimension()); std::initializer_list iss{static_cast(is)...}; return std::inner_product(iss.begin(), iss.end(), mStrides.begin(), std::size_t{0}); } @@ -206,7 +206,7 @@ struct Tensor template void GenerateTensorValue(G g, std::size_t num_thread = 1) { - switch(mDesc.GetDimension()) + switch(mDesc.GetNumOfDimension()) { case 1: { diff --git a/src/include/threadwise_2d_tensor_op.hip.hpp b/src/include/threadwise_2d_tensor_op.hip.hpp index 9121bb9e76..24c6823f13 100644 --- a/src/include/threadwise_2d_tensor_op.hip.hpp +++ b/src/include/threadwise_2d_tensor_op.hip.hpp @@ -88,7 +88,7 @@ threadwise_2d_tensor_copy_reorder_by_get_dst_from_src(SrcDesc, SrcDesc{}, p_src, DstDesc{}, p_dst, SrcOpLengths{}, MapDst2Src{}, f_copy); } -#if 0 // replaced threadwise_nd_tensor_copy +#if 0 // replaced threadwise_tensor_slice_copy template __device__ void threadwise_2d_tensor_copy( SrcDesc, Float* const __restrict__ p_src, DstDesc, Float* __restrict__ p_dst, SrcOpLengths) diff --git a/src/include/threadwise_direct_convolution.hip.hpp 
b/src/include/threadwise_direct_convolution.hip.hpp index 70f60e67cf..94c5f8ecd3 100644 --- a/src/include/threadwise_direct_convolution.hip.hpp +++ b/src/include/threadwise_direct_convolution.hip.hpp @@ -1,6 +1,6 @@ #pragma once #include "ConstantTensorDescriptor.hip.hpp" -#include "threadwise_nd_tensor_op.hip.hpp" +#include "threadwise_tensor_slice_op.hip.hpp" // optimized for scenario if p_in, p_wei, p_out are in register template @@ -85,11 +85,11 @@ __device__ void threadwise_direct_convolution_2(InDesc, TInWei p_wei_reg[wei_reg_desc.GetElementSpace()]; // copy input tensor into register - threadwise_nd_tensor_copy( + threadwise_tensor_slice_copy( in_desc, p_in, in_reg_desc, p_in_reg, in_reg_desc.GetLengths(), Number<1>{}); // copy input tensor into register - threadwise_nd_tensor_copy( + threadwise_tensor_slice_copy( wei_desc, p_wei, wei_reg_desc, p_wei_reg, wei_reg_desc.GetLengths(), Number<1>{}); // do convolution diff --git a/src/include/threadwise_gemm.hip.hpp b/src/include/threadwise_gemm.hip.hpp index 34c9cbc430..6713b6ce63 100644 --- a/src/include/threadwise_gemm.hip.hpp +++ b/src/include/threadwise_gemm.hip.hpp @@ -1,4 +1,19 @@ #pragma once +#include "common.hip.hpp" +#include "ConstantMatrixDescriptor.hip.hpp" + +template +__device__ void threadwise_matrix_set_zero(Matrix, Float* __restrict__ p_thread) +{ + for(index_t i = 0; i < Matrix::NRow(); ++i) + { + for(index_t j = 0; j < Matrix::NCol(); ++j) + { + const index_t id = Matrix::Get1dIndex(i, j); + p_thread[id] = 0; + } + } +} template -__device__ void threadwise_nd_tensor_copy(SrcDesc, - const Float* __restrict__ p_src, - DstDesc, - Float* __restrict__ p_dst, - SrcOpLengths, - Number) +__device__ void threadwise_tensor_slice_copy(SrcDesc, + const Float* __restrict__ p_src, + DstDesc, + Float* __restrict__ p_dst, + SrcOpLengths, + Number) { using vector_t = typename vector_type::MemoryType; constexpr index_t nDim = SrcOpLengths::GetSize(); - static_assert(SrcDesc{}.GetDimension() == nDim && 
DstDesc{}.GetDimension() == nDim, + static_assert(SrcDesc{}.GetNumOfDimension() == nDim && DstDesc{}.GetNumOfDimension() == nDim, "wrong! dimension not consistent"); constexpr auto src_desc = SrcDesc{}; @@ -63,7 +63,7 @@ __device__ void threadwise_nd_tensor_copy(SrcDesc, }); } -// write in order of src +// access in order of src template __device__ void -threadwise_nd_tensor_copy_reorder_given_dst2src_v1(SrcDesc, - const SrcData* __restrict__ p_src, - DstDesc, - DstData* __restrict__ p_dst, - SrcOpLengths, - MapDst2Src) +threadwise_tensor_slice_copy_reorder_given_dst2src_v1(SrcDesc, + const SrcData* __restrict__ p_src, + DstDesc, + DstData* __restrict__ p_dst, + SrcOpLengths, + MapDst2Src) { constexpr auto src_desc = SrcDesc{}; constexpr auto dst_desc = DstDesc{}; @@ -92,7 +92,7 @@ threadwise_nd_tensor_copy_reorder_given_dst2src_v1(SrcDesc, }); } -// write in order of dst +// access in order of dst template __device__ void -threadwise_nd_tensor_copy_reorder_given_dst2src_v2(SrcDesc, - const SrcData* __restrict__ p_src, - DstDesc, - DstData* __restrict__ p_dst, - SrcOpLengths, - MapDst2Src) +threadwise_tensor_slice_copy_reorder_given_dst2src_v2(SrcDesc, + const SrcData* __restrict__ p_src, + DstDesc, + DstData* __restrict__ p_dst, + SrcOpLengths, + MapDst2Src) { constexpr auto src_desc = SrcDesc{}; constexpr auto dst_desc = DstDesc{}; @@ -123,20 +123,22 @@ threadwise_nd_tensor_copy_reorder_given_dst2src_v2(SrcDesc, }); } -// write in order of dst +// access in order of dst +// manually pack data into vector before write template -__device__ void threadwise_nd_tensor_copy_reorder_given_dst2src_v3(SrcDesc, - const Float* __restrict__ p_src, - DstDesc, - Float* __restrict__ p_dst, - SrcOpLengths, - MapDst2Src, - Number) +__device__ void +threadwise_tensor_slice_copy_reorder_given_dst2src_v3(SrcDesc, + const Float* __restrict__ p_src, + DstDesc, + Float* __restrict__ p_dst, + SrcOpLengths, + MapDst2Src, + Number) { using vector_t = typename vector_type::MemoryType; @@ 
-190,3 +192,17 @@ __device__ void threadwise_nd_tensor_copy_reorder_given_dst2src_v3(SrcDesc, }); }); } + +template +__device__ void +threadwise_tensor_slice_copy_generic(SrcDesc, + const Float* __restrict__ p_src, + Array src_multi_offset, + DstDesc, + Float* __restrict__ p_dst, + Array dst_multi_offset, + SliceLengths, + DimAccessOrder) +{ + // not implemented +} diff --git a/src/tensor.cpp b/src/tensor.cpp index ee3d39837d..035f7b5e31 100644 --- a/src/tensor.cpp +++ b/src/tensor.cpp @@ -25,7 +25,7 @@ void TensorDescriptor::CalculateStrides() mLens.rbegin(), mLens.rend() - 1, mStrides.rbegin() + 1, std::multiplies()); } -std::size_t TensorDescriptor::GetDimension() const { return mLens.size(); } +std::size_t TensorDescriptor::GetNumOfDimension() const { return mLens.size(); } std::size_t TensorDescriptor::GetElementSize() const {