diff --git a/composable_kernel/include/config.hpp b/composable_kernel/include/config.hpp index bb6ba58e6a..3126958b67 100644 --- a/composable_kernel/include/config.hpp +++ b/composable_kernel/include/config.hpp @@ -59,14 +59,19 @@ #define CK_USE_AMD_INNER_PRODUCT_INLINE_ASM 1 #endif -// AMD buffer addressing -#ifndef CK_USE_AMD_BUFFER_ADDRESSING -#define CK_USE_AMD_BUFFER_ADDRESSING 1 +// AMD buffer_load +#ifndef CK_USE_AMD_BUFFER_LOAD +#define CK_USE_AMD_BUFFER_LOAD 1 #endif -// only gfx908 support native floating point atomic add -#ifndef CK_USE_AMD_BUFFER_ATOMIC_FADD -#define CK_USE_AMD_BUFFER_ATOMIC_FADD 0 +// AMD buffer_store +#ifndef CK_USE_AMD_BUFFER_STORE +#define CK_USE_AMD_BUFFER_STORE 1 +#endif + +// AMD buffer_atomic_add +#ifndef CK_USE_AMD_BUFFER_ATOMIC_ADD +#define CK_USE_AMD_BUFFER_ATOMIC_ADD 1 #endif // AMD XDLOPS @@ -97,9 +102,6 @@ #define CK_EXPERIMENTAL_USE_IN_REGISTER_SUB_DWORD_TRANSPOSE 1 #endif -// pass tensor descriptor by value or void* -#define CK_EXPERIMENTAL_PASS_TENSOR_DESCRIPTOR_BY_VALUE 1 -#define CK_EXPERIMENTAL_PASS_TENSOR_DESCRIPTOR_BY_VOID_POINTER 0 #define CK_EXPERIMENTAL_STATIC_TENSOR_DESCRIPTOR 0 // merge transformation use magic number division @@ -166,7 +168,8 @@ enum ActivTypeEnum_t }; // index type -using index_t = int32_t; +using index_t = int32_t; +using long_index_t = int64_t; } // namespace ck #endif diff --git a/composable_kernel/include/problem_transform/transform_forward_convolution3d_into_gemm_v4r4r4_ndhwc_kzyxc_ndhwk.hpp b/composable_kernel/include/problem_transform/transform_forward_convolution3d_into_gemm_v4r4r4_ndhwc_kzyxc_ndhwk.hpp new file mode 100644 index 0000000000..7544289b21 --- /dev/null +++ b/composable_kernel/include/problem_transform/transform_forward_convolution3d_into_gemm_v4r4r4_ndhwc_kzyxc_ndhwk.hpp @@ -0,0 +1,150 @@ +#ifndef CK_TRANSFORM_FORWARD_CONVOLUTION3D_INTO_GEMM_V4R4R4_NHWC_KYXC_NHWK_HPP +#define CK_TRANSFORM_FORWARD_CONVOLUTION3D_INTO_GEMM_V4R4R4_NHWC_KYXC_NHWK_HPP + +#include 
"common_header.hpp" +#include "tensor_descriptor.hpp" +#include "tensor_descriptor_helper.hpp" + +namespace ck { + +// A: in +// B: wei +// C: out +// GemmM = N * Do * Ho * Wo +// GemmN = K +// GemmK = Z * Y * X * C +template +__host__ __device__ constexpr auto +transform_forward_convolution3d_into_gemm_v4r4r4_ndhwc_kzyxc_ndhwk_pad( + const TensorDescriptor& in_grid_desc_n_di_hi_wi_c, + const TensorDescriptor& wei_k_z_y_x_c_grid_desc, + const TensorDescriptor& out_n_do_ho_wo_k_grid_desc, + const ConvStrides& conv_strides, + const ConvDilations& conv_dilations, + const InLeftPads& in_left_pads, + const InRightPads& in_right_pads, + Number) +{ + constexpr auto I0 = Number<0>{}; + constexpr auto I1 = Number<1>{}; + constexpr auto I2 = Number<2>{}; + constexpr auto I3 = Number<3>{}; + constexpr auto I4 = Number<4>{}; + + constexpr auto GemmK1 = Number{}; + + const auto N = in_grid_desc_n_di_hi_wi_c.GetLength(I0); + const auto K = out_n_do_ho_wo_k_grid_desc.GetLength(I4); + const auto C = in_grid_desc_n_di_hi_wi_c.GetLength(I4); + + const auto Di = in_grid_desc_n_di_hi_wi_c.GetLength(I1); + const auto Hi = in_grid_desc_n_di_hi_wi_c.GetLength(I2); + const auto Wi = in_grid_desc_n_di_hi_wi_c.GetLength(I3); + + const auto Do = out_n_do_ho_wo_k_grid_desc.GetLength(I1); + const auto Ho = out_n_do_ho_wo_k_grid_desc.GetLength(I2); + const auto Wo = out_n_do_ho_wo_k_grid_desc.GetLength(I3); + + const auto Z = wei_k_z_y_x_c_grid_desc.GetLength(I1); + const auto Y = wei_k_z_y_x_c_grid_desc.GetLength(I2); + const auto X = wei_k_z_y_x_c_grid_desc.GetLength(I3); + + const auto ConvStrideD = conv_strides[I0]; + const auto ConvStrideH = conv_strides[I1]; + const auto ConvStrideW = conv_strides[I2]; + + const auto ConvDilationD = conv_dilations[I0]; + const auto ConvDilationH = conv_dilations[I1]; + const auto ConvDilationW = conv_dilations[I2]; + + const auto InLeftPadD = in_left_pads[I0]; + const auto InLeftPadH = in_left_pads[I1]; + const auto InLeftPadW = in_left_pads[I2]; + + 
const auto InRightPadD = in_right_pads[I0]; + const auto InRightPadH = in_right_pads[I1]; + const auto InRightPadW = in_right_pads[I2]; + + const auto GemmM = N * Do * Ho * Wo; + const auto GemmN = K; + const auto GemmK = Z * Y * X * C; + const auto GemmK0 = GemmK / GemmK1; + + // A: input tensor + const auto in_grid_desc_n_dip_hip_wip_c = transform_tensor_descriptor( + in_grid_desc_n_di_hi_wi_c, + make_tuple(make_pass_through_transform(N), + make_pad_transform(Di, InLeftPadD, InRightPadD), + make_pad_transform(Hi, InLeftPadH, InRightPadH), + make_pad_transform(Wi, InLeftPadW, InRightPadW), + make_pass_through_transform(C)), + make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2>{}, Sequence<3>{}, Sequence<4>{}), + make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2>{}, Sequence<3>{}, Sequence<4>{})); + + const auto in_grid_desc_n_z_do_y_ho_x_wo_c = transform_tensor_descriptor( + in_grid_desc_n_dip_hip_wip_c, + make_tuple(make_pass_through_transform(N), + make_embed_transform(make_tuple(Z, Do), make_tuple(ConvDilationD, ConvStrideD)), + make_embed_transform(make_tuple(Y, Ho), make_tuple(ConvDilationH, ConvStrideH)), + make_embed_transform(make_tuple(X, Wo), make_tuple(ConvDilationW, ConvStrideW)), + make_pass_through_transform(C)), + make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2>{}, Sequence<3>{}, Sequence<4>{}), + make_tuple( + Sequence<0>{}, Sequence<1, 2>{}, Sequence<3, 4>{}, Sequence<5, 6>{}, Sequence<7>{})); + + const auto in_grid_desc_gemmk_gemmm = + transform_tensor_descriptor(in_grid_desc_n_z_do_y_ho_x_wo_c, + make_tuple(make_merge_transform(make_tuple(Z, Y, X, C)), + make_merge_transform(make_tuple(N, Do, Ho, Wo))), + make_tuple(Sequence<1, 3, 5, 7>{}, Sequence<0, 2, 4, 6>{}), + make_tuple(Sequence<0>{}, Sequence<1>{})); + + const auto in_grid_desc_gemmk0_gemmm_gemmk1 = + transform_tensor_descriptor(in_grid_desc_gemmk_gemmm, + make_tuple(make_unmerge_transform(make_tuple(GemmK0, GemmK1)), + make_pass_through_transform(GemmM)), + 
make_tuple(Sequence<0>{}, Sequence<1>{}), + make_tuple(Sequence<0, 2>{}, Sequence<1>{})); + + // B: weight tensor + const auto wei_grid_desc_gemmk_gemmn = transform_tensor_descriptor( + make_naive_tensor_descriptor_packed(make_tuple(K, Z * Y * X * C)), + make_tuple(make_pass_through_transform(K), make_pass_through_transform(Z * Y * X * C)), + make_tuple(Sequence<0>{}, Sequence<1>{}), + make_tuple(Sequence<1>{}, Sequence<0>{})); + + const auto wei_grid_desc_gemmk0_gemmn_gemmk1 = + transform_tensor_descriptor(wei_grid_desc_gemmk_gemmn, + make_tuple(make_unmerge_transform(make_tuple(GemmK0, GemmK1)), + make_pass_through_transform(GemmN)), + make_tuple(Sequence<0>{}, Sequence<1>{}), + make_tuple(Sequence<0, 2>{}, Sequence<1>{})); + + // C: output tensor + const auto out_grid_desc_gemmm_gemmn = transform_tensor_descriptor( + make_naive_tensor_descriptor_packed(make_tuple(N * Do * Ho * Wo, K)), + make_tuple(make_pass_through_transform(N * Do * Ho * Wo), make_pass_through_transform(K)), + make_tuple(Sequence<0>{}, Sequence<1>{}), + make_tuple(Sequence<0>{}, Sequence<1>{})); + + // const auto out_grid_desc_gemmm_gemmn = transform_tensor_descriptor( + // out_n_do_ho_wo_k_grid_desc, + // make_tuple(make_merge_transform(make_tuple(N, Do, Ho, Wo)), + // make_pass_through_transform(K)), + // make_tuple(Sequence<0, 1, 2, 3>{}, Sequence<3>{}), + // make_tuple(Sequence<0>{}, Sequence<1>{})); + + return make_tuple(in_grid_desc_gemmk0_gemmm_gemmk1, + wei_grid_desc_gemmk0_gemmn_gemmk1, + out_grid_desc_gemmm_gemmn); +} + +} // namespace ck +#endif diff --git a/composable_kernel/include/tensor_description/multi_index_transform.hpp b/composable_kernel/include/tensor_description/multi_index_transform.hpp index 248148686b..fa705cc3fe 100644 --- a/composable_kernel/include/tensor_description/multi_index_transform.hpp +++ b/composable_kernel/include/tensor_description/multi_index_transform.hpp @@ -1862,5 +1862,92 @@ struct Slice } }; +/* + * \brief lower_idx = upper_idx % modulus. 
+ * TODO: Need an improved implementation since the modulo operation is expensive. + */ +template +struct Modulo +{ + using LowerIndex = MultiIndex<1>; + using UpperIndex = MultiIndex<1>; + using UpLengths = decltype(make_tuple(UpLength{})); + + Modulus modulus_; + UpLengths up_lengths_; + + __host__ __device__ constexpr Modulo() = default; + + __host__ __device__ constexpr Modulo(const Modulus& modulus, const UpLength& up_length) + : modulus_{modulus}, up_lengths_{make_tuple(up_length)} + { + } + + __host__ __device__ static constexpr index_t GetNumOfLowerDimension() { return 1; } + + __host__ __device__ static constexpr index_t GetNumOfUpperDimension() { return 1; } + + __host__ __device__ constexpr const auto& GetUpperLengths() const { return up_lengths_; } + + template + __host__ __device__ constexpr void CalculateLowerIndex(LowIdx& idx_low, + const UpIdx& idx_up) const + { + static_assert(LowIdx::Size() == 1 && UpIdx::Size() == 1, + "wrong! inconsistent # of dimension"); + + idx_low(Number<0>{}) = idx_up[Number<0>{}] % modulus_; + } + + template + __host__ __device__ void UpdateLowerIndex(LowIdxDiff& idx_diff_low, + const UpIdxDiff& idx_diff_up, + LowIdx& idx_low, + const UpIdx& up_idx, + Number) const + { + static_assert(LowIdxDiff::Size() == 1 && UpIdxDiff::Size() == 1 && LowIdx::Size() == 1 && + UpIdx::Size() == 1, + "wrong! 
inconsistent # of dimension"); + + constexpr auto I0 = Number<0>{}; + + const auto idx_low_old = idx_low; + idx_low(I0) = (up_idx(I0) + idx_diff_up(I0)) % modulus_; + idx_diff_low(I0) = idx_low - idx_low_old; + } + + __host__ __device__ static constexpr bool IsLinearTransform() { return false; } + + __host__ __device__ static constexpr bool IsValidUpperIndexAlwaysMappedToValidLowerIndex() + { + return true; + } + + template + __host__ __device__ static constexpr bool + IsValidUpperIndexMappedToValidLowerIndex(const UpIdx& /* idx_up */) + { + return true; + } + + __host__ __device__ static constexpr bool IsKnownAtCompileTime() + { + return is_known_at_compile_time::value; + } + + __host__ __device__ void Print() const + { + printf("{"); + printf("Modulus, "); + printf("up_lengths_"); + print_multi_index(up_lengths_); + printf("}"); + } +}; } // namespace ck #endif diff --git a/composable_kernel/include/tensor_description/multi_index_transform_helper.hpp b/composable_kernel/include/tensor_description/multi_index_transform_helper.hpp index 9a73799173..bc360714b9 100644 --- a/composable_kernel/include/tensor_description/multi_index_transform_helper.hpp +++ b/composable_kernel/include/tensor_description/multi_index_transform_helper.hpp @@ -98,6 +98,12 @@ __host__ __device__ constexpr auto make_freeze_transform(const LowerIndex& low_i return Freeze{low_idx}; } +template +__host__ __device__ constexpr auto make_insert_transform(const UpperIndex& up_idx) +{ + return Insert{up_idx}; +} + template __host__ __device__ constexpr auto make_slice_transform(const LowLength& low_length, const SliceBegin& slice_begin, @@ -113,5 +119,11 @@ __host__ __device__ constexpr auto make_vectorize_transform(const VectorSize& ve return Vectorize{vector_size, up_length}; } +template +__host__ __device__ constexpr auto make_modulo_transform(const Modulus& modulus, + const UpLength& up_length) +{ + return Modulo{modulus, up_length}; +} } // namespace ck #endif diff --git 
a/composable_kernel/include/tensor_operation/gridwise_batched_gemm_xdlops_v2r3.hpp b/composable_kernel/include/tensor_operation/gridwise_batched_gemm_xdlops_v2r3.hpp index 2ccfa3a52b..08bb791d51 100644 --- a/composable_kernel/include/tensor_operation/gridwise_batched_gemm_xdlops_v2r3.hpp +++ b/composable_kernel/include/tensor_operation/gridwise_batched_gemm_xdlops_v2r3.hpp @@ -11,7 +11,6 @@ namespace ck { -#if CK_EXPERIMENTAL_PASS_TENSOR_DESCRIPTOR_BY_VALUE template -__global__ void -#if CK_USE_LAUNCH_BOUNDS - __launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU) -#endif - kernel_batched_gemm_xdlops_v2r3( - const FloatAB* __restrict__ p_a_grid, - const FloatAB* __restrict__ p_b_grid, - FloatC* __restrict__ p_c_grid, - const void CONSTANT* p_a_grid_desc_g_k0_m_k1, - const void CONSTANT* p_b_grid_desc_g_k0_n_k1, - const void CONSTANT* p_c_grid_desc_g_m0_n0_m1_n1_m2_m3_m4_n2, - const void CONSTANT* p_a_element_op, - const void CONSTANT* p_b_element_op, - const void CONSTANT* p_c_element_op, - const void CONSTANT* p_block_2_ctile_map) -{ - const auto a_grid_desc_g_k0_m_k1 = *reinterpret_cast( - cast_pointer_to_generic_address_space(p_a_grid_desc_g_k0_m_k1)); - const auto b_grid_desc_g_k0_n_k1 = *reinterpret_cast( - cast_pointer_to_generic_address_space(p_b_grid_desc_g_k0_n_k1)); - const auto c_grid_desc_g_m0_n0_m1_n1_m2_m3_m4_n2 = - *reinterpret_cast( - cast_pointer_to_generic_address_space(p_c_grid_desc_g_m0_n0_m1_n1_m2_m3_m4_n2)); - const auto block_2_ctile_map = *reinterpret_cast( - cast_pointer_to_generic_address_space(p_block_2_ctile_map)); - const auto a_element_op = *reinterpret_cast( - cast_pointer_to_generic_address_space(p_a_element_op)); - const auto b_element_op = *reinterpret_cast( - cast_pointer_to_generic_address_space(p_b_element_op)); - const auto c_element_op = *reinterpret_cast( - cast_pointer_to_generic_address_space(p_c_element_op)); - - __shared__ char p_shared[GridwiseBatchedGemm::GetSharedMemoryNumberOfByte()]; - - 
GridwiseBatchedGemm::template Run(p_a_grid, - p_b_grid, - p_c_grid, - p_shared, - a_grid_desc_g_k0_m_k1, - b_grid_desc_g_k0_n_k1, - c_grid_desc_g_m0_n0_m1_n1_m2_m3_m4_n2, - a_element_op, - b_element_op, - c_element_op, - block_2_ctile_map); -} -#endif template {}, Sequence<1>{}, Sequence<2>{}), make_tuple(Sequence<0>{}, Sequence<1, 3>{}, Sequence<2, 4>{})); - const auto c_blockid_to_g_m00_m01_n00_n01_block_cluster_adaptor = + const auto cblockid_to_g_m00_m01_n00_n01_block_cluster_adaptor = make_single_stage_tensor_adaptor( make_tuple(make_merge_transform(make_tuple(G, M00, N00, M01, N01))), make_tuple(Sequence<0, 1, 2, 3, 4>{}), make_tuple(Sequence<0>{})); - const auto c_blockid_to_g_m0_n0_block_cluster_adaptor = + const auto cblockid_to_g_m0_n0_block_cluster_adaptor = chain_tensor_adaptors(g_m00_m01_n00_n01_to_m0_n0_block_cluster_adaptor, - c_blockid_to_g_m00_m01_n00_n01_block_cluster_adaptor); + cblockid_to_g_m00_m01_n00_n01_block_cluster_adaptor); - return c_blockid_to_g_m0_n0_block_cluster_adaptor; + return cblockid_to_g_m0_n0_block_cluster_adaptor; } using CGridDesc_G_M0_N0_M1_N1_M2_M3_M4_N2 = decltype(MakeCGridDescriptor_G_M0_N0_M1_N1_M2_M3_M4_N2(CGridDesc_G_M_N{})); - using Block2CTileMap = decltype(MakeBlock2CTileMap(CGridDesc_G_M_N{}, 1, 1)); + using DefaultBlock2CTileMap = decltype(MakeDefaultBlock2CTileMap(CGridDesc_G_M_N{}, 1, 1)); - template + template __device__ static void Run(const FloatAB* __restrict__ p_a_grid, const FloatAB* __restrict__ p_b_grid, diff --git a/composable_kernel/include/tensor_operation/gridwise_gemm_dlops_v1r2.hpp b/composable_kernel/include/tensor_operation/gridwise_gemm_dlops_v1r2.hpp index d91159b884..d758309c24 100644 --- a/composable_kernel/include/tensor_operation/gridwise_gemm_dlops_v1r2.hpp +++ b/composable_kernel/include/tensor_operation/gridwise_gemm_dlops_v1r2.hpp @@ -12,7 +12,6 @@ namespace ck { -#if CK_EXPERIMENTAL_PASS_TENSOR_DESCRIPTOR_BY_VALUE template {}, integral_constant{}); } -#elif 
CK_EXPERIMENTAL_PASS_TENSOR_DESCRIPTOR_BY_VOID_POINTER -// pass tensor descriptor by CONSTANT void pointer -// CONSTANT is needed to inform compiler void pointers in the kernel signature are pointing to -// non-modifiable parameter address space, so compiler can enable corresponding optimization -template -__global__ void -#if CK_USE_LAUNCH_BOUNDS - __launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU) -#endif - kernel_gemm_dlops_v1r2(const FloatAB* __restrict__ p_a_grid, - const FloatAB* __restrict__ p_b_grid, - FloatC* __restrict__ p_c_grid, - const void CONSTANT* p_a_k_m0_m1_grid_desc, - const void CONSTANT* p_b_k_n0_n1_grid_desc, - const void CONSTANT* p_c_m0_m10_m11_n0_n10_n11_grid_desc, - const void CONSTANT* p_c_blockid_to_m0_n0_block_cluster_adaptor) -{ - // first cast void CONSTANT void* to void* - // second cast void* to Desc* - // the copy constructor of tensor descriptor doesn't take address_space(4) - const auto a_k_m0_m1_grid_desc = *reinterpret_cast( - cast_pointer_to_generic_address_space(p_a_k_m0_m1_grid_desc)); - const auto b_k_n0_n1_grid_desc = *reinterpret_cast( - cast_pointer_to_generic_address_space(p_b_k_n0_n1_grid_desc)); - const auto c_m0_m10_m11_n0_n10_n11_grid_desc = - *reinterpret_cast( - cast_pointer_to_generic_address_space(p_c_m0_m10_m11_n0_n10_n11_grid_desc)); - const auto c_blockid_to_m0_n0_block_cluster_adaptor = - *reinterpret_cast( - cast_pointer_to_generic_address_space(p_c_blockid_to_m0_n0_block_cluster_adaptor)); - - constexpr index_t shared_block_size = - GridwiseGemm::GetSharedMemoryNumberOfByte() / sizeof(FloatAB); - - __shared__ FloatAB p_shared_block[shared_block_size]; - - GridwiseGemm::Run(p_a_grid, - p_b_grid, - p_c_grid, - p_shared_block, - a_k_m0_m1_grid_desc, - b_k_n0_n1_grid_desc, - c_m0_m10_m11_n0_n10_n11_grid_desc, - c_blockid_to_m0_n0_block_cluster_adaptor, - integral_constant{}, - integral_constant{}); -} -#endif template {}), make_tuple(Sequence<0>{})); - return 
c_blockid_to_m0_n0_block_cluster_adaptor; + return cblockid_to_m0_n0_block_cluster_adaptor; } using AKM0M1GridDesc = decltype(MakeAKM0M1GridDescriptor(AKMGridDesc{})); @@ -321,7 +264,7 @@ struct GridwiseGemmDlops_km_kn_mn_v1r2 const AKM0M1GridDesc& a_k_m0_m1_grid_desc, const BKN0N1GridDesc& b_k_n0_n1_grid_desc, const CM0M10M11N0N10N11GridDesc& c_m0_m10_m11_n0_n10_n11_grid_desc, - const CBlockIdToM0N0BlockClusterAdaptor& c_blockid_to_m0_n0_block_cluster_adaptor, + const CBlockIdToM0N0BlockClusterAdaptor& cblockid_to_m0_n0_block_cluster_adaptor, integral_constant, integral_constant) { @@ -336,7 +279,7 @@ struct GridwiseGemmDlops_km_kn_mn_v1r2 // divide block work by [M, N] const auto c_m0_n0_block_cluster_idx = - c_blockid_to_m0_n0_block_cluster_adaptor.CalculateBottomIndex( + cblockid_to_m0_n0_block_cluster_adaptor.CalculateBottomIndex( make_multi_index(get_block_1d_id())); // HACK: this force index data into SGPR diff --git a/composable_kernel/include/tensor_operation/gridwise_gemm_dlops_v1r3.hpp b/composable_kernel/include/tensor_operation/gridwise_gemm_dlops_v1r3.hpp index 32b6c31200..4a7db509ed 100644 --- a/composable_kernel/include/tensor_operation/gridwise_gemm_dlops_v1r3.hpp +++ b/composable_kernel/include/tensor_operation/gridwise_gemm_dlops_v1r3.hpp @@ -12,7 +12,6 @@ namespace ck { -#if CK_EXPERIMENTAL_PASS_TENSOR_DESCRIPTOR_BY_VALUE template {}, integral_constant{}); } -#elif CK_EXPERIMENTAL_PASS_TENSOR_DESCRIPTOR_BY_VOID_POINTER -// pass tensor descriptor by CONSTANT void pointer -// CONSTANT is needed to inform compiler void pointers in the kernel signature are pointing to -// non-modifiable parameter address space, so compiler can enable corresponding optimization -template -__global__ void -#if CK_USE_LAUNCH_BOUNDS - __launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU) -#endif - kernel_gemm_dlops_v1r3(const FloatAB* __restrict__ p_a_grid, - const FloatAB* __restrict__ p_b_grid, - FloatC* __restrict__ p_c_grid, - const void CONSTANT* 
p_a_k0_m0_m1_k1_grid_desc, - const void CONSTANT* p_b_k0_n0_n1_k1_grid_desc, - const void CONSTANT* p_c_m0_m10_m11_n0_n10_n11_grid_desc, - const void CONSTANT* p_c_blockid_to_m0_n0_block_cluster_adaptor) -{ - // first cast void CONSTANT void* to void* - // second cast void* to Desc* - // the copy constructor of tensor descriptor doesn't take address_space(4) - const auto a_k0_m0_m1_k1_grid_desc = *reinterpret_cast( - cast_pointer_to_generic_address_space(p_a_k0_m0_m1_k1_grid_desc)); - const auto b_k0_n0_n1_k1_grid_desc = *reinterpret_cast( - cast_pointer_to_generic_address_space(p_b_k0_n0_n1_k1_grid_desc)); - const auto c_m0_m10_m11_n0_n10_n11_grid_desc = - *reinterpret_cast( - cast_pointer_to_generic_address_space(p_c_m0_m10_m11_n0_n10_n11_grid_desc)); - const auto c_blockid_to_m0_n0_block_cluster_adaptor = - *reinterpret_cast( - cast_pointer_to_generic_address_space(p_c_blockid_to_m0_n0_block_cluster_adaptor)); - - constexpr index_t shared_block_size = - GridwiseGemm::GetSharedMemoryNumberOfByte() / sizeof(FloatAB); - - __shared__ FloatAB p_shared_block[shared_block_size]; - - GridwiseGemm::Run(p_a_grid, - p_b_grid, - p_c_grid, - p_shared_block, - a_k0_m0_m1_k1_grid_desc, - b_k0_n0_n1_k1_grid_desc, - c_m0_m10_m11_n0_n10_n11_grid_desc, - c_blockid_to_m0_n0_block_cluster_adaptor, - integral_constant{}, - integral_constant{}); -} -#endif template {}), make_tuple(Sequence<0>{})); - return c_blockid_to_m0_n0_block_cluster_adaptor; + return cblockid_to_m0_n0_block_cluster_adaptor; } using AK0M0M1K1GridDesc = decltype(MakeAK0M0M1K1GridDescriptor(AK0MK1GridDesc{})); @@ -328,7 +271,7 @@ struct GridwiseGemmDlops_km_kn_mn_v1r3 const AK0M0M1K1GridDesc& a_k0_m0_m1_k1_grid_desc, const BK0N0N1K1GridDesc& b_k0_n0_n1_k1_grid_desc, const CM0M10M11N0N10N11GridDesc& c_m0_m10_m11_n0_n10_n11_grid_desc, - const CBlockIdToM0N0BlockClusterAdaptor& c_blockid_to_m0_n0_block_cluster_adaptor, + const CBlockIdToM0N0BlockClusterAdaptor& cblockid_to_m0_n0_block_cluster_adaptor, 
integral_constant, integral_constant) { @@ -341,7 +284,7 @@ struct GridwiseGemmDlops_km_kn_mn_v1r3 // divide block work by [M, N] const auto c_m0_n0_block_cluster_idx = - c_blockid_to_m0_n0_block_cluster_adaptor.CalculateBottomIndex( + cblockid_to_m0_n0_block_cluster_adaptor.CalculateBottomIndex( make_multi_index(get_block_1d_id())); // HACK: this force index data into SGPR diff --git a/composable_kernel/include/tensor_operation/gridwise_gemm_dlops_v3.hpp b/composable_kernel/include/tensor_operation/gridwise_gemm_dlops_v3.hpp index 1d8a110e22..0b62fcd554 100644 --- a/composable_kernel/include/tensor_operation/gridwise_gemm_dlops_v3.hpp +++ b/composable_kernel/include/tensor_operation/gridwise_gemm_dlops_v3.hpp @@ -12,7 +12,6 @@ namespace ck { -#if CK_EXPERIMENTAL_PASS_TENSOR_DESCRIPTOR_BY_VALUE template {}, integral_constant{}); } @@ -77,7 +76,7 @@ __global__ void const BGridDesc_E0_E1_N_H0_H1_H2_W0_W1_W2_E2 b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc, const CGridDesc_K0_K1_N_H0_H1_H2_W0_W1_W2 c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc, const DGridDesc_K0_K1_N_H0_H1_Hx_W0_W1_Wx d_k0_k1_n_h0_h1_hx_w0_w1_wx_grid_desc, - const CBlockIdToBlockClusterAdaptor_K_N_H_W c_blockid_to_k_n_h_w_block_cluster_adaptor) + const CBlockIdToBlockClusterAdaptor_K_N_H_W cblockid_to_k_n_h_w_block_cluster_adaptor) { constexpr index_t shared_block_size = GridwiseGemm::GetSharedMemoryNumberOfByte() / sizeof(FloatAB); @@ -93,7 +92,7 @@ __global__ void b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc, c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc, d_k0_k1_n_h0_h1_hx_w0_w1_wx_grid_desc, - c_blockid_to_k_n_h_w_block_cluster_adaptor, + cblockid_to_k_n_h_w_block_cluster_adaptor, integral_constant{}, integral_constant{}); } @@ -122,7 +121,7 @@ __global__ void const BGridDesc_E0_E1_N_H0_H1_H2_W0_W1_W2_E2 b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc, const CGridDesc_K0_K1_N_H0_H1_H2_W0_W1_W2 c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc, const DGridDesc_K0_K1_N_H0_H1_Hx_W0_W1_Wx d_k0_k1_n_h0_h1_hx_w0_w1_wx_grid_desc, - const 
CBlockIdToBlockClusterAdaptor_K_N_H_W c_blockid_to_k_n_h_w_block_cluster_adaptor) + const CBlockIdToBlockClusterAdaptor_K_N_H_W cblockid_to_k_n_h_w_block_cluster_adaptor) { constexpr index_t shared_block_size = GridwiseGemm::GetSharedMemoryNumberOfByte() / sizeof(FloatAB); @@ -139,335 +138,10 @@ __global__ void b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc, c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc, d_k0_k1_n_h0_h1_hx_w0_w1_wx_grid_desc, - c_blockid_to_k_n_h_w_block_cluster_adaptor, + cblockid_to_k_n_h_w_block_cluster_adaptor, integral_constant{}, integral_constant{}); } -#elif CK_EXPERIMENTAL_PASS_TENSOR_DESCRIPTOR_BY_VOID_POINTER -// pass tensor descriptor by CONSTANT void pointer -// CONSTANT is needed to inform compiler void pointers in the kernel signature are pointing to -// non-modifiable parameter address space, so compiler can enable corresponding optimization -template -__global__ void -#if CK_USE_LAUNCH_BOUNDS - __launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU) -#endif - kernel_gemm_dlops_v3(const FloatAB* __restrict__ p_a_grid, - const FloatAB* __restrict__ p_b_grid, - const FloatC* __restrict__ p_bias_grid, - FloatC* __restrict__ p_c_grid, - const void CONSTANT* p_a_e0_e1_k0_k1_e2_grid_desc, - const void CONSTANT* p_b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc, - const void CONSTANT* p_c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc, - const void CONSTANT* p_c_blockid_to_k_n_h_w_block_cluster_adaptor) -{ - // first cast void CONSTANT void* to void* - // second cast void* to Desc* - // the copy constructor of tensor descriptor doesn't take address_space(4) - const auto a_e0_e1_k0_k1_e2_grid_desc = *reinterpret_cast( - cast_pointer_to_generic_address_space(p_a_e0_e1_k0_k1_e2_grid_desc)); - const auto b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc = - *reinterpret_cast( - cast_pointer_to_generic_address_space(p_b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc)); - const auto c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc = - *reinterpret_cast( - 
cast_pointer_to_generic_address_space(p_c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc)); - const auto c_blockid_to_k_n_h_w_block_cluster_adaptor = - *reinterpret_cast( - cast_pointer_to_generic_address_space(p_c_blockid_to_k_n_h_w_block_cluster_adaptor)); - - constexpr index_t shared_block_size = - GridwiseGemm::GetSharedMemoryNumberOfByte() / sizeof(FloatAB); - - __shared__ FloatAB p_shared_block[shared_block_size]; - - GridwiseGemm::ConvBiasActiv(p_a_grid, - p_b_grid, - p_bias_grid, - p_c_grid, - p_shared_block, - a_e0_e1_k0_k1_e2_grid_desc, - b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc, - c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc, - c_blockid_to_k_n_h_w_block_cluster_adaptor, - integral_constant{}, - integral_constant{}); -} - -// pass tensor descriptor by CONSTANT void pointer -// CONSTANT is needed to inform compiler void pointers in the kernel signature are pointing to -// non-modifiable parameter address space, so compiler can enable corresponding optimization -template -__global__ void -#if CK_USE_LAUNCH_BOUNDS - __launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU) -#endif - kernel_gemm_dlops_v3_resize_add( - const FloatAB* __restrict__ p_a_grid, - const FloatAB* __restrict__ p_b_grid, - const FloatC* __restrict__ p_bias_grid, - FloatC* __restrict__ p_d_grid, - const void CONSTANT* p_a_e0_e1_k0_k1_e2_grid_desc, - const void CONSTANT* p_b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc, - const void CONSTANT* p_c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc, - const void CONSTANT* p_d_k0_k1_n_h0_h1_hx_w0_w1_wx_grid_desc, - const void CONSTANT* p_c_blockid_to_k_n_h_w_block_cluster_adaptor) -{ - // first cast void CONSTANT void* to void* - // second cast void* to Desc* - // the copy constructor of tensor descriptor doesn't take address_space(4) - const auto a_e0_e1_k0_k1_e2_grid_desc = *reinterpret_cast( - cast_pointer_to_generic_address_space(p_a_e0_e1_k0_k1_e2_grid_desc)); - const auto b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc = - *reinterpret_cast( - 
cast_pointer_to_generic_address_space(p_b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc)); - const auto c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc = - *reinterpret_cast( - cast_pointer_to_generic_address_space(p_c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc)); - const auto d_k0_k1_n_h0_h1_hx_w0_w1_wx_grid_desc = - *reinterpret_cast( - cast_pointer_to_generic_address_space(p_d_k0_k1_n_h0_h1_hx_w0_w1_wx_grid_desc)); - const auto c_blockid_to_k_n_h_w_block_cluster_adaptor = - *reinterpret_cast( - cast_pointer_to_generic_address_space(p_c_blockid_to_k_n_h_w_block_cluster_adaptor)); - - constexpr index_t shared_block_size = - GridwiseGemm::GetSharedMemoryNumberOfByte() / sizeof(FloatAB); - - __shared__ FloatAB p_shared_block[shared_block_size]; - - GridwiseGemm::ConvBiasActivResizeAdd(p_a_grid, - p_b_grid, - p_bias_grid, - p_d_grid, - p_shared_block, - a_e0_e1_k0_k1_e2_grid_desc, - b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc, - c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc, - d_k0_k1_n_h0_h1_hx_w0_w1_wx_grid_desc, - c_blockid_to_k_n_h_w_block_cluster_adaptor, - integral_constant{}, - integral_constant{}); -} - -template -__global__ void -#if CK_USE_LAUNCH_BOUNDS - __launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU) -#endif - kernel_gemm_dlops_v3_maxpool( - const FloatAB* __restrict__ p_a_grid, - const FloatAB* __restrict__ p_b_grid, - const FloatC* __restrict__ p_bias_grid, - FloatC* __restrict__ p_c_grid, - FloatC* __restrict__ p_d_grid, - const void CONSTANT* p_a_e0_e1_k0_k1_e2_grid_desc, - const void CONSTANT* p_b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc, - const void CONSTANT* p_c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc, - const void CONSTANT* p_d_k0_k1_n_h0_h1_hx_w0_w1_wx_grid_desc, - const void CONSTANT* p_c_blockid_to_k_n_h_w_block_cluster_adaptor) -{ - // first cast void CONSTANT void* to void* - // second cast void* to Desc* - // the copy constructor of tensor descriptor doesn't take address_space(4) - const auto a_e0_e1_k0_k1_e2_grid_desc = *reinterpret_cast( - 
cast_pointer_to_generic_address_space(p_a_e0_e1_k0_k1_e2_grid_desc)); - const auto b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc = - *reinterpret_cast( - cast_pointer_to_generic_address_space(p_b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc)); - const auto c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc = - *reinterpret_cast( - cast_pointer_to_generic_address_space(p_c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc)); - const auto d_k0_k1_n_h0_h1_hx_w0_w1_wx_grid_desc = - *reinterpret_cast( - cast_pointer_to_generic_address_space(p_d_k0_k1_n_h0_h1_hx_w0_w1_wx_grid_desc)); - const auto c_blockid_to_k_n_h_w_block_cluster_adaptor = - *reinterpret_cast( - cast_pointer_to_generic_address_space(p_c_blockid_to_k_n_h_w_block_cluster_adaptor)); - - constexpr index_t shared_block_size = - GridwiseGemm::GetSharedMemoryNumberOfByte() / sizeof(FloatAB); - - __shared__ FloatAB p_shared_block[shared_block_size]; - - GridwiseGemm::ConvBiasActivMaxpool(p_a_grid, - p_b_grid, - p_bias_grid, - p_c_grid, - p_d_grid, - p_shared_block, - a_e0_e1_k0_k1_e2_grid_desc, - b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc, - c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc, - d_k0_k1_n_h0_h1_hx_w0_w1_wx_grid_desc, - c_blockid_to_k_n_h_w_block_cluster_adaptor, - integral_constant{}, - integral_constant{}); -} -#elif CK_EXPERIMENTAL_STATIC_TENSOR_DESCRIPTOR -template -__global__ void -#if CK_USE_LAUNCH_BOUNDS - __launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU) -#endif - kernel_gemm_dlops_v3_resize_add(const FloatAB* __restrict__ p_a_grid, - const FloatAB* __restrict__ p_b_grid, - const FloatC* __restrict__ p_bias_grid, - FloatC* __restrict__ p_d_grid) -{ - constexpr index_t shared_block_size = - GridwiseGemm::GetSharedMemoryNumberOfByte() / sizeof(FloatAB); - - __shared__ FloatAB p_shared_block[shared_block_size]; - - constexpr auto a_e0_e1_k0_k1_e2_grid_desc = AGridDesc_E0_E1_K0_K1_E2{}; - constexpr auto b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc = - BGridDesc_E0_E1_N_H0_H1_H2_W0_W1_W2_E2{}; - constexpr auto 
c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc = CGridDesc_K0_K1_N_H0_H1_H2_W0_W1_W2{}; - constexpr auto d_k0_k1_n_h0_h1_hx_w0_w1_wx_grid_desc = DGridDesc_K0_K1_N_H0_H1_Hx_W0_W1_Wx{}; - constexpr auto c_blockid_to_k_n_h_w_block_cluster_adaptor = - CBlockIdToBlockClusterAdaptor_K_N_H_W{}; - - GridwiseGemm::ConvBiasActivResizeAdd(p_a_grid, - p_b_grid, - p_bias_grid, - p_d_grid, - p_shared_block, - a_e0_e1_k0_k1_e2_grid_desc, - b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc, - c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc, - d_k0_k1_n_h0_h1_hx_w0_w1_wx_grid_desc, - c_blockid_to_k_n_h_w_block_cluster_adaptor, - integral_constant{}, - integral_constant{}); -} - -template -__global__ void -#if CK_USE_LAUNCH_BOUNDS - __launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU) -#endif - kernel_gemm_dlops_v3_maxpool(const FloatAB* __restrict__ p_a_grid, - const FloatAB* __restrict__ p_b_grid, - const FloatC* __restrict__ p_bias_grid, - FloatC* __restrict__ p_c_grid, - FloatC* __restrict__ p_d_grid) -{ - constexpr index_t shared_block_size = - GridwiseGemm::GetSharedMemoryNumberOfByte() / sizeof(FloatAB); - - __shared__ FloatAB p_shared_block[shared_block_size]; - - constexpr auto a_e0_e1_k0_k1_e2_grid_desc = AGridDesc_E0_E1_K0_K1_E2{}; - constexpr auto b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc = - BGridDesc_E0_E1_N_H0_H1_H2_W0_W1_W2_E2{}; - constexpr auto c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc = CGridDesc_K0_K1_N_H0_H1_H2_W0_W1_W2{}; - constexpr auto d_k0_k1_n_h0_h1_hx_w0_w1_wx_grid_desc = DGridDesc_K0_K1_N_H0_H1_Hx_W0_W1_Wx{}; - constexpr auto c_blockid_to_k_n_h_w_block_cluster_adaptor = - CBlockIdToBlockClusterAdaptor_K_N_H_W{}; - - GridwiseGemm::ConvBiasActivMaxpool(p_a_grid, - p_b_grid, - p_bias_grid, - p_c_grid, - p_d_grid, - p_shared_block, - a_e0_e1_k0_k1_e2_grid_desc, - b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc, - c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc, - d_k0_k1_n_h0_h1_hx_w0_w1_wx_grid_desc, - c_blockid_to_k_n_h_w_block_cluster_adaptor, - integral_constant{}, - integral_constant{}); -} 
- -template -__global__ void -#if CK_USE_LAUNCH_BOUNDS - __launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU) -#endif - kernel_gemm_dlops_v3(const FloatAB* __restrict__ p_a_grid, - const FloatAB* __restrict__ p_b_grid, - const FloatC* __restrict__ p_bias_grid, - FloatC* __restrict__ p_c_grid) -{ - constexpr index_t shared_block_size = - GridwiseGemm::GetSharedMemoryNumberOfByte() / sizeof(FloatAB); - - __shared__ FloatAB p_shared_block[shared_block_size]; - - constexpr auto a_e0_e1_k0_k1_e2_grid_desc = AGridDesc_E0_E1_K0_K1_E2{}; - constexpr auto b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc = - BGridDesc_E0_E1_N_H0_H1_H2_W0_W1_W2_E2{}; - constexpr auto c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc = CGridDesc_K0_K1_N_H0_H1_H2_W0_W1_W2{}; - constexpr auto c_blockid_to_k_n_h_w_block_cluster_adaptor = - CBlockIdToBlockClusterAdaptor_K_N_H_W{}; - - GridwiseGemm::ConvBiasActiv(p_a_grid, - p_b_grid, - p_bias_grid, - p_c_grid, - p_shared_block, - a_e0_e1_k0_k1_e2_grid_desc, - b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc, - c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc, - c_blockid_to_k_n_h_w_block_cluster_adaptor, - integral_constant{}, - integral_constant{}); -} -#endif template {}), make_tuple(Sequence<0>{})); - return c_blockid_to_k_n_ho_wo_block_cluster_adaptor; + return cblockid_to_k_n_ho_wo_block_cluster_adaptor; } // using AGridDesc_E0_E1_K0_K1_E2 = @@ -854,10 +528,10 @@ struct GridwiseGemmDlops_km_kn_mn_v3 }; __device__ static constexpr auto GetCBlockIndex( - const CBlockIdToBlockClusterAdaptor_K_N_H_W& c_blockid_to_k_n_h_w_block_cluster_adaptor) + const CBlockIdToBlockClusterAdaptor_K_N_H_W& cblockid_to_k_n_h_w_block_cluster_adaptor) { const auto c_k_n_h_w_block_cluster_idx = - c_blockid_to_k_n_h_w_block_cluster_adaptor.CalculateBottomIndex( + cblockid_to_k_n_h_w_block_cluster_adaptor.CalculateBottomIndex( make_multi_index(get_block_1d_id())); return c_k_n_h_w_block_cluster_idx; } @@ -1245,8 +919,8 @@ struct GridwiseGemmDlops_km_kn_mn_v3 constexpr auto 
HasDoubleTailE1BlockLoop = CalculateHasDoubleTailE1BlockLoop(); // const auto c_k_n_h_w_block_cluster_idx = - // GetCBlockIndex(c_blockid_to_k_n_h_w_block_cluster_adaptor); - // c_blockid_to_k_n_h_w_block_cluster_adaptor.CalculateBottomIndex( + // GetCBlockIndex(cblockid_to_k_n_h_w_block_cluster_adaptor); + // cblockid_to_k_n_h_w_block_cluster_adaptor.CalculateBottomIndex( // make_multi_index(get_block_1d_id())); const index_t k_block_work_id = __builtin_amdgcn_readfirstlane(c_block_idx[I0]); @@ -1614,7 +1288,7 @@ struct GridwiseGemmDlops_km_kn_mn_v3 const BGridDesc_E0_E1_N_H0_H1_H2_W0_W1_W2_E2& b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc, const CGridDesc_K0_K1_N_H0_H1_H2_W0_W1_W2& c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc, const DGridDesc_K0_K1_N_H0_H1_Hx_W0_W1_Wx& d_k0_k1_n_h0_h1_hx_w0_w1_wx_grid_desc, - const CBlockIdToBlockClusterAdaptor_K_N_H_W& c_blockid_to_k_n_h_w_block_cluster_adaptor, + const CBlockIdToBlockClusterAdaptor_K_N_H_W& cblockid_to_k_n_h_w_block_cluster_adaptor, integral_constant) { const auto bias_k0_k1_grid_desc = @@ -1641,7 +1315,7 @@ struct GridwiseGemmDlops_km_kn_mn_v3 c_thread_buf; const auto c_k_n_h_w_block_cluster_idx = - GetCBlockIndex(c_blockid_to_k_n_h_w_block_cluster_adaptor); + GetCBlockIndex(cblockid_to_k_n_h_w_block_cluster_adaptor); const auto c_thread_mtx_index = GetCThreadIndex(); @@ -1680,7 +1354,7 @@ struct GridwiseGemmDlops_km_kn_mn_v3 const AGridDesc_E0_E1_K0_K1_E2& a_e0_e1_k0_k1_e2_grid_desc, const BGridDesc_E0_E1_N_H0_H1_H2_W0_W1_W2_E2& b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc, const CGridDesc_K0_K1_N_H0_H1_H2_W0_W1_W2& c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc, - const CBlockIdToBlockClusterAdaptor_K_N_H_W& c_blockid_to_k_n_h_w_block_cluster_adaptor, + const CBlockIdToBlockClusterAdaptor_K_N_H_W& cblockid_to_k_n_h_w_block_cluster_adaptor, integral_constant, integral_constant) { @@ -1708,7 +1382,7 @@ struct GridwiseGemmDlops_km_kn_mn_v3 c_thread_buf; const auto c_k_n_h_w_block_cluster_idx = - 
GetCBlockIndex(c_blockid_to_k_n_h_w_block_cluster_adaptor); + GetCBlockIndex(cblockid_to_k_n_h_w_block_cluster_adaptor); const auto c_thread_mtx_index = GetCThreadIndex(); @@ -1761,7 +1435,7 @@ struct GridwiseGemmDlops_km_kn_mn_v3 const BGridDesc_E0_E1_N_H0_H1_H2_W0_W1_W2_E2& b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc, const CGridDesc_K0_K1_N_H0_H1_H2_W0_W1_W2& c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc, const DGridDesc_K0_K1_N_H0_H1_Hx_W0_W1_Wx& d_k0_k1_n_h0_h1_hx_w0_w1_wx_grid_desc, - const CBlockIdToBlockClusterAdaptor_K_N_H_W& c_blockid_to_k_n_h_w_block_cluster_adaptor, + const CBlockIdToBlockClusterAdaptor_K_N_H_W& cblockid_to_k_n_h_w_block_cluster_adaptor, integral_constant, integral_constant) { @@ -1791,7 +1465,7 @@ struct GridwiseGemmDlops_km_kn_mn_v3 c_thread_buf; const auto c_k_n_h_w_block_cluster_idx = - GetCBlockIndex(c_blockid_to_k_n_h_w_block_cluster_adaptor); + GetCBlockIndex(cblockid_to_k_n_h_w_block_cluster_adaptor); const auto c_thread_mtx_index = GetCThreadIndex(); @@ -1851,7 +1525,7 @@ struct GridwiseGemmDlops_km_kn_mn_v3 const BGridDesc_E0_E1_N_H0_H1_H2_W0_W1_W2_E2& b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc, const CGridDesc_K0_K1_N_H0_H1_H2_W0_W1_W2& c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc, const DGridDesc_K0_K1_N_H0_H1_Hx_W0_W1_Wx& d_k0_k1_n_h0_h1_hx_w0_w1_wx_grid_desc, - const CBlockIdToBlockClusterAdaptor_K_N_H_W& c_blockid_to_k_n_h_w_block_cluster_adaptor, + const CBlockIdToBlockClusterAdaptor_K_N_H_W& cblockid_to_k_n_h_w_block_cluster_adaptor, integral_constant, integral_constant) { @@ -1879,7 +1553,7 @@ struct GridwiseGemmDlops_km_kn_mn_v3 c_thread_buf; const auto c_k_n_h_w_block_cluster_idx = - GetCBlockIndex(c_blockid_to_k_n_h_w_block_cluster_adaptor); + GetCBlockIndex(cblockid_to_k_n_h_w_block_cluster_adaptor); const auto c_thread_mtx_index = GetCThreadIndex(); diff --git a/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v2r3.hpp b/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v2r3.hpp index 
0db11aedef..751015e6b2 100644 --- a/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v2r3.hpp +++ b/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v2r3.hpp @@ -11,7 +11,6 @@ namespace ck { -#if CK_EXPERIMENTAL_PASS_TENSOR_DESCRIPTOR_BY_VALUE template -__global__ void -#if CK_USE_LAUNCH_BOUNDS - __launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU) -#endif - kernel_gemm_xdlops_v2r3(const FloatAB* __restrict__ p_a_grid, - const FloatAB* __restrict__ p_b_grid, - FloatC* __restrict__ p_c_grid, - const void CONSTANT* p_a_grid_desc_k0_m_k1, - const void CONSTANT* p_b_grid_desc_k0_n_k1, - const void CONSTANT* p_c_grid_desc_m0_n0_m1_n1_m2_m3_m4_n2, - const void CONSTANT* p_a_element_op, - const void CONSTANT* p_b_element_op, - const void CONSTANT* p_c_element_op, - const void CONSTANT* p_block_2_ctile_map) -{ - const auto a_grid_desc_k0_m_k1 = *reinterpret_cast( - cast_pointer_to_generic_address_space(p_a_grid_desc_k0_m_k1)); - const auto b_grid_desc_k0_n_k1 = *reinterpret_cast( - cast_pointer_to_generic_address_space(p_b_grid_desc_k0_n_k1)); - const auto c_grid_desc_m0_n0_m1_n1_m2_m3_m4_n2 = - *reinterpret_cast( - cast_pointer_to_generic_address_space(p_c_grid_desc_m0_n0_m1_n1_m2_m3_m4_n2)); - const auto block_2_ctile_map = *reinterpret_cast( - cast_pointer_to_generic_address_space(p_block_2_ctile_map)); - const auto a_element_op = *reinterpret_cast( - cast_pointer_to_generic_address_space(p_a_element_op)); - const auto b_element_op = *reinterpret_cast( - cast_pointer_to_generic_address_space(p_b_element_op)); - const auto c_element_op = *reinterpret_cast( - cast_pointer_to_generic_address_space(p_c_element_op)); - - __shared__ char p_shared[GridwiseGemm::GetSharedMemoryNumberOfByte()]; - - GridwiseGemm::template Run(p_a_grid, - p_b_grid, - p_c_grid, - p_shared, - a_grid_desc_k0_m_k1, - b_grid_desc_k0_n_k1, - c_grid_desc_m0_n0_m1_n1_m2_m3_m4_n2, - a_element_op, - b_element_op, - c_element_op, - block_2_ctile_map); -} -#endif 
template {}, Sequence<1>{}), make_tuple(Sequence<0, 2>{}, Sequence<1, 3>{})); - const auto c_blockid_to_m00_m01_n00_n01_block_cluster_adaptor = + const auto cblockid_to_m00_m01_n00_n01_block_cluster_adaptor = make_single_stage_tensor_adaptor( make_tuple(make_merge_transform(make_tuple(M00, N00, M01, N01))), make_tuple(Sequence<0, 1, 2, 3>{}), make_tuple(Sequence<0>{})); - const auto c_blockid_to_m0_n0_block_cluster_adaptor = + const auto cblockid_to_m0_n0_block_cluster_adaptor = chain_tensor_adaptors(m00_m01_n00_n01_to_m0_n0_block_cluster_adaptor, - c_blockid_to_m00_m01_n00_n01_block_cluster_adaptor); + cblockid_to_m00_m01_n00_n01_block_cluster_adaptor); - return c_blockid_to_m0_n0_block_cluster_adaptor; + return cblockid_to_m0_n0_block_cluster_adaptor; } using CGridDesc_M0_N0_M1_N1_M2_M3_M4_N2 = decltype(MakeCGridDescriptor_M0_N0_M1_N1_M2_M3_M4_N2(CGridDesc_M_N{})); - using Block2CTileMap = decltype(MakeBlock2CTileMap(CGridDesc_M_N{}, 1, 1)); + using DefaultBlock2CTileMap = decltype(MakeDefaultBlock2CTileMap(CGridDesc_M_N{}, 1, 1)); - template + template __device__ static void Run(const FloatAB* __restrict__ p_a_grid, const FloatAB* __restrict__ p_b_grid, diff --git a/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v2r4.hpp b/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v2r4.hpp index 7983b0e834..ede928e02a 100644 --- a/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v2r4.hpp +++ b/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v2r4.hpp @@ -11,7 +11,6 @@ namespace ck { -#if CK_EXPERIMENTAL_PASS_TENSOR_DESCRIPTOR_BY_VALUE template -__global__ void -#if CK_USE_LAUNCH_BOUNDS - __launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU) -#endif - kernel_gemm_xdlops_v2r4(const FloatAB* __restrict__ p_a_grid, - const FloatAB* __restrict__ p_b_grid, - FloatC* __restrict__ p_c_grid, - const void CONSTANT* p_a_b_k0_m_k1_grid_desc, - const void CONSTANT* p_b_b_k0_n_k1_grid_desc, - const void CONSTANT* 
p_c_m0_n0_m1_n1_m2_m3_m4_n2_grid_desc, - const void CONSTANT* p_a_element_op, - const void CONSTANT* p_b_element_op, - const void CONSTANT* p_c_element_op, - const void CONSTANT* p_block_2_ctile_map) -{ - constexpr index_t shared_block_size = - GridwiseGemm::GetSharedMemoryNumberOfByte() / sizeof(FloatAB); - - const auto a_b_k0_m_k1_grid_desc = *reinterpret_cast( - cast_pointer_to_generic_address_space(p_a_b_k0_m_k1_grid_desc)); - const auto b_b_k0_n_k1_grid_desc = *reinterpret_cast( - cast_pointer_to_generic_address_space(p_b_b_k0_n_k1_grid_desc)); - const auto c_m0_n0_m1_n1_m2_m3_m4_n2_grid_desc = - *reinterpret_cast( - cast_pointer_to_generic_address_space(p_c_m0_n0_m1_n1_m2_m3_m4_n2_grid_desc)); - const auto block_2_ctile_map = *reinterpret_cast( - cast_pointer_to_generic_address_space(p_block_2_ctile_map)); - const auto a_element_op = *reinterpret_cast( - cast_pointer_to_generic_address_space(p_a_element_op)); - const auto b_element_op = *reinterpret_cast( - cast_pointer_to_generic_address_space(p_b_element_op)); - const auto c_element_op = *reinterpret_cast( - cast_pointer_to_generic_address_space(p_c_element_op)); - - __shared__ FloatAB p_shared_block[shared_block_size]; - - GridwiseGemm::template Run(p_a_grid, - p_b_grid, - p_c_grid, - p_shared_block, - a_b_k0_m_k1_grid_desc, - b_b_k0_n_k1_grid_desc, - c_m0_n0_m1_n1_m2_m3_m4_n2_grid_desc, - a_element_op, - b_element_op, - c_element_op, - block_2_ctile_map); -} -#endif template {}, Sequence<1>{}, Sequence<2>{}), make_tuple(Sequence<0>{}, Sequence<1, 3>{}, Sequence<2, 4>{})); - const auto c_blockid_to_kbatch_m00_m01_n00_n01_block_cluster_adaptor = + const auto cblockid_to_kbatch_m00_m01_n00_n01_block_cluster_adaptor = make_single_stage_tensor_adaptor( make_tuple(make_merge_transform(make_tuple(KBatch, M00, N00, M01, N01))), make_tuple(Sequence<0, 1, 2, 3, 4>{}), make_tuple(Sequence<0>{})); - const auto c_blockid_to_kbatch_m0_n0_block_cluster_adaptor = + const auto 
cblockid_to_kbatch_m0_n0_block_cluster_adaptor = chain_tensor_adaptors(kbatch_m00_m01_n00_n01_to_m0_n0_block_cluster_adaptor, - c_blockid_to_kbatch_m00_m01_n00_n01_block_cluster_adaptor); + cblockid_to_kbatch_m00_m01_n00_n01_block_cluster_adaptor); - return c_blockid_to_kbatch_m0_n0_block_cluster_adaptor; + return cblockid_to_kbatch_m0_n0_block_cluster_adaptor; } using CM0N0M1N1M2M3M4N2GridDesc = decltype(MakeCM0N0M1N1M2M3M4N2GridDescriptor(CMNGridDesc{})); diff --git a/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v2r5.hpp b/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v2r5.hpp index 986809de9c..b4d7ef7d84 100644 --- a/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v2r5.hpp +++ b/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v2r5.hpp @@ -277,7 +277,7 @@ struct GridwiseGemm_k0mk1_k0nk1_mn_xdlops_v2r5 // return block_id to C matrix tile idx (m0, n0) mapping __host__ __device__ static constexpr auto - MakeBlock2CTileMap(const CGridDesc_M_N& c_grid_desc_m_n, index_t M01, index_t N01) + MakeDefaultBlock2CTileMap(const CGridDesc_M_N& c_grid_desc_m_n, index_t M01, index_t N01) { const auto M = c_grid_desc_m_n.GetLength(I0); const auto N = c_grid_desc_m_n.GetLength(I1); @@ -298,17 +298,17 @@ struct GridwiseGemm_k0mk1_k0nk1_mn_xdlops_v2r5 make_tuple(Sequence<0>{}, Sequence<1>{}), make_tuple(Sequence<0, 2>{}, Sequence<1, 3>{})); - const auto c_blockid_to_m00_m01_n00_n01_block_cluster_adaptor = + const auto cblockid_to_m00_m01_n00_n01_block_cluster_adaptor = make_single_stage_tensor_adaptor( make_tuple(make_merge_transform(make_tuple(M00, N00, M01, N01))), make_tuple(Sequence<0, 1, 2, 3>{}), make_tuple(Sequence<0>{})); - const auto c_blockid_to_m0_n0_block_cluster_adaptor = + const auto cblockid_to_m0_n0_block_cluster_adaptor = chain_tensor_adaptors(m00_m01_n00_n01_to_m0_n0_block_cluster_adaptor, - c_blockid_to_m00_m01_n00_n01_block_cluster_adaptor); + cblockid_to_m00_m01_n00_n01_block_cluster_adaptor); 
- return c_blockid_to_m0_n0_block_cluster_adaptor; + return cblockid_to_m0_n0_block_cluster_adaptor; } using CGridDesc_M0_N0_M1_N1_M2_M3_M4_N2 = @@ -320,9 +320,9 @@ struct GridwiseGemm_k0mk1_k0nk1_mn_xdlops_v2r5 using C1GridDesc_M0_N0_M1_N1_M2_M3_M4_N2 = decltype(MakeCGridDescriptor_M0_N0_M1_N1_M2_M3_M4_N2(C1GridDesc_M_N{})); - using Block2CTileMap = decltype(MakeBlock2CTileMap(CGridDesc_M_N{}, 1, 1)); + using DefaultBlock2CTileMap = decltype(MakeDefaultBlock2CTileMap(CGridDesc_M_N{}, 1, 1)); - template + template __device__ static void Run(const FloatAB* __restrict__ p_a_grid, const FloatAB* __restrict__ p_b_grid, diff --git a/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v2r6.hpp b/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v2r6.hpp index a96cd6e74a..7d6c86f516 100644 --- a/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v2r6.hpp +++ b/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v2r6.hpp @@ -271,7 +271,7 @@ struct GridwiseGemm_k0mk1_k0nk1_mn_xdlops_v2r6 // return block_id to C matrix tile idx (m0, n0) mapping __host__ __device__ static constexpr auto - MakeBlock2CTileMap(const CGridDesc_M_N& c_grid_desc_m_n, index_t M01, index_t N01) + MakeDefaultBlock2CTileMap(const CGridDesc_M_N& c_grid_desc_m_n, index_t M01, index_t N01) { const auto M = c_grid_desc_m_n.GetLength(I0); const auto N = c_grid_desc_m_n.GetLength(I1); @@ -292,17 +292,17 @@ struct GridwiseGemm_k0mk1_k0nk1_mn_xdlops_v2r6 make_tuple(Sequence<0>{}, Sequence<1>{}), make_tuple(Sequence<0, 2>{}, Sequence<1, 3>{})); - const auto c_blockid_to_m00_m01_n00_n01_block_cluster_adaptor = + const auto cblockid_to_m00_m01_n00_n01_block_cluster_adaptor = make_single_stage_tensor_adaptor( make_tuple(make_merge_transform(make_tuple(M00, N00, M01, N01))), make_tuple(Sequence<0, 1, 2, 3>{}), make_tuple(Sequence<0>{})); - const auto c_blockid_to_m0_n0_block_cluster_adaptor = + const auto cblockid_to_m0_n0_block_cluster_adaptor = 
chain_tensor_adaptors(m00_m01_n00_n01_to_m0_n0_block_cluster_adaptor, - c_blockid_to_m00_m01_n00_n01_block_cluster_adaptor); + cblockid_to_m00_m01_n00_n01_block_cluster_adaptor); - return c_blockid_to_m0_n0_block_cluster_adaptor; + return cblockid_to_m0_n0_block_cluster_adaptor; } using CGridDesc_M0_N0_M1_N1_M2_M3_M4_N2 = @@ -311,9 +311,9 @@ struct GridwiseGemm_k0mk1_k0nk1_mn_xdlops_v2r6 using C0GridDesc_M0_N0_M1_N1_M2_M3_M4_N2 = decltype(MakeCGridDescriptor_M0_N0_M1_N1_M2_M3_M4_N2(C0GridDesc_M_N{})); - using Block2CTileMap = decltype(MakeBlock2CTileMap(CGridDesc_M_N{}, 1, 1)); + using DefaultBlock2CTileMap = decltype(MakeDefaultBlock2CTileMap(CGridDesc_M_N{}, 1, 1)); - template + template __device__ static void Run(const FloatAB* __restrict__ p_a_grid, const FloatAB* __restrict__ p_b_grid, diff --git a/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v3r1.hpp b/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v3r1.hpp index 3022f3f0fc..14d8b10b3d 100644 --- a/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v3r1.hpp +++ b/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v3r1.hpp @@ -288,7 +288,7 @@ struct GridwiseGemm_k0mk1_k0nk1_mn_xdlops_v3r1 // return block_id to C matrix tile idx (m0, n0) mapping __host__ __device__ static constexpr auto - MakeBlock2CTileMap(const CGridDesc_M_N& c_grid_desc_m_n, index_t M01, index_t N01) + MakeDefaultBlock2CTileMap(const CGridDesc_M_N& c_grid_desc_m_n, index_t M01, index_t N01) { const auto M = c_grid_desc_m_n.GetLength(I0); const auto N = c_grid_desc_m_n.GetLength(I1); @@ -309,26 +309,27 @@ struct GridwiseGemm_k0mk1_k0nk1_mn_xdlops_v3r1 make_tuple(Sequence<0>{}, Sequence<1>{}), make_tuple(Sequence<0, 2>{}, Sequence<1, 3>{})); - const auto c_blockid_to_m00_m01_n00_n01_block_cluster_adaptor = + const auto cblockid_to_m00_m01_n00_n01_block_cluster_adaptor = make_single_stage_tensor_adaptor( make_tuple(make_merge_transform(make_tuple(M00, N00, M01, N01))), 
make_tuple(Sequence<0, 1, 2, 3>{}), make_tuple(Sequence<0>{})); - const auto c_blockid_to_m0_n0_block_cluster_adaptor = + const auto cblockid_to_m0_n0_block_cluster_adaptor = chain_tensor_adaptors(m00_m01_n00_n01_to_m0_n0_block_cluster_adaptor, - c_blockid_to_m00_m01_n00_n01_block_cluster_adaptor); + cblockid_to_m00_m01_n00_n01_block_cluster_adaptor); - return c_blockid_to_m0_n0_block_cluster_adaptor; + return cblockid_to_m0_n0_block_cluster_adaptor; } using CGridDescriptor_MBlock_MXdlPerWave_MWaveMPerXdl_NBlock_NXdlPerWave_NWaveNPerXdl = remove_cvref_t; - using Block2CTileMap = remove_cvref_t; + using DefaultBlock2CTileMap = + remove_cvref_t; - template + template __device__ static void Run(const FloatAB* __restrict__ p_a_grid, const FloatAB* __restrict__ p_b_grid, diff --git a/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v3r2.hpp b/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v3r2.hpp index 30059525c7..c566dc046f 100644 --- a/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v3r2.hpp +++ b/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v3r2.hpp @@ -296,7 +296,7 @@ struct GridwiseGemm_k0mk1_k0nk1_mn_xdlops_v3r2 // return block_id to C matrix tile idx (m0, n0) mapping __host__ __device__ static constexpr auto - MakeBlock2CTileMap(const CGridDesc_M_N& c_grid_desc_m_n, index_t M01, index_t N01) + MakeDefaultBlock2CTileMap(const CGridDesc_M_N& c_grid_desc_m_n, index_t M01, index_t N01) { const auto M = c_grid_desc_m_n.GetLength(I0); const auto N = c_grid_desc_m_n.GetLength(I1); @@ -317,17 +317,17 @@ struct GridwiseGemm_k0mk1_k0nk1_mn_xdlops_v3r2 make_tuple(Sequence<0>{}, Sequence<1>{}), make_tuple(Sequence<0, 2>{}, Sequence<1, 3>{})); - const auto c_blockid_to_m00_m01_n00_n01_block_cluster_adaptor = + const auto cblockid_to_m00_m01_n00_n01_block_cluster_adaptor = make_single_stage_tensor_adaptor( make_tuple(make_merge_transform(make_tuple(M00, N00, M01, N01))), make_tuple(Sequence<0, 1, 2, 3>{}), 
make_tuple(Sequence<0>{})); - const auto c_blockid_to_m0_n0_block_cluster_adaptor = + const auto cblockid_to_m0_n0_block_cluster_adaptor = chain_tensor_adaptors(m00_m01_n00_n01_to_m0_n0_block_cluster_adaptor, - c_blockid_to_m00_m01_n00_n01_block_cluster_adaptor); + cblockid_to_m00_m01_n00_n01_block_cluster_adaptor); - return c_blockid_to_m0_n0_block_cluster_adaptor; + return cblockid_to_m0_n0_block_cluster_adaptor; } using CGridDescriptor_MBlock_MXdlPerWave_MWaveMPerXdl_NBlock_NXdlPerWave_NWaveNPerXdl = remove_cvref_t; - using Block2CTileMap = remove_cvref_t; + using DefaultBlock2CTileMap = + remove_cvref_t; - template + template __device__ static void Run(const FloatAB* __restrict__ p_a_grid, const FloatAB* __restrict__ p_b_grid, diff --git a/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v3r3.hpp b/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v3r3.hpp index 7601aa6a07..337550819a 100644 --- a/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v3r3.hpp +++ b/composable_kernel/include/tensor_operation/gridwise_gemm_xdlops_v3r3.hpp @@ -303,7 +303,7 @@ struct GridwiseGemm_k0mk1_k0nk1_mn_xdlops_v3r3 // return block_id to C matrix tile idx (m0, n0) mapping __host__ __device__ static constexpr auto - MakeBlock2CTileMap(const CGridDesc_M_N& c_grid_desc_m_n, index_t M01, index_t N01) + MakeDefaultBlock2CTileMap(const CGridDesc_M_N& c_grid_desc_m_n, index_t M01, index_t N01) { const auto M = c_grid_desc_m_n.GetLength(I0); const auto N = c_grid_desc_m_n.GetLength(I1); @@ -324,17 +324,17 @@ struct GridwiseGemm_k0mk1_k0nk1_mn_xdlops_v3r3 make_tuple(Sequence<0>{}, Sequence<1>{}), make_tuple(Sequence<0, 2>{}, Sequence<1, 3>{})); - const auto c_blockid_to_m00_m01_n00_n01_block_cluster_adaptor = + const auto cblockid_to_m00_m01_n00_n01_block_cluster_adaptor = make_single_stage_tensor_adaptor( make_tuple(make_merge_transform(make_tuple(M00, N00, M01, N01))), make_tuple(Sequence<0, 1, 2, 3>{}), make_tuple(Sequence<0>{})); - const 
auto c_blockid_to_m0_n0_block_cluster_adaptor = + const auto cblockid_to_m0_n0_block_cluster_adaptor = chain_tensor_adaptors(m00_m01_n00_n01_to_m0_n0_block_cluster_adaptor, - c_blockid_to_m00_m01_n00_n01_block_cluster_adaptor); + cblockid_to_m00_m01_n00_n01_block_cluster_adaptor); - return c_blockid_to_m0_n0_block_cluster_adaptor; + return cblockid_to_m0_n0_block_cluster_adaptor; } using CGridDescriptor_MBlock_MXdlPerWave_MWaveMPerXdl_NBlock_NXdlPerWave_NWaveNPerXdl = remove_cvref_t; - using Block2CTileMap = remove_cvref_t; + using DefaultBlock2CTileMap = + remove_cvref_t; - template + template __device__ static void Run(const FloatAB* __restrict__ p_a_grid, const FloatAB* __restrict__ p_b_grid, diff --git a/composable_kernel/include/utility/amd_buffer_addressing.hpp b/composable_kernel/include/utility/amd_buffer_addressing.hpp index 773f7cff2c..6dbbfe327f 100644 --- a/composable_kernel/include/utility/amd_buffer_addressing.hpp +++ b/composable_kernel/include/utility/amd_buffer_addressing.hpp @@ -920,10 +920,10 @@ __device__ void amd_buffer_atomic_add_impl(const typename vector_type::typ // It is user's responsibility to make sure that is true. 
template __device__ typename vector_type_maker::type::type -amd_buffer_load_invalid_element_return_return_zero(const T* p_src_wave, - index_t src_thread_element_offset, - bool src_thread_element_valid, - index_t src_element_space_size) +amd_buffer_load_invalid_element_return_zero(const T* p_src_wave, + index_t src_thread_element_offset, + bool src_thread_element_valid, + index_t src_element_space_size) { const int32x4_t src_wave_buffer_resource = make_wave_buffer_resource(p_src_wave, src_element_space_size); diff --git a/composable_kernel/include/utility/array.hpp b/composable_kernel/include/utility/array.hpp index 911cefd057..4c9dfd9a93 100644 --- a/composable_kernel/include/utility/array.hpp +++ b/composable_kernel/include/utility/array.hpp @@ -49,7 +49,7 @@ template __host__ __device__ constexpr auto make_array(X&& x, Xs&&... xs) { using data_type = remove_cvref_t; - return Array{{std::forward(x), std::forward(xs)...}}; + return Array{std::forward(x), std::forward(xs)...}; } // make empty array diff --git a/composable_kernel/include/utility/dynamic_buffer.hpp b/composable_kernel/include/utility/dynamic_buffer.hpp index 95149bcb2e..3b5d494b86 100644 --- a/composable_kernel/include/utility/dynamic_buffer.hpp +++ b/composable_kernel/include/utility/dynamic_buffer.hpp @@ -56,7 +56,7 @@ struct DynamicBuffer static_assert(scalar_per_x_vector % scalar_per_t_vector == 0, "wrong! 
X need to be multiple T"); -#if CK_USE_AMD_BUFFER_ADDRESSING +#if CK_USE_AMD_BUFFER_LOAD bool constexpr use_amd_buffer_addressing = true; #else bool constexpr use_amd_buffer_addressing = false; @@ -68,8 +68,7 @@ struct DynamicBuffer if constexpr(InvalidElementUseNumericalZeroValue) { - return amd_buffer_load_invalid_element_return_return_zero, - t_per_x>( + return amd_buffer_load_invalid_element_return_zero, t_per_x>( p_data_, i, is_valid_element, element_space_size_); } else @@ -125,7 +124,7 @@ struct DynamicBuffer if constexpr(GetAddressSpace() == AddressSpaceEnum_t::Global) { -#if CK_USE_AMD_BUFFER_ADDRESSING +#if CK_USE_AMD_BUFFER_STORE constexpr index_t t_per_x = scalar_per_x_vector / scalar_per_t_vector; amd_buffer_store, t_per_x>( @@ -291,7 +290,7 @@ struct DynamicBuffer static_assert(GetAddressSpace() == AddressSpaceEnum_t::Global, "only support global mem"); -#if CK_USE_AMD_BUFFER_ADDRESSING +#if CK_USE_AMD_BUFFER_ATOMIC_ADD constexpr index_t t_per_x = scalar_per_x_vector / scalar_per_t_vector; amd_buffer_atomic_add, t_per_x>( diff --git a/composable_kernel/include/utility/integral_constant.hpp b/composable_kernel/include/utility/integral_constant.hpp index 14f3df894b..3d9c0472e7 100644 --- a/composable_kernel/include/utility/integral_constant.hpp +++ b/composable_kernel/include/utility/integral_constant.hpp @@ -13,5 +13,38 @@ struct integral_constant __host__ __device__ constexpr value_type operator()() const noexcept { return value; } }; +template +__host__ __device__ constexpr auto operator+(integral_constant, integral_constant) +{ + return integral_constant{}; +} + +template +__host__ __device__ constexpr auto operator-(integral_constant, integral_constant) +{ + static_assert(Y <= X, "wrong!"); + return integral_constant{}; +} + +template +__host__ __device__ constexpr auto operator*(integral_constant, integral_constant) +{ + return integral_constant{}; +} + +template +__host__ __device__ constexpr auto operator/(integral_constant, integral_constant) 
+{ + static_assert(Y > 0, "wrong!"); + return integral_constant{}; +} + +template +__host__ __device__ constexpr auto operator%(integral_constant, integral_constant) +{ + static_assert(Y > 0, "wrong!"); + return integral_constant{}; +} + } // namespace ck #endif diff --git a/composable_kernel/include/utility/is_known_at_compile_time.hpp b/composable_kernel/include/utility/is_known_at_compile_time.hpp index 9dbe22f2ee..dc44027901 100644 --- a/composable_kernel/include/utility/is_known_at_compile_time.hpp +++ b/composable_kernel/include/utility/is_known_at_compile_time.hpp @@ -17,6 +17,12 @@ struct is_known_at_compile_time static constexpr bool value = false; }; +template <> +struct is_known_at_compile_time +{ + static constexpr bool value = false; +}; + template struct is_known_at_compile_time> { diff --git a/composable_kernel/include/utility/magic_division.hpp b/composable_kernel/include/utility/magic_division.hpp index 8e15c18458..d87be11c75 100644 --- a/composable_kernel/include/utility/magic_division.hpp +++ b/composable_kernel/include/utility/magic_division.hpp @@ -111,24 +111,39 @@ struct MagicDivision } // magic division for uint32_t - __host__ __device__ static constexpr uint32_t + __device__ static constexpr uint32_t DoMagicDivision(uint32_t dividend, uint32_t multiplier, uint32_t shift) { uint32_t tmp = __umulhi(dividend, multiplier); return (tmp + dividend) >> shift; } + __host__ static constexpr uint32_t + DoMagicDivision(uint32_t dividend, uint32_t multiplier, uint32_t shift) + { + uint32_t tmp = static_cast(dividend) * multiplier >> 32; + return (tmp + dividend) >> shift; + } + // magic division for int32_t // HACK: use dividend_i32 as if it's uint32_t, dividend_i32 need to be // non-negative for result to be correct // TODO: figure out how to do magic number divison for int32_t as dividended - __host__ __device__ static constexpr int32_t + __device__ static constexpr int32_t DoMagicDivision(int32_t dividend_i32, uint32_t multiplier, uint32_t shift) { 
uint32_t dividend_u32 = bit_cast(dividend_i32); uint32_t tmp = __umulhi(dividend_u32, multiplier); return (tmp + dividend_u32) >> shift; } + + __host__ static constexpr int32_t + DoMagicDivision(int32_t dividend_i32, uint32_t multiplier, uint32_t shift) + { + uint32_t dividend_u32 = bit_cast(dividend_i32); + uint32_t tmp = static_cast(dividend_u32) * multiplier >> 32; + return (tmp + dividend_u32) >> shift; + } }; } // namespace ck diff --git a/composable_kernel/include/utility/number.hpp b/composable_kernel/include/utility/number.hpp index f8c5643694..6f262a4d9f 100644 --- a/composable_kernel/include/utility/number.hpp +++ b/composable_kernel/include/utility/number.hpp @@ -8,37 +8,5 @@ namespace ck { template using Number = integral_constant; -template -__host__ __device__ constexpr auto operator+(Number, Number) -{ - return Number{}; -} - -template -__host__ __device__ constexpr auto operator-(Number, Number) -{ - static_assert(Y <= X, "wrong!"); - return Number{}; -} - -template -__host__ __device__ constexpr auto operator*(Number, Number) -{ - return Number{}; -} - -template -__host__ __device__ constexpr auto operator/(Number, Number) -{ - static_assert(Y > 0, "wrong!"); - return Number{}; -} - -template -__host__ __device__ constexpr auto operator%(Number, Number) -{ - static_assert(Y > 0, "wrong!"); - return Number{}; -} } // namespace ck #endif diff --git a/composable_kernel/include/utility/utility.hpp b/composable_kernel/include/utility/utility.hpp index c4cc717618..7664066126 100644 --- a/composable_kernel/include/utility/utility.hpp +++ b/composable_kernel/include/utility/utility.hpp @@ -13,6 +13,8 @@ __device__ index_t get_wave_local_1d_id() { return threadIdx.x / get_wave_size() __device__ index_t get_block_1d_id() { return blockIdx.x; } +__device__ index_t get_grid_size() { return gridDim.x; } + } // namespace ck #endif diff --git a/composable_kernel/src/kernel_wrapper/convolution_forward_implicit_gemm_v4r4_dlops_nchw_kcyx_nkhw.cpp 
b/composable_kernel/src/kernel_wrapper/convolution_forward_implicit_gemm_v4r4_dlops_nchw_kcyx_nkhw.cpp index 09a7fffa3e..be197d1383 100644 --- a/composable_kernel/src/kernel_wrapper/convolution_forward_implicit_gemm_v4r4_dlops_nchw_kcyx_nkhw.cpp +++ b/composable_kernel/src/kernel_wrapper/convolution_forward_implicit_gemm_v4r4_dlops_nchw_kcyx_nkhw.cpp @@ -83,7 +83,7 @@ extern "C" __global__ void convolution_forward_implicit_gemm_v4r4_dlops_nchw_kcy void* p_a_k_m0_m1_grid_desc, void* p_b_k_n0_n1_grid_desc, void* p_c_m0_m10_m11_n0_n10_n11_grid_desc, - void* p_c_blockid_to_m0_n0_block_cluster_adaptor) + void* p_cblockid_to_m0_n0_block_cluster_adaptor) { constexpr auto I0 = Number<0>{}; constexpr auto I1 = Number<1>{}; @@ -194,7 +194,7 @@ extern "C" __global__ void convolution_forward_implicit_gemm_v4r4_dlops_nchw_kcy auto b_k_n0_n1_grid_desc = GridwiseGemm::MakeBKN0N1GridDescriptor(b_k_n_grid_desc); auto c_m0_m10_m11_n0_n10_n11_grid_desc = GridwiseGemm::MakeCM0M10M11N0N10N11GridDescriptor(c_m_n_grid_desc); - auto c_blockid_to_m0_n0_block_cluster_adaptor = + auto cblockid_to_m0_n0_block_cluster_adaptor = GridwiseGemm::MakeCBlockIdToM0N0BlockClusterAdaptor(c_m_n_grid_desc); if(hipThreadIdx_x == 0) @@ -203,8 +203,8 @@ extern "C" __global__ void convolution_forward_implicit_gemm_v4r4_dlops_nchw_kcy *static_cast(p_b_k_n0_n1_grid_desc) = b_k_n0_n1_grid_desc; *static_cast( p_c_m0_m10_m11_n0_n10_n11_grid_desc) = c_m0_m10_m11_n0_n10_n11_grid_desc; - *static_cast( - p_c_blockid_to_m0_n0_block_cluster_adaptor) = c_blockid_to_m0_n0_block_cluster_adaptor; + *static_cast( + p_cblockid_to_m0_n0_block_cluster_adaptor) = cblockid_to_m0_n0_block_cluster_adaptor; }; }; @@ -219,7 +219,7 @@ extern "C" __global__ void const void CONSTANT* p_a_k_m0_m1_grid_desc, const void CONSTANT* p_b_k_n0_n1_grid_desc, const void CONSTANT* p_c_m0_m10_m11_n0_n10_n11_grid_desc, - const void CONSTANT* p_c_blockid_to_m0_n0_block_cluster_adaptor) + const void CONSTANT* 
p_cblockid_to_m0_n0_block_cluster_adaptor) { constexpr auto I0 = Number<0>{}; constexpr auto I1 = Number<1>{}; @@ -332,14 +332,13 @@ extern "C" __global__ void GridwiseGemm::MakeBKN0N1GridDescriptor(b_k_n_grid_desc); constexpr auto c_m0_m10_m11_n0_n10_n11_grid_desc_tmp = GridwiseGemm::MakeCM0M10M11N0N10N11GridDescriptor(c_m_n_grid_desc); - constexpr auto c_blockid_to_m0_n0_block_cluster_adaptor_tmp = + constexpr auto cblockid_to_m0_n0_block_cluster_adaptor_tmp = GridwiseGemm::MakeCBlockIdToM0N0BlockClusterAdaptor(c_m_n_grid_desc); - using AKM0M1GridDesc = decltype(a_k_m0_m1_grid_desc_tmp); - using BKN0N1GridDesc = decltype(b_k_n0_n1_grid_desc_tmp); - using CM0M10M11N0N10N11GridDesc = decltype(c_m0_m10_m11_n0_n10_n11_grid_desc_tmp); - using CBlockIdToM0N0BlockClusterAdaptor = - decltype(c_blockid_to_m0_n0_block_cluster_adaptor_tmp); + using AKM0M1GridDesc = decltype(a_k_m0_m1_grid_desc_tmp); + using BKN0N1GridDesc = decltype(b_k_n0_n1_grid_desc_tmp); + using CM0M10M11N0N10N11GridDesc = decltype(c_m0_m10_m11_n0_n10_n11_grid_desc_tmp); + using CBlockIdToM0N0BlockClusterAdaptor = decltype(cblockid_to_m0_n0_block_cluster_adaptor_tmp); const auto a_k_m0_m1_grid_desc = *reinterpret_cast((const void*)p_a_k_m0_m1_grid_desc); @@ -348,9 +347,9 @@ extern "C" __global__ void const auto c_m0_m10_m11_n0_n10_n11_grid_desc = *reinterpret_cast( (const void*)p_c_m0_m10_m11_n0_n10_n11_grid_desc); - const auto c_blockid_to_m0_n0_block_cluster_adaptor = + const auto cblockid_to_m0_n0_block_cluster_adaptor = *reinterpret_cast( - (const void*)p_c_blockid_to_m0_n0_block_cluster_adaptor); + (const void*)p_cblockid_to_m0_n0_block_cluster_adaptor); constexpr index_t shared_block_size = GridwiseGemm::GetSharedMemoryNumberOfByte() / sizeof(FloatAB); @@ -364,7 +363,7 @@ extern "C" __global__ void a_k_m0_m1_grid_desc, b_k_n0_n1_grid_desc, c_m0_m10_m11_n0_n10_n11_grid_desc, - c_blockid_to_m0_n0_block_cluster_adaptor, + cblockid_to_m0_n0_block_cluster_adaptor, integral_constant{}, 
integral_constant{}); }; diff --git a/composable_kernel/src/kernel_wrapper/convolution_forward_implicit_gemm_v4r4_xdlops_nchw_kcyx_nkhw.cpp b/composable_kernel/src/kernel_wrapper/convolution_forward_implicit_gemm_v4r4_xdlops_nchw_kcyx_nkhw.cpp index 51d852617f..ab63c918df 100644 --- a/composable_kernel/src/kernel_wrapper/convolution_forward_implicit_gemm_v4r4_xdlops_nchw_kcyx_nkhw.cpp +++ b/composable_kernel/src/kernel_wrapper/convolution_forward_implicit_gemm_v4r4_xdlops_nchw_kcyx_nkhw.cpp @@ -79,7 +79,7 @@ extern "C" __global__ void convolution_forward_implicit_gemm_v4r4_xdlops_nchw_kc void* p_a_k0_m_k1_grid_desc, void* p_b_k0_n_k1_grid_desc, void* p_c_m0_m1_m2_n_grid_desc, - void* p_c_blockid_to_m0_n0_block_cluster_adaptor) + void* p_cblockid_to_m0_n0_block_cluster_adaptor) { constexpr auto I0 = Number<0>{}; constexpr auto I1 = Number<1>{}; @@ -188,7 +188,7 @@ extern "C" __global__ void convolution_forward_implicit_gemm_v4r4_xdlops_nchw_kc auto c_m0_m1_m2_n_grid_desc = GridwiseGemm::MakeCM0M1M2NGridDescriptor(c_m_n_grid_desc); - auto c_blockid_to_m0_n0_block_cluster_adaptor = + auto cblockid_to_m0_n0_block_cluster_adaptor = GridwiseGemm::MakeCBlockClusterAdaptor(c_m_n_grid_desc); if(hipThreadIdx_x == 0) @@ -199,8 +199,8 @@ extern "C" __global__ void convolution_forward_implicit_gemm_v4r4_xdlops_nchw_kc b_k0_n_k1_grid_desc; *static_cast(p_c_m0_m1_m2_n_grid_desc) = c_m0_m1_m2_n_grid_desc; - *static_cast( - p_c_blockid_to_m0_n0_block_cluster_adaptor) = c_blockid_to_m0_n0_block_cluster_adaptor; + *static_cast( + p_cblockid_to_m0_n0_block_cluster_adaptor) = cblockid_to_m0_n0_block_cluster_adaptor; } }; @@ -215,7 +215,7 @@ extern "C" __global__ void const void CONSTANT* p_a_k0_m_k1_grid_desc, const void CONSTANT* p_b_k0_n_k1_grid_desc, const void CONSTANT* p_c_m0_m1_m2_n_grid_desc, - const void CONSTANT* p_c_blockid_to_m0_n0_block_cluster_adaptor) + const void CONSTANT* p_cblockid_to_m0_n0_block_cluster_adaptor) { constexpr auto I0 = Number<0>{}; @@ -325,12 +325,11 @@ 
extern "C" __global__ void constexpr auto c_m0_m1_m2_n_grid_desc_tmp = GridwiseGemm::MakeCM0M1M2NGridDescriptor(c_m_n_grid_desc); - constexpr auto c_blockid_to_m0_n0_block_cluster_adaptor_tmp = + constexpr auto cblockid_to_m0_n0_block_cluster_adaptor_tmp = GridwiseGemm::MakeCBlockClusterAdaptor(c_m_n_grid_desc); - using CM0M1M2NGridDesc = decltype(c_m0_m1_m2_n_grid_desc_tmp); - using CBlockIdToM0N0BlockClusterAdaptor = - decltype(c_blockid_to_m0_n0_block_cluster_adaptor_tmp); + using CM0M1M2NGridDesc = decltype(c_m0_m1_m2_n_grid_desc_tmp); + using CBlockIdToM0N0BlockClusterAdaptor = decltype(cblockid_to_m0_n0_block_cluster_adaptor_tmp); const auto a_k0_m_k1_grid_desc = *reinterpret_cast((const void*)p_a_k0_m_k1_grid_desc); @@ -338,9 +337,9 @@ extern "C" __global__ void *reinterpret_cast((const void*)p_b_k0_n_k1_grid_desc); const auto c_m0_m1_m2_n_grid_desc = *reinterpret_cast((const void*)p_c_m0_m1_m2_n_grid_desc); - const auto c_blockid_to_m0_n0_block_cluster_adaptor = + const auto cblockid_to_m0_n0_block_cluster_adaptor = *reinterpret_cast( - (const void*)p_c_blockid_to_m0_n0_block_cluster_adaptor); + (const void*)p_cblockid_to_m0_n0_block_cluster_adaptor); constexpr index_t shared_block_size = GridwiseGemm::GetSharedMemoryNumberOfByte() / sizeof(FloatAB); @@ -354,5 +353,5 @@ extern "C" __global__ void a_k0_m_k1_grid_desc, b_k0_n_k1_grid_desc, c_m0_m1_m2_n_grid_desc, - c_blockid_to_m0_n0_block_cluster_adaptor); + cblockid_to_m0_n0_block_cluster_adaptor); }; diff --git a/composable_kernel/src/kernel_wrapper/convolution_forward_implicit_gemm_v4r4_xdlops_nhwc_kyxc_nhwk.cpp b/composable_kernel/src/kernel_wrapper/convolution_forward_implicit_gemm_v4r4_xdlops_nhwc_kyxc_nhwk.cpp index a9258f42c7..f7fab8d87f 100644 --- a/composable_kernel/src/kernel_wrapper/convolution_forward_implicit_gemm_v4r4_xdlops_nhwc_kyxc_nhwk.cpp +++ b/composable_kernel/src/kernel_wrapper/convolution_forward_implicit_gemm_v4r4_xdlops_nhwc_kyxc_nhwk.cpp @@ -79,7 +79,7 @@ extern "C" __global__ void 
convolution_forward_implicit_gemm_v4r4_xdlops_nhwc_ky void* p_a_k0_m_k1_grid_desc, void* p_b_k0_n_k1_grid_desc, void* p_c_m0_m1_m2_n_grid_desc, - void* p_c_blockid_to_m0_n0_block_cluster_adaptor) + void* p_cblockid_to_m0_n0_block_cluster_adaptor) { constexpr auto I0 = Number<0>{}; constexpr auto I1 = Number<1>{}; @@ -188,7 +188,7 @@ extern "C" __global__ void convolution_forward_implicit_gemm_v4r4_xdlops_nhwc_ky auto c_m0_m1_m2_n_grid_desc = GridwiseGemm::MakeCM0M1M2NGridDescriptor(c_m_n_grid_desc); - auto c_blockid_to_m0_n0_block_cluster_adaptor = + auto cblockid_to_m0_n0_block_cluster_adaptor = GridwiseGemm::MakeCBlockClusterAdaptor(c_m_n_grid_desc); if(hipThreadIdx_x == 0) @@ -199,8 +199,8 @@ extern "C" __global__ void convolution_forward_implicit_gemm_v4r4_xdlops_nhwc_ky b_k0_n_k1_grid_desc; *static_cast(p_c_m0_m1_m2_n_grid_desc) = c_m0_m1_m2_n_grid_desc; - *static_cast( - p_c_blockid_to_m0_n0_block_cluster_adaptor) = c_blockid_to_m0_n0_block_cluster_adaptor; + *static_cast( + p_cblockid_to_m0_n0_block_cluster_adaptor) = cblockid_to_m0_n0_block_cluster_adaptor; } }; @@ -215,7 +215,7 @@ extern "C" __global__ void const void CONSTANT* p_a_k0_m_k1_grid_desc, const void CONSTANT* p_b_k0_n_k1_grid_desc, const void CONSTANT* p_c_m0_m1_m2_n_grid_desc, - const void CONSTANT* p_c_blockid_to_m0_n0_block_cluster_adaptor) + const void CONSTANT* p_cblockid_to_m0_n0_block_cluster_adaptor) { constexpr auto I0 = Number<0>{}; @@ -324,12 +324,11 @@ extern "C" __global__ void false>; constexpr auto c_m0_m1_m2_n_grid_desc_tmp = GridwiseGemm::MakeCM0M1M2NGridDescriptor(c_m_n_grid_desc); - constexpr auto c_blockid_to_m0_n0_block_cluster_adaptor_tmp = + constexpr auto cblockid_to_m0_n0_block_cluster_adaptor_tmp = GridwiseGemm::MakeCBlockClusterAdaptor(c_m_n_grid_desc); - using CM0M1M2NGridDesc = decltype(c_m0_m1_m2_n_grid_desc_tmp); - using CBlockIdToM0N0BlockClusterAdaptor = - decltype(c_blockid_to_m0_n0_block_cluster_adaptor_tmp); + using CM0M1M2NGridDesc = 
decltype(c_m0_m1_m2_n_grid_desc_tmp); + using CBlockIdToM0N0BlockClusterAdaptor = decltype(cblockid_to_m0_n0_block_cluster_adaptor_tmp); const auto a_k0_m_k1_grid_desc = *reinterpret_cast((const void*)p_a_k0_m_k1_grid_desc); @@ -337,9 +336,9 @@ extern "C" __global__ void *reinterpret_cast((const void*)p_b_k0_n_k1_grid_desc); const auto c_m0_m1_m2_n_grid_desc = *reinterpret_cast((const void*)p_c_m0_m1_m2_n_grid_desc); - const auto c_blockid_to_m0_n0_block_cluster_adaptor = + const auto cblockid_to_m0_n0_block_cluster_adaptor = *reinterpret_cast( - (const void*)p_c_blockid_to_m0_n0_block_cluster_adaptor); + (const void*)p_cblockid_to_m0_n0_block_cluster_adaptor); constexpr index_t shared_block_size = GridwiseGemm::GetSharedMemoryNumberOfByte() / sizeof(FloatAB); @@ -353,5 +352,5 @@ extern "C" __global__ void a_k0_m_k1_grid_desc, b_k0_n_k1_grid_desc, c_m0_m1_m2_n_grid_desc, - c_blockid_to_m0_n0_block_cluster_adaptor); + cblockid_to_m0_n0_block_cluster_adaptor); }; diff --git a/device_operation/include/convolution_utility.hpp b/device_operation/include/convolution_utility.hpp new file mode 100644 index 0000000000..a6b891dab2 --- /dev/null +++ b/device_operation/include/convolution_utility.hpp @@ -0,0 +1,73 @@ +#ifndef CONVOLUTION_UTILITY_HPP +#define CONVOLUTION_UTILITY_HPP + +#include + +namespace ck { +namespace tensor_operation { + +struct ConvolutionUtility +{ + static std::vector + ComputeOutputSpatialLengths(std::vector input_spatial_lengths, + std::vector filter_spatial_lengths, + std::vector conv_strides, + std::vector conv_dilations, + std::vector in_left_pads, + std::vector in_right_pads) + { + if(input_spatial_lengths.size() == 2) + { + assert(filter_spatial_lengths.size() == 2); + assert(conv_strides.size() == 2); + assert(conv_dilations.size() == 2); + assert(in_left_pads.size() == 2); + assert(in_right_pads.size() == 2); + + const index_t YEff = (filter_spatial_lengths[0] - 1) * conv_dilations[0] + 1; + const index_t XEff = (filter_spatial_lengths[1] - 1) 
* conv_dilations[1] + 1; + + const index_t Hi = input_spatial_lengths[0]; + const index_t Wi = input_spatial_lengths[1]; + + const index_t Ho = + (Hi + in_left_pads[0] + in_right_pads[0] - YEff) / conv_strides[0] + 1; + const index_t Wo = + (Wi + in_left_pads[1] + in_right_pads[1] - XEff) / conv_strides[1] + 1; + + return {Ho, Wo}; + } + else if(input_spatial_lengths.size() == 3) + { + assert(filter_spatial_lengths.size() == 3); + assert(conv_strides.size() == 3); + assert(conv_dilations.size() == 3); + assert(in_left_pads.size() == 3); + assert(in_right_pads.size() == 3); + + const index_t ZEff = (filter_spatial_lengths[0] - 1) * conv_dilations[0] + 1; + const index_t YEff = (filter_spatial_lengths[1] - 1) * conv_dilations[1] + 1; + const index_t XEff = (filter_spatial_lengths[2] - 1) * conv_dilations[2] + 1; + + const index_t Di = input_spatial_lengths[0]; + const index_t Hi = input_spatial_lengths[1]; + const index_t Wi = input_spatial_lengths[2]; + + const index_t Do = + (Di + in_left_pads[0] + in_right_pads[0] - ZEff) / conv_strides[0] + 1; + const index_t Ho = + (Hi + in_left_pads[1] + in_right_pads[1] - YEff) / conv_strides[1] + 1; + const index_t Wo = + (Wi + in_left_pads[2] + in_right_pads[2] - XEff) / conv_strides[2] + 1; + return {Do, Ho, Wo}; + } + else + { + return {}; + } + } +}; + +} // namespace tensor_operation +} // namespace ck +#endif diff --git a/device_operation/include/device_batched_gemm_xdl.hpp b/device_operation/include/device_batched_gemm_xdl.hpp index 02ca716824..bbdb1debb2 100644 --- a/device_operation/include/device_batched_gemm_xdl.hpp +++ b/device_operation/include/device_batched_gemm_xdl.hpp @@ -248,7 +248,7 @@ struct DeviceBatchedGemmXdl c_grid_desc_g_m_n_); block_2_ctile_map_ = - GridwiseBatchedGemm::MakeBlock2CTileMap(c_grid_desc_g_m_n_, M01, N01); + GridwiseBatchedGemm::MakeDefaultBlock2CTileMap(c_grid_desc_g_m_n_, M01, N01); } } @@ -261,7 +261,7 @@ struct DeviceBatchedGemmXdl CGridDesc_G_M_N c_grid_desc_g_m_n_; typename 
GridwiseBatchedGemm::CGridDesc_G_M0_N0_M1_N1_M2_M3_M4_N2 c_grid_desc_g_m0_n0_m1_n1_m2_m3_m4_n2_; - typename GridwiseBatchedGemm::Block2CTileMap block_2_ctile_map_; + typename GridwiseBatchedGemm::DefaultBlock2CTileMap block_2_ctile_map_; index_t M01_; index_t N01_; AElementwiseOperation a_element_op_; @@ -327,7 +327,7 @@ struct DeviceBatchedGemmXdl AElementwiseOperation, BElementwiseOperation, CElementwiseOperation, - remove_reference_t, + remove_reference_t, true>; ave_time = launch_and_time_kernel(kernel, @@ -359,7 +359,7 @@ struct DeviceBatchedGemmXdl AElementwiseOperation, BElementwiseOperation, CElementwiseOperation, - remove_reference_t, + remove_reference_t, false>; ave_time = launch_and_time_kernel(kernel, diff --git a/device_operation/include/device_conv2d_fwd_xdl_c_shuffle_bias_activation_add_nhwc_kyxc_nhwk.hpp b/device_operation/include/device_conv2d_fwd_xdl_c_shuffle_bias_activation_add_nhwc_kyxc_nhwk.hpp index 6baf1483ac..f2a56396b6 100644 --- a/device_operation/include/device_conv2d_fwd_xdl_c_shuffle_bias_activation_add_nhwc_kyxc_nhwk.hpp +++ b/device_operation/include/device_conv2d_fwd_xdl_c_shuffle_bias_activation_add_nhwc_kyxc_nhwk.hpp @@ -590,7 +590,8 @@ struct MakeCGridDescriptor_MBlock_MXdlPerWave_MWaveMPerXdl_NBlock_NXdlPerWave_NWaveNPerXdl( c1_grid_desc_m_n_); - block_2_ctile_map_ = GridwiseGemm::MakeBlock2CTileMap(c_grid_desc_m_n_, M01, N01); + block_2_ctile_map_ = + GridwiseGemm::MakeDefaultBlock2CTileMap(c_grid_desc_m_n_, M01, N01); } } @@ -614,7 +615,7 @@ struct typename GridwiseGemm:: C1GridDescriptor_MBlock_MXdlPerWave_MWaveMPerXdl_NBlock_NXdlPerWave_NWaveNPerXdl c1_grid_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl_; - typename GridwiseGemm::Block2CTileMap block_2_ctile_map_; + typename GridwiseGemm::DefaultBlock2CTileMap block_2_ctile_map_; index_t M01_; index_t N01_; InElementwiseOperation in_element_op_; @@ -694,7 +695,7 @@ struct InElementwiseOperation, WeiElementwiseOperation, OutElementwiseOperation, - 
remove_reference_t, + remove_reference_t, true>; ave_time = launch_and_time_kernel( @@ -738,7 +739,7 @@ struct InElementwiseOperation, WeiElementwiseOperation, OutElementwiseOperation, - remove_reference_t, + remove_reference_t, false>; ave_time = launch_and_time_kernel( diff --git a/device_operation/include/device_conv2d_fwd_xdl_c_shuffle_bias_activation_nhwc_kyxc_nhwk.hpp b/device_operation/include/device_conv2d_fwd_xdl_c_shuffle_bias_activation_nhwc_kyxc_nhwk.hpp index d915feab75..4ee978a7d7 100644 --- a/device_operation/include/device_conv2d_fwd_xdl_c_shuffle_bias_activation_nhwc_kyxc_nhwk.hpp +++ b/device_operation/include/device_conv2d_fwd_xdl_c_shuffle_bias_activation_nhwc_kyxc_nhwk.hpp @@ -561,7 +561,8 @@ struct DeviceConv2dFwdXdl_C_Shuffle_Bias_Activation_Input_N_Hi_Wi_C_Weight_K_Y_X MakeCGridDescriptor_MBlock_MXdlPerWave_MWaveMPerXdl_NBlock_NXdlPerWave_NWaveNPerXdl( c0_grid_desc_m_n_); - block_2_ctile_map_ = GridwiseGemm::MakeBlock2CTileMap(c_grid_desc_m_n_, M01, N01); + block_2_ctile_map_ = + GridwiseGemm::MakeDefaultBlock2CTileMap(c_grid_desc_m_n_, M01, N01); } } @@ -579,7 +580,7 @@ struct DeviceConv2dFwdXdl_C_Shuffle_Bias_Activation_Input_N_Hi_Wi_C_Weight_K_Y_X typename GridwiseGemm:: C0GridDescriptor_MBlock_MXdlPerWave_MWaveMPerXdl_NBlock_NXdlPerWave_NWaveNPerXdl c0_grid_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl_; - typename GridwiseGemm::Block2CTileMap block_2_ctile_map_; + typename GridwiseGemm::DefaultBlock2CTileMap block_2_ctile_map_; index_t M01_; index_t N01_; InElementwiseOperation in_element_op_; @@ -653,7 +654,7 @@ struct DeviceConv2dFwdXdl_C_Shuffle_Bias_Activation_Input_N_Hi_Wi_C_Weight_K_Y_X InElementwiseOperation, WeiElementwiseOperation, OutElementwiseOperation, - remove_reference_t, + remove_reference_t, true>; ave_time = launch_and_time_kernel( @@ -692,7 +693,7 @@ struct DeviceConv2dFwdXdl_C_Shuffle_Bias_Activation_Input_N_Hi_Wi_C_Weight_K_Y_X InElementwiseOperation, WeiElementwiseOperation, 
OutElementwiseOperation, - remove_reference_t, + remove_reference_t, false>; ave_time = launch_and_time_kernel( diff --git a/device_operation/include/device_conv2d_fwd_xdl_c_shuffle_nhwc_kyxc_nhwk.hpp b/device_operation/include/device_conv2d_fwd_xdl_c_shuffle_nhwc_kyxc_nhwk.hpp index 43a10b1627..2c94727f34 100644 --- a/device_operation/include/device_conv2d_fwd_xdl_c_shuffle_nhwc_kyxc_nhwk.hpp +++ b/device_operation/include/device_conv2d_fwd_xdl_c_shuffle_nhwc_kyxc_nhwk.hpp @@ -525,7 +525,8 @@ struct DeviceConv2dFwdXdl_C_Shuffle_Input_N_Hi_Wi_C_Weight_K_Y_X_C_Output_N_Ho_W MakeCGridDescriptor_MBlock_MXdlPerWave_MWaveMPerXdl_NBlock_NXdlPerWave_NWaveNPerXdl( c_grid_desc_m_n_); - block_2_ctile_map_ = GridwiseGemm::MakeBlock2CTileMap(c_grid_desc_m_n_, M01, N01); + block_2_ctile_map_ = + GridwiseGemm::MakeDefaultBlock2CTileMap(c_grid_desc_m_n_, M01, N01); } } @@ -538,7 +539,7 @@ struct DeviceConv2dFwdXdl_C_Shuffle_Input_N_Hi_Wi_C_Weight_K_Y_X_C_Output_N_Ho_W typename GridwiseGemm:: CGridDescriptor_MBlock_MXdlPerWave_MWaveMPerXdl_NBlock_NXdlPerWave_NWaveNPerXdl c_grid_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl_; - typename GridwiseGemm::Block2CTileMap block_2_ctile_map_; + typename GridwiseGemm::DefaultBlock2CTileMap block_2_ctile_map_; index_t M01_; index_t N01_; InElementwiseOperation in_element_op_; @@ -628,7 +629,7 @@ struct DeviceConv2dFwdXdl_C_Shuffle_Input_N_Hi_Wi_C_Weight_K_Y_X_C_Output_N_Ho_W InElementwiseOperation, WeiElementwiseOperation, OutElementwiseOperation, - remove_reference_t, + remove_reference_t, true>; ave_time = launch_and_time_kernel( @@ -662,7 +663,7 @@ struct DeviceConv2dFwdXdl_C_Shuffle_Input_N_Hi_Wi_C_Weight_K_Y_X_C_Output_N_Ho_W InElementwiseOperation, WeiElementwiseOperation, OutElementwiseOperation, - remove_reference_t, + remove_reference_t, false>; ave_time = launch_and_time_kernel( diff --git a/device_operation/include/device_conv2d_fwd_xdl_nhwc_kyxc_nhwk.hpp 
b/device_operation/include/device_conv2d_fwd_xdl_nhwc_kyxc_nhwk.hpp index 6093f31e49..3888e5e9c8 100644 --- a/device_operation/include/device_conv2d_fwd_xdl_nhwc_kyxc_nhwk.hpp +++ b/device_operation/include/device_conv2d_fwd_xdl_nhwc_kyxc_nhwk.hpp @@ -415,7 +415,8 @@ struct DeviceConv2dFwdXdl_Input_N_Hi_Wi_C_Weight_K_Y_X_C_Output_N_Ho_Wo_K c_grid_desc_m0_n0_m1_n1_m2_m3_m4_n2_ = GridwiseGemm::MakeCGridDescriptor_M0_N0_M1_N1_M2_M3_M4_N2(c_grid_desc_m_n_); - block_2_ctile_map_ = GridwiseGemm::MakeBlock2CTileMap(c_grid_desc_m_n_, M01, N01); + block_2_ctile_map_ = + GridwiseGemm::MakeDefaultBlock2CTileMap(c_grid_desc_m_n_, M01, N01); } } @@ -428,7 +429,7 @@ struct DeviceConv2dFwdXdl_Input_N_Hi_Wi_C_Weight_K_Y_X_C_Output_N_Ho_Wo_K CGridDesc_M_N c_grid_desc_m_n_; typename GridwiseGemm::CGridDesc_M0_N0_M1_N1_M2_M3_M4_N2 c_grid_desc_m0_n0_m1_n1_m2_m3_m4_n2_; - typename GridwiseGemm::Block2CTileMap block_2_ctile_map_; + typename GridwiseGemm::DefaultBlock2CTileMap block_2_ctile_map_; index_t M01_; index_t N01_; InElementwiseOperation in_element_op_; @@ -471,7 +472,7 @@ struct DeviceConv2dFwdXdl_Input_N_Hi_Wi_C_Weight_K_Y_X_C_Output_N_Ho_Wo_K arg.N01_)) { throw std::runtime_error( - "wrong! GridwiseGemm_km_kn_m0m1n0n1_xdlops_v2r3 has invalid setting"); + "wrong! 
GridwiseGemm_k0mk1_k0nk1_mn_xdlops_v2r3 has invalid setting"); } const index_t grid_size = GridwiseGemm::CalculateGridSize(arg.c_grid_desc_m_n_); @@ -494,7 +495,7 @@ struct DeviceConv2dFwdXdl_Input_N_Hi_Wi_C_Weight_K_Y_X_C_Output_N_Ho_Wo_K InElementwiseOperation, WeiElementwiseOperation, OutElementwiseOperation, - remove_reference_t, + remove_reference_t, true>; ave_time = launch_and_time_kernel(kernel, @@ -525,7 +526,7 @@ struct DeviceConv2dFwdXdl_Input_N_Hi_Wi_C_Weight_K_Y_X_C_Output_N_Ho_Wo_K InElementwiseOperation, WeiElementwiseOperation, OutElementwiseOperation, - remove_reference_t, + remove_reference_t, false>; ave_time = launch_and_time_kernel(kernel, diff --git a/device_operation/include/device_conv3d_fwd_naive_ndhwc_kzyxc_ndhwk.hpp b/device_operation/include/device_conv3d_fwd_naive_ndhwc_kzyxc_ndhwk.hpp new file mode 100644 index 0000000000..0371c4ab0d --- /dev/null +++ b/device_operation/include/device_conv3d_fwd_naive_ndhwc_kzyxc_ndhwk.hpp @@ -0,0 +1,276 @@ +#ifndef DEVICE_CONV3D_FWD_NAIVE_HPP +#define DEVICE_CONV3D_FWD_NAIVE_HPP + +#include +#include +#include +#include "convolution_utility.hpp" +#include "device.hpp" +#include "device_conv_fwd.hpp" +#include "common_header.hpp" +#include "naive_conv_fwd.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { + +// specialization for 3D conv: in[n, di, hi, wi, c] * wei[k, z, y, x, c] = out[n, do, ho, wo, k] +template +struct DeviceConv3dFwdNaive_Input_N_Di_Hi_Wi_C_Weight_K_Z_Y_X_C_Output_N_Do_Ho_Wo_K + : public DeviceConvFwd + +{ + using DeviceOp = DeviceConv3dFwdNaive_Input_N_Di_Hi_Wi_C_Weight_K_Z_Y_X_C_Output_N_Do_Ho_Wo_K; + + using ADataType = InDataType; + using BDataType = WeiDataType; + using CDataType = OutDataType; + // TODO make A/B datatype different + using ABDataType = InDataType; + + // Argument + struct Argument : public BaseArgument + { + Argument(const InDataType* p_in, + const WeiDataType* p_wei, + OutDataType* p_out, + const index_t N, + const index_t K, + const
index_t C, + std::vector input_spatial_lengths, + std::vector filter_spatial_lengths, + std::vector output_spatial_lengths, + std::vector conv_filter_strides, + std::vector conv_filter_dilations, + std::vector input_left_pads, + std::vector input_right_pads, + InElementwiseOperation in_element_op, + WeiElementwiseOperation wei_element_op, + OutElementwiseOperation out_element_op) + : N_{N}, + K_{K}, + C_{C}, + in_spatial_lengths_{input_spatial_lengths}, + filter_spatial_lengths_{filter_spatial_lengths}, + out_spatial_lengths_{output_spatial_lengths}, + conv_filter_strides_{conv_filter_strides}, + conv_filter_dilations_{conv_filter_dilations}, + in_left_pads_{input_left_pads}, + in_right_pads_{input_right_pads}, + p_in_{p_in}, + p_wei_{p_wei}, + p_out_{p_out}, + in_element_op_{in_element_op}, + wei_element_op_{wei_element_op}, + out_element_op_{out_element_op} + { + } + + // private: + index_t N_; + index_t K_; + index_t C_; + std::vector in_spatial_lengths_; + std::vector filter_spatial_lengths_; + std::vector out_spatial_lengths_; + std::vector conv_filter_strides_; + std::vector conv_filter_dilations_; + std::vector in_left_pads_; + std::vector in_right_pads_; + + const InDataType* p_in_; + const WeiDataType* p_wei_; + OutDataType* p_out_; + + InElementwiseOperation in_element_op_; + WeiElementwiseOperation wei_element_op_; + OutElementwiseOperation out_element_op_; + }; + + // Invoker + struct Invoker : public BaseInvoker + { + using Argument = DeviceOp::Argument; + + float Run(const Argument& arg, int nrepeat = 1) + { + const auto naive_conv3d_fwd = + ref::naive_conv_fwd_ndhwc_kzyxc_ndhwk; + + float ave_time = launch_and_time_kernel(naive_conv3d_fwd, + nrepeat, + dim3(256), + dim3(256), + 0, + arg.p_in_, + arg.p_wei_, + arg.p_out_, + arg.N_, + arg.K_, + arg.C_, + arg.in_spatial_lengths_[0], + arg.in_spatial_lengths_[1], + arg.in_spatial_lengths_[2], + arg.filter_spatial_lengths_[0], + arg.filter_spatial_lengths_[1], + arg.filter_spatial_lengths_[2], + 
arg.out_spatial_lengths_[0], + arg.out_spatial_lengths_[1], + arg.out_spatial_lengths_[2], + arg.conv_filter_strides_[0], + arg.conv_filter_strides_[1], + arg.conv_filter_strides_[2], + arg.conv_filter_dilations_[0], + arg.conv_filter_dilations_[1], + arg.conv_filter_dilations_[2], + arg.in_left_pads_[0], + arg.in_left_pads_[1], + arg.in_left_pads_[2]); + + return ave_time; + } + + // polymorphic + float Run(const BaseArgument* p_arg, int nrepeat = 1) override + { + return Run(*dynamic_cast(p_arg), nrepeat); + } + }; + + static constexpr bool IsValidCompilationParameter() + { + // TODO: properly implement this check + return true; + } + + static bool IsSupportedArgument(const Argument& arg) + { + std::vector out_spatial_lengths = + ConvolutionUtility::ComputeOutputSpatialLengths(arg.in_spatial_lengths_, + arg.filter_spatial_lengths_, + arg.conv_filter_strides_, + arg.conv_filter_dilations_, + arg.in_left_pads_, + arg.in_right_pads_); + + bool out_lengths_are_consistent = out_spatial_lengths[0] == arg.out_spatial_lengths_[0] && + out_spatial_lengths[1] == arg.out_spatial_lengths_[1] && + out_spatial_lengths[2] == arg.out_spatial_lengths_[2]; + return out_lengths_are_consistent; + } + + // polymorphic + bool IsSupportedArgument(const BaseArgument* p_arg) override + { + return IsSupportedArgument(*dynamic_cast(p_arg)); + } + + static auto MakeArgument(const InDataType* p_in, + const WeiDataType* p_wei, + OutDataType* p_out, + const index_t N, + const index_t K, + const index_t C, + std::vector input_spatial_lengths, + std::vector filter_spatial_lengths, + std::vector output_spatial_lengths, + std::vector conv_filter_strides, + std::vector conv_filter_dilations, + std::vector input_left_pads, + std::vector input_right_pads, + InElementwiseOperation in_element_op, + WeiElementwiseOperation wei_element_op, + OutElementwiseOperation out_element_op) + { + return Argument{p_in, + p_wei, + p_out, + N, + K, + C, + input_spatial_lengths, + filter_spatial_lengths, + 
output_spatial_lengths, + conv_filter_strides, + conv_filter_dilations, + input_left_pads, + input_right_pads, + in_element_op, + wei_element_op, + out_element_op}; + } + + static auto MakeInvoker() { return Invoker{}; } + + // polymorphic + std::unique_ptr + MakeArgumentPointer(const void* p_in, + const void* p_wei, + void* p_out, + const index_t N, + const index_t K, + const index_t C, + std::vector input_spatial_lengths, + std::vector filter_spatial_lengths, + std::vector output_spatial_lengths, + std::vector conv_filter_strides, + std::vector conv_filter_dilations, + std::vector input_left_pads, + std::vector input_right_pads, + InElementwiseOperation in_element_op, + WeiElementwiseOperation wei_element_op, + OutElementwiseOperation out_element_op) override + + { + return std::make_unique(static_cast(p_in), + static_cast(p_wei), + static_cast(p_out), + N, + K, + C, + input_spatial_lengths, + filter_spatial_lengths, + output_spatial_lengths, + conv_filter_strides, + conv_filter_dilations, + input_left_pads, + input_right_pads, + in_element_op, + wei_element_op, + out_element_op); + } + + // polymorphic + std::unique_ptr MakeInvokerPointer() override + { + return std::make_unique(Invoker{}); + } + + std::string GetTypeString() const override + { + auto str = std::stringstream(); + + // clang-format off + str << "DeviceConv3dFwdNaive_Input_N_Di_Hi_Wi_C_Weight_K_Z_Y_X_C_Output_N_Do_Ho_Wo_K<>"; + // clang-format on + + return str.str(); + } +}; + +} // namespace device +} // namespace tensor_operation +} // namespace ck +#endif diff --git a/device_operation/include/device_conv3d_fwd_xdl_ndhwc_kzyxc_ndhwk.hpp b/device_operation/include/device_conv3d_fwd_xdl_ndhwc_kzyxc_ndhwk.hpp new file mode 100644 index 0000000000..63a832e150 --- /dev/null +++ b/device_operation/include/device_conv3d_fwd_xdl_ndhwc_kzyxc_ndhwk.hpp @@ -0,0 +1,676 @@ +#ifndef DEVICE_CONV3D_FWD_XDL_HPP +#define DEVICE_CONV3D_FWD_XDL_HPP + +#include +#include +#include +#include "device.hpp" +#include 
"device_conv_fwd.hpp" +#include "common_header.hpp" +#include "tensor_layout.hpp" +#include "convolution_forward_specialization.hpp" +#include "tensor_descriptor.hpp" +#include "tensor_descriptor_helper.hpp" +#include "transform_forward_convolution3d_into_gemm_v4r4r4_ndhwc_kzyxc_ndhwk.hpp" +#include "gridwise_gemm_xdlops_v2r3.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { + +template +__global__ void +#if CK_USE_LAUNCH_BOUNDS + __launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU) +#endif + kernel_gemm_xdlops_v2r3_for_conv3d( + const FloatAB* __restrict__ p_a_grid, + const FloatAB* __restrict__ p_b_grid, + FloatC* __restrict__ p_c_grid, + const index_t num_batches, + const index_t a_batch_stride, + const index_t b_batch_stride, + const index_t c_batch_stride, + const AGridDesc_K0_M_K1 a_grid_desc_k0_m_k1, + const BGridDesc_K0_N_K1 b_grid_desc_k0_n_k1, + const CGridDesc_M0_N0_M1_N1_M2_M3_M4_N2 c_grid_desc_m0_n0_m1_n1_m2_m3_m4_n2, + const AElementwiseOperation a_element_op, + const BElementwiseOperation b_element_op, + const CElementwiseOperation c_element_op, + const Block2CTileMap block_2_ctile_map) +{ + const index_t num_blocks_per_batch = + __builtin_amdgcn_readfirstlane(get_grid_size() / num_batches); + const index_t g_idx = __builtin_amdgcn_readfirstlane(get_block_1d_id() / num_blocks_per_batch); + + const long_index_t a_batch_offset = + __builtin_amdgcn_readfirstlane(static_cast(a_batch_stride) * g_idx); + const long_index_t b_batch_offset = + __builtin_amdgcn_readfirstlane(static_cast(b_batch_stride) * g_idx); + const long_index_t c_batch_offset = + __builtin_amdgcn_readfirstlane(static_cast(c_batch_stride) * g_idx); + + __shared__ char p_shared[GridwiseGemm::GetSharedMemoryNumberOfByte()]; + + GridwiseGemm::template Run(p_a_grid + a_batch_offset, + p_b_grid + b_batch_offset, + p_c_grid + c_batch_offset, + p_shared, + a_grid_desc_k0_m_k1, + b_grid_desc_k0_n_k1, + c_grid_desc_m0_n0_m1_n1_m2_m3_m4_n2, + a_element_op, + 
b_element_op, + c_element_op, + block_2_ctile_map); +} + +// specialization for 3D conv: in[n, di, hi, wi, c] * wei[k, z, y, x, c] = out[n, do, ho, wo, k] +template +struct DeviceConv3dFwdXdl_Input_N_Di_Hi_Wi_C_Weight_K_Z_Y_X_C_Output_N_Do_Ho_Wo_K + : public DeviceConvFwd + +{ + using DeviceOp = DeviceConv3dFwdXdl_Input_N_Di_Hi_Wi_C_Weight_K_Z_Y_X_C_Output_N_Do_Ho_Wo_K; + + using ADataType = InDataType; + using BDataType = WeiDataType; + using CDataType = OutDataType; + // TODO make A/B datatype different + using ABDataType = InDataType; + + static constexpr auto I0 = Number<0>{}; + static constexpr auto I1 = Number<1>{}; + static constexpr auto I2 = Number<2>{}; + static constexpr auto I3 = Number<3>{}; + + /* + * \brief Split the number of batches, \p N, into N = B * N1, such that the memory + * space of input and output tensors stays within the value range of index_t, and each subbatch + * can be handled by GridwiseGemm. + */ + static index_t GetMaxAllowableSubBatchSize(const index_t N, + const index_t K, + const index_t C, + std::vector input_spatial_lengths, + std::vector output_spatial_lengths) + { + const index_t Di = input_spatial_lengths[0]; + const index_t Hi = input_spatial_lengths[1]; + const index_t Wi = input_spatial_lengths[2]; + + const index_t Do = output_spatial_lengths[0]; + const index_t Ho = output_spatial_lengths[1]; + const index_t Wo = output_spatial_lengths[2]; + + // N1 should satisfy that + // 1) N % N1 = 0; + // 2) N1 * (Do * Ho * Wo * K) < (2^31 - 1) + // 3) N1 * (Di * Hi * Wi * C) < (2^31 - 1) + // + // Do NOT confuse (B, N1) in this function with (B, N1) in gridwise GEMM.
+ auto N1 = N + 1; + + const auto stride = + math::max(long_index_t(Do) * Ho * Wo * K, long_index_t(Di) * Hi * Wi * C); + const index_t max_stride = NumericLimits::Max(); + + for(index_t n0 = 1; n0 <= N; ++n0) + { + index_t n1 = N / n0; + if(n0 * n1 == N && long_index_t(n1) * long_index_t(stride) < max_stride) + { + N1 = n1; + break; + } + } + + const auto B = N / N1; + if(B * N1 != N) + { + throw std::runtime_error(__func__ + + std::string(": failed to find num_subbatches for conv3d.\n")); + } + + return N1; + } + + static auto + MakeABCGridDescriptor_A_K0_M_K1_B_K0_N_K1_C_M_N(const index_t N, + const index_t K, + const index_t C, + std::vector input_spatial_lengths, + std::vector filter_spatial_lengths, + std::vector output_spatial_lengths, + std::vector conv_filter_strides, + std::vector conv_filter_dilations, + std::vector input_left_pads, + std::vector input_right_pads) + { + assert(input_spatial_lengths.size() > 2); + assert(filter_spatial_lengths.size() > 2); + assert(conv_filter_strides.size() > 2); + assert(conv_filter_dilations.size() > 2); + assert(input_left_pads.size() > 2); + assert(input_right_pads.size() > 2); + + const index_t Di = input_spatial_lengths[0]; + const index_t Hi = input_spatial_lengths[1]; + const index_t Wi = input_spatial_lengths[2]; + const index_t Z = filter_spatial_lengths[0]; + const index_t Y = filter_spatial_lengths[1]; + const index_t X = filter_spatial_lengths[2]; + + const index_t Do = output_spatial_lengths[0]; + const index_t Ho = output_spatial_lengths[1]; + const index_t Wo = output_spatial_lengths[2]; + + if constexpr(ConvForwardSpecialization == + ConvolutionForwardSpecialization_t::Filter1x1Stride1Pad0) + { + static_assert(ConvForwardSpecialization == -1, "Not implemented!"); + } + else if constexpr(ConvForwardSpecialization == + ConvolutionForwardSpecialization_t::Filter1x1Pad0) + { + + static_assert(ConvForwardSpecialization == -1, "Not implemented!"); + } + else + { + const auto in_desc_n_di_hi_wi_c = + 
make_naive_tensor_descriptor_packed(make_tuple(N, Di, Hi, Wi, C)); + const auto wei_desc_k_z_y_x_c = + make_naive_tensor_descriptor_packed(make_tuple(K, Z, Y, X, C)); + const auto out_desc_n_do_ho_wo_k = + make_naive_tensor_descriptor_packed(make_tuple(N, Do, Ho, Wo, K)); + + const auto descs = + transform_forward_convolution3d_into_gemm_v4r4r4_ndhwc_kzyxc_ndhwk_pad( + in_desc_n_di_hi_wi_c, + wei_desc_k_z_y_x_c, + out_desc_n_do_ho_wo_k, + make_tuple( + conv_filter_strides[0], conv_filter_strides[1], conv_filter_strides[2]), + make_tuple(conv_filter_dilations[0], + conv_filter_dilations[1], + conv_filter_dilations[2]), + make_tuple(input_left_pads[0], input_left_pads[1], input_left_pads[2]), + make_tuple(input_right_pads[0], input_right_pads[1], input_right_pads[2]), + Number{}); + + return descs; + } + } + + using ABCGridDescs = remove_cvref_t; + + using AGridDesc_K0_M_K1 = remove_cvref_t; + using BGridDesc_K0_N_K1 = remove_cvref_t; + using CGridDesc_M_N = remove_cvref_t; + + struct Block2CTileMapMaker + { + Block2CTileMapMaker(index_t num_batches) : num_batches_(num_batches) {} + + __host__ __device__ constexpr auto + MakeBlock2CTileMap(const CGridDesc_M_N& c_grid_desc_m_n, index_t M01, index_t N01) + { + const auto M = c_grid_desc_m_n.GetLength(I0); + const auto N = c_grid_desc_m_n.GetLength(I1); + + constexpr auto M1 = Number{}; + constexpr auto N1 = Number{}; + + const auto M0 = M / M1; + const auto N0 = N / N1; + + const auto M00 = M0 / M01; + const auto N00 = N0 / N01; + + const auto g_m00_m01_n00_n01_to_m0_n0_block_cluster_adaptor = + make_single_stage_tensor_adaptor( + make_tuple(make_insert_transform(num_batches_), + make_unmerge_transform(make_tuple(M00, M01)), + make_unmerge_transform(make_tuple(N00, N01))), + make_tuple(Sequence<>{}, Sequence<0>{}, Sequence<1>{}), + make_tuple(Sequence<0>{}, Sequence<1, 3>{}, Sequence<2, 4>{})); + + const auto globalblockid_to_g_m00_m01_n00_n01_block_cluster_adaptor = + make_single_stage_tensor_adaptor( + 
make_tuple(make_merge_transform(make_tuple(num_batches_, M00, N00, M01, N01))), + make_tuple(Sequence<0, 1, 2, 3, 4>{}), + make_tuple(Sequence<0>{})); + + const auto globalblockid_to_m0_n0_block_cluster_adaptor = + chain_tensor_adaptors(g_m00_m01_n00_n01_to_m0_n0_block_cluster_adaptor, + globalblockid_to_g_m00_m01_n00_n01_block_cluster_adaptor); + + return globalblockid_to_m0_n0_block_cluster_adaptor; + } + + private: + index_t num_batches_; + }; + + using GridwiseGemm = GridwiseGemm_k0mk1_k0nk1_mn_xdlops_v2r3< + BlockSize, + InDataType, + AccDataType, + OutDataType, + InMemoryDataOperationEnum_t::Set, + AGridDesc_K0_M_K1, + BGridDesc_K0_N_K1, + CGridDesc_M_N, + InElementwiseOperation, + WeiElementwiseOperation, + OutElementwiseOperation, + MPerBlock, + NPerBlock, + K0PerBlock, + MPerXDL, + NPerXDL, + K1, + MXdlPerWave, + NXdlPerWave, + ABlockTransferThreadClusterLengths_K0_M_K1, + Sequence<1, 0, 2>, // ABlockTransferThreadClusterArrangeOrder, + Sequence<1, 0, 2>, // ABlockTransferSrcAccessOrder, + 2, + ABlockTransferSrcScalarPerVector, + ABlockTransferDstScalarPerVector_K1, + false, // AThreadTransferSrcResetCoordinateAfterRun, + ABlockLdsAddExtraM, + BBlockTransferThreadClusterLengths_K0_N_K1, + Sequence<1, 0, 2>, // ABlockTransferThreadClusterArrangeOrder, + Sequence<1, 0, 2>, // ABlockTransferSrcAccessOrder, + 2, + BBlockTransferSrcScalarPerVector, + BBlockTransferDstScalarPerVector_K1, + false, // BThreadTransferSrcResetCoordinateAfterRun, + BBlockLdsAddExtraN, + Sequence<2, 3, 0, 1, 7, 5, 4, 6>, + 7, + CThreadTransferDstScalarPerVector>; + + using CGridDesc_M0_N0_M1_N1_M2_M3_M4_N2 = + decltype(GridwiseGemm::MakeCGridDescriptor_M0_N0_M1_N1_M2_M3_M4_N2(CGridDesc_M_N{})); + using Block2CTileMap = + decltype(Block2CTileMapMaker{1}.MakeBlock2CTileMap(CGridDesc_M_N{}, 1, 1)); + + // Argument + struct Argument : public BaseArgument + { + Argument(const InDataType* p_in, + const WeiDataType* p_wei, + OutDataType* p_out, + const index_t N, + const index_t K, + const 
index_t C, + std::vector input_spatial_lengths, + std::vector filter_spatial_lengths, + std::vector output_spatial_lengths, + std::vector conv_filter_strides, + std::vector conv_filter_dilations, + std::vector input_left_pads, + std::vector input_right_pads, + index_t M01, + index_t N01, + InElementwiseOperation in_element_op, + WeiElementwiseOperation wei_element_op, + OutElementwiseOperation out_element_op) + : p_a_grid_{p_in}, + p_b_grid_{p_wei}, + p_c_grid_{p_out}, + M01_{M01}, + N01_{N01}, + in_element_op_{in_element_op}, + wei_element_op_{wei_element_op}, + out_element_op_{out_element_op} + { + const index_t subbatch_size = + GetMaxAllowableSubBatchSize(N, K, C, input_spatial_lengths, output_spatial_lengths); + num_subbatches_ = N / subbatch_size; + + const auto descs = + MakeABCGridDescriptor_A_K0_M_K1_B_K0_N_K1_C_M_N(subbatch_size, + K, + C, + input_spatial_lengths, + filter_spatial_lengths, + output_spatial_lengths, + conv_filter_strides, + conv_filter_dilations, + input_left_pads, + input_right_pads); + + a_grid_desc_k0_m_k1_ = descs[I0]; + b_grid_desc_k0_n_k1_ = descs[I1]; + c_grid_desc_m_n_ = descs[I2]; + + a_batch_stride_ = a_grid_desc_k0_m_k1_.GetElementSpaceSize(); + b_batch_stride_ = 0; + c_batch_stride_ = c_grid_desc_m_n_.GetElementSpaceSize(); + + if(GridwiseGemm::CheckValidity( + a_grid_desc_k0_m_k1_, b_grid_desc_k0_n_k1_, c_grid_desc_m_n_, M01_, N01_)) + { + c_grid_desc_m0_n0_m1_n1_m2_m3_m4_n2_ = + GridwiseGemm::MakeCGridDescriptor_M0_N0_M1_N1_M2_M3_M4_N2(c_grid_desc_m_n_); + + block_2_ctile_map_ = Block2CTileMapMaker{num_subbatches_}.MakeBlock2CTileMap( + c_grid_desc_m_n_, M01, N01); + } + } + + // private: + const InDataType* p_a_grid_; + const WeiDataType* p_b_grid_; + OutDataType* p_c_grid_; + index_t num_subbatches_; + index_t a_batch_stride_; + index_t b_batch_stride_; + index_t c_batch_stride_; + AGridDesc_K0_M_K1 a_grid_desc_k0_m_k1_; + BGridDesc_K0_N_K1 b_grid_desc_k0_n_k1_; + CGridDesc_M_N c_grid_desc_m_n_; + 
CGridDesc_M0_N0_M1_N1_M2_M3_M4_N2 c_grid_desc_m0_n0_m1_n1_m2_m3_m4_n2_; + Block2CTileMap block_2_ctile_map_; + index_t M01_; + index_t N01_; + InElementwiseOperation in_element_op_; + WeiElementwiseOperation wei_element_op_; + OutElementwiseOperation out_element_op_; + }; + + // Invoker + struct Invoker : public BaseInvoker + { + using Argument = DeviceOp::Argument; + + float Run(const Argument& arg, int nrepeat = 1) + { + { + std::cout << "num_batches_of_GEMM = " << arg.num_subbatches_ << std::endl; + std::cout << "a_grid_desc_k0_m_k1{" << arg.a_grid_desc_k0_m_k1_.GetLength(I0) + << ", " << arg.a_grid_desc_k0_m_k1_.GetLength(I1) << ", " + << arg.a_grid_desc_k0_m_k1_.GetLength(I2) << "}" << std::endl; + + std::cout << "b_grid_desc_k0_n_k1{" << arg.b_grid_desc_k0_n_k1_.GetLength(I0) + << ", " << arg.b_grid_desc_k0_n_k1_.GetLength(I1) << ", " + << arg.b_grid_desc_k0_n_k1_.GetLength(I2) << "}" << std::endl; + + std::cout << "c_grid_desc_m_n{ " << arg.c_grid_desc_m_n_.GetLength(I0) << ", " + << arg.c_grid_desc_m_n_.GetLength(I1) << "}" << std::endl; + } + + if(!GridwiseGemm::CheckValidity(arg.a_grid_desc_k0_m_k1_, + arg.b_grid_desc_k0_n_k1_, + arg.c_grid_desc_m_n_, + arg.M01_, + arg.N01_)) + { + throw std::runtime_error( + "wrong! 
GridwiseGemm_k0mk1_k0nk1_mn_xdlops_v2r3 has invalid setting"); + } + + // todo: grid_size times arg.num_subbatches_ + const index_t grid_size = + GridwiseGemm::CalculateGridSize(arg.c_grid_desc_m_n_) * arg.num_subbatches_; + + const auto K0 = arg.a_grid_desc_k0_m_k1_.GetLength(I0); + + const bool has_main_k0_block_loop = GridwiseGemm::CalculateHasMainK0BlockLoop(K0); + + float ave_time = 0; + if(has_main_k0_block_loop) + { + const auto kernel = kernel_gemm_xdlops_v2r3_for_conv3d< + GridwiseGemm, + InDataType, + OutDataType, + remove_reference_t, + remove_reference_t, + remove_reference_t, + InElementwiseOperation, + WeiElementwiseOperation, + OutElementwiseOperation, + remove_reference_t, + true>; + ave_time = launch_and_time_kernel(kernel, + nrepeat, + dim3(grid_size), + dim3(BlockSize), + 0, + arg.p_a_grid_, + arg.p_b_grid_, + arg.p_c_grid_, + arg.num_subbatches_, + arg.a_batch_stride_, + arg.b_batch_stride_, + arg.c_batch_stride_, + arg.a_grid_desc_k0_m_k1_, + arg.b_grid_desc_k0_n_k1_, + arg.c_grid_desc_m0_n0_m1_n1_m2_m3_m4_n2_, + arg.in_element_op_, + arg.wei_element_op_, + arg.out_element_op_, + arg.block_2_ctile_map_); + } + else + { + const auto kernel = kernel_gemm_xdlops_v2r3_for_conv3d< + GridwiseGemm, + InDataType, + OutDataType, + remove_reference_t, + remove_reference_t, + remove_reference_t, + InElementwiseOperation, + WeiElementwiseOperation, + OutElementwiseOperation, + remove_reference_t, + false>; + + ave_time = launch_and_time_kernel(kernel, + nrepeat, + dim3(grid_size), + dim3(BlockSize), + 0, + arg.p_a_grid_, + arg.p_b_grid_, + arg.p_c_grid_, + arg.num_subbatches_, + arg.a_batch_stride_, + arg.b_batch_stride_, + arg.c_batch_stride_, + arg.a_grid_desc_k0_m_k1_, + arg.b_grid_desc_k0_n_k1_, + arg.c_grid_desc_m0_n0_m1_n1_m2_m3_m4_n2_, + arg.in_element_op_, + arg.wei_element_op_, + arg.out_element_op_, + arg.block_2_ctile_map_); + } + + return ave_time; + } + + // polymorphic + float Run(const BaseArgument* p_arg, int nrepeat = 1) override + { + 
return Run(*dynamic_cast(p_arg), nrepeat); + } + }; + + static constexpr bool IsValidCompilationParameter() + { + // TODO: properly implement this check + return true; + } + + static bool IsSupportedArgument(const Argument& arg) + { + return GridwiseGemm::CheckValidity(arg.a_grid_desc_k0_m_k1_, + arg.b_grid_desc_k0_n_k1_, + arg.c_grid_desc_m_n_, + arg.M01_, + arg.N01_); + } + + // polymorphic + bool IsSupportedArgument(const BaseArgument* p_arg) override + { + return IsSupportedArgument(*dynamic_cast(p_arg)); + } + + static auto MakeArgument(const InDataType* p_in, + const WeiDataType* p_wei, + OutDataType* p_out, + const index_t N, + const index_t K, + const index_t C, + std::vector input_spatial_lengths, + std::vector filter_spatial_lengths, + std::vector output_spatial_lengths, + std::vector conv_filter_strides, + std::vector conv_filter_dilations, + std::vector input_left_pads, + std::vector input_right_pads, + InElementwiseOperation in_element_op, + WeiElementwiseOperation wei_element_op, + OutElementwiseOperation out_element_op) + { + return Argument{p_in, + p_wei, + p_out, + N, + K, + C, + input_spatial_lengths, + filter_spatial_lengths, + output_spatial_lengths, + conv_filter_strides, + conv_filter_dilations, + input_left_pads, + input_right_pads, + 1, + 1, + in_element_op, + wei_element_op, + out_element_op}; + } + + static auto MakeInvoker() { return Invoker{}; } + + // polymorphic + std::unique_ptr + MakeArgumentPointer(const void* p_in, + const void* p_wei, + void* p_out, + const index_t N, + const index_t K, + const index_t C, + std::vector input_spatial_lengths, + std::vector filter_spatial_lengths, + std::vector output_spatial_lengths, + std::vector conv_filter_strides, + std::vector conv_filter_dilations, + std::vector input_left_pads, + std::vector input_right_pads, + InElementwiseOperation in_element_op, + WeiElementwiseOperation wei_element_op, + OutElementwiseOperation out_element_op) override + + { + return std::make_unique(static_cast(p_in), + 
static_cast(p_wei), + static_cast(p_out), + N, + K, + C, + input_spatial_lengths, + filter_spatial_lengths, + output_spatial_lengths, + conv_filter_strides, + conv_filter_dilations, + input_left_pads, + input_right_pads, + 1, + 1, + in_element_op, + wei_element_op, + out_element_op); + } + + // polymorphic + std::unique_ptr MakeInvokerPointer() override + { + return std::make_unique(Invoker{}); + } + + std::string GetTypeString() const override + { + auto str = std::stringstream(); + + // clang-format off + str << "DeviceConv3dFwdXdl_Input_N_Di_Hi_Wi_C_Weight_K_Z_Y_X_C_Output_N_Do_Ho_Wo_K" + << "<" + << BlockSize << ", " + << MPerBlock << ", " + << NPerBlock << ", " + << K0PerBlock + << ">"; + // clang-format on + + return str.str(); + } +}; + +} // namespace device +} // namespace tensor_operation +} // namespace ck +#endif diff --git a/device_operation/include/device_gemm_xdl.hpp b/device_operation/include/device_gemm_xdl.hpp index 956c66819e..da047a5140 100644 --- a/device_operation/include/device_gemm_xdl.hpp +++ b/device_operation/include/device_gemm_xdl.hpp @@ -261,7 +261,8 @@ struct DeviceGemmXdl c_grid_desc_m0_n0_m1_n1_m2_m3_m4_n2_ = GridwiseGemm::MakeCGridDescriptor_M0_N0_M1_N1_M2_M3_M4_N2(c_grid_desc_m_n_); - block_2_ctile_map_ = GridwiseGemm::MakeBlock2CTileMap(c_grid_desc_m_n_, M01, N01); + block_2_ctile_map_ = + GridwiseGemm::MakeDefaultBlock2CTileMap(c_grid_desc_m_n_, M01, N01); } } @@ -274,7 +275,7 @@ struct DeviceGemmXdl CGridDesc_M_N c_grid_desc_m_n_; typename GridwiseGemm::CGridDesc_M0_N0_M1_N1_M2_M3_M4_N2 c_grid_desc_m0_n0_m1_n1_m2_m3_m4_n2_; - typename GridwiseGemm::Block2CTileMap block_2_ctile_map_; + typename GridwiseGemm::DefaultBlock2CTileMap block_2_ctile_map_; index_t M01_; index_t N01_; AElementwiseOperation a_element_op_; @@ -309,7 +310,7 @@ struct DeviceGemmXdl arg.N01_)) { throw std::runtime_error( - "wrong! GridwiseGemm_km_kn_m0m1n0n1_xdlops_v2r3 has invalid setting"); + "wrong! 
GridwiseGemm_k0mk1_k0nk1_mn_xdlops_v2r3 has invalid setting"); } const index_t grid_size = GridwiseGemm::CalculateGridSize(arg.c_grid_desc_m_n_); @@ -332,7 +333,7 @@ struct DeviceGemmXdl AElementwiseOperation, BElementwiseOperation, CElementwiseOperation, - remove_reference_t, + remove_reference_t, true>; ave_time = launch_and_time_kernel(kernel, @@ -363,7 +364,7 @@ struct DeviceGemmXdl AElementwiseOperation, BElementwiseOperation, CElementwiseOperation, - remove_reference_t, + remove_reference_t, false>; ave_time = launch_and_time_kernel(kernel, diff --git a/device_operation/include/device_gemm_xdl_c_shuffle.hpp b/device_operation/include/device_gemm_xdl_c_shuffle.hpp index 76f1b3e44e..9aa1ab158d 100644 --- a/device_operation/include/device_gemm_xdl_c_shuffle.hpp +++ b/device_operation/include/device_gemm_xdl_c_shuffle.hpp @@ -221,7 +221,8 @@ struct DeviceGemmXdl_C_Shuffle MakeCGridDescriptor_MBlock_MXdlPerWave_MWaveMPerXdl_NBlock_NXdlPerWave_NWaveNPerXdl( c_grid_desc_m_n_); - block_2_ctile_map_ = GridwiseGemm::MakeBlock2CTileMap(c_grid_desc_m_n_, M01, N01); + block_2_ctile_map_ = + GridwiseGemm::MakeDefaultBlock2CTileMap(c_grid_desc_m_n_, M01, N01); } } @@ -235,7 +236,7 @@ struct DeviceGemmXdl_C_Shuffle typename GridwiseGemm:: CGridDescriptor_MBlock_MXdlPerWave_MWaveMPerXdl_NBlock_NXdlPerWave_NWaveNPerXdl c_grid_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl_; - typename GridwiseGemm::Block2CTileMap block_2_ctile_map_; + typename GridwiseGemm::DefaultBlock2CTileMap block_2_ctile_map_; index_t M01_; index_t N01_; AElementwiseOperation a_element_op_; @@ -295,7 +296,7 @@ struct DeviceGemmXdl_C_Shuffle AElementwiseOperation, BElementwiseOperation, CElementwiseOperation, - remove_reference_t, + remove_reference_t, true>; ave_time = launch_and_time_kernel( @@ -329,7 +330,7 @@ struct DeviceGemmXdl_C_Shuffle AElementwiseOperation, BElementwiseOperation, CElementwiseOperation, - remove_reference_t, + remove_reference_t, false>; ave_time = 
launch_and_time_kernel( diff --git a/device_operation/include/device_gemm_xdl_c_shuffle_bias_2d.hpp b/device_operation/include/device_gemm_xdl_c_shuffle_bias_2d.hpp index fcdc512477..d1e0d6d84e 100644 --- a/device_operation/include/device_gemm_xdl_c_shuffle_bias_2d.hpp +++ b/device_operation/include/device_gemm_xdl_c_shuffle_bias_2d.hpp @@ -235,7 +235,8 @@ struct DeviceGemmXdl_C_Shuffle_Bias_2d MakeCGridDescriptor_MBlock_MXdlPerWave_MWaveMPerXdl_NBlock_NXdlPerWave_NWaveNPerXdl( c_grid_desc_m_n_); - block_2_ctile_map_ = GridwiseGemm::MakeBlock2CTileMap(c_grid_desc_m_n_, M01, N01); + block_2_ctile_map_ = + GridwiseGemm::MakeDefaultBlock2CTileMap(c_grid_desc_m_n_, M01, N01); } } @@ -254,7 +255,7 @@ struct DeviceGemmXdl_C_Shuffle_Bias_2d typename GridwiseGemm:: CGridDescriptor_MBlock_MXdlPerWave_MWaveMPerXdl_NBlock_NXdlPerWave_NWaveNPerXdl c_grid_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl_; - typename GridwiseGemm::Block2CTileMap block_2_ctile_map_; + typename GridwiseGemm::DefaultBlock2CTileMap block_2_ctile_map_; index_t M01_; index_t N01_; AElementwiseOperation a_element_op_; @@ -320,7 +321,7 @@ struct DeviceGemmXdl_C_Shuffle_Bias_2d AElementwiseOperation, BElementwiseOperation, CElementwiseOperation, - remove_reference_t, + remove_reference_t, true>; ave_time = launch_and_time_kernel( @@ -359,7 +360,7 @@ struct DeviceGemmXdl_C_Shuffle_Bias_2d AElementwiseOperation, BElementwiseOperation, CElementwiseOperation, - remove_reference_t, + remove_reference_t, false>; ave_time = launch_and_time_kernel( diff --git a/device_operation/include/device_gemm_xdl_c_shuffle_bias_activation.hpp b/device_operation/include/device_gemm_xdl_c_shuffle_bias_activation.hpp index 82dcb5b5c2..ac907b17e0 100644 --- a/device_operation/include/device_gemm_xdl_c_shuffle_bias_activation.hpp +++ b/device_operation/include/device_gemm_xdl_c_shuffle_bias_activation.hpp @@ -240,7 +240,8 @@ struct DeviceGemmXdl_C_Shuffle_Bias_Activation 
MakeCGridDescriptor_MBlock_MXdlPerWave_MWaveMPerXdl_NBlock_NXdlPerWave_NWaveNPerXdl( c0_grid_desc_m_n_); - block_2_ctile_map_ = GridwiseGemm::MakeBlock2CTileMap(c_grid_desc_m_n_, M01, N01); + block_2_ctile_map_ = + GridwiseGemm::MakeDefaultBlock2CTileMap(c_grid_desc_m_n_, M01, N01); } } @@ -259,7 +260,7 @@ struct DeviceGemmXdl_C_Shuffle_Bias_Activation typename GridwiseGemm:: C0GridDescriptor_MBlock_MXdlPerWave_MWaveMPerXdl_NBlock_NXdlPerWave_NWaveNPerXdl c0_grid_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl_; - typename GridwiseGemm::Block2CTileMap block_2_ctile_map_; + typename GridwiseGemm::DefaultBlock2CTileMap block_2_ctile_map_; index_t M01_; index_t N01_; AElementwiseOperation a_element_op_; @@ -325,7 +326,7 @@ struct DeviceGemmXdl_C_Shuffle_Bias_Activation AElementwiseOperation, BElementwiseOperation, CElementwiseOperation, - remove_reference_t, + remove_reference_t, true>; ave_time = launch_and_time_kernel( @@ -364,7 +365,7 @@ struct DeviceGemmXdl_C_Shuffle_Bias_Activation AElementwiseOperation, BElementwiseOperation, CElementwiseOperation, - remove_reference_t, + remove_reference_t, false>; ave_time = launch_and_time_kernel( diff --git a/device_operation/include/device_gemm_xdl_c_shuffle_bias_activation_add.hpp b/device_operation/include/device_gemm_xdl_c_shuffle_bias_activation_add.hpp index f5113613e5..ba6e47280b 100644 --- a/device_operation/include/device_gemm_xdl_c_shuffle_bias_activation_add.hpp +++ b/device_operation/include/device_gemm_xdl_c_shuffle_bias_activation_add.hpp @@ -274,7 +274,8 @@ struct DeviceGemmXdl_C_Shuffle_Bias_Activation_Add MakeCGridDescriptor_MBlock_MXdlPerWave_MWaveMPerXdl_NBlock_NXdlPerWave_NWaveNPerXdl( c1_grid_desc_m_n_); - block_2_ctile_map_ = GridwiseGemm::MakeBlock2CTileMap(c_grid_desc_m_n_, M01, N01); + block_2_ctile_map_ = + GridwiseGemm::MakeDefaultBlock2CTileMap(c_grid_desc_m_n_, M01, N01); } } @@ -298,7 +299,7 @@ struct DeviceGemmXdl_C_Shuffle_Bias_Activation_Add typename GridwiseGemm:: 
C1GridDescriptor_MBlock_MXdlPerWave_MWaveMPerXdl_NBlock_NXdlPerWave_NWaveNPerXdl c1_grid_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl_; - typename GridwiseGemm::Block2CTileMap block_2_ctile_map_; + typename GridwiseGemm::DefaultBlock2CTileMap block_2_ctile_map_; index_t M01_; index_t N01_; AElementwiseOperation a_element_op_; @@ -370,7 +371,7 @@ struct DeviceGemmXdl_C_Shuffle_Bias_Activation_Add AElementwiseOperation, BElementwiseOperation, CElementwiseOperation, - remove_reference_t, + remove_reference_t, true>; ave_time = launch_and_time_kernel( @@ -414,7 +415,7 @@ struct DeviceGemmXdl_C_Shuffle_Bias_Activation_Add AElementwiseOperation, BElementwiseOperation, CElementwiseOperation, - remove_reference_t, + remove_reference_t, false>; ave_time = launch_and_time_kernel( diff --git a/device_operation/include/tensor_layout.hpp b/device_operation/include/tensor_layout.hpp index b69572d2c0..4fae86d875 100644 --- a/device_operation/include/tensor_layout.hpp +++ b/device_operation/include/tensor_layout.hpp @@ -45,6 +45,18 @@ struct NKHW : public BaseTensorLayout { }; +struct NDHWC : public BaseTensorLayout +{ +}; + +struct KZYXC : public BaseTensorLayout +{ +}; + +struct NDHWK : public BaseTensorLayout +{ +}; + } // namespace convolution } // namespace tensor_layout diff --git a/device_operation_reference/include/naive_conv_fwd.hpp b/device_operation_reference/include/naive_conv_fwd.hpp new file mode 100644 index 0000000000..120938f072 --- /dev/null +++ b/device_operation_reference/include/naive_conv_fwd.hpp @@ -0,0 +1,122 @@ +#ifndef NAIVE_CONV_FWD_HPP +#define NAIVE_CONV_FWD_HPP + +namespace ck { +namespace ref { + +/* + * \brief naive implementation of 3D convolution. Layout is (NDHWC, KZYXC, NDHWK). 
+ * + * \param N number of batches + * \param K number of filters + * \param C number of channels of weight + * \param (Di, Hi, Wi) depth, height and width dimension of data + * \param (Z, Y, X) depth, height and width dimensions of weights + * \param (Do, Ho, Wo) depth, height and width dimension of output + * \param (stride_z, stride_y, stride_x) strides + * \param (dilation_z, dilation_y, dilation_x) dilations + * \param (pad_z, pad_y, pad_x) pads + */ +template +__global__ void naive_conv_fwd_ndhwc_kzyxc_ndhwk(const TIn* __restrict__ p_in, + const TWei* __restrict__ p_wei, + TOut* __restrict__ p_out, + index_t N, + index_t K, + index_t C, + index_t Di, + index_t Hi, + index_t Wi, + index_t Z, + index_t Y, + index_t X, + index_t Do, + index_t Ho, + index_t Wo, + index_t stride_z, + index_t stride_y, + index_t stride_x, + index_t dilation_z, + index_t dilation_y, + index_t dilation_x, + index_t pad_z, + index_t pad_y, + index_t pad_x) +{ + const index_t tid = blockIdx.x * blockDim.x + threadIdx.x; + const index_t num_threads = blockDim.x * gridDim.x; + const long_index_t output_length = N * Do * Ho * Wo * K; + + const index_t out_strides[] = {Do * Ho * Wo * K, Ho * Wo * K, Wo * K, K}; + const index_t in_strides[] = {Di * Hi * Wi * C, Hi * Wi * C, Wi * C, C}; + const index_t wei_strides[] = {Z * Y * X * C, Y * X * C, X * C, C}; + + constexpr auto in_op = InElementwiseOperation{}; + constexpr auto wei_op = WeiElementwiseOperation{}; + constexpr auto out_op = OutElementwiseOperation{}; + + TIn in_val; + TWei wei_val; + TOut out_val; + + for(long_index_t ii = tid; ii < output_length; ii += num_threads) + { + const index_t n = ii / out_strides[0]; + index_t k = ii - n * out_strides[0]; + const index_t dO = k / out_strides[1]; + k -= dO * out_strides[1]; + const index_t ho = k / out_strides[2]; + k -= ho * out_strides[2]; + const index_t wo = k / out_strides[3]; + k -= wo * out_strides[3]; + + TAcc acc = static_cast(0); + + const TIn* in_n = p_in + static_cast(n) * 
in_strides[0]; + const TWei* wei_k = p_wei + static_cast(k) * wei_strides[0]; + + for(index_t z = 0; z < Z; ++z) + { + index_t di = stride_z * dO - pad_z + dilation_z * z; + const TIn* in_n_di = in_n + di * in_strides[1]; + const TWei* wei_k_z = wei_k + z * wei_strides[1]; + + for(index_t y = 0; y < Y; ++y) + { + index_t hi = stride_y * ho - pad_y + dilation_y * y; + const TIn* in_n_di_hi = in_n_di + hi * in_strides[2]; + const TWei* wei_k_z_y = wei_k_z + y * wei_strides[2]; + + for(index_t x = 0; x < X; ++x) + { + index_t wi = stride_x * wo - pad_x + dilation_x * x; + const TIn* in_n_di_hi_wi = in_n_di_hi + wi * in_strides[3]; + const TWei* wei_k_z_y_x = wei_k_z_y + x * wei_strides[3]; + + if(di >= 0 && di < Di && hi >= 0 && hi < Hi && wi >= 0 && wi < Wi) + { + for(index_t c = 0; c < C; ++c) + { + in_op(in_val, in_n_di_hi_wi[c]); + wei_op(wei_val, wei_k_z_y_x[c]); + acc += in_val * wei_val; + } + } + } + } + } + + out_op(out_val, static_cast(acc)); + p_out[ii] = out_val; + } +} +} // namespace ref +} // namespace ck + +#endif diff --git a/example/10_conv3d_fwd_xdl/README.md b/example/10_conv3d_fwd_xdl/README.md new file mode 100644 index 0000000000..06339b74e5 --- /dev/null +++ b/example/10_conv3d_fwd_xdl/README.md @@ -0,0 +1,57 @@ +# Instructions for ```conv3d_fwd_xdl``` Example + +## Docker script +```bash +docker run \ +-it \ +--rm \ +--privileged \ +--group-add sudo \ +-w /root/workspace \ +-v ${PATH_TO_LOCAL_WORKSPACE}:/root/workspace \ +rocm/tensorflow:rocm4.3.1-tf2.6-dev \ +/bin/bash +``` + +## Build ```conv3d_fwd_xdl``` +```bash +mkdir build && cd build +``` + +```bash +# Need to specify target ID, example below is gfx908 +cmake \ +-D BUILD_DEV=OFF \ +-D CMAKE_BUILD_TYPE=Release \ +-D CMAKE_CXX_FLAGS="-DCK_AMD_GPU_GFX908 --amdgpu-target=gfx908 -O3 " \ +-D CMAKE_CXX_COMPILER=/opt/rocm/bin/hipcc \ +-D CMAKE_PREFIX_PATH=/opt/rocm \ +.. 
+``` + +```bash + make -j conv3d_fwd_xdl +``` + +## Run ```conv3d_fwd_xdl``` +```bash +#arg1: verification (0=no, 1=yes) +#arg2: initialization (0=no init, 1=integer value, 2=decimal value) +#arg3: run kernel # of times (>1) +#arg4 to 24: N, K, C, Z, Y, X, Di, Hi, Wi, Sz, Sy, Sx, Dz, Dy, Dx, leftPz, LeftPy, LeftPx, RightPz, RightPy, RightPx +./example/conv3d_fwd_xdl 0 1 5 +``` + +Result (MI100 dynamic frequency) +``` +in: dim 5, lengths {4, 71, 71, 71, 192}, strides {68718912, 967872, 13632, 192, 1} +wei: dim 5, lengths {256, 3, 3, 3, 192}, strides {5184, 1728, 576, 192, 1} +out: dim 5, lengths {4, 36, 36, 36, 256}, strides {11943936, 331776, 9216, 256, 1} +a_grid_desc_b_k0_m_k1{1, 648, 186624, 8} +b_grid_desc_b_k0_n_k1{1, 648, 256, 8} +launch_and_time_kernel: grid_dim {1458, 1, 1}, block_dim {256, 1, 1} +Warm up +Start running 5 times... +Perf: 4.49466 ms, 110.206 TFlops, 144.161 GB/s +``` + diff --git a/example/10_conv3d_fwd_xdl/conv3d_fwd_xdl.cpp b/example/10_conv3d_fwd_xdl/conv3d_fwd_xdl.cpp new file mode 100644 index 0000000000..89d2933619 --- /dev/null +++ b/example/10_conv3d_fwd_xdl/conv3d_fwd_xdl.cpp @@ -0,0 +1,281 @@ +#include +#include +#include +#include +#include +#include +#include "config.hpp" +#include "print.hpp" +#include "device.hpp" +#include "host_tensor.hpp" +#include "host_tensor_generator.hpp" +#include "host_gemm.hpp" +#include "device_tensor.hpp" +#include "device_base.hpp" +#include "device_conv3d_fwd_xdl_ndhwc_kzyxc_ndhwk.hpp" +#include "device_conv3d_fwd_naive_ndhwc_kzyxc_ndhwk.hpp" +#include "convolution_utility.hpp" + +// convolution data type +using InDataType = ck::half_t; +using WeiDataType = ck::half_t; +using OutDataType = ck::half_t; +using AccDataType = float; + +using InElementOp = ck::tensor_operation::element_wise::PassThrough; +using WeiElementOp = ck::tensor_operation::element_wise::PassThrough; +using OutElementOp = ck::tensor_operation::element_wise::PassThrough; + +using F16 = ck::half_t; +using F32 = float; + +template 
+using S = ck::Sequence; + +using InLayout = ck::tensor_layout::convolution::NDHWC; +using WeiLayout = ck::tensor_layout::convolution::KZYXC; +using OutLayout = ck::tensor_layout::convolution::NDHWK; + +static constexpr auto ConvFwdDefault = + ck::tensor_operation::device::ConvolutionForwardSpecialization_t::Default; + +using DeviceConv3dFwdInstance = ck::tensor_operation::device:: + DeviceConv3dFwdXdl_Input_N_Di_Hi_Wi_C_Weight_K_Z_Y_X_C_Output_N_Do_Ho_Wo_K< + InDataType, // InData + WeiDataType, // WeiData + OutDataType, // OutData + AccDataType, // AccData + InElementOp, // InElementwise Operation + WeiElementOp, // WeiElementwise Operation + OutElementOp, // OutElementwise Operation + ConvFwdDefault, // ConvForwardSpecialization + 256, // BlockSize + 128, // MPerBlock + 256, // NPerBlock + 4, // K0PerBlock + 8, // K1. K0PerBlock * K1 = KPerBlock + 32, // MPerXDL + 32, // NPerXDL. Each XDL computes a matrix of size (MPerXDL, NPerBlock) + 2, // MXdlPerWave + 4, // NXdlPerWave + S<4, 64, 1>, // ABlockTransferThreadClusterLengths_K0_M_K1 + S<1, 0, 2>, // ABlockTransferThreadClusterArrangeOrder + S<1, 0, 2>, // ABlockTransferSrcAccessOrder + 2, // ABlockTransferSrcVectorDim + 8, // ABlockTransferSrcScalarPerVector + 8, // ABlockTransferDstScalarPerVector_K1 + true, // ABlockLdsAddExtraM + S<4, 64, 1>, // BBlockTransferThreadClusterLengths_K0_N_K1 + S<1, 0, 2>, // BBlockTransferThreadClusterArrangeOrder + S<1, 0, 2>, // BBlockTransferSrcAccessOrder + 2, // BBlockTransferSrcVectorDim + 8, // BBlockTransferSrcScalarPerVector + 8, // BBlockTransferDstScalarPerVector_K1 + true, // BBlockLdsAddExtraN + 7, // CThreadTransferSrcDstVectorDim + 1>; // CThreadTransferDstScalarPerVector + +int main(int argc, char* argv[]) +{ + bool do_verification = false; + int init_method = 0; + int nrepeat = 5; + + // convolution shape + ck::index_t N = 4; + ck::index_t K = 256; + ck::index_t C = 192; + std::vector in_spatial_lengths = {71, 71, 71}; + std::vector filter_spatial_lengths = {3, 
3, 3}; + std::vector conv_filter_strides = {2, 2, 2}; + std::vector conv_filter_dilations = {1, 1, 1}; + std::vector in_left_pads = {1, 1, 1}; + std::vector in_right_pads = {1, 1, 1}; + + if(argc == 4) + { + do_verification = std::stoi(argv[1]); + init_method = std::stoi(argv[2]); + nrepeat = std::stoi(argv[3]); + } + else if(argc == 25) + { + do_verification = std::stoi(argv[1]); + init_method = std::stoi(argv[2]); + nrepeat = std::stoi(argv[3]); + + N = std::stoi(argv[4]); + K = std::stoi(argv[5]); + C = std::stoi(argv[6]); + filter_spatial_lengths[0] = std::stoi(argv[7]); + filter_spatial_lengths[1] = std::stoi(argv[8]); + filter_spatial_lengths[2] = std::stoi(argv[9]); + in_spatial_lengths[0] = std::stoi(argv[10]); + in_spatial_lengths[1] = std::stoi(argv[11]); + in_spatial_lengths[2] = std::stoi(argv[12]); + conv_filter_strides[0] = std::stoi(argv[13]); + conv_filter_strides[1] = std::stoi(argv[14]); + conv_filter_strides[2] = std::stoi(argv[15]); + conv_filter_dilations[0] = std::stoi(argv[16]); + conv_filter_dilations[1] = std::stoi(argv[17]); + conv_filter_dilations[2] = std::stoi(argv[18]); + in_left_pads[0] = std::stoi(argv[19]); + in_left_pads[1] = std::stoi(argv[20]); + in_left_pads[2] = std::stoi(argv[21]); + in_right_pads[0] = std::stoi(argv[22]); + in_right_pads[1] = std::stoi(argv[23]); + in_right_pads[2] = std::stoi(argv[24]); + } + else + { + printf("Usage: 3 or 24 input arguments\n"); + printf(" arg1: verification (0=no, 1=yes)\n"); + printf(" arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n"); + printf(" arg3: run kernel # of times (>1)\n"); + printf(" arg4 to 24: N, K, C, Z, Y, X, Di, Hi, Wi, Sz, Sy, Sz, Dz, Dy, Dx, LeftPz, LeftPy, " + "LeftPz, RightPz, RightPy, RightPx\n"); + exit(0); + } + + auto conv3d = DeviceConv3dFwdInstance{}; + + const auto out_spatial_lengths = + ck::tensor_operation::ConvolutionUtility::ComputeOutputSpatialLengths( + in_spatial_lengths, + filter_spatial_lengths, + conv_filter_strides, + 
conv_filter_dilations, + in_left_pads, + in_right_pads); + Tensor in( + {N, in_spatial_lengths[0], in_spatial_lengths[1], in_spatial_lengths[2], C}); + Tensor wei( + {K, filter_spatial_lengths[0], filter_spatial_lengths[1], filter_spatial_lengths[2], C}); + Tensor out( + {N, out_spatial_lengths[0], out_spatial_lengths[1], out_spatial_lengths[2], K}); + + std::cout << "in: " << in.mDesc << std::endl; + std::cout << "wei: " << wei.mDesc << std::endl; + std::cout << "out: " << out.mDesc << std::endl; + + switch(init_method) + { + case 0: break; + case 1: + in.GenerateTensorValue(GeneratorTensor_2{-5, 5}); + wei.GenerateTensorValue(GeneratorTensor_2{-5, 5}); + break; + default: + in.GenerateTensorValue(GeneratorTensor_3{0.0, 1.0}); + wei.GenerateTensorValue(GeneratorTensor_3{-0.5, 0.5}); + } + + DeviceMem in_device_buf(sizeof(InDataType) * in.mDesc.GetElementSpace()); + DeviceMem wei_device_buf(sizeof(WeiDataType) * wei.mDesc.GetElementSpace()); + DeviceMem out_device_buf(sizeof(OutDataType) * out.mDesc.GetElementSpace()); + + in_device_buf.ToDevice(in.mData.data()); + wei_device_buf.ToDevice(wei.mData.data()); + + // do Convolution + auto invoker = conv3d.MakeInvoker(); + auto argument = conv3d.MakeArgument(static_cast(in_device_buf.GetDeviceBuffer()), + static_cast(wei_device_buf.GetDeviceBuffer()), + static_cast(out_device_buf.GetDeviceBuffer()), + N, + K, + C, + in_spatial_lengths, + filter_spatial_lengths, + out_spatial_lengths, + conv_filter_strides, + conv_filter_dilations, + in_left_pads, + in_right_pads, + InElementOp{}, + WeiElementOp{}, + OutElementOp{}); + + if(!conv3d.IsSupportedArgument(argument)) + { + throw std::runtime_error( + "wrong! 
device_conv3d with the specified compilation parameters does " + "not support this GEMM problem"); + } + + float ave_time = invoker.Run(argument, nrepeat); + + const auto Di = in_spatial_lengths[0]; + const auto Hi = in_spatial_lengths[1]; + const auto Wi = in_spatial_lengths[2]; + const auto Do = out_spatial_lengths[0]; + const auto Ho = out_spatial_lengths[1]; + const auto Wo = out_spatial_lengths[2]; + const auto Z = filter_spatial_lengths[0]; + const auto Y = filter_spatial_lengths[1]; + const auto X = filter_spatial_lengths[2]; + + std::size_t flop = std::size_t(2) * N * K * Do * Ho * Wo * C * Z * Y * X; + std::size_t num_btype = sizeof(InDataType) * N * Di * Hi * Wi * C + + sizeof(WeiDataType) * K * Z * Y * X * C + + sizeof(OutDataType) * N * Do * Ho * Wo * K; + + float tflops = static_cast(flop) / 1.E9 / ave_time; + + float gb_per_sec = num_btype / 1.E6 / ave_time; + + std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec << " GB/s" + << std::endl; + + out_device_buf.FromDevice(out.mData.data()); + + if(do_verification) + { + DeviceMem out_ref_device_buf(sizeof(OutDataType) * N * Do * Ho * Wo * K); + + using DeviceConv3dFwdNaive = ck::tensor_operation::device:: + DeviceConv3dFwdNaive_Input_N_Di_Hi_Wi_C_Weight_K_Z_Y_X_C_Output_N_Do_Ho_Wo_K< + InDataType, + WeiDataType, + OutDataType, + AccDataType, + InElementOp, + WeiElementOp, + OutElementOp>; + auto conv3d_naive = DeviceConv3dFwdNaive{}; + auto invoker_naive = conv3d_naive.MakeInvoker(); + auto argument_naive = conv3d_naive.MakeArgument( + static_cast(in_device_buf.GetDeviceBuffer()), + static_cast(wei_device_buf.GetDeviceBuffer()), + static_cast(out_ref_device_buf.GetDeviceBuffer()), + N, + K, + C, + in_spatial_lengths, + filter_spatial_lengths, + out_spatial_lengths, + conv_filter_strides, + conv_filter_dilations, + in_left_pads, + in_right_pads, + InElementOp{}, + WeiElementOp{}, + OutElementOp{}); + + if(!conv3d_naive.IsSupportedArgument(argument_naive)) + { + throw 
std::runtime_error( + "wrong! device_conv3d_naive does NOT support the specified compilation parameters"); + } + invoker_naive.Run(argument_naive); + + Tensor out_ref( + {N, out_spatial_lengths[0], out_spatial_lengths[1], out_spatial_lengths[2], K}); + + out_ref_device_buf.FromDevice(out_ref.mData.data()); + + check_error(out_ref, out); + } + + return 0; +} diff --git a/example/1_gemm_xdl/gemm_xdl.cpp b/example/1_gemm_xdl/gemm_xdl.cpp index d9ed011fbe..5d289f40e8 100644 --- a/example/1_gemm_xdl/gemm_xdl.cpp +++ b/example/1_gemm_xdl/gemm_xdl.cpp @@ -160,7 +160,6 @@ int main(int argc, char* argv[]) a_m_k_device_buf.ToDevice(a_m_k.mData.data()); b_k_n_device_buf.ToDevice(b_k_n.mData.data()); - c_m_n_device_buf.ToDevice(c_m_n_device_result.mData.data()); auto a_element_op = AElementOp{}; auto b_element_op = BElementOp{}; @@ -216,4 +215,6 @@ int main(int argc, char* argv[]) check_error(c_m_n_host_result, c_m_n_device_result); } + + return 0; } diff --git a/example/4_conv2d_fwd_xdl/conv2d_fwd_xdl.cpp b/example/4_conv2d_fwd_xdl/conv2d_fwd_xdl.cpp index 4c62a7af15..26d3ea3f74 100644 --- a/example/4_conv2d_fwd_xdl/conv2d_fwd_xdl.cpp +++ b/example/4_conv2d_fwd_xdl/conv2d_fwd_xdl.cpp @@ -14,6 +14,7 @@ #include "element_wise_operation.hpp" #include "device_conv2d_fwd_xdl_c_shuffle_nhwc_kyxc_nhwk.hpp" #include "reference_conv_fwd.hpp" +#include "convolution_utility.hpp" using InDataType = ck::half_t; using WeiDataType = ck::half_t; @@ -138,16 +139,20 @@ int main(int argc, char* argv[]) exit(0); } - const ck::index_t YEff = (Y - 1) * conv_dilation_h + 1; - const ck::index_t XEff = (X - 1) * conv_dilation_w + 1; + const std::vector conv_filter_strides{conv_stride_h, conv_stride_w}; + const std::vector conv_filter_dilations{conv_dilation_h, conv_dilation_w}; + const std::vector input_left_pads{in_left_pad_h, in_left_pad_w}; + const std::vector input_right_pads{in_right_pad_h, in_right_pad_w}; + const auto output_spatial_lengths = + 
ck::tensor_operation::ConvolutionUtility::ComputeOutputSpatialLengths({Hi, Wi}, + {Y, X}, + conv_filter_strides, + conv_filter_dilations, + input_left_pads, + input_right_pads); - const ck::index_t Ho = (Hi + in_left_pad_h + in_right_pad_h - YEff) / conv_stride_h + 1; - const ck::index_t Wo = (Wi + in_left_pad_w + in_right_pad_w - XEff) / conv_stride_w + 1; - - const std::vector conv_filter_strides{{conv_stride_h, conv_stride_w}}; - const std::vector conv_filter_dilations{{conv_dilation_h, conv_dilation_w}}; - const std::vector input_left_pads{{in_left_pad_h, in_left_pad_w}}; - const std::vector input_right_pads{{in_right_pad_h, in_right_pad_w}}; + const ck::index_t Ho = output_spatial_lengths[0]; + const ck::index_t Wo = output_spatial_lengths[1]; // tensor layout auto f_host_tensor_descriptor = [](std::size_t N_, @@ -214,9 +219,9 @@ int main(int argc, char* argv[]) N, K, C, - std::vector{{Hi, Wi}}, - std::vector{{Y, X}}, - std::vector{{Ho, Wo}}, + std::vector{Hi, Wi}, + std::vector{Y, X}, + std::vector{Ho, Wo}, conv_filter_strides, conv_filter_dilations, input_left_pads, diff --git a/example/5_conv2d_fwd_xdl_bias_relu/conv2d_fwd_xdl_bias_relu.cpp b/example/5_conv2d_fwd_xdl_bias_relu/conv2d_fwd_xdl_bias_relu.cpp index aa62e212d0..d251aa35e1 100644 --- a/example/5_conv2d_fwd_xdl_bias_relu/conv2d_fwd_xdl_bias_relu.cpp +++ b/example/5_conv2d_fwd_xdl_bias_relu/conv2d_fwd_xdl_bias_relu.cpp @@ -14,6 +14,7 @@ #include "element_wise_operation.hpp" #include "device_conv2d_fwd_xdl_c_shuffle_bias_activation_nhwc_kyxc_nhwk.hpp" #include "reference_conv_fwd_bias_activation.hpp" +#include "convolution_utility.hpp" using InDataType = ck::half_t; using WeiDataType = ck::half_t; @@ -146,16 +147,20 @@ int main(int argc, char* argv[]) exit(0); } - const ck::index_t YEff = (Y - 1) * conv_dilation_h + 1; - const ck::index_t XEff = (X - 1) * conv_dilation_w + 1; + const std::vector conv_filter_strides{conv_stride_h, conv_stride_w}; + const std::vector 
conv_filter_dilations{conv_dilation_h, conv_dilation_w}; + const std::vector input_left_pads{in_left_pad_h, in_left_pad_w}; + const std::vector input_right_pads{in_right_pad_h, in_right_pad_w}; + const auto output_spatial_lengths = + ck::tensor_operation::ConvolutionUtility::ComputeOutputSpatialLengths({Hi, Wi}, + {Y, X}, + conv_filter_strides, + conv_filter_dilations, + input_left_pads, + input_right_pads); - const ck::index_t Ho = (Hi + in_left_pad_h + in_right_pad_h - YEff) / conv_stride_h + 1; - const ck::index_t Wo = (Wi + in_left_pad_w + in_right_pad_w - XEff) / conv_stride_w + 1; - - const std::vector conv_filter_strides{{conv_stride_h, conv_stride_w}}; - const std::vector conv_filter_dilations{{conv_dilation_h, conv_dilation_w}}; - const std::vector input_left_pads{{in_left_pad_h, in_left_pad_w}}; - const std::vector input_right_pads{{in_right_pad_h, in_right_pad_w}}; + const ck::index_t Ho = output_spatial_lengths[0]; + const ck::index_t Wo = output_spatial_lengths[1]; // tensor layout auto f_host_tensor_descriptor = [](std::size_t N_, @@ -232,9 +237,9 @@ int main(int argc, char* argv[]) N, K, C, - std::vector{{Hi, Wi}}, - std::vector{{Y, X}}, - std::vector{{Ho, Wo}}, + std::vector{Hi, Wi}, + std::vector{Y, X}, + std::vector{Ho, Wo}, conv_filter_strides, conv_filter_dilations, input_left_pads, diff --git a/example/6_conv2d_fwd_xdl_bias_relu_add/conv2d_fwd_xdl_bias_relu_add.cpp b/example/6_conv2d_fwd_xdl_bias_relu_add/conv2d_fwd_xdl_bias_relu_add.cpp index a20a8cbb67..d6011b98a9 100644 --- a/example/6_conv2d_fwd_xdl_bias_relu_add/conv2d_fwd_xdl_bias_relu_add.cpp +++ b/example/6_conv2d_fwd_xdl_bias_relu_add/conv2d_fwd_xdl_bias_relu_add.cpp @@ -14,6 +14,7 @@ #include "element_wise_operation.hpp" #include "device_conv2d_fwd_xdl_c_shuffle_bias_activation_add_nhwc_kyxc_nhwk.hpp" #include "reference_conv_fwd_bias_activation_add.hpp" +#include "convolution_utility.hpp" using InDataType = ck::half_t; using WeiDataType = ck::half_t; @@ -143,16 +144,20 @@ int 
main(int argc, char* argv[]) exit(0); } - const ck::index_t YEff = (Y - 1) * conv_dilation_h + 1; - const ck::index_t XEff = (X - 1) * conv_dilation_w + 1; + const std::vector conv_filter_strides{conv_stride_h, conv_stride_w}; + const std::vector conv_filter_dilations{conv_dilation_h, conv_dilation_w}; + const std::vector input_left_pads{in_left_pad_h, in_left_pad_w}; + const std::vector input_right_pads{in_right_pad_h, in_right_pad_w}; + const auto output_spatial_lengths = + ck::tensor_operation::ConvolutionUtility::ComputeOutputSpatialLengths({Hi, Wi}, + {Y, X}, + conv_filter_strides, + conv_filter_dilations, + input_left_pads, + input_right_pads); - const ck::index_t Ho = (Hi + in_left_pad_h + in_right_pad_h - YEff) / conv_stride_h + 1; - const ck::index_t Wo = (Wi + in_left_pad_w + in_right_pad_w - XEff) / conv_stride_w + 1; - - const std::vector conv_filter_strides{{conv_stride_h, conv_stride_w}}; - const std::vector conv_filter_dilations{{conv_dilation_h, conv_dilation_w}}; - const std::vector input_left_pads{{in_left_pad_h, in_left_pad_w}}; - const std::vector input_right_pads{{in_right_pad_h, in_right_pad_w}}; + const ck::index_t Ho = output_spatial_lengths[0]; + const ck::index_t Wo = output_spatial_lengths[1]; // tensor layout auto f_host_tensor_descriptor = [](std::size_t N_, @@ -242,9 +247,9 @@ int main(int argc, char* argv[]) N, K, C, - std::vector{{Hi, Wi}}, - std::vector{{Y, X}}, - std::vector{{Ho, Wo}}, + std::vector{Hi, Wi}, + std::vector{Y, X}, + std::vector{Ho, Wo}, conv_filter_strides, conv_filter_dilations, input_left_pads, diff --git a/example/7_conv2d_fwd_xdl_bias_relu_atomic_add/conv2d_fwd_xdl_bias_relu_atomic_add.cpp b/example/7_conv2d_fwd_xdl_bias_relu_atomic_add/conv2d_fwd_xdl_bias_relu_atomic_add.cpp index 8f07cf066b..83636da3a8 100644 --- a/example/7_conv2d_fwd_xdl_bias_relu_atomic_add/conv2d_fwd_xdl_bias_relu_atomic_add.cpp +++ b/example/7_conv2d_fwd_xdl_bias_relu_atomic_add/conv2d_fwd_xdl_bias_relu_atomic_add.cpp @@ -13,6 +13,7 @@ 
#include "tensor_layout.hpp" #include "device_conv2d_fwd_xdl_c_shuffle_bias_activation_nhwc_kyxc_nhwk.hpp" #include "element_wise_operation.hpp" +#include "convolution_utility.hpp" using InDataType = ck::half_t; using WeiDataType = ck::half_t; @@ -166,16 +167,20 @@ int main(int argc, char* argv[]) exit(0); } - const ck::index_t YEff = (Y - 1) * conv_dilation_h + 1; - const ck::index_t XEff = (X - 1) * conv_dilation_w + 1; + const std::vector conv_filter_strides{conv_stride_h, conv_stride_w}; + const std::vector conv_filter_dilations{conv_dilation_h, conv_dilation_w}; + const std::vector input_left_pads{in_left_pad_h, in_left_pad_w}; + const std::vector input_right_pads{in_right_pad_h, in_right_pad_w}; + const auto output_spatial_lengths = + ck::tensor_operation::ConvolutionUtility::ComputeOutputSpatialLengths({Hi, Wi}, + {Y, X}, + conv_filter_strides, + conv_filter_dilations, + input_left_pads, + input_right_pads); - const ck::index_t Ho = (Hi + in_left_pad_h + in_right_pad_h - YEff) / conv_stride_h + 1; - const ck::index_t Wo = (Wi + in_left_pad_w + in_right_pad_w - XEff) / conv_stride_w + 1; - - const std::vector conv_filter_strides{{conv_stride_h, conv_stride_w}}; - const std::vector conv_filter_dilations{{conv_dilation_h, conv_dilation_w}}; - const std::vector input_left_pads{{in_left_pad_h, in_left_pad_w}}; - const std::vector input_right_pads{{in_right_pad_h, in_right_pad_w}}; + const ck::index_t Ho = output_spatial_lengths[0]; + const ck::index_t Wo = output_spatial_lengths[1]; // tensor layout auto f_host_tensor_descriptor = [](std::size_t N_, @@ -255,9 +260,9 @@ int main(int argc, char* argv[]) N, K, C, - std::vector{{Hi, Wi}}, - std::vector{{Y, X}}, - std::vector{{Ho, Wo}}, + std::vector{Hi, Wi}, + std::vector{Y, X}, + std::vector{Ho, Wo}, conv_filter_strides, conv_filter_dilations, input_left_pads, diff --git a/example/9_conv2d_fwd_xdl_int8/conv2d_fwd_xdl_int8.cpp b/example/9_conv2d_fwd_xdl_int8/conv2d_fwd_xdl_int8.cpp index a4d19dabd1..8614f53472 100644 
--- a/example/9_conv2d_fwd_xdl_int8/conv2d_fwd_xdl_int8.cpp +++ b/example/9_conv2d_fwd_xdl_int8/conv2d_fwd_xdl_int8.cpp @@ -14,6 +14,7 @@ #include "device_conv2d_fwd_xdl_nhwc_kyxc_nhwk.hpp" #include "element_wise_operation.hpp" #include "reference_conv_fwd.hpp" +#include "convolution_utility.hpp" using InDataType = int8_t; using WeiDataType = int8_t; @@ -136,16 +137,20 @@ int main(int argc, char* argv[]) exit(0); } - const ck::index_t YEff = (Y - 1) * conv_dilation_h + 1; - const ck::index_t XEff = (X - 1) * conv_dilation_w + 1; + const std::vector conv_filter_strides{conv_stride_h, conv_stride_w}; + const std::vector conv_filter_dilations{conv_dilation_h, conv_dilation_w}; + const std::vector input_left_pads{in_left_pad_h, in_left_pad_w}; + const std::vector input_right_pads{in_right_pad_h, in_right_pad_w}; + const auto output_spatial_lengths = + ck::tensor_operation::ConvolutionUtility::ComputeOutputSpatialLengths({Hi, Wi}, + {Y, X}, + conv_filter_strides, + conv_filter_dilations, + input_left_pads, + input_right_pads); - const ck::index_t Ho = (Hi + in_left_pad_h + in_right_pad_h - YEff) / conv_stride_h + 1; - const ck::index_t Wo = (Wi + in_left_pad_w + in_right_pad_w - XEff) / conv_stride_w + 1; - - const std::vector conv_filter_strides{{conv_stride_h, conv_stride_w}}; - const std::vector conv_filter_dilations{{conv_dilation_h, conv_dilation_w}}; - const std::vector input_left_pads{{in_left_pad_h, in_left_pad_w}}; - const std::vector input_right_pads{{in_right_pad_h, in_right_pad_w}}; + const ck::index_t Ho = output_spatial_lengths[0]; + const ck::index_t Wo = output_spatial_lengths[1]; // tensor layout auto f_host_tensor_descriptor = [](std::size_t N_, @@ -212,9 +217,9 @@ int main(int argc, char* argv[]) N, K, C, - std::vector{{Hi, Wi}}, - std::vector{{Y, X}}, - std::vector{{Ho, Wo}}, + std::vector{Hi, Wi}, + std::vector{Y, X}, + std::vector{Ho, Wo}, conv_filter_strides, conv_filter_dilations, input_left_pads, diff --git a/example/CMakeLists.txt 
b/example/CMakeLists.txt index c1b3b12d4f..8377cf7679 100644 --- a/example/CMakeLists.txt +++ b/example/CMakeLists.txt @@ -10,6 +10,7 @@ include_directories(BEFORE ${PROJECT_SOURCE_DIR}/composable_kernel/include/tensor_operation ${PROJECT_SOURCE_DIR}/composable_kernel/include/problem_transform ${PROJECT_SOURCE_DIR}/external/rocm/include + ${PROJECT_SOURCE_DIR}/device_operation_reference/include ) set(GEMM_XDL_SOURCE 1_gemm_xdl/gemm_xdl.cpp) @@ -21,6 +22,7 @@ set(CONV2D_FWD_XDL_BIAS_RELU_ADD_SOURCE 6_conv2d_fwd_xdl_bias_relu_add/conv2d_fw set(CONV2D_FWD_XDL_BIAS_RELU_ATOMIC_ADD_SOURCE 7_conv2d_fwd_xdl_bias_relu_atomic_add/conv2d_fwd_xdl_bias_relu_atomic_add.cpp) set(GEMM_XDL_ALPHA_BETA_SOURCE 8_gemm_xdl_alpha_beta/gemm_xdl_alpha_beta.cpp) set(CONV2D_FWD_XDL_INT8_SOURCE 9_conv2d_fwd_xdl_int8/conv2d_fwd_xdl_int8.cpp) +set(CONV3D_FWD_XDL_SOURCE 10_conv3d_fwd_xdl/conv3d_fwd_xdl.cpp) add_executable(gemm_xdl ${GEMM_XDL_SOURCE}) add_executable(gemm_xdl_bias_relu ${GEMM_XDL_BIAS_RELU_SOURCE}) @@ -31,6 +33,7 @@ add_executable(conv2d_fwd_xdl_bias_relu_add ${CONV2D_FWD_XDL_BIAS_RELU_ADD_SOURC add_executable(conv2d_fwd_xdl_bias_relu_atomic_add ${CONV2D_FWD_XDL_BIAS_RELU_ATOMIC_ADD_SOURCE}) add_executable(gemm_xdl_alpha_beta ${GEMM_XDL_ALPHA_BETA_SOURCE}) add_executable(conv2d_fwd_xdl_int8 ${CONV2D_FWD_XDL_INT8_SOURCE}) +add_executable(conv3d_fwd_xdl ${CONV3D_FWD_XDL_SOURCE}) target_link_libraries(gemm_xdl PRIVATE host_tensor) target_link_libraries(gemm_xdl_bias_relu PRIVATE host_tensor) @@ -41,3 +44,5 @@ target_link_libraries(conv2d_fwd_xdl_bias_relu_add PRIVATE host_tensor) target_link_libraries(conv2d_fwd_xdl_bias_relu_atomic_add PRIVATE host_tensor) target_link_libraries(gemm_xdl_alpha_beta PRIVATE host_tensor) target_link_libraries(conv2d_fwd_xdl_int8 PRIVATE host_tensor) +target_link_libraries(conv3d_fwd_xdl PRIVATE host_tensor) + diff --git a/host/driver_offline/include/driver_convolution_add_forward_implicit_gemm_v5r1_dlops_nc0hwc1_kc0yxc1_nk0hwk1.hpp 
b/host/driver_offline/include/driver_convolution_add_forward_implicit_gemm_v5r1_dlops_nc0hwc1_kc0yxc1_nk0hwk1.hpp index bd2adcb3bd..f70423a35c 100644 --- a/host/driver_offline/include/driver_convolution_add_forward_implicit_gemm_v5r1_dlops_nc0hwc1_kc0yxc1_nk0hwk1.hpp +++ b/host/driver_offline/include/driver_convolution_add_forward_implicit_gemm_v5r1_dlops_nc0hwc1_kc0yxc1_nk0hwk1.hpp @@ -84,16 +84,6 @@ struct DriverDynamicConvolutionForwardImplicitGemmDlops_v5r1_nc0hwc1_kc0yxc1_nk0 const auto ConvDilationH = conv_dilations[I0]; const auto ConvDilationW = conv_dilations[I1]; -#if CK_EXPERIMENTAL_STATIC_TENSOR_DESCRIPTOR - const auto Hop = Number<(Ho + HoPerBlock - 1) / HoPerBlock * HoPerBlock>{}; - const auto Wop = Number<(Wo + WoPerBlock - 1) / WoPerBlock * WoPerBlock>{}; - - const auto OutRightPadH = Hop - Ho; - const auto OutRightPadW = Wop - Wo; - - const auto OutRightPadHx = Number{}; - const auto OutRightPadWx = Number{}; -#else const auto Hop = (Ho + HoPerBlock - 1) / HoPerBlock * HoPerBlock; const auto Wop = (Wo + WoPerBlock - 1) / WoPerBlock * WoPerBlock; @@ -102,7 +92,6 @@ struct DriverDynamicConvolutionForwardImplicitGemmDlops_v5r1_nc0hwc1_kc0yxc1_nk0 const auto OutRightPadHx = OutRightPadH * 2; const auto OutRightPadWx = OutRightPadW * 2; -#endif const auto InLeftPadH = in_left_pads[I0]; const auto InLeftPadW = in_left_pads[I1]; @@ -367,16 +356,14 @@ struct DriverDynamicConvolutionForwardImplicitGemmDlops_v5r1_nc0hwc1_kc0yxc1_nk0 std::cerr << "has_main_e0_block_loop = " << has_main_e0_block_loop << std::endl; - const auto c_blockid_to_k_n_h_w_block_cluster_adaptor = + const auto cblockid_to_k_n_h_w_block_cluster_adaptor = GridwiseGemm::MakeCBlockIdToKNHoWoBlockClusterAdaptor(c_k_n_hop_wop_grid_desc); using CBlockIdToBlockClusterAdaptor_K_N_H_W = - decltype(c_blockid_to_k_n_h_w_block_cluster_adaptor); + decltype(cblockid_to_k_n_h_w_block_cluster_adaptor); float ave_time = 0; -#if CK_EXPERIMENTAL_PASS_TENSOR_DESCRIPTOR_BY_VALUE - if(has_main_e0_block_loop) 
{ const auto kernel = kernel_gemm_dlops_v3_resize_add< @@ -404,7 +391,7 @@ struct DriverDynamicConvolutionForwardImplicitGemmDlops_v5r1_nc0hwc1_kc0yxc1_nk0 b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc, c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc, d_k0_k1_n_h0_h1_h2x2_w0_w1_w2x2_grid_desc, - c_blockid_to_k_n_h_w_block_cluster_adaptor); + cblockid_to_k_n_h_w_block_cluster_adaptor); } else { @@ -433,132 +420,9 @@ struct DriverDynamicConvolutionForwardImplicitGemmDlops_v5r1_nc0hwc1_kc0yxc1_nk0 b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc, c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc, d_k0_k1_n_h0_h1_h2x2_w0_w1_w2x2_grid_desc, - c_blockid_to_k_n_h_w_block_cluster_adaptor); + cblockid_to_k_n_h_w_block_cluster_adaptor); } -#elif CK_EXPERIMENTAL_PASS_TENSOR_DESCRIPTOR_BY_VOID_POINTER - DeviceMem a_e0_e1_k0_k1_e2_grid_desc_dev_buf(sizeof(AGridDesc_E0_E1_K0_K1_E2)); - DeviceMem b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc_dev_buf( - sizeof(BGridDesc_E0_E1_N_H0_H1_H2_W0_W1_W2_E2)); - DeviceMem c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc_dev_buf( - sizeof(CGridDesc_K0_K1_N_H0_H1_H2_W0_W1_W2)); - DeviceMem d_k0_k1_n_h0_h1_h2x2_w0_w1_w2x2_grid_desc_dev_buf( - sizeof(DGridDesc_K0_K1_N_H0_H1_H2x2_W0_W1_W2x2)); - DeviceMem c_blockid_to_k_n_h_w_block_cluster_adaptor_dev_buf( - sizeof(CBlockIdToBlockClusterAdaptor_K_N_H_W)); - - a_e0_e1_k0_k1_e2_grid_desc_dev_buf.ToDevice(&a_e0_e1_k0_k1_e2_grid_desc); - b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc_dev_buf.ToDevice( - &b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc); - c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc_dev_buf.ToDevice( - &c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc); - d_k0_k1_n_h0_h1_h2x2_w0_w1_w2x2_grid_desc_dev_buf.ToDevice( - &d_k0_k1_n_h0_h1_h2x2_w0_w1_w2x2_grid_desc); - c_blockid_to_k_n_h_w_block_cluster_adaptor_dev_buf.ToDevice( - &c_blockid_to_k_n_h_w_block_cluster_adaptor); - - if(has_main_e0_block_loop) - { - - const auto kernel = kernel_gemm_dlops_v3_resize_add< - GridwiseGemm, - FloatAB, - FloatC, - remove_reference_t, - remove_reference_t, - remove_reference_t, - 
remove_reference_t, - remove_reference_t, - true, - activ_type>; - - ave_time = launch_and_time_kernel( - kernel, - nrepeat, - dim3(grid_size), - dim3(BlockSize), - 0, - p_a_grid, - p_b_grid, - p_bias_grid, - p_d_grid, - cast_pointer_to_constant_address_space( - a_e0_e1_k0_k1_e2_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - d_k0_k1_n_h0_h1_h2x2_w0_w1_w2x2_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_blockid_to_k_n_h_w_block_cluster_adaptor_dev_buf.GetDeviceBuffer())); - } - else - { - const auto kernel = kernel_gemm_dlops_v3_resize_add< - GridwiseGemm, - FloatAB, - FloatC, - remove_reference_t, - remove_reference_t, - remove_reference_t, - remove_reference_t, - remove_reference_t, - false, - activ_type>; - - ave_time = launch_and_time_kernel( - kernel, - nrepeat, - dim3(grid_size), - dim3(BlockSize), - 0, - p_a_grid, - p_b_grid, - p_bias_grid, - p_d_grid, - cast_pointer_to_constant_address_space( - a_e0_e1_k0_k1_e2_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - d_k0_k1_n_h0_h1_h2x2_w0_w1_w2x2_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_blockid_to_k_n_h_w_block_cluster_adaptor_dev_buf.GetDeviceBuffer())); - } -#elif CK_EXPERIMENTAL_STATIC_TENSOR_DESCRIPTOR - { - static_assert(a_e0_e1_k_e2_grid_desc.IsKnownAtCompileTime(), ""); - static_assert(b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc.IsKnownAtCompileTime(), ""); - 
static_assert(d_k0_k1_n_h0_h1_h2x2_w0_w1_w2x2_grid_desc.IsKnownAtCompileTime(), ""); - static_assert(c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc.IsKnownAtCompileTime(), ""); - static_assert(c_blockid_to_k_n_h_w_block_cluster_adaptor.IsKnownAtCompileTime(), ""); - - const auto kernel = kernel_gemm_dlops_v3_resize_add< - GridwiseGemm, - FloatAB, - FloatC, - remove_reference_t, - remove_reference_t, - remove_reference_t, - remove_reference_t, - remove_reference_t, - has_main_e0_block_loop, - activ_type>; - - ave_time = launch_and_time_kernel(kernel, - nrepeat, - dim3(grid_size), - dim3(BlockSize), - 0, - p_a_grid, - p_b_grid, - p_bias_grid, - p_d_grid); - } -#endif return ave_time; } }; diff --git a/host/driver_offline/include/driver_convolution_forward_implicit_gemm_v5r1_dlops_nc0hwc1_kc0yxc1_nk0hwk1.hpp b/host/driver_offline/include/driver_convolution_forward_implicit_gemm_v5r1_dlops_nc0hwc1_kc0yxc1_nk0hwk1.hpp index adb4cc79e7..e26dfa61e6 100644 --- a/host/driver_offline/include/driver_convolution_forward_implicit_gemm_v5r1_dlops_nc0hwc1_kc0yxc1_nk0hwk1.hpp +++ b/host/driver_offline/include/driver_convolution_forward_implicit_gemm_v5r1_dlops_nc0hwc1_kc0yxc1_nk0hwk1.hpp @@ -317,16 +317,14 @@ struct DriverDynamicConvolutionForwardImplicitGemmDlops_v5r1_nc0hwc1_kc0yxc1_nk0 std::cerr << "has_main_e0_block_loop = " << has_main_e0_block_loop << std::endl; - const auto c_blockid_to_k_n_h_w_block_cluster_adaptor = + const auto cblockid_to_k_n_h_w_block_cluster_adaptor = GridwiseGemm::MakeCBlockIdToKNHoWoBlockClusterAdaptor(c_k_n_hop_wop_grid_desc); using CBlockIdToBlockClusterAdaptor_K_N_H_W = - decltype(c_blockid_to_k_n_h_w_block_cluster_adaptor); + decltype(cblockid_to_k_n_h_w_block_cluster_adaptor); float ave_time = 0; -#if CK_EXPERIMENTAL_PASS_TENSOR_DESCRIPTOR_BY_VALUE - if(has_main_e0_block_loop) { const auto kernel = @@ -352,7 +350,7 @@ struct DriverDynamicConvolutionForwardImplicitGemmDlops_v5r1_nc0hwc1_kc0yxc1_nk0 a_e0_e1_k0_k1_e2_grid_desc, 
b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc, c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc, - c_blockid_to_k_n_h_w_block_cluster_adaptor); + cblockid_to_k_n_h_w_block_cluster_adaptor); } else { @@ -379,121 +377,9 @@ struct DriverDynamicConvolutionForwardImplicitGemmDlops_v5r1_nc0hwc1_kc0yxc1_nk0 a_e0_e1_k0_k1_e2_grid_desc, b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc, c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc, - c_blockid_to_k_n_h_w_block_cluster_adaptor); + cblockid_to_k_n_h_w_block_cluster_adaptor); } -#elif CK_EXPERIMENTAL_PASS_TENSOR_DESCRIPTOR_BY_VOID_POINTER - DeviceMem a_e0_e1_k0_k1_e2_grid_desc_dev_buf(sizeof(AGridDesc_E0_E1_K0_K1_E2)); - DeviceMem b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc_dev_buf( - sizeof(BGridDesc_E0_E1_N_H0_H1_H2_W0_W1_W2_E2)); - DeviceMem c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc_dev_buf( - sizeof(CGridDesc_K0_K1_N_H0_H1_H2_W0_W1_W2)); - DeviceMem c_blockid_to_k_n_h_w_block_cluster_adaptor_dev_buf( - sizeof(CBlockIdToBlockClusterAdaptor_K_N_H_W)); - - a_e0_e1_k0_k1_e2_grid_desc_dev_buf.ToDevice(&a_e0_e1_k0_k1_e2_grid_desc); - b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc_dev_buf.ToDevice( - &b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc); - c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc_dev_buf.ToDevice( - &c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc); - c_blockid_to_k_n_h_w_block_cluster_adaptor_dev_buf.ToDevice( - &c_blockid_to_k_n_h_w_block_cluster_adaptor); - - if(has_main_e0_block_loop) - { - - const auto kernel = - kernel_gemm_dlops_v3, - remove_reference_t, - remove_reference_t, - remove_reference_t, - true, - activ_type>; - - ave_time = launch_and_time_kernel( - kernel, - nrepeat, - dim3(grid_size), - dim3(BlockSize), - 0, - p_a_grid, - p_b_grid, - p_bias_grid, - p_c_grid, - cast_pointer_to_constant_address_space( - a_e0_e1_k0_k1_e2_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - 
c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_blockid_to_k_n_h_w_block_cluster_adaptor_dev_buf.GetDeviceBuffer())); - } - else - { - - const auto kernel = - kernel_gemm_dlops_v3, - remove_reference_t, - remove_reference_t, - remove_reference_t, - false, - activ_type>; - - ave_time = launch_and_time_kernel( - kernel, - nrepeat, - dim3(grid_size), - dim3(BlockSize), - 0, - p_a_grid, - p_b_grid, - p_bias_grid, - p_c_grid, - cast_pointer_to_constant_address_space( - a_e0_e1_k0_k1_e2_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_blockid_to_k_n_h_w_block_cluster_adaptor_dev_buf.GetDeviceBuffer())); - } -#elif CK_EXPERIMENTAL_STATIC_TENSOR_DESCRIPTOR - { - static_assert(a_e0_e1_k_e2_grid_desc.IsKnownAtCompileTime(), ""); - static_assert(b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc.IsKnownAtCompileTime(), ""); - static_assert(c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc.IsKnownAtCompileTime(), ""); - static_assert(c_blockid_to_k_n_h_w_block_cluster_adaptor.IsKnownAtCompileTime(), ""); - - const auto kernel = - kernel_gemm_dlops_v3, - remove_reference_t, - remove_reference_t, - remove_reference_t, - has_main_e0_block_loop, - activ_type>; - - ave_time = launch_and_time_kernel(kernel, - nrepeat, - dim3(grid_size), - dim3(BlockSize), - 0, - p_a_grid, - p_b_grid, - p_bias_grid, - p_c_grid); - } -#endif return ave_time; } }; diff --git a/host/driver_offline/include/driver_convolution_maxpool_forward_implicit_gemm_v5r1_dlops_nc0hwc1_kc0yxc1_nk0hwk1.hpp b/host/driver_offline/include/driver_convolution_maxpool_forward_implicit_gemm_v5r1_dlops_nc0hwc1_kc0yxc1_nk0hwk1.hpp index 3d3d54fa45..0dbb76707f 100644 --- 
a/host/driver_offline/include/driver_convolution_maxpool_forward_implicit_gemm_v5r1_dlops_nc0hwc1_kc0yxc1_nk0hwk1.hpp +++ b/host/driver_offline/include/driver_convolution_maxpool_forward_implicit_gemm_v5r1_dlops_nc0hwc1_kc0yxc1_nk0hwk1.hpp @@ -365,16 +365,14 @@ struct DriverDynamicConvolutionForwardImplicitGemmDlops_v5r1_nc0hwc1_kc0yxc1_nk0 std::cerr << "has_main_e0_block_loop = " << has_main_e0_block_loop << std::endl; - const auto c_blockid_to_k_n_h_w_block_cluster_adaptor = + const auto cblockid_to_k_n_h_w_block_cluster_adaptor = GridwiseGemm::MakeCBlockIdToKNHoWoBlockClusterAdaptor(c_k_n_hop_wop_grid_desc); using CBlockIdToBlockClusterAdaptor_K_N_H_W = - decltype(c_blockid_to_k_n_h_w_block_cluster_adaptor); + decltype(cblockid_to_k_n_h_w_block_cluster_adaptor); float ave_time = 0; -#if CK_EXPERIMENTAL_PASS_TENSOR_DESCRIPTOR_BY_VALUE - if(has_main_e0_block_loop) { const auto kernel = kernel_gemm_dlops_v3_maxpool< @@ -403,7 +401,7 @@ struct DriverDynamicConvolutionForwardImplicitGemmDlops_v5r1_nc0hwc1_kc0yxc1_nk0 b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc, c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc, d_k0_k1_n_h0_h1_hx_w0_w1_wx_grid_desc, - c_blockid_to_k_n_h_w_block_cluster_adaptor); + cblockid_to_k_n_h_w_block_cluster_adaptor); } else { @@ -433,136 +431,9 @@ struct DriverDynamicConvolutionForwardImplicitGemmDlops_v5r1_nc0hwc1_kc0yxc1_nk0 b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc, c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc, d_k0_k1_n_h0_h1_hx_w0_w1_wx_grid_desc, - c_blockid_to_k_n_h_w_block_cluster_adaptor); + cblockid_to_k_n_h_w_block_cluster_adaptor); } -#elif CK_EXPERIMENTAL_PASS_TENSOR_DESCRIPTOR_BY_VOID_POINTER - DeviceMem a_e0_e1_k0_k1_e2_grid_desc_dev_buf(sizeof(AGridDesc_E0_E1_K0_K1_E2)); - DeviceMem b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc_dev_buf( - sizeof(BGridDesc_E0_E1_N_H0_H1_H2_W0_W1_W2_E2)); - DeviceMem c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc_dev_buf( - sizeof(CGridDesc_K0_K1_N_H0_H1_H2_W0_W1_W2)); - DeviceMem d_k0_k1_n_h0_h1_hx_w0_w1_wx_grid_desc_dev_buf( - 
sizeof(DGridDesc_K0_K1_N_H0_H1_Hx_W0_W1_Wx)); - DeviceMem c_blockid_to_k_n_h_w_block_cluster_adaptor_dev_buf( - sizeof(CBlockIdToBlockClusterAdaptor_K_N_H_W)); - - a_e0_e1_k0_k1_e2_grid_desc_dev_buf.ToDevice(&a_e0_e1_k0_k1_e2_grid_desc); - b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc_dev_buf.ToDevice( - &b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc); - c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc_dev_buf.ToDevice( - &c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc); - d_k0_k1_n_h0_h1_hx_w0_w1_wx_grid_desc_dev_buf.ToDevice( - &d_k0_k1_n_h0_h1_hx_w0_w1_wx_grid_desc); - c_blockid_to_k_n_h_w_block_cluster_adaptor_dev_buf.ToDevice( - &c_blockid_to_k_n_h_w_block_cluster_adaptor); - - if(has_main_e0_block_loop) - { - - const auto kernel = kernel_gemm_dlops_v3_maxpool< - GridwiseGemm, - FloatAB, - FloatC, - remove_reference_t, - remove_reference_t, - remove_reference_t, - remove_reference_t, - remove_reference_t, - true, - activ_type>; - - ave_time = launch_and_time_kernel( - kernel, - nrepeat, - dim3(grid_size), - dim3(BlockSize), - 0, - p_a_grid, - p_b_grid, - p_bias_grid, - p_c_grid, - p_d_grid, - cast_pointer_to_constant_address_space( - a_e0_e1_k0_k1_e2_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - d_k0_k1_n_h0_h1_hx_w0_w1_wx_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_blockid_to_k_n_h_w_block_cluster_adaptor_dev_buf.GetDeviceBuffer())); - } - else - { - - const auto kernel = kernel_gemm_dlops_v3_maxpool< - GridwiseGemm, - FloatAB, - FloatC, - remove_reference_t, - remove_reference_t, - remove_reference_t, - remove_reference_t, - remove_reference_t, - false, - activ_type>; - - ave_time = launch_and_time_kernel( - kernel, - nrepeat, - dim3(grid_size), - dim3(BlockSize), - 0, - p_a_grid, - p_b_grid, - 
p_bias_grid, - p_c_grid, - p_d_grid, - cast_pointer_to_constant_address_space( - a_e0_e1_k0_k1_e2_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - d_k0_k1_n_h0_h1_hx_w0_w1_wx_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_blockid_to_k_n_h_w_block_cluster_adaptor_dev_buf.GetDeviceBuffer())); - } -#elif CK_EXPERIMENTAL_STATIC_TENSOR_DESCRIPTOR - { - static_assert(a_e0_e1_k_e2_grid_desc.IsKnownAtCompileTime(), ""); - static_assert(b_e0_e1_n_h0_h1_h2_w0_w1_w2_e2_grid_desc.IsKnownAtCompileTime(), ""); - static_assert(d_k0_k1_n_h0_h1_hx_w0_w1_wx_grid_desc.IsKnownAtCompileTime(), ""); - static_assert(c_k0_k1_n_h0_h1_h2_w0_w1_w2_grid_desc.IsKnownAtCompileTime(), ""); - static_assert(c_blockid_to_k_n_h_w_block_cluster_adaptor.IsKnownAtCompileTime(), ""); - - const auto kernel = kernel_gemm_dlops_v3_maxpool< - GridwiseGemm, - FloatAB, - FloatC, - remove_reference_t, - remove_reference_t, - remove_reference_t, - remove_reference_t, - remove_reference_t, - has_main_e0_block_loop, - activ_type>; - - ave_time = launch_and_time_kernel(kernel, - nrepeat, - dim3(grid_size), - dim3(BlockSize), - 0, - p_a_grid, - p_b_grid, - p_bias_grid, - p_c_grid, - p_d_grid); - } -#endif return ave_time; } }; diff --git a/host/driver_offline/include/driver_gemm_dlops_v1r2.hpp b/host/driver_offline/include/driver_gemm_dlops_v1r2.hpp index bf5f7f1c0f..c51010272d 100644 --- a/host/driver_offline/include/driver_gemm_dlops_v1r2.hpp +++ b/host/driver_offline/include/driver_gemm_dlops_v1r2.hpp @@ -136,11 +136,11 @@ __host__ float driver_gemm_dlops_v1r2(const FloatAB* p_a_grid, using CM0M10M11N0N10N11GridDesc = decltype(c_m0_m10_m11_n0_n10_n11_grid_desc); - // c_blockid_to_m0_n0_block_cluster_adaptor - const auto 
c_blockid_to_m0_n0_block_cluster_adaptor = + // cblockid_to_m0_n0_block_cluster_adaptor + const auto cblockid_to_m0_n0_block_cluster_adaptor = GridwiseGemm::MakeCBlockIdToM0N0BlockClusterAdaptor(c_m_n_grid_desc); - using CBlockIdToM0N0BlockClusterAdaptor = decltype(c_blockid_to_m0_n0_block_cluster_adaptor); + using CBlockIdToM0N0BlockClusterAdaptor = decltype(cblockid_to_m0_n0_block_cluster_adaptor); const index_t grid_size = GridwiseGemm::CalculateGridSize(M, N); @@ -166,7 +166,6 @@ __host__ float driver_gemm_dlops_v1r2(const FloatAB* p_a_grid, << c_m0_m10_m11_n0_n10_n11_grid_desc.GetLength(I5) << "}" << std::endl; } -#if CK_EXPERIMENTAL_PASS_TENSOR_DESCRIPTOR_BY_VALUE float ave_time = 0; if(has_main_k_block_loop && has_double_tail_k_block_loop) @@ -193,7 +192,7 @@ __host__ float driver_gemm_dlops_v1r2(const FloatAB* p_a_grid, a_k_m0_m1_grid_desc, b_k_n0_n1_grid_desc, c_m0_m10_m11_n0_n10_n11_grid_desc, - c_blockid_to_m0_n0_block_cluster_adaptor); + cblockid_to_m0_n0_block_cluster_adaptor); } else if(has_main_k_block_loop && !has_double_tail_k_block_loop) { @@ -219,7 +218,7 @@ __host__ float driver_gemm_dlops_v1r2(const FloatAB* p_a_grid, a_k_m0_m1_grid_desc, b_k_n0_n1_grid_desc, c_m0_m10_m11_n0_n10_n11_grid_desc, - c_blockid_to_m0_n0_block_cluster_adaptor); + cblockid_to_m0_n0_block_cluster_adaptor); } else if(!has_main_k_block_loop && has_double_tail_k_block_loop) { @@ -245,7 +244,7 @@ __host__ float driver_gemm_dlops_v1r2(const FloatAB* p_a_grid, a_k_m0_m1_grid_desc, b_k_n0_n1_grid_desc, c_m0_m10_m11_n0_n10_n11_grid_desc, - c_blockid_to_m0_n0_block_cluster_adaptor); + cblockid_to_m0_n0_block_cluster_adaptor); } else { @@ -271,143 +270,9 @@ __host__ float driver_gemm_dlops_v1r2(const FloatAB* p_a_grid, a_k_m0_m1_grid_desc, b_k_n0_n1_grid_desc, c_m0_m10_m11_n0_n10_n11_grid_desc, - c_blockid_to_m0_n0_block_cluster_adaptor); + cblockid_to_m0_n0_block_cluster_adaptor); } return ave_time; -#elif CK_EXPERIMENTAL_PASS_TENSOR_DESCRIPTOR_BY_VOID_POINTER - DeviceMem 
a_k_m0_m1_grid_desc_dev_buf(sizeof(AKM0M1GridDesc)); - DeviceMem b_k_n0_n1_grid_desc_dev_buf(sizeof(BKN0N1GridDesc)); - DeviceMem c_m0_m10_m11_n0_n10_n11_grid_desc_dev_buf(sizeof(CM0M10M11N0N10N11GridDesc)); - DeviceMem c_blockid_to_m0_n0_block_cluster_adaptor_dev_buf( - sizeof(CBlockIdToM0N0BlockClusterAdaptor)); - - a_k_m0_m1_grid_desc_dev_buf.ToDevice(&a_k_m0_m1_grid_desc); - b_k_n0_n1_grid_desc_dev_buf.ToDevice(&b_k_n0_n1_grid_desc); - c_m0_m10_m11_n0_n10_n11_grid_desc_dev_buf.ToDevice(&c_m0_m10_m11_n0_n10_n11_grid_desc); - c_blockid_to_m0_n0_block_cluster_adaptor_dev_buf.ToDevice( - &c_blockid_to_m0_n0_block_cluster_adaptor); - - float ave_time = 0; - - if(has_main_k_block_loop && has_double_tail_k_block_loop) - { - const auto kernel = - kernel_gemm_dlops_v1r2, - remove_reference_t, - remove_reference_t, - remove_reference_t, - true, - true>; - - ave_time = launch_and_time_kernel( - kernel, - nrepeat, - dim3(grid_size), - dim3(BlockSize), - 0, - p_a_grid, - p_b_grid, - p_c_grid, - cast_pointer_to_constant_address_space(a_k_m0_m1_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space(b_k_n0_n1_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_m0_m10_m11_n0_n10_n11_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_blockid_to_m0_n0_block_cluster_adaptor_dev_buf.GetDeviceBuffer())); - } - else if(has_main_k_block_loop && !has_double_tail_k_block_loop) - { - const auto kernel = - kernel_gemm_dlops_v1r2, - remove_reference_t, - remove_reference_t, - remove_reference_t, - true, - false>; - - ave_time = launch_and_time_kernel( - kernel, - nrepeat, - dim3(grid_size), - dim3(BlockSize), - 0, - p_a_grid, - p_b_grid, - p_c_grid, - cast_pointer_to_constant_address_space(a_k_m0_m1_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space(b_k_n0_n1_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - 
c_m0_m10_m11_n0_n10_n11_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_blockid_to_m0_n0_block_cluster_adaptor_dev_buf.GetDeviceBuffer())); - } - else if(!has_main_k_block_loop && has_double_tail_k_block_loop) - { - const auto kernel = - kernel_gemm_dlops_v1r2, - remove_reference_t, - remove_reference_t, - remove_reference_t, - false, - true>; - - ave_time = launch_and_time_kernel( - kernel, - nrepeat, - dim3(grid_size), - dim3(BlockSize), - 0, - p_a_grid, - p_b_grid, - p_c_grid, - cast_pointer_to_constant_address_space(a_k_m0_m1_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space(b_k_n0_n1_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_m0_m10_m11_n0_n10_n11_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_blockid_to_m0_n0_block_cluster_adaptor_dev_buf.GetDeviceBuffer())); - } - else - { - const auto kernel = - kernel_gemm_dlops_v1r2, - remove_reference_t, - remove_reference_t, - remove_reference_t, - false, - false>; - - ave_time = launch_and_time_kernel( - kernel, - nrepeat, - dim3(grid_size), - dim3(BlockSize), - 0, - p_a_grid, - p_b_grid, - p_c_grid, - cast_pointer_to_constant_address_space(a_k_m0_m1_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space(b_k_n0_n1_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_m0_m10_m11_n0_n10_n11_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_blockid_to_m0_n0_block_cluster_adaptor_dev_buf.GetDeviceBuffer())); - } - - return ave_time; -#endif } #endif diff --git a/host/driver_offline/include/driver_gemm_dlops_v1r3.hpp b/host/driver_offline/include/driver_gemm_dlops_v1r3.hpp index 4470918820..8459bb0a22 100644 --- a/host/driver_offline/include/driver_gemm_dlops_v1r3.hpp +++ b/host/driver_offline/include/driver_gemm_dlops_v1r3.hpp @@ -131,11 +131,11 @@ __host__ float driver_gemm_dlops_v1r3(const 
FloatAB* p_a_grid, using CM0M10M11N0N10N11GridDesc = decltype(c_m0_m10_m11_n0_n10_n11_grid_desc); - // c_blockid_to_m0_n0_block_cluster_adaptor - const auto c_blockid_to_m0_n0_block_cluster_adaptor = + // cblockid_to_m0_n0_block_cluster_adaptor + const auto cblockid_to_m0_n0_block_cluster_adaptor = GridwiseGemm::MakeCBlockIdToM0N0BlockClusterAdaptor(c_m_n_grid_desc); - using CBlockIdToM0N0BlockClusterAdaptor = decltype(c_blockid_to_m0_n0_block_cluster_adaptor); + using CBlockIdToM0N0BlockClusterAdaptor = decltype(cblockid_to_m0_n0_block_cluster_adaptor); const index_t grid_size = GridwiseGemm::CalculateGridSize(M, N); @@ -163,7 +163,6 @@ __host__ float driver_gemm_dlops_v1r3(const FloatAB* p_a_grid, << c_m0_m10_m11_n0_n10_n11_grid_desc.GetLength(I5) << "}" << std::endl; } -#if CK_EXPERIMENTAL_PASS_TENSOR_DESCRIPTOR_BY_VALUE float ave_time = 0; if(has_main_k_block_loop && has_double_tail_k_block_loop) @@ -190,7 +189,7 @@ __host__ float driver_gemm_dlops_v1r3(const FloatAB* p_a_grid, a_k0_m0_m1_k1_grid_desc, b_k0_n0_n1_k1_grid_desc, c_m0_m10_m11_n0_n10_n11_grid_desc, - c_blockid_to_m0_n0_block_cluster_adaptor); + cblockid_to_m0_n0_block_cluster_adaptor); } else if(has_main_k_block_loop && !has_double_tail_k_block_loop) { @@ -216,7 +215,7 @@ __host__ float driver_gemm_dlops_v1r3(const FloatAB* p_a_grid, a_k0_m0_m1_k1_grid_desc, b_k0_n0_n1_k1_grid_desc, c_m0_m10_m11_n0_n10_n11_grid_desc, - c_blockid_to_m0_n0_block_cluster_adaptor); + cblockid_to_m0_n0_block_cluster_adaptor); } else if(!has_main_k_block_loop && has_double_tail_k_block_loop) { @@ -242,7 +241,7 @@ __host__ float driver_gemm_dlops_v1r3(const FloatAB* p_a_grid, a_k0_m0_m1_k1_grid_desc, b_k0_n0_n1_k1_grid_desc, c_m0_m10_m11_n0_n10_n11_grid_desc, - c_blockid_to_m0_n0_block_cluster_adaptor); + cblockid_to_m0_n0_block_cluster_adaptor); } else { @@ -268,151 +267,9 @@ __host__ float driver_gemm_dlops_v1r3(const FloatAB* p_a_grid, a_k0_m0_m1_k1_grid_desc, b_k0_n0_n1_k1_grid_desc, c_m0_m10_m11_n0_n10_n11_grid_desc, 
- c_blockid_to_m0_n0_block_cluster_adaptor); + cblockid_to_m0_n0_block_cluster_adaptor); } return ave_time; -#elif CK_EXPERIMENTAL_PASS_TENSOR_DESCRIPTOR_BY_VOID_POINTER - DeviceMem a_k0_m0_m1_k1_grid_desc_dev_buf(sizeof(AK0M0M1K1GridDesc)); - DeviceMem b_k0_n0_n1_k1_grid_desc_dev_buf(sizeof(BK0N0N1K1GridDesc)); - DeviceMem c_m0_m10_m11_n0_n10_n11_grid_desc_dev_buf(sizeof(CM0M10M11N0N10N11GridDesc)); - DeviceMem c_blockid_to_m0_n0_block_cluster_adaptor_dev_buf( - sizeof(CBlockIdToM0N0BlockClusterAdaptor)); - - a_k0_m0_m1_k1_grid_desc_dev_buf.ToDevice(&a_k0_m0_m1_k1_grid_desc); - b_k0_n0_n1_k1_grid_desc_dev_buf.ToDevice(&b_k0_n0_n1_k1_grid_desc); - c_m0_m10_m11_n0_n10_n11_grid_desc_dev_buf.ToDevice(&c_m0_m10_m11_n0_n10_n11_grid_desc); - c_blockid_to_m0_n0_block_cluster_adaptor_dev_buf.ToDevice( - &c_blockid_to_m0_n0_block_cluster_adaptor); - - float ave_time = 0; - - if(has_main_k_block_loop && has_double_tail_k_block_loop) - { - const auto kernel = - kernel_gemm_dlops_v1r3, - remove_reference_t, - remove_reference_t, - remove_reference_t, - true, - true>; - - ave_time = launch_and_time_kernel( - kernel, - nrepeat, - dim3(grid_size), - dim3(BlockSize), - 0, - p_a_grid, - p_b_grid, - p_c_grid, - cast_pointer_to_constant_address_space( - a_k0_m0_m1_k1_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - b_k0_n0_n1_k1_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_m0_m10_m11_n0_n10_n11_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_blockid_to_m0_n0_block_cluster_adaptor_dev_buf.GetDeviceBuffer())); - } - else if(has_main_k_block_loop && !has_double_tail_k_block_loop) - { - const auto kernel = - kernel_gemm_dlops_v1r3, - remove_reference_t, - remove_reference_t, - remove_reference_t, - true, - false>; - - ave_time = launch_and_time_kernel( - kernel, - nrepeat, - dim3(grid_size), - dim3(BlockSize), - 0, - p_a_grid, - p_b_grid, - p_c_grid, - 
cast_pointer_to_constant_address_space( - a_k0_m0_m1_k1_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - b_k0_n0_n1_k1_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_m0_m10_m11_n0_n10_n11_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_blockid_to_m0_n0_block_cluster_adaptor_dev_buf.GetDeviceBuffer())); - } - else if(!has_main_k_block_loop && has_double_tail_k_block_loop) - { - const auto kernel = - kernel_gemm_dlops_v1r3, - remove_reference_t, - remove_reference_t, - remove_reference_t, - false, - true>; - - ave_time = launch_and_time_kernel( - kernel, - nrepeat, - dim3(grid_size), - dim3(BlockSize), - 0, - p_a_grid, - p_b_grid, - p_c_grid, - cast_pointer_to_constant_address_space( - a_k0_m0_m1_k1_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - b_k0_n0_n1_k1_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_m0_m10_m11_n0_n10_n11_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_blockid_to_m0_n0_block_cluster_adaptor_dev_buf.GetDeviceBuffer())); - } - else - { - const auto kernel = - kernel_gemm_dlops_v1r3, - remove_reference_t, - remove_reference_t, - remove_reference_t, - false, - false>; - - ave_time = launch_and_time_kernel( - kernel, - nrepeat, - dim3(grid_size), - dim3(BlockSize), - 0, - p_a_grid, - p_b_grid, - p_c_grid, - cast_pointer_to_constant_address_space( - a_k0_m0_m1_k1_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - b_k0_n0_n1_k1_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_m0_m10_m11_n0_n10_n11_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_blockid_to_m0_n0_block_cluster_adaptor_dev_buf.GetDeviceBuffer())); - } - - return ave_time; -#endif } #endif diff --git a/host/driver_offline/include/driver_gemm_xdlops_v2r3.hpp 
b/host/driver_offline/include/driver_gemm_xdlops_v2r3.hpp index 3aeb91a004..b3530fbb64 100644 --- a/host/driver_offline/include/driver_gemm_xdlops_v2r3.hpp +++ b/host/driver_offline/include/driver_gemm_xdlops_v2r3.hpp @@ -138,7 +138,8 @@ __host__ float driver_gemm_xdlops_v2r3(const FloatAB* p_a_grid, using CGridDesc_M0_N0_M1_N1_M2_M3_M4_N2 = decltype(c_m0_n0_m1_n1_m2_m3_m4_n2_grid_desc); - const auto block_2_ctile_map = GridwiseGemm::MakeBlock2CTileMap(c_grid_desc_m_n, M01, N01); + const auto block_2_ctile_map = + GridwiseGemm::MakeDefaultBlock2CTileMap(c_grid_desc_m_n, M01, N01); using Block2CTileMap = decltype(block_2_ctile_map); @@ -152,7 +153,6 @@ __host__ float driver_gemm_xdlops_v2r3(const FloatAB* p_a_grid, auto element_op_ = ElementwiseOperation{}; -#if CK_EXPERIMENTAL_PASS_TENSOR_DESCRIPTOR_BY_VALUE if(has_main_k0_block_loop) { const auto kernel = @@ -215,74 +215,6 @@ __host__ float driver_gemm_xdlops_v2r3(const FloatAB* p_a_grid, element_op_, block_2_ctile_map); } -#elif CK_EXPERIMENTAL_PASS_TENSOR_DESCRIPTOR_BY_VOID_POINTER - DeviceMem a_grid_desc_k0_m_k1_dev_buf(sizeof(AGridDesc_K0_M_K1)); - DeviceMem b_grid_desc_k0_n_k1_dev_buf(sizeof(BGridDesc_K0_N_K)); - DeviceMem c_m0_n0_m1_n1_m2_m3_m4_n2_grid_desc_dev_buf( - sizeof(CGridDesc_M0_N0_M1_N1_M2_M3_M4_N2)); - DeviceMem block_2_ctile_map_dev_buf(sizeof(Block2CTileMap)); - - a_grid_desc_k0_m_k1_dev_buf.ToDevice(&a_grid_desc_k0_m_k1); - b_grid_desc_k0_n_k1_dev_buf.ToDevice(&b_grid_desc_k0_n_k1); - c_m0_n0_m1_n1_m2_m3_m4_n2_grid_desc_dev_buf.ToDevice(&c_m0_n0_m1_n1_m2_m3_m4_n2_grid_desc); - block_2_ctile_map_dev_buf.ToDevice(&block_2_ctile_map); - - if(has_main_k0_block_loop) - { - const auto kernel = - kernel_gemm_xdlops_v2r3, - remove_reference_t, - remove_reference_t, - remove_reference_t, - true>; - - ave_time = launch_and_time_kernel( - kernel, - nrepeat, - dim3(grid_size), - dim3(BlockSize), - 0, - p_a_grid, - p_b_grid, - p_c_grid, - 
cast_pointer_to_constant_address_space(a_grid_desc_k0_m_k1_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space(b_grid_desc_k0_n_k1_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_m0_n0_m1_n1_m2_m3_m4_n2_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space(block_2_ctile_map_dev_buf.GetDeviceBuffer())); - } - else - { - const auto kernel = - kernel_gemm_xdlops_v2r3, - remove_reference_t, - remove_reference_t, - remove_reference_t, - false>; - - ave_time = launch_and_time_kernel( - kernel, - nrepeat, - dim3(grid_size), - dim3(BlockSize), - 0, - p_a_grid, - p_b_grid, - p_c_grid, - cast_pointer_to_constant_address_space(a_grid_desc_k0_m_k1_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space(b_grid_desc_k0_n_k1_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_m0_n0_m1_n1_m2_m3_m4_n2_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space(block_2_ctile_map_dev_buf.GetDeviceBuffer())); - } -} -#endif return ave_time; } #endif diff --git a/host/driver_offline/include/driver_gemm_xdlops_v2r4.hpp b/host/driver_offline/include/driver_gemm_xdlops_v2r4.hpp index 30ecb02de1..f6525e7356 100644 --- a/host/driver_offline/include/driver_gemm_xdlops_v2r4.hpp +++ b/host/driver_offline/include/driver_gemm_xdlops_v2r4.hpp @@ -161,7 +161,6 @@ __host__ float driver_gemm_xdlops_v2r4(const FloatAB* p_a_grid, const bool has_main_k0_block_loop = GridwiseGemm::CalculateHasMainK0BlockLoop(K0); float ave_time = 0; -#if CK_EXPERIMENTAL_PASS_TENSOR_DESCRIPTOR_BY_VALUE if(has_main_k0_block_loop) { const auto kernel = kernel_gemm_xdlops_v2r4, - remove_reference_t, - remove_reference_t, - remove_reference_t, - true>; - ave_time = launch_and_time_kernel( - kernel, - nrepeat, - dim3(grid_size), - dim3(BlockSize), - 0, - p_a_grid, - p_b_grid, - p_c_grid, - cast_pointer_to_constant_address_space(a_b_k0_m_k1_grid_desc_dev_buf.GetDeviceBuffer()), - 
cast_pointer_to_constant_address_space(b_b_k0_n_k1_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_m0_n0_m1_n1_m2_m3_m4_n2_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_block_cluster_adaptor_dev_buf.GetDeviceBuffer())); - } - else - { - const auto kernel = kernel_gemm_xdlops_v2r4, - remove_reference_t, - remove_reference_t, - remove_reference_t, - false>; - ave_time = launch_and_time_kernel( - kernel, - nrepeat, - dim3(grid_size), - dim3(BlockSize), - 0, - p_a_grid, - p_b_grid, - p_c_grid, - cast_pointer_to_constant_address_space(a_b_k0_m_k1_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space(b_b_k0_n_k1_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_m0_n0_m1_n1_m2_m3_m4_n2_grid_desc_dev_buf.GetDeviceBuffer()), - cast_pointer_to_constant_address_space( - c_block_cluster_adaptor_dev_buf.GetDeviceBuffer())); - } -#endif return ave_time; } #endif diff --git a/host/host_tensor/include/host_conv.hpp b/host/host_tensor/include/host_conv.hpp index 352986ce94..9285d0afd8 100644 --- a/host/host_tensor/include/host_conv.hpp +++ b/host/host_tensor/include/host_conv.hpp @@ -48,3 +48,102 @@ void host_conv_nchw_kcyx_nkhw(const Tensor& in, out.mDesc.GetLengths()[2], out.mDesc.GetLengths()[3])(std::thread::hardware_concurrency()); } + +template +void host_conv3d_ndhwc_kzyxc_ndhwk(const Tensor& in, + const Tensor& wei, + Tensor& out, + const ConvStrides& conv_strides, + const ConvDilations& conv_dilations, + const InLeftPads& in_left_pads, + const InRightPads&) +{ + using namespace ck; + + constexpr auto I0 = Number<0>{}; + constexpr auto I1 = Number<1>{}; + constexpr auto I2 = Number<2>{}; + const auto Di = in.mDesc.GetLengths()[1]; + const auto Hi = in.mDesc.GetLengths()[2]; + const auto Wi = in.mDesc.GetLengths()[3]; + const auto Z = wei.mDesc.GetLengths()[1]; + const auto Y = wei.mDesc.GetLengths()[2]; + const auto X = 
wei.mDesc.GetLengths()[3]; + const auto C = wei.mDesc.GetLengths()[4]; + + auto f_ndhwc = [&](auto n, auto do__, auto ho_, auto wo_, auto k) { + // do__ must be converted to signed integer, otherwise zmin might be wrong in cases + // negative values. + const int do_ = static_cast(do__); + const int ho = static_cast(ho_); + const int wo = static_cast(wo_); + const int zmin = + std::max(0, + (in_left_pads[I0] - do_ * conv_strides[I0] + conv_dilations[I0] - 1) / + conv_dilations[I0]); + const int ymin = + std::max(0, + (in_left_pads[I1] - ho * conv_strides[I1] + conv_dilations[I1] - 1) / + conv_dilations[I1]); + const int xmin = + std::max(0, + (in_left_pads[I2] - wo * conv_strides[I2] + conv_dilations[I2] - 1) / + conv_dilations[I2]); + const int zmax = + std::min(Z, (in_left_pads[I0] - do_ * conv_strides[I0] + Di) / conv_dilations[I0]); + const int ymax = + std::min(Y, (in_left_pads[I1] - ho * conv_strides[I1] + Hi) / conv_dilations[I1]); + const int xmax = + std::min(X, (in_left_pads[I2] - wo * conv_strides[I2] + Wi) / conv_dilations[I2]); + const int di_min = do_ * conv_strides[I0] + zmin * conv_dilations[I0] - in_left_pads[I0]; + const int hi_min = ho * conv_strides[I1] + ymin * conv_dilations[I1] - in_left_pads[I1]; + const int wi_min = wo * conv_strides[I2] + xmin * conv_dilations[I2] - in_left_pads[I2]; + + double v = 0; + + const TIn* in_n = in.mData.data() + n * Di * Hi * Wi * C; + const TWei* wei_k = wei.mData.data() + k * Z * Y * X * C; + + int di = di_min; + for(int z = zmin; z < zmax; ++z, di += conv_dilations[I0]) + { + const TIn* in_n_di = in_n + di * Hi * Wi * C; + const TWei* wei_k_z = wei_k + z * Y * X * C; + int hi = hi_min; + + for(int y = ymin; y < ymax; ++y, hi += conv_dilations[I1]) + { + const TIn* in_n_di_hi = in_n_di + hi * Wi * C; + const TWei* wei_k_z_y = wei_k_z + y * X * C; + int wi = wi_min; + + for(int x = xmin; x < xmax; ++x, wi += conv_dilations[I2]) + { + const TIn* in_n_di_hi_wi = in_n_di_hi + wi * C; + const TWei* wei_k_z_y_x = 
wei_k_z_y + x * C; + + for(int c = 0; c < C; ++c) + { + v += static_cast(in_n_di_hi_wi[c]) * + static_cast(wei_k_z_y_x[c]); + } + } + } + } + + out(n, do_, ho, wo, k) = v; + }; + + make_ParallelTensorFunctor(f_ndhwc, + out.mDesc.GetLengths()[0], + out.mDesc.GetLengths()[1], + out.mDesc.GetLengths()[2], + out.mDesc.GetLengths()[3], + out.mDesc.GetLengths()[4])(std::thread::hardware_concurrency() - 4); +} diff --git a/host/host_tensor/include/host_tensor_generator.hpp b/host/host_tensor/include/host_tensor_generator.hpp index 0b979069a6..87ce63331f 100644 --- a/host/host_tensor/include/host_tensor_generator.hpp +++ b/host/host_tensor/include/host_tensor_generator.hpp @@ -144,7 +144,7 @@ struct GeneratorTensor_Checkboard template float operator()(Ts... Xs) const { - std::array dims = {{static_cast(Xs)...}}; + std::array dims = {static_cast(Xs)...}; return std::accumulate(dims.begin(), dims.end(), true, diff --git a/test/conv2d_fwd/main.cpp b/test/conv2d_fwd/main.cpp index 8090186227..115f71d18d 100644 --- a/test/conv2d_fwd/main.cpp +++ b/test/conv2d_fwd/main.cpp @@ -130,13 +130,13 @@ int main(int argc, char* argv[]) const ck::index_t Ho = (Hi + in_left_pad_h + in_right_pad_h - YEff) / conv_stride_h + 1; const ck::index_t Wo = (Wi + in_left_pad_w + in_right_pad_w - XEff) / conv_stride_w + 1; - const std::vector input_spatial_lengths{{Hi, Wi}}; - const std::vector filter_spatial_lengths{{Y, X}}; - const std::vector output_spatial_lengths{{Ho, Wo}}; - const std::vector conv_filter_strides{{conv_stride_h, conv_stride_w}}; - const std::vector conv_filter_dilations{{conv_dilation_h, conv_dilation_w}}; - const std::vector input_left_pads{{in_left_pad_h, in_left_pad_w}}; - const std::vector input_right_pads{{in_right_pad_h, in_right_pad_w}}; + const std::vector input_spatial_lengths{Hi, Wi}; + const std::vector filter_spatial_lengths{Y, X}; + const std::vector output_spatial_lengths{Ho, Wo}; + const std::vector conv_filter_strides{conv_stride_h, conv_stride_w}; + const 
std::vector conv_filter_dilations{conv_dilation_h, conv_dilation_w}; + const std::vector input_left_pads{in_left_pad_h, in_left_pad_w}; + const std::vector input_right_pads{in_right_pad_h, in_right_pad_w}; auto f_host_tensor_descriptor = [](std::size_t N_, std::size_t C_, std::size_t H, std::size_t W) { diff --git a/test/magic_number_division/main.cpp b/test/magic_number_division/main.cpp index 7533feaa71..2e57820a36 100644 --- a/test/magic_number_division/main.cpp +++ b/test/magic_number_division/main.cpp @@ -41,6 +41,19 @@ gpu_naive_division(int32_t divisor, const int32_t* p_dividend, int32_t* p_result } } +__host__ void cpu_magic_number_division(uint32_t magic_multiplier, + uint32_t magic_shift, + const int32_t* p_dividend, + int32_t* p_result, + uint64_t num) +{ + for(uint64_t data_id = 0; data_id < num; ++data_id) + { + p_result[data_id] = + ck::MagicDivision::DoMagicDivision(p_dividend[data_id], magic_multiplier, magic_shift); + } +} + template T check_error(const std::vector& ref, const std::vector& result) { @@ -90,6 +103,7 @@ int main(int, char*[]) std::vector naive_result_host(num_dividend); std::vector magic_result_host(num_dividend); + std::vector magic_result_host2(num_dividend); dividends_dev_buf.ToDevice(dividends_host.data()); @@ -128,6 +142,20 @@ int main(int, char*[]) pass = false; continue; } + + cpu_magic_number_division(magic_multiplier, + magic_shift, + dividends_host.data(), + magic_result_host2.data(), + num_dividend); + + max_diff = check_error(naive_result_host, magic_result_host2); + + if(max_diff != 0) + { + pass = false; + continue; + } } if(pass)