diff --git a/include/ck_tile/core/arch/amd_buffer_addressing_builtins.hpp b/include/ck_tile/core/arch/amd_buffer_addressing_builtins.hpp
index 5c7ffefc6a..4e0a86119a 100644
--- a/include/ck_tile/core/arch/amd_buffer_addressing_builtins.hpp
+++ b/include/ck_tile/core/arch/amd_buffer_addressing_builtins.hpp
@@ -2570,6 +2570,60 @@ CK_TILE_DEVICE void amd_buffer_atomic_max(const thread_buffer<T, N>& src_thread_
 #endif
 }
 
+// amd_wave_read_first_lane broadcasts a value from the first active lane of the wave into
+// SGPRs; the class-type overload below copies the object one 4-byte SGPR chunk at a time.
+__device__ inline uint32_t amd_wave_read_first_lane(uint16_t v)
+{
+    return __builtin_amdgcn_readfirstlane(static_cast<uint32_t>(v));
+}
+
+__device__ inline uint32_t amd_wave_read_first_lane(uint8_t v)
+{
+    return __builtin_amdgcn_readfirstlane(static_cast<uint32_t>(v));
+}
+
+__device__ inline uint32_t amd_wave_read_first_lane(uint32_t value)
+{
+    return __builtin_amdgcn_readfirstlane(value);
+}
+
+__device__ inline int32_t amd_wave_read_first_lane(int32_t value)
+{
+    return __builtin_amdgcn_readfirstlane(value);
+}
+
+template <typename Object, std::enable_if_t<std::is_class_v<Object>, int> = 0>
+__device__ inline auto amd_wave_read_first_lane(const Object& obj)
+{
+    constexpr size_t ObjectSize = sizeof(Object);
+    constexpr size_t SGPR_size  = 4;
+    constexpr size_t NumFull    = ObjectSize / SGPR_size;
+    constexpr size_t Tail       = ObjectSize % SGPR_size;
+
+    const unsigned char* src = reinterpret_cast<const unsigned char*>(&obj);
+    alignas(Object) unsigned char dst[ObjectSize];
+
+    static_for<0, NumFull, 1>{}([&](auto Ic) {
+        constexpr size_t offset = Ic * SGPR_size;
+        uint32_t read_src;
+        __builtin_memcpy(&read_src, src + offset, SGPR_size);
+        read_src = __builtin_amdgcn_readfirstlane(read_src);
+        __builtin_memcpy(dst + offset, &read_src, SGPR_size);
+    });
+
+    if constexpr(Tail != 0)
+    {
+        constexpr size_t offset = NumFull * SGPR_size;
+        uint32_t tail_loc = 0;
+        __builtin_memcpy(&tail_loc, src + offset, Tail);
+        tail_loc = __builtin_amdgcn_readfirstlane(tail_loc);
+        __builtin_memcpy(dst + offset, &tail_loc, Tail);
+    }
+    Object out;
+    __builtin_memcpy(&out, dst, ObjectSize);
+    return out;
+}
+
 template <typename T>
 CK_TILE_DEVICE void amd_direct_load_global_to_lds(const T* global_base_ptr,
                                                   const index_t global_offset,
diff --git a/include/ck_tile/core/tensor/load_tile.hpp b/include/ck_tile/core/tensor/load_tile.hpp
index a3620453b4..2e9ab0f5c6 100644
--- a/include/ck_tile/core/tensor/load_tile.hpp
+++ b/include/ck_tile/core/tensor/load_tile.hpp
@@ -158,7 +158,4 @@ CK_TILE_DEVICE auto load_tile_raw(T& /*null_tile*/, const null_tile_window
-template <typename T>
-concept IsLoadableTile = requires { load_tile(std::declval<T>()); };
-
 } // namespace ck_tile
diff --git a/include/ck_tile/ops/epilogue/cshuffle_epilogue.hpp b/include/ck_tile/ops/epilogue/cshuffle_epilogue.hpp
index 6c815d804d..585a5f5b42 100644
--- a/include/ck_tile/ops/epilogue/cshuffle_epilogue.hpp
+++ b/include/ck_tile/ops/epilogue/cshuffle_epilogue.hpp
@@ -481,13 +481,10 @@ struct CShuffleEpilogue
         auto sm_tile = make_static_distributed_tensor(dram_tile_distribution);
         auto sn_tile = make_static_distributed_tensor(dram_tile_distribution);
 
-        // Build windows only if scales are provided
+        // Build windows only if non-scalar scales are provided
         auto scale_m_window = [&]() {
             if constexpr(has_scales && !has_scalar_scales)
             {
-                static_assert(
-                    IsLoadableTile<ScaleM>,
-                    "ScaleM must be a loadable tile");
                 return make_tile_window(scale_m, dram_tile_distribution);
             }
             else
@@ -498,9 +495,6 @@ struct CShuffleEpilogue
         auto scale_n_window = [&]() {
             if constexpr(has_scales && !has_scalar_scales)
             {
-                static_assert(
-                    IsLoadableTile<ScaleN>,
-                    "ScaleN must be a loadable tile");
                 return make_tile_window(scale_n, dram_tile_distribution);
             }
             else
@@ -515,8 +509,8 @@ struct CShuffleEpilogue
             merge_sequences(sequence{}, c_warp_y_index_zeros),
             merge_sequences(sequence<1, NRepeat>{}, c_warp_y_lengths));
 
-        // If scales provided, load them with identical distribution
-        if constexpr(has_scales && IsLoadableTile<ScaleM> && IsLoadableTile<ScaleN>)
+        // If non-scalar scales provided, load them with identical distribution
+        if constexpr(has_scales && !has_scalar_scales)
         {
             sm_tile = load_tile(scale_m_window); // row scales in permuted layout
             sn_tile = load_tile(scale_n_window); // col scales in permuted layout
@@ -535,7 +529,7 @@ struct CShuffleEpilogue
            {
                v = static_cast(v * scale_m * scale_n);
            }
-           else if constexpr(has_scales)
+           else if constexpr(has_scales && !has_scalar_scales)
            {
                // same linear index mapping on the permuted distribution
                const auto s_m = static_cast(sm_tile.get_thread_buffer()[out_idx]);
@@ -636,9 +630,6 @@ struct CShuffleEpilogue
            }
            else if constexpr(has_scales)
            {
-               static_assert(
-                   IsLoadableTile<ScaleM>,
-                   "ScaleM must be a loadable tile");
                return make_tile_window(scale_m, lds_tile.get_tile_distribution());
            }
            else
@@ -653,9 +644,6 @@ struct CShuffleEpilogue
            }
            else if constexpr(has_scales)
            {
-               static_assert(
-                   IsLoadableTile<ScaleN>,
-                   "ScaleN must be a loadable tile");
                return make_tile_window(scale_n, lds_tile.get_tile_distribution());
            }
            else