Mx fp6 flatmm (#3601)

* add fp6 data-type and support sync/async dwordx3 load/store

* clang-format

* pre-commit

* 1st commit

* default mnk pass ut

* fix a distribution

* fix

* fix bdram distr

* update

* pass ut

* improve perf

* update

* clean code

* resolve copilot comment

* resolve comment

* clang-format

---------

Co-authored-by: ZheWang <zhewan@amd.com>
This commit is contained in:
ZheWang
2026-02-02 16:04:40 +08:00
committed by GitHub
parent 1ae83137eb
commit e6bcd192d4
21 changed files with 761 additions and 136 deletions

View File

@@ -20,6 +20,25 @@ struct MemoryCopyParam
ck_tile::index_t warp_id;
};
// Minimal compile-time list of types, used to pick per-config test shapes.
template <typename... Ts>
struct type_list
{
};

// Metafunction: type_at<I, type_list<Ts...>>::type is the I-th type (0-based).
template <std::size_t Index, typename List>
struct type_at;

// Recursive step: peel one head type off the list until Index reaches zero.
template <std::size_t Index, typename Head, typename... Tail>
struct type_at<Index, type_list<Head, Tail...>>
{
    using type = typename type_at<Index - 1, type_list<Tail...>>::type;
};

// Base case: the head of the list is the element at index 0.
template <typename Head, typename... Tail>
struct type_at<0, type_list<Head, Tail...>>
{
    using type = Head;
};
template <typename DataType, bool AsyncCopy = true>
class TestCkTileMemoryCopy : public ::testing::TestWithParam<std::tuple<int, int, int>>
{
@@ -33,48 +52,47 @@ class TestCkTileMemoryCopy : public ::testing::TestWithParam<std::tuple<int, int
ck_tile::index_t n = memcpy_params.n;
ck_tile::index_t warp_id = memcpy_params.warp_id;
constexpr auto dword_bytes = 4;
if(n % (dword_bytes / sizeof(DataType)) != 0)
{
std::cerr << "n size should be multiple of dword_bytes" << std::endl;
}
constexpr auto dword_bytes = 4;
const ck_tile::index_t CpyCfg = std::is_same_v<DataType, ck_tile::pk_fp6x16_t> ? 1 : 0;
ck_tile::HostTensor<XDataType> x_host({m, n});
ck_tile::HostTensor<YDataType> y_host_dev({m, n});
ck_tile::HostTensor<int8_t> host_init_buf({x_host.get_element_space_size_in_bytes()});
std::cout << "input: " << x_host.mDesc << std::endl;
std::cout << "output: " << y_host_dev.mDesc << std::endl;
ck_tile::index_t value = 1;
for(int i = 0; i < m; i++)
{
value = 1;
for(int j = 0; j < n; j++)
{
value = (value + 1) % 127;
x_host(i, j) = static_cast<DataType>(value);
}
}
for(size_t i = 0; i < x_host.get_element_space_size_in_bytes(); i++)
host_init_buf.mData[i] = i % 64;
memcpy(x_host.mData.data(),
host_init_buf.mData.data(),
x_host.get_element_space_size_in_bytes());
ck_tile::DeviceMem x_buf(x_host.get_element_space_size_in_bytes());
ck_tile::DeviceMem y_buf(y_host_dev.get_element_space_size_in_bytes());
x_buf.ToDevice(x_host.data());
using BlockWaves = ck_tile::sequence<2, 1>;
using BlockTile = ck_tile::sequence<64, 8>;
using WaveTile = ck_tile::sequence<64, 8>;
using Vector = ck_tile::sequence<1, dword_bytes / sizeof(DataType)>;
using BlockTileList = type_list<ck_tile::sequence<64, 8>, ck_tile::sequence<16, 96>>;
using VectorList = type_list<ck_tile::sequence<1, dword_bytes / sizeof(DataType)>,
ck_tile::sequence<1, 24>>;
using BlockWaves = ck_tile::sequence<2, 1>;
using BlockTile = type_at<CpyCfg, BlockTileList>::type;
using WaveTile = type_at<CpyCfg, BlockTileList>::type;
using Vector = type_at<CpyCfg, VectorList>::type;
ck_tile::index_t kGridSize =
ck_tile::integer_divide_ceil(m, BlockTile::at(ck_tile::number<0>{}));
using Shape = ck_tile::TileCopyShape<BlockWaves, BlockTile, WaveTile, Vector>;
using Problem = ck_tile::TileCopyProblem<XDataType, Shape, AsyncCopy>;
using Problem = ck_tile::TileCopyProblem<DataType, Shape, AsyncCopy, CpyCfg>;
using Kernel = ck_tile::TileCopy<Problem>;
constexpr ck_tile::index_t kBlockSize = 128;
constexpr ck_tile::index_t kBlockPerCu = 1;
// when copying an fp6x16 buffer, treat it as an int8 buffer and recompute the n-dim size.
ck_tile::index_t cpy_n =
CpyCfg == 1 ? n * sizeof(DataType) /
(sizeof(int8_t) * ck_tile::numeric_traits<DataType>::PackedSize)
: n;
auto ms = launch_kernel(
ck_tile::stream_config{nullptr, true},
@@ -85,21 +103,28 @@ class TestCkTileMemoryCopy : public ::testing::TestWithParam<std::tuple<int, int
static_cast<XDataType*>(x_buf.GetDeviceBuffer()),
static_cast<YDataType*>(y_buf.GetDeviceBuffer()),
m,
n,
cpy_n,
warp_id));
auto bytes = 2 * m * n * sizeof(DataType);
auto bytes = 2 * m * n * sizeof(DataType) / ck_tile::numeric_traits<DataType>::PackedSize;
std::cout << "elapsed: " << ms << " (ms)" << std::endl;
std::cout << (bytes * 1e-6 / ms) << " (GB/s)" << std::endl;
// reference
y_buf.FromDevice(y_host_dev.mData.data());
bool pass = ck_tile::check_err(y_host_dev, x_host);
EXPECT_TRUE(pass);
}
};
// Test fixture: packed-fp6x16 element type with the async (LDS-staged) copy path.
class TestCkTileMemoryCopyF6x16Async : public TestCkTileMemoryCopy<ck_tile::pk_fp6x16_t, true>
{
};
// Test fixture: packed-fp6x16 element type with the synchronous copy path (AsyncCopy = false).
class TestCkTileMemoryCopyF6x16 : public TestCkTileMemoryCopy<ck_tile::pk_fp6x16_t, false>
{
};
// Test fixture: half-precision element type; AsyncCopy uses the template's default (true).
class TestCkTileMemoryCopyHalfAsync : public TestCkTileMemoryCopy<ck_tile::half_t>
{
};
@@ -116,6 +141,18 @@ class TestCkTileMemoryCopyFP8Async : public TestCkTileMemoryCopy<ck_tile::fp8_t>
{
};
// Verifies the synchronous fp6x16 tile copy for each (M, N, warp_id) parameter tuple.
TEST_P(TestCkTileMemoryCopyF6x16, TestCorrectness)
{
    auto [M, N, warp_id] = GetParam();
    this->Run({M, N, warp_id});
}
// Verifies the async fp6x16 tile copy for each (M, N, warp_id) parameter tuple.
TEST_P(TestCkTileMemoryCopyF6x16Async, TestCorrectness)
{
    auto [M, N, warp_id] = GetParam();
    this->Run({M, N, warp_id});
}
TEST_P(TestCkTileMemoryCopyHalfAsync, TestCorrectness)
{
auto [M, N, warp_id] = GetParam();
@@ -140,6 +177,20 @@ TEST_P(TestCkTileMemoryCopyFP8Async, TestCorrectness)
this->Run({M, N, warp_id});
}
// fp6x16 sync-copy parameter grid: (M, N, warp_id); both warp ids are exercised.
INSTANTIATE_TEST_SUITE_P(TestCkTileMemCopySuite,
                         TestCkTileMemoryCopyF6x16,
                         ::testing::Values(std::tuple{32, 128, 0},
                                           std::tuple{64, 256, 0},
                                           std::tuple{32, 128, 1},
                                           std::tuple{64, 256, 1}));
// fp6x16 async-copy parameter grid: same shapes as the sync suite.
INSTANTIATE_TEST_SUITE_P(TestCkTileMemCopySuite,
                         TestCkTileMemoryCopyF6x16Async,
                         ::testing::Values(std::tuple{32, 128, 0},
                                           std::tuple{64, 256, 0},
                                           std::tuple{32, 128, 1},
                                           std::tuple{64, 256, 1}));
INSTANTIATE_TEST_SUITE_P(TestCkTileMemCopySuite,
TestCkTileMemoryCopyHalfAsync,
::testing::Values(std::tuple{64, 8, 0},

View File

@@ -51,12 +51,15 @@ struct TileCopyShape
"Inconsistent wave group size!");
};
template <typename XDataType_, typename BlockShape_, bool AsyncCopy_>
template <typename XDataType_, typename BlockShape_, bool AsyncCopy_, int CpyCfg_>
struct TileCopyProblem
{
using XDataType = remove_cvref_t<XDataType_>;
using BlockShape = remove_cvref_t<BlockShape_>;
static constexpr bool AsyncCopy = AsyncCopy_;
// 0: copy 1, 2, 4 bytes data type
// 1: copy dwordx3 bytes data type
static constexpr int CpyCfg = CpyCfg_;
};
template <typename Problem_>
@@ -67,6 +70,7 @@ struct TileCopy
static constexpr index_t kBlockSize = Problem::BlockShape::BlockSize;
static constexpr bool AsyncCopy = Problem::AsyncCopy;
static constexpr int CpyCfg = Problem::CpyCfg;
template <typename Problem>
CK_TILE_DEVICE static constexpr auto MakeDRAMDistribution()
@@ -98,8 +102,40 @@ struct TileCopy
return make_static_tile_distribution(outer_encoding);
}
// Builds the static tile distribution for the dwordx3 copy path: each lane
// moves X2 = 12 contiguous bytes so one buffer load/store dwordx3 instruction
// transfers a full packed group. (The dead commented-out duplicate of this
// signature has been removed.)
template <typename Problem>
CK_TILE_DEVICE static constexpr auto MakeDwordx3DRAMDistribution()
{
    using S = typename Problem::BlockShape;

    constexpr index_t warp_size = get_warp_size();
    constexpr index_t X0 = S::ThreadPerWarp_N; // threads along the N dimension, fastest
                                               // changing with the given vector size.
    constexpr index_t X1 = S::Block_N;         // full N extent of the block tile.
    constexpr index_t X2 = 12;                 // l/w dwordx3 bytes per lane.
    constexpr index_t Y0 =
        S::WaveNum / S::WaveGroups; // number of active warps working in this thread block.
    constexpr index_t Y2 =
        warp_size / X0; // number of threads in a warp needed along the M dimension.
    constexpr index_t Y1 =
        S::Warp_M /
        Y2; // iterations each warp needs to perform to cover the entire tile window.

    // M splits into (Y0, Y1, Y2); N splits into (X1/(X0*X2), X0, X2).
    // (e.g. Y2 == 16, X0 == 4 in the fp6x16 configuration — verify for others.)
    constexpr auto outer_encoding = tile_distribution_encoding<
        sequence<S::WaveGroups>,
        tuple<sequence<Y0, Y1, Y2>, sequence<X1 / (X0 * X2), X0, X2>>,
        tuple<sequence<0, 1>, sequence<1, 2>>,
        tuple<sequence<0, 0>, sequence<2, 1>>,
        sequence<1, 2, 2>,
        sequence<1, 0, 2>>{};
    return make_static_tile_distribution(outer_encoding);
}
CK_TILE_DEVICE void
operator()(const XDataType* p_x, XDataType* p_y, index_t M, index_t N, index_t warp_id) const
run_normal_cpy(XDataType* p_x, XDataType* p_y, index_t M, index_t N, index_t warp_id) const
{
using S = typename Problem::BlockShape;
@@ -170,6 +206,124 @@ struct TileCopy
move_tile_window(y_block_window, {0, S::Block_N});
}
}
};
// Copies an M x N tile (viewed as raw bytes; N is the byte-recomputed extent
// supplied by the caller) from p_x to p_y with 12-byte dwordx3 vectors,
// staging through LDS. Only the warp selected by `warp_id` does the copy.
CK_TILE_DEVICE void
run_dwordx3_cpy(XDataType* p_x, XDataType* p_y, index_t M, index_t N, index_t warp_id) const
{
    using S = typename Problem::BlockShape;

    constexpr index_t X0 = S::ThreadPerWarp_N; // lanes along N.
    constexpr index_t X1 = S::Block_N;         // block-tile N extent (bytes).
    constexpr index_t X2 = 12;                 // l/w dwordx3 bytes per lane.

    // LDS buffer. Async dwordx3 writes 3 dwords (12 bytes) then skips 1 dword
    // (4 bytes) per lane, so the per-lane LDS stride is 16 in the async path.
    constexpr int dim1_stride = AsyncCopy ? 16 : 12;
    constexpr int repeat_num  = X1 / (X0 * X2); // N iterations per lane to cover the tile.
    __shared__ int8_t x_lds[repeat_num * S::Block_M * X0 * dim1_stride];

    constexpr auto block_dims  = make_tuple(number<S::Block_M>{}, number<S::Block_N>{});
    constexpr auto block_dims_ = make_tuple(number<repeat_num>{},
                                            number<S::Block_M>{},
                                            number<X0>{},
                                            number<S::Block_N / repeat_num / X0>{});
    constexpr auto block_strides = make_tuple(number<S::Block_M * dim1_stride * X0>{},
                                              number<X0 * dim1_stride>{},
                                              number<dim1_stride>{},
                                              number<1>{});
    // Guaranteed vector length is X2 (was a hardcoded 12).
    const auto x_lds_desc_ =
        make_naive_tensor_descriptor(block_dims_, block_strides, number<X2>{}, number<1>{});
    // Fold (repeat, X0, N-per-thread) back into one N dimension so the LDS
    // window presents the same 2-D (M, N) shape as the global windows.
    // NOTE: the merged lengths must match the lower dims (repeat_num, X0,
    // Block_N/repeat_num/X0); the previous hardcoded number<2>{} only worked
    // because repeat_num == 2 in the tested configuration.
    const auto x_lds_desc = transform_tensor_descriptor(
        x_lds_desc_,
        make_tuple(
            make_pass_through_transform(number<S::Block_M>{}),
            make_merge_transform_v3_division_mod(make_tuple(
                number<repeat_num>{}, number<X0>{}, number<S::Block_N / repeat_num / X0>{}))),
        make_tuple(sequence<1>{}, sequence<0, 2, 3>{}),
        make_tuple(sequence<0>{}, sequence<1>{}));
    auto x_lds_view =
        make_tensor_view<address_space_enum::lds>(reinterpret_cast<int8_t*>(x_lds), x_lds_desc);
    auto x_block_lds_write_window = make_tile_window(x_lds_view, block_dims, {0, 0});
    auto x_block_lds_read_window =
        make_tile_window(x_lds_view, block_dims, {0, 0}, MakeDwordx3DRAMDistribution<Problem>());

    const index_t iM = __builtin_amdgcn_readfirstlane(get_block_id() * S::Block_M);

    // Input tensor (global memory), reinterpreted as bytes.
    const auto x_m_n =
        make_naive_tensor_view<address_space_enum::global>(reinterpret_cast<int8_t*>(p_x),
                                                           make_tuple(M, N),
                                                           make_tuple(N, 1),
                                                           number<S::Vector_N>{},
                                                           number<1>{});
    auto x_block_window =
        make_tile_window(x_m_n, block_dims, {iM, 0}, MakeDwordx3DRAMDistribution<Problem>());

    // Output tensor (global memory), reinterpreted as bytes.
    const auto y_m =
        make_naive_tensor_view<address_space_enum::global>(reinterpret_cast<int8_t*>(p_y),
                                                           make_tuple(M, N),
                                                           make_tuple(N, 1),
                                                           number<S::Vector_N>{},
                                                           number<1>{});
    auto y_block_window = make_tile_window(y_m, block_dims, {iM, 0});

    const index_t num_n_tile_iteration =
        __builtin_amdgcn_readfirstlane(integer_divide_ceil(N, S::Block_N));
    const index_t my_id = __builtin_amdgcn_readfirstlane(get_warp_id());

    // Zero outstanding async loads allowed before reading LDS back.
    constexpr index_t async_copy_fence_cnt = 0;

    for(int iN = __builtin_amdgcn_readfirstlane(0); iN < num_n_tile_iteration; ++iN)
    {
        if(my_id == warp_id)
        {
            if constexpr(AsyncCopy)
            {
                async_load_tile(x_block_lds_write_window, x_block_window);
                // We don't have prefetch here, wait the data back immediately.
                // Wait all asyncload insts complete.
                // Wait all waves synced
                s_waitcnt_barrier<async_copy_fence_cnt>();
                auto lds_tile = load_tile(x_block_lds_read_window);
                // store from registers to DRAM
                store_tile(y_block_window, lds_tile);
            }
            else
            {
                // load from DRAM to registers
                auto dram_tile = load_tile(x_block_window);
                // store in lds
                store_tile(x_block_lds_write_window, dram_tile);
                // Wait all lds write insts complete
                // Wait all waves synced
                block_sync_lds();
                // read from lds to registers
                auto lds_tile = load_tile(x_block_lds_read_window);
                // store from registers to DRAM
                store_tile(y_block_window, lds_tile);
            }
        }
        move_tile_window(x_block_window, {0, S::Block_N});
        move_tile_window(y_block_window, {0, S::Block_N});
    }
}
// Dispatches to the copy implementation selected by Problem::CpyCfg.
// CpyCfg == 0: plain 1/2/4-byte vectorized copy; CpyCfg == 1: dwordx3 copy.
CK_TILE_DEVICE void
operator()(XDataType* p_x, XDataType* p_y, index_t M, index_t N, index_t warp_id) const
{
    if constexpr(CpyCfg == 1)
    {
        run_dwordx3_cpy(p_x, p_y, M, N, warp_id);
    }
    else if constexpr(CpyCfg == 0)
    {
        run_normal_cpy(p_x, p_y, M, N, warp_id);
    }
    else
    {
        // Must stay dependent on CpyCfg: a plain static_assert(false) in a
        // discarded `if constexpr` branch is ill-formed before C++23 (P2593).
        static_assert(CpyCfg == 0 || CpyCfg == 1, "unsupported copy config type.");
    }
}
};
} // namespace ck_tile