Mirror of https://github.com/ROCm/composable_kernel.git (synced 2026-05-14 18:17:44 +00:00)
* Squashed commit of the following:
commit 3e1a851dad834776efbe4fe365ac82c4ed312010
Author: Ding, Yi <yi.ding@amd.com>
Date: Thu Oct 23 06:10:54 2025 +0000
Fix & clean after rebase
commit 1edf485092f44411da9a1796a4a6b72d5cdb67c6
Author: Ding, Yi <yi.ding@amd.com>
Date: Wed Oct 22 10:46:13 2025 +0000
Squashed commit of the following:
commit 5276b28a51dac7b5d2106fbae8e78de190ee0de1
Author: mtgu0705 <mtgu@amd.com>
Date: Mon Sep 22 02:04:27 2025 -0500
fix bandwidth calculation
commit d645bb20c6d879154c30ecd82bbff4d2a9206750
Author: mtgu0705 <mtgu@amd.com>
Date: Mon Sep 22 00:58:59 2025 -0500
updates
commit 0fa7e6b88aaf81a36034aa7607746de295de4263
Author: mtgu0705 <mtgu@amd.com>
Date: Fri Sep 19 00:39:46 2025 -0500
fix a bug, set the A DS_read preload size to 4 for MXFP4
commit 50cafa824e2267f2b2f0dfeeb93e69a673630c61
Author: mtgu0705 <mtgu@amd.com>
Date: Thu Sep 18 01:19:03 2025 -0500
fix a_wrap preload issue for large MPerBlock.
commit e6333bbbc6ef540e24f92095040085f1ed59041e
Author: mtgu0705 <mtgu@amd.com>
Date: Wed Sep 17 21:34:03 2025 -0500
optimized the VGPR repack issue for MXFP4
commit e99e4932c401b9f6d1893dd5044c2827d6b3f145
Author: Gino Lu <gino.lu@amd.com>
Date: Wed Sep 17 04:19:44 2025 -0500
fix time error
commit 4586ce6da7fba0514f2e01a8124c76b7d494e124
Author: mtgu0705 <mtgu@amd.com>
Date: Wed Sep 17 03:58:00 2025 -0500
updated, function passed.
commit c4f25e7579573db5681b9160f6bdb1349f3566f1
Author: mtgu0705 <mtgu@amd.com>
Date: Tue Sep 16 22:21:39 2025 -0500
fix, function partially passed
commit a51b56eb6b00b99a4e8d2802dbf5b5b5277b54d8
Author: mtgu0705 <mtgu@amd.com>
Date: Tue Sep 16 03:01:12 2025 -0500
fix, reference function passed, next check kernel function
commit 5b02643ebab18960e8f9ba66c6bd2f91774f9cae
Author: Gino Lu <gino.lu@amd.com>
Date: Tue Sep 16 02:29:01 2025 -0500
let pack/unpack return pk_fp4_t
commit 76d37c5d4b17530e95c6fced31bff66a35d54b8f
Author: mtgu0705 <mtgu@amd.com>
Date: Mon Sep 15 20:50:26 2025 -0500
fix
commit e5be3e162b9a20e5355bd556d2b27afb6d8bf085
Author: Gino Lu <gino.lu@amd.com>
Date: Mon Sep 15 05:51:06 2025 -0500
fix bug
commit 39a024efe4aa773df589712b1290803bb5ab5d1d
Author: mtgu0705 <mtgu@amd.com>
Date: Mon Sep 15 04:02:05 2025 -0500
fix core dump issue, function is not correct.
commit 16c49d268cfe065b5112b960b2d852b26552686a
Author: mtgu0705 <mtgu@amd.com>
Date: Mon Sep 15 03:03:02 2025 -0500
updates, build pass
commit fe7a961852dee6eff3be3cf1e0d0fabec5cd42ee
Author: mtgu0705 <mtgu@amd.com>
Date: Mon Sep 15 00:05:18 2025 -0500
updates
commit aaf9fe8022a72df59e04e4d5886dca3ba9c23400
Author: Gino Lu <gino.lu@amd.com>
Date: Sun Sep 14 23:40:28 2025 -0500
fix bug
commit a3da89290e1553b85fbf1171c07e93ac0f5584db
Author: Gino Lu <gino.lu@amd.com>
Date: Fri Sep 12 03:28:50 2025 -0500
fix interface
commit c5ff747e72d877461ba61dc19a0fe15527d3161e
Author: Gino Lu <gino.lu@amd.com>
Date: Fri Sep 12 02:53:50 2025 -0500
add interface in warp_gemm_impl
commit 0a48d369e601cc798589fc59e0784bdbfc0a22f9
Author: mtgu0705 <mtgu@amd.com>
Date: Wed Sep 10 05:03:08 2025 -0500
updates some fixes.
commit aaa2beca30ff5546d171a2028d1894fd4e131d4e
Author: mtgu0705 <mtgu@amd.com>
Date: Tue Sep 9 04:37:42 2025 -0500
fix after merge ginolu/add_wgmfma_dispatcher
commit bf87449b09cba690922b2f3f78ba39bf1b1e472e
Merge: 05ab58e3d 991d7fdbb
Author: mtgu0705 <mtgu@amd.com>
Date: Mon Sep 8 22:09:15 2025 -0500
Merge remote-tracking branch 'origin/ginolu/add_wgmfma_dispatcher' into mtgu/cktile_mxfp4_flatmm_dev
commit 05ab58e3de2b708aceda63d704089c0fa89437ae
Author: mtgu0705 <mtgu@amd.com>
Date: Mon Sep 8 21:42:47 2025 -0500
update mx flatmm tail pipeline
commit 991d7fdbb726d65091a91b5cc2800f798a6661fc
Merge: ad046084a 41ee8fe31
Author: Gino Lu <gino.lu@amd.com>
Date: Mon Sep 8 19:10:23 2025 -0500
Merge branch 'develop' into ginolu/add_wgmfma_dispatcher
commit ad046084a2f6e4ebf0cd8b47d0d72b74815061fa
Author: Gino Lu <gino.lu@amd.com>
Date: Mon Sep 8 19:09:55 2025 -0500
fix type error
commit 42e16b43a035364a42789d7ce45a1e6a7d1d2609
Author: mtgu0705 <mtgu@amd.com>
Date: Mon Sep 8 04:01:40 2025 -0500
update hotloop pipeline
commit c2f69745346545087c8ce24acaba2961bb93ef0b
Merge: adbeeb90b 91db4cec3
Author: Gino Lu <gino.lu@amd.com>
Date: Fri Sep 5 04:22:26 2025 -0500
Merge branch 'develop' into ginolu/add_wgmfma_dispatcher
commit adbeeb90be1533f8aeb8c1d5aea6470d45a455a0
Author: Gino Lu <gino.lu@amd.com>
Date: Fri Sep 5 04:21:26 2025 -0500
fix clang format
commit e2378ac393bb79ac80a8eef84677bffce86d9e0a
Author: mtgu0705 <mtgu@amd.com>
Date: Wed Sep 3 10:00:54 2025 -0500
some updates
commit bdc18a2269db49ff88e1ef1db30f83ea430d7544
Merge: 6c5cea2b7 b3886a6d8
Author: asleepzzz <hanwen.chang@amd.com>
Date: Wed Sep 3 13:22:03 2025 +0800
Merge branch 'develop' into ginolu/add_wgmfma_dispatcher
commit 6c5cea2b7a306f5d0ad346cb9baf6370ea2a73fe
Author: Gino Lu <gino.lu@amd.com>
Date: Mon Sep 1 02:11:02 2025 -0500
fix vec size error
commit 76d1dfa352087dfd5867c8909b73726d3a1e853e
Author: Gino Lu <gino.lu@amd.com>
Date: Mon Sep 1 01:23:39 2025 -0500
fix format error
commit a9061aaa1b4bfaa9db102c75b9d74863f39708a9
Author: mtgu0705 <mtgu@amd.com>
Date: Sat Aug 30 03:19:07 2025 -0500
update codes
commit 0caa184a271a8824ef40f87de456d0fa2500c8ad
Author: mtgu0705 <mtgu@amd.com>
Date: Fri Aug 29 11:27:33 2025 -0500
init ck_tile mxfp4 flatmm
commit 5d46a6635f04bd69b76f7eda1438862e271b987a
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Thu Aug 28 08:02:50 2025 +0000
Add bias for f16xf4 moe_flatmm
commit dd112dc302d17f541737671a3ac557d7c09ff969
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Wed Aug 27 13:39:47 2025 +0000
update case construction
commit b1aca68a073d82c7b3c7bb53286e5f415999edc1
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Tue Aug 26 12:32:29 2025 +0000
support swiglu activaion and use rcpf to accelerate silu
commit 49235bd42349a84fc2ebd7ad0b100cc2545bb80a
Author: Gino Lu <gino.lu@amd.com>
Date: Tue Aug 26 02:33:55 2025 -0500
first commit
commit c169e39d6381b932cf7098cc118db29df91da1cb
Author: root <root@smci355-ccs-aus-m02-25.cs-aus.dcgpu>
Date: Fri Aug 22 04:01:59 2025 -0500
add line to last
commit 318f9bf317306454941bbf394c1940023edcf0ac
Author: root <root@smci355-ccs-aus-m02-25.cs-aus.dcgpu>
Date: Fri Aug 22 03:20:46 2025 -0500
adjust A_LDS descriptor to avoid bankconflict
commit 9d066120ed068d6d102da25d619e170a28a04d18
Author: root <root@smci355-ccs-aus-m02-25.cs-aus.dcgpu>
Date: Thu Aug 21 09:46:52 2025 -0500
enable hotloop
commit 61a895e6b821798970afffd0e9432a21e2f04df8
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Thu Aug 21 09:12:21 2025 +0000
support atomic_pk_add_bf16 on gfx950
commit 9f14864e45f21d8c1bc70a94988fb86c2c0017d8
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Thu Aug 21 06:58:55 2025 +0000
use int64_t as expert stride to avoid overflow
commit e63af46b32e1139a1e59dee6f46b9971047c4026
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Wed Aug 20 13:53:32 2025 +0000
use v4i32 as the storage type for B to avoid repack operation
commit 6cf0224dd8a229bf2be726ca861c736c9b5f5415
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Wed Aug 20 06:40:03 2025 +0000
add pk_fp4_t and e8m0_t support for amd_buffer_load_impl
commit 67a591f2240b0b035029edad904627f98b3839fd
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Wed Aug 20 04:39:14 2025 +0000
optimize cvt_pkf4_to_f16 implementation
commit 51c7126e77e9b17af694eaa57040e487f9d443e8
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Tue Aug 19 14:56:46 2025 +0000
optimize A_LDS descriptor to avoid bankconflict
commit c113160f326353290a2878d7b8febf7daed91d71
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Mon Aug 18 18:43:37 2025 +0000
fix gate-up when GU_NRepeat > 1
commit a45ca0e9934ca4bb9114f65621d5c9582d937a45
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Mon Aug 18 17:28:11 2025 +0000
add fp16xf4 moe
commit dc8c8e484804f7bca10c8f0764540af3b5884e83
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Sun Aug 17 17:51:18 2025 +0000
rename example
commit b177c967141cfdc401d3f36bf17830fe99893600
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Fri Aug 15 06:20:46 2025 +0000
remove additional check when e8m0->float
commit d467f9688c3d35f391e15089135edb1ad1d38b05
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Thu Aug 14 09:34:12 2025 +0000
eliminate repeat dequant
commit 1b20674b26ab3ce6bd2f710dd729fd4cc0f79428
Merge: faa3c0278 7d02625e7
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Wed Aug 13 16:51:49 2025 +0000
Merge remote-tracking branch 'origin/moe_flatmm' into feat-mixed_input_flatmm
commit faa3c0278cf11b7105a4302dea3a4416520b2cc7
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Wed Aug 13 16:16:48 2025 +0000
update f16xMXF4
commit a2a2e1dab05501cc2136133236c01c08d51db4ea
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Wed Aug 13 10:48:53 2025 +0000
update scale-preshuffle for MXF4
commit eac9667feb899419dda1628164c092b969852660
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Mon Aug 11 11:24:34 2025 +0000
update
commit 7d02625e7678882af653f52c2a4ddaf64568a41c
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Mon Aug 11 08:38:23 2025 +0000
optimize gemm2 atomic_add pattern
commit d5f3c3e3ec72d0e6739467c4dc0b4e209f6d1192
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Mon Aug 11 07:59:47 2025 +0000
update scale for mxfp4
commit 15db198084614466bd4cfd4943fcb549cab2069a
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Mon Aug 11 07:56:14 2025 +0000
update case construction
commit 5dff349d82a5f70b6eea821d2622df51f90ef200
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Mon Aug 11 06:03:06 2025 +0000
update granularity control
commit d32cdc52144f65ec473f4ec8e45ea23968811184
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Mon Aug 11 03:42:46 2025 +0000
fix TileConfig
commit 26f38c5716304ee5f84e5c4f6f88144d9f3dddaf
Author: Gino Lu <gino.lu@amd.com>
Date: Thu Aug 7 21:37:28 2025 +0800
Add e8m0 scaled convert into CK_TILE (#2617)
* first commit
* remove redundent code
* modify according to comments.
* fix type_convert error with scaled_type_convert
commit 419041478745f65dfec18859e75a13d975089519
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Fri Aug 8 20:19:16 2025 +0000
add mixed_prec fp16xfp4
commit 92e2a8b0308b9b107df9d2fd63a961efce706402
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Thu Aug 7 09:22:04 2025 +0000
debug mixed_prec flatmm
commit dea3ce80496ebcb00512979f0c3bb897f25e11a5
Merge: fde443bc3 b4f45fe14
Author: lalala-sh <Jiaxing.Wen@amd.com>
Date: Wed Aug 6 16:49:47 2025 +0800
Merge pull request #2626 from ROCm/felix/flatmm_fix_splitk
fix split k
commit d480e8150358cc4ef8b05e25afe299141fad4fde
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Wed Aug 6 08:33:33 2025 +0000
add moe_flatmm
commit b4f45fe14d11569f34de40c8a205cd6760b61357
Author: coderfeli <coderfeli@163.com>
Date: Wed Aug 6 02:45:31 2025 +0000
fix split k
commit fde443bc38fe60e52195817ecb2c7b20d772eedb
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Mon Aug 4 07:16:36 2025 +0000
fix flatmm with scaling when WarpTileM == 32
commit 5a0667afa889a5af8c6b8509232eabd50cf5efef
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Fri Aug 1 11:01:23 2025 +0000
optimize scaling epilogue
commit 5c3502bbf71833c6f6f7d4a1cc4f4fd93811f522
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Fri Aug 1 07:28:38 2025 +0000
fix wrong config for fp8 scaling
commit eb2d0653cdb86603cb11539cbac466b6431b58b7
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Wed Jul 30 06:20:30 2025 +0000
prune debug message
commit 0c089cb56343a39e02a1ee38e9cabeb71ba35e92
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Wed Jul 30 04:52:08 2025 +0000
fix compile error
commit 61759ca30ce3787f70e228c3919b3e4d354016dd
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Tue Jul 29 15:42:58 2025 +0000
Add persistent option on flatmm for tuning
commit b36dc5dd55f15fc1ce8eb21637bdec862e56a883
Author: AMD-dteng <dteng@amd.com>
Date: Tue Jul 29 22:48:00 2025 +0800
update pipeline v1: add atomic IGLP schedule
commit f886f26994454fc2b4fc3433c86bf699767a2a7c
Author: lalala-sh <Jiaxing.Wen@amd.com>
Date: Thu Jul 24 09:09:27 2025 +0000
fix error log throwing
commit 4b4686ab144daa9061fbda17f3df4c17600c8e9a
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Mon Jul 28 08:24:51 2025 +0000
crz idea
commit 7099af44a81be41431ba70ae60827b60116d02d2
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Sun Jul 27 11:57:38 2025 +0000
Add permuteN optimzization when NRepeat % 2 == 0 on flatmm
commit b147524c92e69a267337c8e48b6e64bcb1483551
Author: sjfeng <j514681085@icloud.com>
Date: Sun Jul 27 17:24:08 2025 +0800
try to remove c_shuffle_lds
commit 2dd94f59d1a7740a5689e1713ed45588cd0d55dd
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Fri Jul 25 07:41:48 2025 +0000
fix loop-dim mismatch and improve c_shuffle alu parallelism
commit 4e93f0c5e27806adc070e4caa81661069295751c
Merge: 3f12ef5aa 0eb7455f1
Author: lalala-sh <Jiaxing.Wen@amd.com>
Date: Thu Jul 24 08:46:51 2025 +0000
merge flatmm -scale
commit 3f12ef5aa52ced1bff3bfb57b878358330e9e095
Author: lalala-sh <Jiaxing.Wen@amd.com>
Date: Thu Jul 24 16:19:58 2025 +0800
revert delete of inc file
commit 08c3a0d184d7581dc5be364f5b36f16fb4a8d6fa
Author: solin <bingzhou@amd.com>
Date: Thu Jul 24 04:38:16 2025 +0000
reorg flatmm code
commit 0eb7455f106604d5254ed16b0daeda68e2a148e3
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Wed Jul 23 19:12:31 2025 +0000
fix flatmm syntax error on gfx950
commit 695ff87e68fdcbe28452c1805cd4dbb643c45495
Author: Feng Shijie <Shijie.Feng@amd.com>
Date: Wed Jul 23 19:04:22 2025 +0000
support flatmm scaling
commit e3c29d9dea8758db96b998982ccc8bd1c4e8298d
Author: valarLip <340077269@qq.com>
Date: Wed Jul 23 08:44:12 2025 +0000
merge flatmm pipe v0 from dteng_flatmm_opt
commit 425c366fa4c30426ff36cade89b39fd8cb7b9732
Author: lalala-sh <Jiaxing.Wen@amd.com>
Date: Wed Jul 23 15:38:12 2025 +0800
build pass
commit 6b377a9481535696de40f175d7e2159263d21bdc
Author: lalala-sh <Jiaxing.Wen@amd.com>
Date: Wed Jul 23 07:20:26 2025 +0000
fix bug
commit b6dc58d1ea676fe480c0243ae098c875498f6d6a
Author: lalala-sh <Jiaxing.Wen@amd.com>
Date: Wed Jul 23 15:01:53 2025 +0800
sync
commit 904359f401866ee810484e6b8f5b46d79d9e25c8
Author: valarLip <340077269@qq.com>
Date: Tue Jul 22 08:09:35 2025 +0000
adaptive scheduler instead of Macro definition
commit f29916c17228c17de9923aab62e7d72d7a30f4e9
Author: lalala-sh <Jiaxing.Wen@amd.com>
Date: Thu Jul 17 08:40:35 2025 +0000
fix tail handler bug
commit e2c60a90929fec955d91db909d50db538d58363b
Author: lalala-sh <Jiaxing.Wen@amd.com>
Date: Wed Jul 16 10:12:19 2025 +0000
merge from dteng_flatmm_opt
---------
Co-authored-by: lalala-sh <Jiaxing.Wen@amd.com>
Co-authored-by: AMD-dteng <dteng@amd.com>
Co-authored-by: solin <bingzhou@amd.com>
Co-authored-by: sjfeng <j514681085@icloud.com>
Co-authored-by: valarLip <340077269@qq.com>
Co-authored-by: asleepzzz <hanwen.chang@amd.com>
Co-authored-by: Feng Shijie <Shijie.Feng@amd.com>
Co-authored-by: coderfeli <coderfeli@163.com>
Co-authored-by: Gino Lu <gino.lu@amd.com>
Co-authored-by: mtgu0705 <mtgu@amd.com>
* Fix crash on small M
* Apply suggestion from @Copilot
---------
Co-authored-by: lalala-sh <Jiaxing.Wen@amd.com>
Co-authored-by: AMD-dteng <dteng@amd.com>
Co-authored-by: solin <bingzhou@amd.com>
Co-authored-by: sjfeng <j514681085@icloud.com>
Co-authored-by: valarLip <340077269@qq.com>
Co-authored-by: asleepzzz <hanwen.chang@amd.com>
Co-authored-by: Feng Shijie <Shijie.Feng@amd.com>
Co-authored-by: coderfeli <coderfeli@163.com>
Co-authored-by: Gino Lu <gino.lu@amd.com>
Co-authored-by: mtgu0705 <mtgu@amd.com>
[ROCm/composable_kernel commit: e135dd518d]
507 lines · 21 KiB · C++
// SPDX-License-Identifier: MIT
// Copyright (c) 2024-2025, Advanced Micro Devices, Inc. All rights reserved.

#include <hip/hip_runtime.h>

#include <cstring>
#include <iostream>
#include <ostream>
#include <string>
#include <tuple>
#include <type_traits>

#include "ck_tile/host.hpp"
#include "mx_flatmm.hpp"

template <typename Layout>
static constexpr inline auto is_row_major(Layout layout_)
{
    return ck_tile::bool_constant<std::is_same_v<ck_tile::remove_cvref_t<decltype(layout_)>,
                                                 ck_tile::tensor_layout::gemm::RowMajor>>{};
}

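// is_row_major() turns a layout tag into a compile-time boolean, e.g.
// is_row_major(ck_tile::tensor_layout::gemm::RowMajor{}) yields ck_tile::bool_constant<true>;
// it is used below when building the host tensor descriptors for the cache-flush buffers.
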
template <typename FlatmmConfig,
          typename ADataType,
          typename BDataType,
          typename DsDatatype,
          typename AccDataType,
          typename CDataType,
          typename ALayout,
          typename BLayout,
          typename DsLayout,
          typename ELayout,
          typename ScaleM,
          typename ScaleN,
          bool persistent,
          typename CDEElementWise>
float mx_flatmm_calc(const ck_tile::ScaleFlatmmHostArgs<ScaleM, ScaleN>& args,
                     const ck_tile::stream_config& s)
{
    using CodegenFlatmmShape = ck_tile::TileGemmShape<
        ck_tile::sequence<FlatmmConfig::M_Tile, FlatmmConfig::N_Tile, FlatmmConfig::K_Tile>,
        ck_tile::sequence<FlatmmConfig::M_Warp, FlatmmConfig::N_Warp, FlatmmConfig::K_Warp>,
        ck_tile::sequence<FlatmmConfig::M_Warp_Tile,
                          FlatmmConfig::N_Warp_Tile,
                          FlatmmConfig::K_Warp_Tile>>;

    using TilePartitioner =
        ck_tile::GemmSpatiallyLocalTilePartitioner<CodegenFlatmmShape,
                                                   FlatmmConfig::TileParitionerGroupNum,
                                                   FlatmmConfig::TileParitionerM01>;

    using Traits = ck_tile::TileGemmTraits<FlatmmConfig::kPadM,
                                           FlatmmConfig::kPadN,
                                           FlatmmConfig::kPadK,
                                           ALayout,
                                           BLayout,
                                           ELayout,
                                           FlatmmConfig::NumWaveGroups>;

    using CodegenGemmTraits = ck_tile::TileGemmUniversalTraits<FlatmmConfig::kPadM,
                                                               FlatmmConfig::kPadN,
                                                               FlatmmConfig::kPadK,
                                                               FlatmmConfig::DoubleSmemBuffer,
                                                               ALayout,
                                                               BLayout,
                                                               ELayout,
                                                               FlatmmConfig::TransposeC,
                                                               FlatmmConfig::UseStructuredSparsity,
                                                               persistent,
                                                               FlatmmConfig::NumWaveGroups,
                                                               true>;

    using ComputeDataType = ADataType;
    static_assert(sizeof(ComputeDataType) >= sizeof(BDataType),
                  "mixed_prec_flatmm requires ADataType is a wider type than BDataType");

    using GemmPipelineProblem = ck_tile::GemmPipelineProblem<ComputeDataType,
                                                             ComputeDataType,
                                                             AccDataType,
                                                             CodegenFlatmmShape,
                                                             Traits>;

    using BaseGemmPipeline = ck_tile::BaseFlatmmPipelineAGmemBGmemCRegV1<GemmPipelineProblem>;

    const ck_tile::index_t k_grain  = args.k_batch * FlatmmConfig::K_Tile;
    const ck_tile::index_t K_split  = (args.K + k_grain - 1) / k_grain * FlatmmConfig::K_Tile;
    const ck_tile::index_t num_loop = TilePartitioner::GetLoopNum(K_split);
    const bool has_hot_loop         = BaseGemmPipeline::BlockHasHotloop(num_loop);
    const ck_tile::TailNumber tail_num = BaseGemmPipeline::GetBlockLoopTailNum(num_loop);

    float ave_time{0};

    const auto Run = [&](const auto has_hot_loop_,
                         const auto tail_number_,
                         const auto memory_operation_) {
        constexpr bool has_hot_loop_v   = has_hot_loop_.value;
        constexpr auto tail_number_v    = tail_number_.value;
        constexpr auto scheduler        = FlatmmConfig::Scheduler;
        constexpr auto memory_operation = memory_operation_.value;

        constexpr int BlockedXDLN_PerWarp = 2; // determined by scale shuffle pattern

        using CodegenPipelineProblem = ck_tile::MXFlatmmPipelineProblem<ADataType,
                                                                        BDataType,
                                                                        AccDataType,
                                                                        CodegenFlatmmShape,
                                                                        CodegenGemmTraits,
                                                                        scheduler,
                                                                        has_hot_loop_v,
                                                                        tail_number_v>;

        using CodegenMXFlatmmPipeline =
            ck_tile::MXF4FlatmmPipelineAGmemBGmemCRegV1<CodegenPipelineProblem>;

        using GemmEpilogue = ck_tile::CShuffleEpilogue<
            ck_tile::CShuffleEpilogueProblem<ComputeDataType,
                                             ComputeDataType,
                                             DsDatatype,
                                             AccDataType,
                                             CDataType,
                                             DsLayout,
                                             ELayout,
                                             CDEElementWise,
                                             TilePartitioner::MPerBlock,
                                             TilePartitioner::NPerBlock,
                                             FlatmmConfig::M_Warp,
                                             FlatmmConfig::N_Warp,
                                             FlatmmConfig::M_Warp_Tile,
                                             FlatmmConfig::N_Warp_Tile,
                                             FlatmmConfig::K_Warp_Tile,
                                             CodegenPipelineProblem::TransposeC,
                                             memory_operation,
                                             FlatmmConfig::NumWaveGroups,
                                             false, // FixedVectorSize
                                             1,     // VectorSizeC
                                             FlatmmConfig::TiledMMAPermuteN,
                                             BlockedXDLN_PerWarp>>;

        using Kernel =
            ck_tile::MXFlatmmKernel<TilePartitioner, CodegenMXFlatmmPipeline, GemmEpilogue>;

        auto kargs = Kernel::MakeKernelArgs(args);

        const dim3 grids      = Kernel::GridSize(kargs);
        constexpr dim3 blocks = Kernel::BlockSize();

        if(!Kernel::IsSupportedArgument(kargs))
        {
            throw std::runtime_error("Wrong! Arguments not supported! Skipping gemm!\n");
        }

        if(s.log_level_ > 0)
        {
            std::cout << "Launching kernel with args:" << CodegenFlatmmShape::GetName() << "\n"
                      << "Shape: " << CodegenFlatmmShape::GetName() << "\n"
                      << "problem: " << CodegenPipelineProblem::GetName() << "\n"
                      << "pipeline: " << CodegenMXFlatmmPipeline::GetName() << "\n"
                      << "grid: {" << grids.x << ", " << grids.y << ", " << grids.z << "}"
                      << ", blocks: {" << blocks.x << ", " << blocks.y << ", " << blocks.z << "}"
                      << std::endl;
        }

        // Declare rotating_mem_ptr here so it stays in scope until it is needed
        std::unique_ptr<ck_tile::RotatingMemWrapper<ADataType, BDataType>> rotating_mem_ptr;
        std::function<void()> preprocess;

        auto clear_gemm_output = [&]() {
            if(args.k_batch > 1)
                hipGetErrorString(hipMemsetAsync(
                    args.e_ptr, 0, args.M * args.N * sizeof(CDataType), s.stream_id_));
        };

        if(s.flush_cache_)
        {
            std::cout << "Flushing cache..." << std::endl;
            constexpr ck_tile::index_t APackedSize = ck_tile::numeric_traits<ADataType>::PackedSize;
            constexpr ck_tile::index_t BPackedSize = ck_tile::numeric_traits<BDataType>::PackedSize;

            ck_tile::HostTensor<ADataType> a_m(ck_tile::host_tensor_descriptor(
                args.M, args.K, args.stride_A, is_row_major(ALayout{})));
            ck_tile::HostTensor<BDataType> b_n(ck_tile::host_tensor_descriptor(
                args.K, args.N, args.stride_B, is_row_major(BLayout{})));

            auto size_a_buffer = a_m.get_element_space_size_in_bytes() / APackedSize;
            auto size_b_buffer = b_n.get_element_space_size_in_bytes() / BPackedSize;

            rotating_mem_ptr = std::make_unique<ck_tile::RotatingMemWrapper<ADataType, BDataType>>(
                kargs.a_ptr, kargs.b_ptr, s.rotating_count_, size_a_buffer, size_b_buffer);
            rotating_mem_ptr->Print();

            preprocess = [&]() {
                ck_tile::flush_icache();
                rotating_mem_ptr->Next();
                clear_gemm_output();
            };
        }
        else
        {
            preprocess = clear_gemm_output;
        }

        ave_time = ck_tile::launch_kernel_time_mask(
            s,
            preprocess,
            ck_tile::make_kernel<FlatmmConfig::kBlockPerCu>(Kernel{}, grids, blocks, 0, kargs));

        return ave_time;
    };

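    // The split-K dispatch below selects the epilogue's global-memory write mode at compile
    // time: with k_batch == 1 a single workgroup owns the whole K reduction for its C tile, so
    // the epilogue can plainly store ("set") its result; with k_batch > 1 each workgroup only
    // produces a partial sum over a K slice, so the epilogue accumulates with atomic_add into an
    // output that clear_gemm_output() has zeroed beforehand. TailHandler then turns the runtime
    // (has_hot_loop, tail_num) pair into compile-time constants before invoking RunSplitk, so
    // every hot-loop/tail combination gets its own kernel instantiation.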
    const auto RunSplitk = [&](const auto has_hot_loop_, const auto tail_number_) {
        if(args.k_batch == 1)
        {
            Run(has_hot_loop_,
                tail_number_,
                ck_tile::integral_constant<ck_tile::memory_operation_enum,
                                           ck_tile::memory_operation_enum::set>{});
        }
        else
        {
            Run(has_hot_loop_,
                tail_number_,
                ck_tile::integral_constant<ck_tile::memory_operation_enum,
                                           ck_tile::memory_operation_enum::atomic_add>{});
        }
    };

    BaseGemmPipeline::TailHandler(RunSplitk, has_hot_loop, tail_num);
    return ave_time;
}

template <typename FlatmmConfig,
          typename ADataType,
          typename BDataType,
          typename DsDatatype,
          typename AccDataType,
          typename CDataType,
          typename ALayout,
          typename BLayout,
          typename DsLayout,
          typename CLayout,
          typename ScaleA,
          typename ScaleB,
          bool UsePersistentKernel = false,
          typename CDEElementWise  = ck_tile::element_wise::PassThrough>
float invoke_mx_flatmm(ck_tile::DeviceMem& a_dev_buf,
                       ck_tile::DeviceMem& b_shuffle_dev_buf,
                       ck_tile::DeviceMem& c_dev_buf,
                       ck_tile::index_t M,
                       ck_tile::index_t N,
                       ck_tile::index_t K,
                       ck_tile::index_t stride_A,
                       ck_tile::index_t stride_B,
                       ck_tile::index_t stride_C,
                       ck_tile::index_t kbatch,
                       ScaleA scale_a,
                       ScaleB scale_b,
                       int n_warmup,
                       int n_repeat)
{
    ck_tile::ScaleFlatmmHostArgs<ScaleA, ScaleB> args = {a_dev_buf.GetDeviceBuffer(),
                                                         b_shuffle_dev_buf.GetDeviceBuffer(),
                                                         {},
                                                         c_dev_buf.GetDeviceBuffer(),
                                                         kbatch,
                                                         M,
                                                         N,
                                                         K,
                                                         stride_A,
                                                         stride_B,
                                                         {},
                                                         stride_C,
                                                         scale_a,
                                                         scale_b};

    float ave_time = mx_flatmm_calc<FlatmmConfig,
                                    ADataType,
                                    BDataType,
                                    DsDatatype,
                                    AccDataType,
                                    CDataType,
                                    ALayout,
                                    BLayout,
                                    DsLayout,
                                    CLayout,
                                    ScaleA,
                                    ScaleB,
                                    UsePersistentKernel,
                                    CDEElementWise>(
        args, ck_tile::stream_config{nullptr, true, 1, n_warmup, n_repeat, true, true, 50});

    constexpr int APackedSize = ck_tile::numeric_traits<ADataType>::PackedSize;
    constexpr int BPackedSize = ck_tile::numeric_traits<BDataType>::PackedSize;

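    // Rough performance accounting: the GEMM itself costs 2*M*N*K flops (multiply + add), and
    // applying the e8m0 block scales (one scale per 32 values of A and of B along K) adds about
    // 2*M*N*K/32 more. For the traffic estimate, A and B are divided by PackedSize because
    // pk_fp4_t packs two 4-bit values into one byte (PackedSize == 2), and each of A and B also
    // carries one 1-byte e8m0 scale per 32 elements, hence the extra M*K/32 and N*K/32 terms.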
    std::size_t flop     = std::size_t(2) * M * N * K + std::size_t(2) * M * N * K / 32;
    std::size_t num_byte = sizeof(ADataType) * M * K / APackedSize +
                           sizeof(BDataType) * N * K / BPackedSize + sizeof(CDataType) * M * N +
                           sizeof(ck_tile::e8m0_t) * M * K / 32 +
                           sizeof(ck_tile::e8m0_t) * N * K / 32;
    float tflops     = static_cast<float>(flop) / 1.E9 / ave_time;
    float gb_per_sec = num_byte / 1.E6 / ave_time;

    std::cout << "Run MXFP4_Flatmm kernel " //
              << " M =" << M << " N =" << N << " K =" << K << " StrideA =" << stride_A
              << " StrideB =" << stride_B << " StrideC =" << stride_C << " : " << ave_time
              << " ms, " << tflops << " TFlops, " << gb_per_sec << " GB/s, " << std::endl;

    return ave_time;
}

auto create_args(int argc, char* argv[])
{
    ck_tile::ArgParser arg_parser;
    arg_parser.insert("m", "32", "m dimension")
        .insert("n", "128", "n dimension")
        .insert("k", "256", "k dimension")
        .insert("a_layout", "R", "A tensor data layout - Row by default")
        .insert("b_layout", "C", "B tensor data layout - Column by default")
        .insert("c_layout", "R", "C tensor data layout - Row by default")
        .insert("stride_a", "0", "Tensor A stride")
        .insert("stride_b", "0", "Tensor B stride")
        .insert("stride_c", "0", "Tensor C stride")
        .insert("v", "1", "0. No validation, 1. Validation on CPU, 2. Validation on GPU")
        .insert("mx_prec",
                "fp4xfp4",
                "data type for activation and weight, support: fp4xfp4 (fp6xfp6 and fp8xfp8 not "
                "supported yet)")
        .insert("warmup", "50", "number of iterations before benchmarking the kernel")
        .insert("repeat", "100", "number of iterations to benchmark the kernel")
        .insert("timer", "gpu", "gpu: gpu timer, cpu: cpu timer")
        .insert("split_k", "1", "splitK value")
        .insert("init", "0", "0: random, 1: constant(1)")
        .insert("persistent", "0", "0: no persistent, 1: persistent kernel")
        .insert("warp_tile",
                "0",
                "0: 16x16, 1: 32x32, 2: 16x16x128 (950 only), 3: 32x32x64 (950 only)");
    bool result = arg_parser.parse(argc, argv);
    return std::make_tuple(result, arg_parser);
}

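// Example invocation (the binary name depends on the build target; tile_example_mx_flatmm is
// only an illustration). ck_tile::ArgParser options are passed as -name=value, and any option
// that is not given keeps the default registered above:
//   ./tile_example_mx_flatmm -m=3840 -n=4096 -k=2048 -v=1 -warmup=50 -repeat=100 -split_k=1
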
template <class FlatmmConfig, class IterSrc, class IterDst>
void preShuffleWeight(const IterSrc src, IterDst dst, int N, int K)
{
    int KPack = 16;
    int NLane = FlatmmConfig::N_Warp_Tile;
    int KLane = 64 / NLane;
    int K_pk  = K / 2;
    int K0    = K_pk / (KLane * KPack);
    // K -> K0 KLane KPack
    // N -> N0 NLane
    // N, K -> N0 K0 KLane NLane KPack
    int tempk;
    for(int n = 0; n < N; ++n)
    {
        for(int k = 0; k < K_pk; ++k)
        {
            int n0 = n / NLane;
            int n1 = n % NLane;

            int k0 = k / (KLane * KPack);
            tempk  = k % (KLane * KPack);
            int k1 = tempk / KPack;
            int k2 = tempk % KPack;

            int outputIndex = n0 * KPack * NLane * KLane * K0 + k0 * KPack * NLane * KLane +
                              k1 * KPack * NLane + n1 * KPack + k2;

            dst[outputIndex] = src[n * K_pk + k];
        }
    }
}

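// Worked example of the mapping above, assuming FlatmmConfig::N_Warp_Tile == 16 (so NLane = 16,
// KLane = 4, and KPack = 16 packed bytes, i.e. 32 fp4 values): the packed byte at (n, k) = (17, 100)
// gives n0 = 1, n1 = 1, k0 = 1, k1 = 2, k2 = 4 and lands at output offset
//   (((n0 * K0 + k0) * KLane + k1) * NLane + n1) * KPack + k2,
// i.e. the flattened [N0][K0][KLane][NLane][KPack] layout named in the comment above.
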
template <class FlatmmConfig, bool KLast, typename Src>
auto preShuffleScale(Src& src)
{
    using dtype      = typename Src::Data::value_type;
    auto src_lengths = src.get_lengths();
    const auto MN    = KLast ? src_lengths[0] : src_lengths[1];
    const auto K     = KLast ? src_lengths[1] : src_lengths[0];

    size_t MNXdlPack   = 2;
    size_t KXdlPack    = 2;
    size_t XdlMNThread = FlatmmConfig::N_Warp_Tile; // 16
    size_t XdlKThread  = 64 / XdlMNThread;

    const auto MN_Paded = ck_tile::integer_least_multiple(MN, XdlMNThread * MNXdlPack);

    ck_tile::HostTensor<dtype> shuffled(ck_tile::HostTensorDescriptor({MN_Paded * K}, {1}));

    size_t K0 = K / KXdlPack / XdlKThread; // KRepeat

    // The 4 16x128 building blocks will be packed into 1 32x256 for F4
    // The 8 16x16x128 mfma will be packed into 1 32x32x256 for F4

    // unfold the MN32xK(256/32) scale buffer
    //    4             16             2           2
    // To XdlKThread -> XdlMNThread -> KXdlPack -> MNXdlPack
    // Then, MNRepeat -> KRepeat

    for(size_t n = 0; n < MN_Paded; ++n)
    {
        for(size_t k = 0; k < K; ++k)
        {
            auto n0    = n / (XdlMNThread * MNXdlPack); // i MNRepeat
            auto tempn = n % (XdlMNThread * MNXdlPack);
            auto n1    = tempn % XdlMNThread; // i XdlMNThread
            auto n2    = tempn / XdlMNThread; // i MNXdlPack

            auto k0    = k / (XdlKThread * KXdlPack); // i KRepeat
            auto tempk = k % (XdlKThread * KXdlPack);
            auto k1    = tempk % XdlKThread; // i XdlKThread
            auto k2    = tempk / XdlKThread; // i KXdlPack

            auto outputIndex = n0 * MNXdlPack * KXdlPack * XdlMNThread * XdlKThread * K0 +
                               k0 * MNXdlPack * KXdlPack * XdlMNThread * XdlKThread +
                               k1 * MNXdlPack * KXdlPack * XdlMNThread + n1 * MNXdlPack * KXdlPack +
                               k2 * MNXdlPack + n2;

            if constexpr(KLast)
                shuffled(outputIndex) = n < MN ? src(n, k) : dtype{};
            else
                shuffled(outputIndex) = n < MN ? src(k, n) : dtype{};
        }
    }
    return shuffled;
}

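// The loop above therefore emits the scales in a flattened
// [MNRepeat][KRepeat][XdlKThread][XdlMNThread][KXdlPack][MNXdlPack] order, with MN padded up to a
// multiple of XdlMNThread * MNXdlPack (32 when N_Warp_Tile is 16) so that every lane of a
// wavefront ends up with its own contiguous KXdlPack x MNXdlPack (2 x 2) group of e8m0 scales.
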
#include "run_mx_flatmm.inc"
|
|
|
|
template <typename FlatmmConfig>
int run_mx_flatmm_example(int argc, char* argv[])
{
    auto [result, arg_parser] = create_args(argc, argv);
    if(!result)
        return -1;

    using Row = ck_tile::tensor_layout::gemm::RowMajor;
    using Col = ck_tile::tensor_layout::gemm::ColumnMajor;

    std::string mx_prec  = arg_parser.get_str("mx_prec");
    std::string a_layout = arg_parser.get_str("a_layout");
    std::string b_layout = arg_parser.get_str("b_layout");
    int persistent_opt   = arg_parser.get_int("persistent");

    if(a_layout == "R" && b_layout == "C")
    {
        if(mx_prec == "fp4xfp4")
        {
            if(persistent_opt == 0)
            {
                run_mx_flatmm_with_layouts<ck_tile::pk_fp4_t,
                                           ck_tile::pk_fp4_t,
                                           ck_tile::fp16_t,
                                           FlatmmConfig,
                                           false>(argc, argv, Row{}, Col{}, Row{});
            }
            else
            {
                run_mx_flatmm_with_layouts<ck_tile::pk_fp4_t,
                                           ck_tile::pk_fp4_t,
                                           ck_tile::fp16_t,
                                           FlatmmConfig,
                                           true>(argc, argv, Row{}, Col{}, Row{});
            }
        }
        else if(mx_prec == "fp6xfp6")
        {
            throw std::runtime_error("Only support fp4xfp4 now!");
        }
        else if(mx_prec == "fp8xfp8")
        {
            throw std::runtime_error("Only support fp4xfp4 now!");
        }
        else
        {
            throw std::runtime_error("Unsupported data_type!");
        }
    }
    else
    {
        throw std::runtime_error("Unsupported data layout configuration for A,B and C tensors!");
    }
    return -1;
}

int main(int argc, char* argv[])
{
    auto [result, arg_parser] = create_args(argc, argv);
    if(!result)
        return EXIT_FAILURE;

    try
    {
        int warp_tile = arg_parser.get_int("warp_tile");
        if(warp_tile == 0)
        {
            return !run_mx_flatmm_example<MXfp4_FlatmmConfig16>(argc, argv);
        }
        else if(warp_tile == 1)
        {
            throw std::runtime_error("Only support MFMA_16x16x128 now!");
        }
        else
        {
            throw std::runtime_error("Unsupported warp_tile!");
        }
    }
    catch(const std::runtime_error& e)
    {
        std::cerr << "Runtime error: " << e.what() << '\n';
        return EXIT_FAILURE;
    }
}