[rocm-libraries] ROCm/rocm-libraries#4302 (commit e62bd8a)

[CK_TILE] add tf32 support

## Proposed changes

TF32 is already supported in CK on gfx942 and gfx950. This PR initiates TF32 support in
CK_TILE on gfx942 and gfx950.
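
For context, TF32 keeps fp32's sign bit and all 8 exponent bits but only 10 of the 23 mantissa bits, so converting fp32 to TF32 amounts to clearing the low 13 fraction bits (optionally after a round-to-nearest-even bias). A minimal host-side sketch of the truncating conversion added by this PR, using `std::memcpy` in place of the in-tree `bit_cast`:

```cpp
#include <cstdint>
#include <cstring>

// Truncating fp32 -> tf32: clear the 13 low fraction bits, keeping
// sign(1) + exponent(8) + mantissa(10). Mirrors the behavior of
// float_to_tf32<tf32_rounding_mode::trunc> introduced below.
float float_to_tf32_trunc(float x)
{
    std::uint32_t i;
    std::memcpy(&i, &x, sizeof(i)); // portable stand-in for bit_cast
    i &= 0xFFFFE000u;
    std::memcpy(&x, &i, sizeof(x));
    return x;
}
```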

## Checklist

Please put an `x` into the boxes that apply. You can also fill these out
after creating the PR. If you're not sure, please don't hesitate to ask.

- [ ] I have added tests relevant to the introduced functionality, and
the unit tests are passing locally
- [ ] I have added the test to the REGRESSION_TESTS list defined at the top
of tests/CMakeLists.txt, **IF** the test takes more than 30 seconds to run.
- [ ] I have added inline documentation that enables the maintainers
to understand the motivation
- [ ] I have removed the stale documentation which is no longer relevant
after this pull request
- [ ] (If this change is user-facing) I have added release notes which
provide the end users with a brief summary of the improvement from this
pull request
- [x] I have run `clang-format` on all changed files
- [ ] Any dependent changes have been merged

## Discussion
Author: yinglu
Date: 2026-03-19 09:19:06 +00:00
Committed by: assistant-librarian[bot]
Parent: 652d3456ca
Commit: d460ab35b6
30 changed files with 1164 additions and 260 deletions

View File

@@ -6,6 +6,7 @@
#include "ck_tile/core/numeric/half.hpp"
#include "ck_tile/core/numeric/integral_constant.hpp"
#include "ck_tile/core/numeric/numeric.hpp"
#include "ck_tile/core/numeric/ext_vector_base.hpp"
#if CK_TILE_USE_LLVM_BUILTIN_BF16
#include <hip/hip_bfloat16.h>
#endif
@@ -440,4 +441,62 @@ CK_TILE_HOST_DEVICE constexpr bf16x2_t fp32x2_to_bf16x2(const fp32x2_t& x)
return bf16x2_t{float_to_bf16<rounding>(x.x), float_to_bf16<rounding>(x.y)};
}
// Available on gfx942, gfx950, and later architectures
CK_TILE_DEVICE bf16x2_t cvt_pk_bf16_f32(float a, float b)
{
#if defined(__gfx94__) && CK_TILE_USE_LLVM_BUILTIN_BF16
return __builtin_convertvector(fp32x2_t{a, b}, bf16x2_t);
#else
return fp32x2_to_bf16x2(fp32x2_t{a, b});
#endif
}
// Packed bf16x2 to fp32x2 conversion
CK_TILE_HOST_DEVICE constexpr fp32x2_t bf16x2_to_fp32x2(bf16x2_t x)
{
#if CK_TILE_USE_LLVM_BUILTIN_BF16
return __builtin_convertvector(x, fp32x2_t);
#else
uint32_t packed = bit_cast<uint32_t>(x);
float f0 = bit_cast<float>(packed << 16);
float f1 = bit_cast<float>(packed & 0xFFFF0000u);
return fp32x2_t{f0, f1};
#endif
}
#ifndef CK_TILE_TF32_USE_PACKED_CVT
#define CK_TILE_TF32_USE_PACKED_CVT 1
#endif
template <int VecSize>
CK_TILE_DEVICE void convert_float_to_bf16_pairs(const ext_vector_t<float, VecSize>& reg_f32,
ext_vector_t<bfloat16_t, VecSize>& reg_bf16_big,
ext_vector_t<bfloat16_t, VecSize>& reg_bf16_small)
{
#if defined(__gfx94__) && CK_TILE_TF32_USE_PACKED_CVT && CK_TILE_USE_LLVM_BUILTIN_BF16
static_assert(VecSize % 2 == 0, "VecSize must be even for packed operations");
#pragma unroll
for(int i = 0; i < VecSize; i += 2)
{
fp32x2_t orig = {reg_f32[i], reg_f32[i + 1]};
bf16x2_t big_pair = cvt_pk_bf16_f32(orig[0], orig[1]);
fp32x2_t big_f32 = bf16x2_to_fp32x2(big_pair);
fp32x2_t diff = orig - big_f32;
bf16x2_t small_pair = cvt_pk_bf16_f32(diff[0], diff[1]);
reinterpret_cast<bf16x2_t*>(&reg_bf16_big)[i / 2] = big_pair;
reinterpret_cast<bf16x2_t*>(&reg_bf16_small)[i / 2] = small_pair;
}
#else
#pragma unroll
for(int i = 0; i < VecSize; i++)
{
reg_bf16_big[i] = float_to_bf16(reg_f32[i]);
reg_bf16_small[i] = float_to_bf16(reg_f32[i] - bf16_to_float(reg_bf16_big[i]));
}
#endif
}
} // namespace ck_tile
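
The big/small split above is the core of the 3x bf16 emulation used later in this PR: `big` is the bf16 rounding of the input and `small` is the bf16 rounding of the residual, so `big + small` recovers roughly twice bf16's mantissa precision. A standalone host sketch of the same decomposition (plain C++, truncation-based, not the packed CK_TILE path):

```cpp
#include <cstdint>
#include <cstring>

// Round a float toward zero to bf16 precision (keep the high 16 bits).
static float to_bf16(float x)
{
    std::uint32_t i;
    std::memcpy(&i, &x, sizeof(i));
    i &= 0xFFFF0000u;
    std::memcpy(&x, &i, sizeof(x));
    return x;
}

// Split x into a bf16-representable "big" part and a residual "small" part.
// x - big is exact in fp32, so big + small carries ~16 mantissa bits of x.
static void split_bf16(float x, float& big, float& small)
{
    big   = to_bf16(x);
    small = to_bf16(x - big);
}
```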

View File

@@ -0,0 +1,80 @@
// Copyright (c) Advanced Micro Devices, Inc., or its affiliates.
// SPDX-License-Identifier: MIT
#pragma once
#include "ck_tile/core/numeric/integer.hpp"
#include "ck_tile/core/utility/type_traits.hpp"
#include <type_traits>
namespace ck_tile {
// this structure is used to pick up the <base> type inside
// using xxx = <base> __attribute__((ext_vector_type(N)));
// because clang only allows native types + bool in this position (custom types will fail)
// specialize this structure to provide the proper <base> type
template <typename T>
struct native_t
{
using type = remove_cvref_t<T>;
};
// we purposely name this ext_vector because clang's ext_vector_type extension only accepts literal
// basic types to construct an ext_vector_type. You must be very careful using this, or you will get
// lots of compiler errors, e.g. struct A; using Ax2_t = A __attribute__((ext_vector_type(2))); ->
// will cause a compiler error
namespace impl {
template <typename T_, index_t N_, typename = void>
struct ext_vector;
template <typename T_, index_t N_>
struct ext_vector<T_, N_, std::enable_if_t<!std::is_class_v<typename native_t<T_>::type>>>
{
static constexpr index_t N = N_;
// struct type is not supported for ext_vector
using value_type = typename native_t<T_>::type;
static_assert(!std::is_class_v<value_type>);
using type = value_type __attribute__((ext_vector_type(N))); // this is dangerous
};
template <typename T_, index_t N_>
struct ext_vector<T_, N_, std::enable_if_t<std::is_class_v<typename native_t<T_>::type>>>
{
static constexpr index_t N = N_;
// struct type is not supported for ext_vector
using value_type = typename native_t<T_>::type::type;
static_assert(!std::is_class_v<value_type>);
using type = value_type __attribute__((ext_vector_type(N))); // this is dangerous
};
template <typename V_, index_t Vs_, index_t N_>
struct ext_vector<V_ __attribute__((ext_vector_type(Vs_))),
N_,
std::enable_if_t<!std::is_class_v<typename native_t<V_>::type>>>
{
static constexpr index_t N = Vs_ * N_;
using value_type = typename native_t<remove_cvref_t<V_>>::type;
static_assert(!std::is_class_v<value_type>);
using type = value_type __attribute__((ext_vector_type(N))); // this is dangerous
};
template <typename V_, index_t Vs_, index_t N_>
struct ext_vector<V_ __attribute__((ext_vector_type(Vs_))),
N_,
std::enable_if_t<std::is_class_v<typename native_t<V_>::type>>>
{
static constexpr index_t N = Vs_ * N_;
using value_type = typename native_t<remove_cvref_t<V_>>::type::type;
static_assert(!std::is_class_v<value_type>);
using type = value_type __attribute__((ext_vector_type(N))); // this is dangerous
};
} // namespace impl
template <typename T, index_t N>
using ext_vector_t = typename impl::ext_vector<T, N>::type;
} // namespace ck_tile
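
Usage-wise, `ext_vector_t<T, N>` resolves to a clang extended vector of the native base type, and nesting an existing ext_vector flattens via the `Vs_ * N_` specializations above. A sketch of the expected mappings (the alias names here are illustrative):

```cpp
using f32x4 = ck_tile::ext_vector_t<float, 4>; // float __attribute__((ext_vector_type(4)))
using f32x8 = ck_tile::ext_vector_t<f32x4, 2>; // nesting flattens: 4 * 2 = 8 lanes
static_assert(sizeof(f32x4) == 4 * sizeof(float));
static_assert(sizeof(f32x8) == 8 * sizeof(float));
```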

View File

@@ -9,6 +9,11 @@
namespace ck_tile {
// TF32 tag type: 1 sign bit, 8 exponent bits, 10 mantissa bits (see numeric_traits<tf32_t>)
struct tf32_t
{
};
// this struct has the information of
// 1. limits of a certain type, similar to std::numeric_limits
// 2. some pre-defined value, zero, one...
@@ -101,6 +106,25 @@ struct numeric_traits<float>
using bitwise_type = uint32_t;
};
template <>
struct numeric_traits<tf32_t>
{
static constexpr int exp = 8;
static constexpr int mant = 10;
static constexpr int bias = 127;
static constexpr uint32_t nan_mask = 0x7F800000;
static constexpr uint32_t head_mask = 0xFF800000;
static constexpr uint32_t mant_mask = 0x7FFFFF;
static constexpr uint32_t exp_mask = 0xFF;
static constexpr uint32_t abs_mask = 0x7FFFFFFF;
static constexpr uint32_t Inf = 0x7F800000;
static constexpr uint32_t NegInf = 0xFF800000;
static constexpr uint32_t NaN = 0x7F800001;
static constexpr uint32_t Neg0 = 0x80000000;
static constexpr int PackedSize = 1;
using bitwise_type = uint32_t;
};
} // namespace ck_tile
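
With `mant = 10`, the unit roundoff that the test thresholds later in this PR derive from these traits works out to

$$\varepsilon_{\mathrm{tf32}} = 2^{-\mathrm{mant}} \cdot 0.5 = 2^{-10} \cdot 0.5 \approx 4.88 \times 10^{-4},$$

which is why `tf32_t` is added to the allowed type lists in `get_relative_threshold` and `get_absolute_threshold` below.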
#define CK_TILE_ARITHMETIC_USING_FLOAT(attr_, type_) \

View File

@@ -57,6 +57,44 @@ CK_TILE_TYPE_CONVERT(float, float, bf16_t, bf16)
CK_TILE_TYPE_CONVERT(float, float, fp8_t, fp8)
CK_TILE_TYPE_CONVERT(float, float, bf8_t, bf8)
static constexpr uint32_t float32_exponent_mask = 0x7f800000u;
enum class tf32_rounding_mode
{
trunc = 0, // truncate
rne = 1, // round to nearest even (RTNE)
};
template <tf32_rounding_mode rounding = tf32_rounding_mode::trunc>
CK_TILE_HOST_DEVICE constexpr float float_to_tf32(float x)
{
uint32_t i = bit_cast<uint32_t>(x);
if constexpr(rounding == tf32_rounding_mode::rne)
{
// RTNE rounding.
if((i & float32_exponent_mask) != float32_exponent_mask)
{
// Add rounding bias for round-to-nearest-even (RTNE) before truncating:
// - 0xfff is the rounding bias corresponding to the 13 fraction bits that
// will be discarded.
// - (i >> 13) & 1 extracts the least significant bit that will be kept, and
// adding it implements "ties to even" (round half-way cases to even).
i += 0xfff + ((i >> 13) & 1);
}
}
// Zero out the lowest 13 fraction bits to form the TF32-like value.
i &= 0xFFFFE000u;
return bit_cast<float>(i);
}
template <typename Y,
tf32_rounding_mode rounding = tf32_rounding_mode::trunc,
std::enable_if_t<std::is_same_v<Y, tf32_t>, bool> = false>
CK_TILE_HOST_DEVICE constexpr float type_convert(float x)
{
return float_to_tf32<rounding>(x);
}
CK_TILE_TYPE_CONVERT(fp16_t, fp16, float, float)
CK_TILE_TYPE_CONVERT(bf16_t, bf16, float, float)
CK_TILE_TYPE_CONVERT(fp8_t, fp8, float, float)
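
To see the RNE bias at work: the conversion adds `0xfff + ((i >> 13) & 1)` before truncation, so halfway values round toward an even 10-bit mantissa. A small host sketch of the same bit manipulation, assuming a finite input (the real code first excludes NaN/Inf via the exponent mask):

```cpp
#include <cstdint>

// Round-to-nearest-even fp32 -> tf32 on the raw bit pattern (finite inputs).
// Mirrors float_to_tf32<tf32_rounding_mode::rne> above.
std::uint32_t tf32_rne_bits(std::uint32_t i)
{
    // 0xfff is half of the discarded 13-bit range minus 1; adding the lowest
    // kept bit ((i >> 13) & 1) breaks exact ties toward an even mantissa.
    i += 0xfff + ((i >> 13) & 1);
    return i & 0xFFFFE000u; // truncate the 13 low fraction bits
}
```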

View File

@@ -5,7 +5,7 @@
#include "ck_tile/core/config.hpp"
#include "ck_tile/core/container/array.hpp"
#include "ck_tile/core/numeric/integer.hpp"
#include "ck_tile/core/numeric/ext_vector_base.hpp"
#include "ck_tile/core/numeric/integral_constant.hpp"
#include "ck_tile/core/numeric/float8.hpp"
#include "ck_tile/core/numeric/half.hpp"
@@ -13,77 +13,9 @@
#include "ck_tile/core/numeric/pk_int4.hpp"
#include "ck_tile/core/numeric/pk_fp4.hpp"
#include "ck_tile/core/numeric/e8m0.hpp"
#include "ck_tile/core/utility/type_traits.hpp"
namespace ck_tile {
// this structure is used to pick up the <base> type inside
// using xxx = <base> __attribute__((ext_vector_type(N)));
// because clang only allows native types + bool in this position (custom types will fail)
// specialize this structure to provide the proper <base> type
template <typename T>
struct native_t
{
using type = remove_cvref_t<T>;
};
// we purposely name this ext_vector because clang's ext_vector_type extension only accepts literal
// basic types to construct an ext_vector_type. You must be very careful using this, or you will get
// lots of compiler errors, e.g. struct A; using Ax2_t = A __attribute__((ext_vector_type(2))); ->
// will cause a compiler error
namespace impl {
template <typename T_, index_t N_, typename = void>
struct ext_vector;
template <typename T_, index_t N_>
struct ext_vector<T_, N_, std::enable_if_t<!std::is_class_v<typename native_t<T_>::type>>>
{
static constexpr index_t N = N_;
// struct type is not supported for ext_vector
using value_type = typename native_t<T_>::type;
static_assert(!std::is_class_v<value_type>);
using type = value_type __attribute__((ext_vector_type(N))); // this is dangerous
};
template <typename T_, index_t N_>
struct ext_vector<T_, N_, std::enable_if_t<std::is_class_v<typename native_t<T_>::type>>>
{
static constexpr index_t N = N_;
// struct type is not supported for ext_vector
using value_type = typename native_t<T_>::type::type;
static_assert(!std::is_class_v<value_type>);
using type = value_type __attribute__((ext_vector_type(N))); // this is dangerous
};
template <typename V_, index_t Vs_, index_t N_>
struct ext_vector<V_ __attribute__((ext_vector_type(Vs_))),
N_,
std::enable_if_t<!std::is_class_v<typename native_t<V_>::type>>>
{
static constexpr index_t N = Vs_ * N_;
using value_type = typename native_t<remove_cvref_t<V_>>::type;
static_assert(!std::is_class_v<value_type>);
using type = value_type __attribute__((ext_vector_type(N))); // this is dangerous
};
template <typename V_, index_t Vs_, index_t N_>
struct ext_vector<V_ __attribute__((ext_vector_type(Vs_))),
N_,
std::enable_if_t<std::is_class_v<typename native_t<V_>::type>>>
{
static constexpr index_t N = Vs_ * N_;
using value_type = typename native_t<remove_cvref_t<V_>>::type::type;
static_assert(!std::is_class_v<value_type>);
using type = value_type __attribute__((ext_vector_type(N))); // this is dangerous
};
} // namespace impl
template <typename T, index_t N>
using ext_vector_t = typename impl::ext_vector<T, N>::type;
// by default, any type will result in a vector_size=1 with scalar_type=T traits.
// ... unless we have other vector_traits specialization
template <typename T, typename = void>

View File

@@ -112,6 +112,11 @@ CK_TILE_HOST_DEVICE PY c_style_pointer_cast(PX p_x)
#pragma clang diagnostic pop
}
// Template ternary: if Cond == Match, use TrueType, else FalseType
// Usage: if_select_t<T, int, float, double> evaluates to float if T==int, else double
template <typename Cond, typename Match, typename TrueType, typename FalseType>
using if_select_t = std::conditional_t<std::is_same_v<Cond, Match>, TrueType, FalseType>;
template <typename CompareTo, typename... Rest>
struct is_any_of : std::false_type
{

View File

@@ -58,6 +58,7 @@ CK_TILE_HOST double get_relative_threshold(const int number_of_accumulations = 1
F16,
BF16,
F32,
tf32_t,
pk_fp4_t,
pk_fp4_raw_t,
pk_int4_t,
@@ -76,8 +77,9 @@ CK_TILE_HOST double get_relative_threshold(const int number_of_accumulations = 1
compute_error = std::pow(2, -numeric_traits<ComputeDataType>::mant) * 0.5;
}
static_assert(is_any_of<OutDataType, F8, BF8, F16, BF16, F32, pk_int4_t, I8, I32, int>::value,
"Warning: Unhandled OutDataType for setting up the relative threshold!");
static_assert(
is_any_of<OutDataType, F8, BF8, F16, BF16, F32, tf32_t, pk_int4_t, I8, I32, int>::value,
"Warning: Unhandled OutDataType for setting up the relative threshold!");
double output_error = 0;
if constexpr(is_any_of<OutDataType, pk_int4_t, I8, I32, int>::value)
@@ -90,8 +92,9 @@ CK_TILE_HOST double get_relative_threshold(const int number_of_accumulations = 1
}
double midway_error = std::max(compute_error, output_error);
static_assert(is_any_of<AccDataType, F8, BF8, F16, BF16, F32, pk_int4_t, I8, I32, int>::value,
"Warning: Unhandled AccDataType for setting up the relative threshold!");
static_assert(
is_any_of<AccDataType, F8, BF8, F16, BF16, F32, tf32_t, pk_int4_t, I8, I32, int>::value,
"Warning: Unhandled AccDataType for setting up the relative threshold!");
double acc_error = 0;
if constexpr(is_any_of<AccDataType, pk_int4_t, I8, I32, int>::value)
@@ -129,6 +132,7 @@ CK_TILE_HOST double get_absolute_threshold(const double max_possible_num,
F16,
BF16,
F32,
tf32_t,
pk_fp4_t,
pk_fp4_raw_t,
pk_int4_t,
@@ -151,8 +155,9 @@ CK_TILE_HOST double get_absolute_threshold(const double max_possible_num,
compute_error = std::pow(2, discrete_expo - numeric_traits<ComputeDataType>::mant) * 0.5;
}
static_assert(is_any_of<OutDataType, F8, BF8, F16, BF16, F32, pk_int4_t, I8, I32, int>::value,
"Warning: Unhandled OutDataType for setting up the absolute threshold!");
static_assert(
is_any_of<OutDataType, F8, BF8, F16, BF16, F32, tf32_t, pk_int4_t, I8, I32, int>::value,
"Warning: Unhandled OutDataType for setting up the absolute threshold!");
double output_error = 0;
if constexpr(is_any_of<OutDataType, pk_int4_t, I8, I32, int>::value)
@@ -168,8 +173,9 @@ CK_TILE_HOST double get_absolute_threshold(const double max_possible_num,
}
double midway_error = std::max(compute_error, output_error);
static_assert(is_any_of<AccDataType, F8, BF8, F16, BF16, F32, pk_int4_t, I8, I32, int>::value,
"Warning: Unhandled AccDataType for setting up the absolute threshold!");
static_assert(
is_any_of<AccDataType, F8, BF8, F16, BF16, F32, tf32_t, pk_int4_t, I8, I32, int>::value,
"Warning: Unhandled AccDataType for setting up the absolute threshold!");
double acc_error = 0;
if constexpr(is_any_of<AccDataType, pk_int4_t, I8, I32, int>::value)

View File

@@ -4,11 +4,11 @@
#pragma once
#include <cstdlib>
#include <mutex>
#include <thread>
#include "ck_tile/core.hpp"
#include "ck_tile/host/host_tensor.hpp"
#include "ck_tile/host/device_prop.hpp"
namespace ck_tile {
@@ -447,24 +447,34 @@ CK_TILE_HOST void reference_mx_gemm_bquant(const HostTensor<ADataType>& a_m_k,
std::cout << std::endl;
}
template <typename ADataType,
typename BDataType,
template <typename ADataType_,
typename BDataType_,
typename AccDataType,
typename CDataType,
typename AElementOp = ck_tile::identity,
typename BElementOp = ck_tile::identity,
typename ACCElementOp = ck_tile::identity>
CK_TILE_HOST void reference_gemm(const HostTensor<ADataType>& a_m_k,
const HostTensor<BDataType>& b_k_n,
HostTensor<CDataType>& c_m_n,
const AElementOp& a_element_op = {},
const BElementOp& b_element_op = {},
const ACCElementOp& acc_element_op = {})
CK_TILE_HOST void
reference_gemm(const HostTensor<if_select_t<ADataType_, tf32_t, float, ADataType_>>& a_m_k,
const HostTensor<if_select_t<BDataType_, tf32_t, float, BDataType_>>& b_k_n,
HostTensor<CDataType>& c_m_n,
const AElementOp& a_element_op = {},
const BElementOp& b_element_op = {},
const ACCElementOp& acc_element_op = {})
{
if constexpr(std::is_same_v<ADataType_, tf32_t> || std::is_same_v<BDataType_, tf32_t>)
static_assert(std::is_same_v<ADataType_, BDataType_>,
"ADataType and BDataType must be the same");
using ADataTypeCompute = ADataType_;
using ADataTypeBuf = if_select_t<ADataType_, tf32_t, float, ADataType_>;
using BDataTypeBuf = if_select_t<BDataType_, tf32_t, float, BDataType_>;
const std::size_t M = a_m_k.get_length(0);
const std::size_t N = b_k_n.get_length(1);
const std::size_t K = a_m_k.get_length(1);
const bool is_gfx950 = (ck_tile::get_device_name() == "gfx950");
auto f_mn = [&](auto m, auto n) {
AccDataType v_acc = 0;
@@ -472,7 +482,7 @@ CK_TILE_HOST void reference_gemm(const HostTensor<ADataType>& a_m_k,
{
AccDataType v_a;
AccDataType v_b;
if constexpr(std::is_same_v<ADataType, pk_fp4_t>)
if constexpr(std::is_same_v<ADataTypeBuf, pk_fp4_t>)
{
// HostTensor automatically handles packed indexing: a_m_k(m,k) divides the offset by
// PackedSize, so a_m_k(m,0) and a_m_k(m,1) return the same packed byte
@@ -481,7 +491,7 @@ CK_TILE_HOST void reference_gemm(const HostTensor<ADataType>& a_m_k,
const float unpacked = (k % 2 == 1) ? fp32_val.hi : fp32_val.lo;
v_a = ck_tile::type_convert<AccDataType>(a_element_op(unpacked));
}
else if constexpr(std::is_same_v<ADataType, pk_int4_t>)
else if constexpr(std::is_same_v<ADataTypeBuf, pk_int4_t>)
{
// HostTensor automatically handles packed indexing
const pk_int4_t pk_val = a_m_k(m, k);
@@ -493,7 +503,7 @@ CK_TILE_HOST void reference_gemm(const HostTensor<ADataType>& a_m_k,
{
v_a = ck_tile::type_convert<AccDataType>(a_element_op(a_m_k(m, k)));
}
if constexpr(std::is_same_v<BDataType, pk_fp4_t>)
if constexpr(std::is_same_v<BDataTypeBuf, pk_fp4_t>)
{
// HostTensor automatically handles packed indexing
const pk_fp4_t pk_val = b_k_n(k, n);
@@ -501,7 +511,7 @@ CK_TILE_HOST void reference_gemm(const HostTensor<ADataType>& a_m_k,
const float unpacked = (k % 2 == 1) ? fp32_val.hi : fp32_val.lo;
v_b = ck_tile::type_convert<AccDataType>(b_element_op(unpacked));
}
else if constexpr(std::is_same_v<BDataType, pk_int4_t>)
else if constexpr(std::is_same_v<BDataTypeBuf, pk_int4_t>)
{
// HostTensor automatically handles packed indexing
const pk_int4_t pk_val = b_k_n(k, n);
@@ -513,7 +523,36 @@ CK_TILE_HOST void reference_gemm(const HostTensor<ADataType>& a_m_k,
{
v_b = ck_tile::type_convert<AccDataType>(b_element_op(b_k_n(k, n)));
}
v_acc += v_a * v_b;
if constexpr(std::is_same_v<ADataTypeCompute, tf32_t>)
{
if(is_gfx950)
{
// gfx950: use 3x bf16 emulation
bf16_t v_a_bf16_big = ck_tile::type_convert<bf16_t>(v_a);
bf16_t v_a_bf16_small = ck_tile::type_convert<bf16_t>(
v_a - type_convert<AccDataType>(v_a_bf16_big));
bf16_t v_b_bf16_big = ck_tile::type_convert<bf16_t>(v_b);
bf16_t v_b_bf16_small = ck_tile::type_convert<bf16_t>(
v_b - type_convert<AccDataType>(v_b_bf16_big));
v_acc += ck_tile::type_convert<AccDataType>(v_a_bf16_big) *
ck_tile::type_convert<AccDataType>(v_b_bf16_small) +
ck_tile::type_convert<AccDataType>(v_a_bf16_small) *
ck_tile::type_convert<AccDataType>(v_b_bf16_big) +
ck_tile::type_convert<AccDataType>(v_a_bf16_big) *
ck_tile::type_convert<AccDataType>(v_b_bf16_big);
}
else
{
// Other architectures: tf32 not supported or handled via fp32 fallback
v_acc += v_a * v_b;
}
}
else
{
v_acc += v_a * v_b;
}
}
c_m_n(m, n) = ck_tile::type_convert<CDataType>(acc_element_op(v_acc));
@@ -764,15 +803,15 @@ reference_gemm_multiple_d(const HostTensor<ADataType>& a_m_k,
make_ParallelTensorFunctor(f_mk_kn_mn, M, N)(std::thread::hardware_concurrency());
}
template <typename ADataType,
typename BDataType,
template <typename ADataType_,
typename BDataType_,
typename AccDataType,
typename CDataType,
typename LayoutA,
typename LayoutB,
typename LayoutC>
__global__ void naive_gemm_kernel(ADataType* A,
BDataType* B,
__global__ void naive_gemm_kernel(if_select_t<ADataType_, tf32_t, float, ADataType_>* A,
if_select_t<BDataType_, tf32_t, float, BDataType_>* B,
CDataType* C,
ck_tile::index_t M,
ck_tile::index_t N,
@@ -781,6 +820,14 @@ __global__ void naive_gemm_kernel(ADataType* A,
ck_tile::index_t strideB,
ck_tile::index_t strideC)
{
if constexpr(std::is_same_v<ADataType_, tf32_t> || std::is_same_v<BDataType_, tf32_t>)
static_assert(std::is_same_v<ADataType_, BDataType_>,
"ADataType and BDataType must be the same");
using ADataTypeCompute = ADataType_;
// ADataTypeBuf: buffer/storage type (fp32 when tf32)
using ADataTypeBuf = if_select_t<ADataType_, tf32_t, float, ADataType_>;
using BDataTypeBuf = if_select_t<BDataType_, tf32_t, float, BDataType_>;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int row = idx / N; // Compute row index
int col = idx % N; // Compute column index
@@ -790,8 +837,8 @@ __global__ void naive_gemm_kernel(ADataType* A,
AccDataType acc = 0.0;
for(int k = 0; k < K; ++k)
{
constexpr index_t packed_size_a = ck_tile::numeric_traits<ADataType>::PackedSize;
constexpr index_t packed_size_b = ck_tile::numeric_traits<BDataType>::PackedSize;
constexpr index_t packed_size_a = ck_tile::numeric_traits<ADataTypeBuf>::PackedSize;
constexpr index_t packed_size_b = ck_tile::numeric_traits<BDataTypeBuf>::PackedSize;
// Adjust indexing based on matrix layout
int a_index = (std::is_same_v<LayoutA, tensor_layout::gemm::RowMajor>)
? row * strideA + k
@@ -802,7 +849,7 @@ __global__ void naive_gemm_kernel(ADataType* A,
AccDataType v_a;
AccDataType v_b;
if constexpr(std::is_same_v<ADataType, pk_int4_t>)
if constexpr(std::is_same_v<ADataTypeBuf, pk_int4_t>)
{
const fp32x2_t fp32_val = pk_int4_t_to_fp32x2_t(A[a_index / packed_size_a]);
if(k % 2 == 1)
@@ -810,7 +857,7 @@ __global__ void naive_gemm_kernel(ADataType* A,
else
v_a = fp32_val.lo;
}
else if constexpr(std::is_same_v<ADataType, pk_fp4_t>)
else if constexpr(std::is_same_v<ADataTypeBuf, pk_fp4_t>)
{
const fp32x2_t fp32_val = pk_fp4_to_fp32x2(A[a_index / packed_size_a], 1.0f);
if(k % 2 == 1)
@@ -822,7 +869,7 @@ __global__ void naive_gemm_kernel(ADataType* A,
{
v_a = ck_tile::type_convert<AccDataType>(A[a_index]);
}
if constexpr(std::is_same_v<BDataType, pk_int4_t>)
if constexpr(std::is_same_v<BDataTypeBuf, pk_int4_t>)
{
const fp32x2_t fp32_val = pk_int4_t_to_fp32x2_t(B[b_index / packed_size_b]);
if(k % 2 == 1)
@@ -830,7 +877,7 @@ __global__ void naive_gemm_kernel(ADataType* A,
else
v_b = fp32_val.lo;
}
else if constexpr(std::is_same_v<BDataType, pk_fp4_t>)
else if constexpr(std::is_same_v<BDataTypeBuf, pk_fp4_t>)
{
const fp32x2_t fp32_val = pk_fp4_to_fp32x2(B[b_index / packed_size_b], 1.0f);
if(k % 2 == 1)
@@ -842,7 +889,33 @@ __global__ void naive_gemm_kernel(ADataType* A,
{
v_b = ck_tile::type_convert<AccDataType>(B[b_index]);
}
acc += v_a * v_b;
if constexpr(std::is_same_v<ADataTypeCompute, tf32_t>)
{
#ifdef CK_GFX950_SUPPORT
// gfx950: use 3x bf16 emulation
bf16_t v_a_bf16_big = ck_tile::type_convert<bf16_t>(v_a);
bf16_t v_a_bf16_small =
ck_tile::type_convert<bf16_t>(v_a - type_convert<AccDataType>(v_a_bf16_big));
bf16_t v_b_bf16_big = ck_tile::type_convert<bf16_t>(v_b);
bf16_t v_b_bf16_small =
ck_tile::type_convert<bf16_t>(v_b - type_convert<AccDataType>(v_b_bf16_big));
acc += ck_tile::type_convert<AccDataType>(v_a_bf16_big) *
ck_tile::type_convert<AccDataType>(v_b_bf16_small) +
ck_tile::type_convert<AccDataType>(v_a_bf16_small) *
ck_tile::type_convert<AccDataType>(v_b_bf16_big) +
ck_tile::type_convert<AccDataType>(v_a_bf16_big) *
ck_tile::type_convert<AccDataType>(v_b_bf16_big);
#else
// Other architectures: use fp32 fallback
acc += v_a * v_b;
#endif
}
else
{
acc += v_a * v_b;
}
}
int c_index = (std::is_same_v<LayoutC, tensor_layout::gemm::RowMajor>)
@@ -852,15 +925,15 @@ __global__ void naive_gemm_kernel(ADataType* A,
}
}
template <typename ADataType,
typename BDataType,
template <typename ADataType_,
typename BDataType_,
typename AccDataType,
typename CDataType,
typename LayoutA,
typename LayoutB,
typename LayoutC>
__global__ void blockwise_gemm_kernel(ADataType* A,
BDataType* B,
__global__ void blockwise_gemm_kernel(if_select_t<ADataType_, tf32_t, float, ADataType_>* A,
if_select_t<BDataType_, tf32_t, float, BDataType_>* B,
CDataType* C,
ck_tile::index_t M,
ck_tile::index_t N,
@@ -874,6 +947,14 @@ __global__ void blockwise_gemm_kernel(ADataType* A,
float* scale_A_ptr,
float* scale_B_ptr)
{
if constexpr(std::is_same_v<ADataType_, tf32_t> || std::is_same_v<BDataType_, tf32_t>)
static_assert(std::is_same_v<ADataType_, BDataType_>,
"ADataType and BDataType must be the same");
using ADataTypeCompute = ADataType_;
// ADataTypeBuf: buffer/storage type (fp32 when tf32)
using ADataTypeBuf = if_select_t<ADataType_, tf32_t, float, ADataType_>;
using BDataTypeBuf = if_select_t<BDataType_, tf32_t, float, BDataType_>;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int row = idx / N; // Compute row index
int col = idx % N; // Compute column index
@@ -902,8 +983,8 @@ __global__ void blockwise_gemm_kernel(ADataType* A,
(k / scale_granularity_k) * scale_B_stride];
}
constexpr index_t packed_size_a = ck_tile::numeric_traits<ADataType>::PackedSize;
constexpr index_t packed_size_b = ck_tile::numeric_traits<BDataType>::PackedSize;
constexpr index_t packed_size_a = ck_tile::numeric_traits<ADataTypeBuf>::PackedSize;
constexpr index_t packed_size_b = ck_tile::numeric_traits<BDataTypeBuf>::PackedSize;
// Adjust indexing based on matrix layout
int a_index = (std::is_same_v<LayoutA, tensor_layout::gemm::RowMajor>)
? row * strideA + k
@@ -914,7 +995,7 @@ __global__ void blockwise_gemm_kernel(ADataType* A,
AccDataType v_a;
AccDataType v_b;
if constexpr(std::is_same_v<ADataType, pk_int4_t>)
if constexpr(std::is_same_v<ADataTypeBuf, pk_int4_t>)
{
const fp32x2_t fp32_val = pk_int4_t_to_fp32x2_t(A[a_index / packed_size_a]);
if(k % 2 == 1)
@@ -922,7 +1003,7 @@ __global__ void blockwise_gemm_kernel(ADataType* A,
else
v_a = fp32_val.lo;
}
else if constexpr(std::is_same_v<ADataType, pk_fp4_t>)
else if constexpr(std::is_same_v<ADataTypeBuf, pk_fp4_t>)
{
const fp32x2_t fp32_val = pk_fp4_to_fp32x2(A[a_index / packed_size_a], 1.0f);
if(k % 2 == 1)
@@ -935,7 +1016,7 @@ __global__ void blockwise_gemm_kernel(ADataType* A,
v_a = ck_tile::type_convert<AccDataType>(A[a_index]);
}
if constexpr(std::is_same_v<BDataType, pk_int4_t>)
if constexpr(std::is_same_v<BDataTypeBuf, pk_int4_t>)
{
const fp32x2_t fp32_val = pk_int4_t_to_fp32x2_t(B[b_index / packed_size_b]);
if(k % 2 == 1)
@@ -943,7 +1024,7 @@ __global__ void blockwise_gemm_kernel(ADataType* A,
else
v_b = fp32_val.lo;
}
else if constexpr(std::is_same_v<BDataType, pk_fp4_t>)
else if constexpr(std::is_same_v<BDataTypeBuf, pk_fp4_t>)
{
const fp32x2_t fp32_val = pk_fp4_to_fp32x2(B[b_index / packed_size_b], 1.0f);
if(k % 2 == 1)
@@ -955,7 +1036,33 @@ __global__ void blockwise_gemm_kernel(ADataType* A,
{
v_b = ck_tile::type_convert<AccDataType>(B[b_index]);
}
acc_temp += v_a * v_b;
if constexpr(std::is_same_v<ADataTypeCompute, tf32_t>)
{
#ifdef CK_GFX950_SUPPORT
// gfx950: use 3x bf16 emulation
bf16_t v_a_bf16_big = ck_tile::type_convert<bf16_t>(v_a);
bf16_t v_a_bf16_small =
ck_tile::type_convert<bf16_t>(v_a - type_convert<AccDataType>(v_a_bf16_big));
bf16_t v_b_bf16_big = ck_tile::type_convert<bf16_t>(v_b);
bf16_t v_b_bf16_small =
ck_tile::type_convert<bf16_t>(v_b - type_convert<AccDataType>(v_b_bf16_big));
acc_temp += ck_tile::type_convert<AccDataType>(v_a_bf16_big) *
ck_tile::type_convert<AccDataType>(v_b_bf16_small) +
ck_tile::type_convert<AccDataType>(v_a_bf16_small) *
ck_tile::type_convert<AccDataType>(v_b_bf16_big) +
ck_tile::type_convert<AccDataType>(v_a_bf16_big) *
ck_tile::type_convert<AccDataType>(v_b_bf16_big);
#else
// Other architectures: use fp32 fallback
acc_temp += v_a * v_b;
#endif
}
else
{
acc_temp += v_a * v_b;
}
}
// final accumulation
acc += acc_temp * scale_A * scale_B;
@@ -974,8 +1081,8 @@ template <typename ADataType,
typename LayoutA,
typename LayoutB,
typename LayoutC>
void reference_gemm_gpu(ADataType* a_ptr,
BDataType* b_ptr,
void reference_gemm_gpu(if_select_t<ADataType, tf32_t, float, ADataType>* a_ptr,
if_select_t<BDataType, tf32_t, float, BDataType>* b_ptr,
CDataType* c_ptr,
index_t M,
index_t N,
@@ -1002,8 +1109,8 @@ template <typename ADataType,
typename LayoutA,
typename LayoutB,
typename LayoutC>
void reference_blockwise_gemm_gpu(ADataType* a_ptr,
BDataType* b_ptr,
void reference_blockwise_gemm_gpu(if_select_t<ADataType, tf32_t, float, ADataType>* a_ptr,
if_select_t<BDataType, tf32_t, float, BDataType>* b_ptr,
CDataType* c_ptr,
index_t M,
index_t N,
@@ -1040,15 +1147,15 @@ void reference_blockwise_gemm_gpu(ADataType* a_ptr,
return;
}
template <typename ADataType,
typename BDataType,
template <typename ADataType_,
typename BDataType_,
typename AccDataType,
typename CDataType,
typename LayoutA,
typename LayoutB,
typename LayoutC>
void reference_batched_gemm_gpu(ADataType* a_ptr,
BDataType* b_ptr,
void reference_batched_gemm_gpu(if_select_t<ADataType_, tf32_t, float, ADataType_>* a_ptr,
if_select_t<BDataType_, tf32_t, float, BDataType_>* b_ptr,
CDataType* c_ptr,
index_t M,
index_t N,
@@ -1061,18 +1168,29 @@ void reference_batched_gemm_gpu(ADataType* a_ptr,
index_t batch_stride_C,
index_t batch_count)
{
using ADataTypeBuf = if_select_t<ADataType_, tf32_t, float, ADataType_>;
using BDataTypeBuf = if_select_t<BDataType_, tf32_t, float, BDataType_>;
using ADataTypeCompute = ADataType_;
using BDataTypeCompute = BDataType_;
int totalElements = M * N;
int numThreadsPerBlock = 256; // Common choice for threads per block
int numBlocks = (totalElements + numThreadsPerBlock - 1) / numThreadsPerBlock;
for(index_t batch_id = 0; batch_id < batch_count; ++batch_id)
{
ADataType* d_ATemp = a_ptr + batch_id * batch_stride_A;
BDataType* d_BTemp = b_ptr + batch_id * batch_stride_B;
CDataType* d_CTemp = c_ptr + batch_id * batch_stride_C;
naive_gemm_kernel<ADataType, BDataType, AccDataType, CDataType, LayoutA, LayoutB, LayoutC>
<<<numBlocks, numThreadsPerBlock>>>(
d_ATemp, d_BTemp, d_CTemp, M, N, K, stride_a, stride_b, stride_c);
ADataTypeBuf* d_ATemp = a_ptr + batch_id * batch_stride_A;
BDataTypeBuf* d_BTemp = b_ptr + batch_id * batch_stride_B;
CDataType* d_CTemp = c_ptr + batch_id * batch_stride_C;
naive_gemm_kernel<ADataTypeCompute,
BDataTypeCompute,
AccDataType,
CDataType,
LayoutA,
LayoutB,
LayoutC><<<numBlocks, numThreadsPerBlock>>>(
d_ATemp, d_BTemp, d_CTemp, M, N, K, stride_a, stride_b, stride_c);
}
return;

View File

@@ -89,19 +89,32 @@ struct CShuffleEpilogue
remove_cvref_t<BsDataType>,
remove_cvref_t<tuple<BsDataType>>>;
using ADataType = remove_cvref_t<std::tuple_element_t<number<0>{}, AsDataTypeTuple>>;
using BDataType = remove_cvref_t<std::tuple_element_t<number<0>{}, BsDataTypeTuple>>;
// ADataTypeCompute: compute type from Problem (may be tf32_t for TF32 mode)
using ADataTypeCompute = remove_cvref_t<std::tuple_element_t<number<0>{}, AsDataTypeTuple>>;
using BDataTypeCompute = remove_cvref_t<std::tuple_element_t<number<0>{}, BsDataTypeTuple>>;
using ATypeToUse = std::conditional_t<std::is_same_v<ADataType, pk_int4_t> ||
std::is_same_v<ADataType, pk_fp4_t>,
BDataType,
ADataType>;
// ADataTypeBuf: buffer/storage type (fp32 when tf32)
using ADataTypeBuf = if_select_t<ADataTypeCompute, tf32_t, float, ADataTypeCompute>;
using BDataTypeBuf = if_select_t<BDataTypeCompute, tf32_t, float, BDataTypeCompute>;
// For warp gemm selection: use tf32_t if compute type was tf32_t
// For pk_int4/pk_fp4: use the other data type
using ATypeToUse =
std::conditional_t<std::is_same_v<ADataTypeCompute, tf32_t>,
tf32_t,
std::conditional_t<std::is_same_v<ADataTypeBuf, pk_int4_t> ||
std::is_same_v<ADataTypeBuf, pk_fp4_t>,
BDataTypeBuf,
ADataTypeBuf>>;
// Used for weight-only quantization kernel, B would be dequantized to the same data type as A
using BTypeToUse = std::conditional_t<std::is_same_v<BDataType, pk_int4_t> ||
std::is_same_v<BDataType, pk_fp4_t> ||
sizeof(BDataType) < sizeof(ADataType),
ADataType,
BDataType>;
using BTypeToUse =
std::conditional_t<std::is_same_v<BDataTypeCompute, tf32_t>,
tf32_t,
std::conditional_t<std::is_same_v<BDataTypeBuf, pk_int4_t> ||
std::is_same_v<BDataTypeBuf, pk_fp4_t> ||
sizeof(BDataTypeBuf) < sizeof(ADataTypeBuf),
ADataTypeBuf,
BDataTypeBuf>>;
using ELayout = remove_cvref_t<typename Problem::ELayout>;
using CDElementwise = remove_cvref_t<typename Problem::CDElementwise>;
@@ -137,7 +150,7 @@ struct CShuffleEpilogue
[[nodiscard]] CK_TILE_HOST static const std::string GetName()
{
// clang-format off
return concat('_', "CShuffleEpilogue",
return concat('_', "CShuffleEpilogue",
concat('x', MWave, NWave),
concat('x', MPerXdl, NPerXdl, KPerXdl),
VectorSizeC,
@@ -440,8 +453,8 @@ struct CShuffleEpilogue
constexpr int RakedXDLN_PerWarp = NumNXdlPerWavePerShuffle / BlockedXDLN_PerWarp;
// BlockedLayout
// this branch is for original a16w4
if constexpr(is_950 || is_any_of<ADataType, pk_int4_t, pk_fp4_t>::value ||
is_any_of<BDataType, pk_int4_t, pk_fp4_t>::value)
if constexpr(is_950 || is_any_of<ADataTypeBuf, pk_int4_t, pk_fp4_t>::value ||
is_any_of<BDataTypeBuf, pk_int4_t, pk_fp4_t>::value)
{
if constexpr(EightWave)
{

View File

@@ -229,15 +229,6 @@ CK_TILE_DEVICE fp16x2_t cvt_pk_fp16_f32(float a, float b)
return result;
}
CK_TILE_DEVICE bf16x2_t cvt_pk_bf16_f32(float a, float b)
{
bf16x2_t result;
asm volatile("v_cvt_pk_bf16_f32 %[result], %[a], %[b]"
: [result] "=v"(result)
: [a] "v"(a), [b] "v"(b));
return result;
}
CK_TILE_DEVICE fp32x2_t pk_mul_f32(fp32x2_t lhs, fp32x2_t rhs)
{
fp32x2_t result;
@@ -856,7 +847,7 @@ struct BlockFmhaFwdV3Pipeline
}
else
{
auto casted = detail::cvt_pk_bf16_f32(x, y);
auto casted = ck_tile::cvt_pk_bf16_f32(x, y);
sp(sp_reg_idx).p.thread_buf_[idx] = casted.x;
sp(sp_reg_idx).p.thread_buf_[idx + 1] = casted.y;
}

View File

@@ -49,6 +49,7 @@ struct GemmPipelineAgBgCrImplBase
// that only work for certain K warp tile sizes based on data type size:
// - For 1-byte types (fp8/bf8): K warp tile <= 64
// - For 2-byte types (fp16/bf16): K warp tile <= 32
// - For 4-byte types (float/tf32): transpose load not supported
static constexpr bool is_a_load_tr = []() {
using WarpTile = typename BlockGemmShape::WarpTile;
constexpr index_t kKWarpTile = WarpTile::at(number<2>{});
@@ -57,6 +58,8 @@ struct GemmPipelineAgBgCrImplBase
return false;
else if constexpr(std::is_same_v<BDataType, pk_int4_t>)
return false;
else if constexpr(sizeof(ADataType) >= 4)
return false; // 4-byte types (float/tf32) don't support transpose load
else if constexpr(kKWarpTile > kMaxKWarpTile)
return false;
else
@@ -71,6 +74,8 @@ struct GemmPipelineAgBgCrImplBase
return false;
else if constexpr(std::is_same_v<BDataType, pk_int4_t>)
return false;
else if constexpr(sizeof(BDataType) >= 4)
return false; // 4-byte types (float/tf32) don't support transpose load
else if constexpr(kKWarpTile > kMaxKWarpTile)
return false;
else

View File

@@ -909,26 +909,28 @@ struct UniversalGemmPipelineAgBgCrPolicy
: vector_size * 4 == thread_elements ? WGAttrNumAccessEnum::Quad
: WGAttrNumAccessEnum::Invalid;
using ADataType = remove_cvref_t<typename Problem::ADataType>;
using BDataType = remove_cvref_t<typename Problem::BDataType>;
using ATypeToUse =
std::conditional_t<std::is_same_v<ADataType, pk_int4_t>, BDataType, ADataType>;
using ADataType = remove_cvref_t<typename Problem::ADataType>;
using BDataType = remove_cvref_t<typename Problem::BDataType>;
using ComputeDataType = remove_cvref_t<typename Problem::ComputeDataType>;
using ATypeToUse = if_select_t<ADataType, pk_int4_t, BDataType, ADataType>;
using BTypeToUse = std::conditional_t<std::is_same_v<BDataType, pk_int4_t> ||
std::is_same_v<BDataType, pk_fp4_t> ||
sizeof(BDataType) < sizeof(ADataType),
ADataType,
BDataType>;
using WarpGemm = WarpGemmDispatcher<ATypeToUse,
BTypeToUse,
typename Problem::CDataType,
WarpTile::at(I0),
WarpTile::at(I1),
WarpTile::at(I2),
Problem::TransposeC,
false,
Problem::UseStructuredSparsity,
wg_attr_num_access>;
using WarpGemm =
WarpGemmDispatcher<if_select_t<ComputeDataType, tf32_t, tf32_t, ATypeToUse>,
if_select_t<ComputeDataType, tf32_t, tf32_t, BTypeToUse>,
typename Problem::CDataType,
WarpTile::at(I0),
WarpTile::at(I1),
WarpTile::at(I2),
Problem::TransposeC,
false,
Problem::UseStructuredSparsity,
wg_attr_num_access>;
using BlockGemmPolicy = BlockGemmASmemBSmemCRegV1CustomPolicy<ATypeToUse,
BTypeToUse,

View File

@@ -257,33 +257,37 @@ struct UniversalWeightPreshufflePipelineAgBgCrPolicy
using BlockWarps = typename Problem::BlockGemmShape::BlockWarps;
using WarpTile = typename Problem::BlockGemmShape::WarpTile;
// Use ComputeDataType to detect tf32 mode for warp gemm selection
using ComputeDataType = remove_cvref_t<typename Problem::ComputeDataType>;
using ADataType = remove_cvref_t<typename Problem::ADataType>;
using BDataType = remove_cvref_t<typename Problem::BDataType>;
// Determine the compute types to use.
// This logic defaults to the A/B DataType, but if one of them is packed it falls back to the
// other. If both are packed, it falls back to the ComputeDataType explicitly defined in the
// problem. It might be a good idea to use ComputeDataType anyway, but that would break how
// this behaviour used to work.
using ATypeToUse = mixed_prec_compute_type_from_input_t<typename Problem::ADataType,
typename Problem::BDataType,
typename Problem::ComputeDataType>;
using BTypeToUse = mixed_prec_compute_type_from_input_t<typename Problem::BDataType,
typename Problem::ADataType,
typename Problem::ComputeDataType>;
using ATypeToUse =
mixed_prec_compute_type_from_input_t<ADataType, BDataType, ComputeDataType>;
using BTypeToUse =
mixed_prec_compute_type_from_input_t<BDataType, ADataType, ComputeDataType>;
constexpr index_t WaveSize = get_warp_size();
constexpr index_t KLane = WarpTile::at(I2) * WarpTile::at(I0) / WaveSize;
// When BDataType is pk_int4_t, it is internally converted to fp8 for computation.
constexpr index_t KLaneBytes = KLane * sizeof(BTypeToUse);
constexpr auto NumAccess = static_cast<WGAttrNumAccessEnum>(max(1, KLaneBytes / 16));
using WarpGemm = WarpGemmDispatcher<ATypeToUse,
BTypeToUse,
typename Problem::CDataType,
WarpTile::at(I0),
WarpTile::at(I1),
WarpTile::at(I2),
Problem::TransposeC,
false,
false,
NumAccess>;
// For tf32 mode, use tf32_t for warp gemm; otherwise use original types
using WarpGemm =
WarpGemmDispatcher<if_select_t<ComputeDataType, tf32_t, tf32_t, ATypeToUse>,
if_select_t<ComputeDataType, tf32_t, tf32_t, BTypeToUse>,
typename Problem::CDataType,
WarpTile::at(I0),
WarpTile::at(I1),
WarpTile::at(I2),
Problem::TransposeC,
false,
false,
NumAccess>;
using BlockWeightPreshufflePolicy =
BlockWeightPreshuffleASmemBSmemCRegV1CustomPolicy<typename Problem::ADataType,

View File

@@ -48,6 +48,28 @@ using WarpGemmMfmaF32F32F32M16N16K16TransposedCDistribution =
4,
AttrNumAccess>>;
// tf32
// On gfx950: uses 3x bf16 MFMA emulation (no native xf32 support)
#if defined(CK_GFX950_SUPPORT)
// gfx950: tf32 emulated using 3x bf16 MFMA
using WarpGemmMfmaTf32Tf32F32M32N32K16Native = WarpGemmImpl<WarpGemmAttributeMfma<
WarpGemmAttributeMfmaImplF32F32F32M32N32K16Tf32Gfx950<WGAttrCtlEnum::Default_>>>;
using WarpGemmMfmaTf32Tf32F32M16N16K32Native = WarpGemmImpl<WarpGemmAttributeMfma<
WarpGemmAttributeMfmaImplF32F32F32M16N16K32Tf32Gfx950<WGAttrCtlEnum::Default_>>>;
template <WGAttrNumAccessEnum AttrNumAccess = WGAttrNumAccessEnum::Single>
using WarpGemmMfmaTf32Tf32F32M32N32K16 = WarpGemmImpl<WarpGemmAttributeMfma<
WarpGemmAttributeMfmaImplF32F32F32M32N32K16Tf32Gfx950<WGAttrCtlEnum::Default_>,
AttrNumAccess>>;
template <WGAttrNumAccessEnum AttrNumAccess = WGAttrNumAccessEnum::Single>
using WarpGemmMfmaTf32Tf32F32M16N16K32 = WarpGemmImpl<WarpGemmAttributeMfma<
WarpGemmAttributeMfmaImplF32F32F32M16N16K32Tf32Gfx950<WGAttrCtlEnum::Default_>,
AttrNumAccess>>;
#endif
// fp16
using WarpGemmMfmaF16F16F32M32N32K8 = WarpGemmImpl<

View File

@@ -190,6 +190,141 @@ struct WarpGemmAttributeMfmaImplF32F32F32M32N32K2
}
};
// tf32/xf32 emulation on gfx950 using 3x bf16 MFMA
// Algorithm: split float into bf16_big and bf16_small, then compute:
// out = A_big * B_big + A_small * B_big + A_big * B_small
// This provides tf32-like precision using bf16 hardware
// V_MFMA_F32_32x32x16_XF32 emulated on gfx950 using 3x bf16 32x32x16
template <WGAttrCtlEnum Ctrl_ = WGAttrCtlEnum::Default_>
struct WarpGemmAttributeMfmaImplF32F32F32M32N32K16Tf32Gfx950
{
static constexpr WGAttrCtlEnum Ctrl = Ctrl_;
using ADataType = float;
using BDataType = float;
using CDataType = float;
// Input: 8 floats for K=16 (each lane holds 8 elements, kABKPerLane=8)
using AVecType = ext_vector_t<ADataType, 8>;
using BVecType = ext_vector_t<BDataType, 8>;
using CVecType = ext_vector_t<CDataType, 16>;
static constexpr index_t kM = 32;
static constexpr index_t kN = 32;
static constexpr index_t kK = 16;
static constexpr index_t kAMBlock = 1;
static constexpr index_t kBNBlock = 1;
static constexpr index_t kAMLane = 32;
static constexpr index_t kBNLane = 32;
static constexpr index_t kABKLane = 2;
static constexpr index_t kABKPerLane = 8;
static constexpr index_t kCMLane = 2;
static constexpr index_t kCNLane = 32;
static constexpr index_t kCM0PerLane = 4;
static constexpr index_t kCM1PerLane = 4;
// c_vec += a_vec * b_vec
template <bool post_nop_ = false>
CK_TILE_DEVICE void operator()(CVecType& c_vec,
const AVecType& a_vec,
const BVecType& b_vec,
bool_constant<post_nop_> = {}) const
{
#if defined(__gfx950__)
// Convert float to bf16 pairs using packed instructions
ext_vector_t<bf16_t, 8> a_big, a_small, b_big, b_small;
convert_float_to_bf16_pairs<8>(a_vec, a_big, a_small);
convert_float_to_bf16_pairs<8>(b_vec, b_big, b_small);
// Run 3 bf16 MFMAs: small*big, big*small, big*big
c_vec = __builtin_amdgcn_mfma_f32_32x32x16_bf16(a_small, b_big, c_vec, 0, 0, 0);
c_vec = __builtin_amdgcn_mfma_f32_32x32x16_bf16(a_big, b_small, c_vec, 0, 0, 0);
c_vec = __builtin_amdgcn_mfma_f32_32x32x16_bf16(a_big, b_big, c_vec, 0, 0, 0);
#else
ck_tile::ignore = c_vec;
ck_tile::ignore = a_vec;
ck_tile::ignore = b_vec;
#endif
}
// c_vec = a_vec * b_vec
CK_TILE_DEVICE CVecType operator()(const AVecType& a_vec, const BVecType& b_vec) const
{
CVecType c_vec{0.f};
(*this)(c_vec, a_vec, b_vec);
return c_vec;
}
};
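
The three MFMAs implement the split-product expansion. Writing $a = a_b + a_s$ and $b = b_b + b_s$ with $a_b, b_b$ the bf16 "big" parts:

$$ab = a_b b_b + a_b b_s + a_s b_b + a_s b_s$$

The emulation keeps the first three terms and drops $a_s b_s$; each residual is at most about one bf16 ulp of its operand, so the dropped term is on the order of $2^{-16}\,|ab|$, well below TF32's 10-bit mantissa resolution of roughly $2^{-11}$.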
// V_MFMA_F32_16x16x32_XF32 emulated on gfx950 using 3x bf16 16x16x32
template <WGAttrCtlEnum Ctrl_ = WGAttrCtlEnum::Default_>
struct WarpGemmAttributeMfmaImplF32F32F32M16N16K32Tf32Gfx950
{
static constexpr WGAttrCtlEnum Ctrl = Ctrl_;
using ADataType = float;
using BDataType = float;
using CDataType = float;
// Input: 8 floats for K=32 (each lane holds 8 elements, kABKPerLane=8)
using AVecType = ext_vector_t<ADataType, 8>;
using BVecType = ext_vector_t<BDataType, 8>;
using CVecType = ext_vector_t<CDataType, 4>;
static constexpr index_t kM = 16;
static constexpr index_t kN = 16;
static constexpr index_t kK = 32;
static constexpr index_t kAMBlock = 1;
static constexpr index_t kBNBlock = 1;
static constexpr index_t kAMLane = 16;
static constexpr index_t kBNLane = 16;
static constexpr index_t kABKLane = 4;
static constexpr index_t kABKPerLane = 8;
static constexpr index_t kCMLane = 4;
static constexpr index_t kCNLane = 16;
static constexpr index_t kCM0PerLane = 1;
static constexpr index_t kCM1PerLane = 4;
// c_vec += a_vec * b_vec
template <bool post_nop_ = false>
CK_TILE_DEVICE void operator()(CVecType& c_vec,
const AVecType& a_vec,
const BVecType& b_vec,
bool_constant<post_nop_> = {}) const
{
#if defined(__gfx950__)
// Convert float to bf16 pairs using packed instructions
ext_vector_t<bf16_t, 8> a_big, a_small, b_big, b_small;
convert_float_to_bf16_pairs<8>(a_vec, a_big, a_small);
convert_float_to_bf16_pairs<8>(b_vec, b_big, b_small);
// Run 3 bf16 MFMAs: small*big, big*small, big*big
c_vec = __builtin_amdgcn_mfma_f32_16x16x32_bf16(a_small, b_big, c_vec, 0, 0, 0);
c_vec = __builtin_amdgcn_mfma_f32_16x16x32_bf16(a_big, b_small, c_vec, 0, 0, 0);
c_vec = __builtin_amdgcn_mfma_f32_16x16x32_bf16(a_big, b_big, c_vec, 0, 0, 0);
#else
ck_tile::ignore = c_vec;
ck_tile::ignore = a_vec;
ck_tile::ignore = b_vec;
#endif
}
// c_vec = a_vec * b_vec
CK_TILE_DEVICE CVecType operator()(const AVecType& a_vec, const BVecType& b_vec) const
{
CVecType c_vec{0.f};
(*this)(c_vec, a_vec, b_vec);
return c_vec;
}
};
// V_MFMA_F32_16x16x32_BF16
template <WGAttrCtlEnum Ctrl_ = WGAttrCtlEnum::Default_>
struct WarpGemmAttributeMfmaImplBf16Bf16F32M16N16K32

View File

@@ -40,6 +40,22 @@ template<> struct Dispatcher<float, float, float, 32, 32, 4, false> { using Typ
template<> struct Dispatcher<float, float, float, 32, 32, 8, false> { using Type = WarpGemmMfmaF32F32F32M32N32K8<>; };
template<> struct Dispatcher<float, float, float, 32, 32, 8, false, false, false, EDouble> { using Type = WarpGemmMfmaF32F32F32M32N32K8<EDouble>; };
template<> struct Dispatcher<float, float, float, 16, 16, 16, true> { using Type = WarpGemmMfmaF32F32F32M16N16K16TransposedCDistribution<>; };
// tf32 (on gfx950: uses 3x bf16 MFMA emulation)
// ADataType, BDataType, AccDataType, MPerWave, NPerWave, KPerWave, TransposeC, SwizzleA, UseStructuredSparsity
#if defined(CK_GFX950_SUPPORT)
template<> struct Dispatcher<tf32_t, tf32_t, float, 32, 32, 16, false> { using Type = WarpGemmMfmaTf32Tf32F32M32N32K16<>; };
template<> struct Dispatcher<tf32_t, tf32_t, float, 32, 32, 16, true> { using Type = WarpGemmMfmaTf32Tf32F32M32N32K16<>; };
template<> struct Dispatcher<tf32_t, tf32_t, float, 32, 32, 16, false, false, false, EDouble> { using Type = WarpGemmMfmaTf32Tf32F32M32N32K16<EDouble>; };
template<> struct Dispatcher<tf32_t, tf32_t, float, 32, 32, 16, false, false, false, EQuad> { using Type = WarpGemmMfmaTf32Tf32F32M32N32K16<EQuad>; };
// TF32 16x16x32 for weight preshuffle pipeline (emulated via 3x bf16 16x16x32 MFMA)
template<> struct Dispatcher<tf32_t, tf32_t, float, 16, 16, 32, false> { using Type = WarpGemmMfmaTf32Tf32F32M16N16K32<>; };
template<> struct Dispatcher<tf32_t, tf32_t, float, 16, 16, 32, false, false, false, EDouble> { using Type = WarpGemmMfmaTf32Tf32F32M16N16K32<EDouble>; };
template<> struct Dispatcher<tf32_t, tf32_t, float, 16, 16, 32, false, false, false, EQuad> { using Type = WarpGemmMfmaTf32Tf32F32M16N16K32<EQuad>; };
#endif
// Note: For gfx11/gfx12 and other architectures that don't support tf32,
// these dispatchers are not defined. Code using tf32 should be guarded
// by CK_ENABLE_TF32 or CK_GFX950_SUPPORT macros.
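
A sketch of how a caller might honor that guard (type names as they appear in this file; the alias is illustrative, not part of this PR):

```cpp
#if defined(CK_GFX950_SUPPORT)
// tf32 dispatch exists only when the gfx950 emulation path is compiled in.
using Tf32WarpGemm = Dispatcher<tf32_t, tf32_t, float, 32, 32, 16, false>::Type;
#else
// Fall back to a plain fp32 MFMA dispatch on architectures without tf32.
using Tf32WarpGemm = Dispatcher<float, float, float, 32, 32, 8, false>::Type;
#endif
```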
// fp16
// ADataType, BDataType, AccDataType, MPerWave, NPerWave, KPerWave, TransposeC, SwizzleA, UseStructuredSparsity
template<> struct Dispatcher<half_t, half_t, float, 32, 32, 8, false> { using Type = WarpGemmMfmaF16F16F32M32N32K8; };