mirror of
https://github.com/ROCm/composable_kernel.git
synced 2026-05-17 03:19:48 +00:00
Add bfp16/int8 support into XDL GEMM operator (#50)
* init StaticBufferV2
* clean
* adopt old output stage for staticBufferV2
* clean
* remove hack
* clean
* clean
* add parameters
* clean code
* move c_buffer alloc into blockwise gemm
* add adaptors for m/n_thread_data_on_grid
* tweak gemm
* adjust blockwise_gemm_xdlops
* tweak
* update conv
* update script
* adding bwd 1x1
* update script
* adding 1x1 bwd
* debugging bwd 1x1 failure
* update script
* update script
* test
* test v100
* add bf16_1k
* clang-format
* clean
* add bfp16 for gfx908
* add verification
* clean up
* clean code
* restore bfl16
* clean
* add bfp16 support into gemm_driver
* apply new generator to other drivers
* add int8 support
* clean
* clean
* clean
* clean
Co-authored-by: Chao Liu <chao.liu2@amd.com>
Co-authored-by: Chao Liu <lc.roy86@gmail.com>
Co-authored-by: root <root@hayabusa6111.amd.com>
[ROCm/composable_kernel commit: 3737bb039a]
This commit is contained in:
@@ -1,6 +1,162 @@
|
||||
#pragma once
|
||||
#include "host_tensor.hpp"
|
||||
|
||||
template <>
|
||||
void host_gemm<ushort, ushort, ushort>(const Tensor<ushort>& a,
|
||||
const Tensor<ushort>& b,
|
||||
Tensor<ushort>& c,
|
||||
const GemmMatrixLayout layout)
|
||||
{
|
||||
if(layout == GemmMatrixLayout::MK_KN_MN)
|
||||
{
|
||||
auto f_mk_kn_mn = [&](auto m, auto n) {
|
||||
const int K = a.mDesc.GetLengths()[1];
|
||||
|
||||
double v = 0;
|
||||
|
||||
for(int k = 0; k < K; ++k)
|
||||
{
|
||||
v += bfloat16_to_float(a(m, k)) * bfloat16_to_float(b(k, n));
|
||||
}
|
||||
|
||||
c(m, n) = float_to_bfloat16(v);
|
||||
};
|
||||
|
||||
make_ParallelTensorFunctor(f_mk_kn_mn, c.mDesc.GetLengths()[0], c.mDesc.GetLengths()[1])(
|
||||
std::thread::hardware_concurrency());
|
||||
}
|
||||
else if(layout == GemmMatrixLayout::MK_NK_MN)
|
||||
{
|
||||
auto f_mk_nk_mn = [&](auto m, auto n) {
|
||||
const int K = a.mDesc.GetLengths()[1];
|
||||
|
||||
double v = 0;
|
||||
|
||||
for(int k = 0; k < K; ++k)
|
||||
{
|
||||
v += bfloat16_to_float(a(m, k)) * bfloat16_to_float(b(n, k));
|
||||
}
|
||||
|
||||
c(m, n) = float_to_bfloat16(v);
|
||||
};
|
||||
|
||||
make_ParallelTensorFunctor(f_mk_nk_mn, c.mDesc.GetLengths()[0], c.mDesc.GetLengths()[1])(
|
||||
std::thread::hardware_concurrency());
|
||||
}
|
||||
else if(layout == GemmMatrixLayout::KM_KN_MN)
|
||||
{
|
||||
auto f_km_kn_mn = [&](auto m, auto n) {
|
||||
const int K = a.mDesc.GetLengths()[0];
|
||||
|
||||
double v = 0;
|
||||
|
||||
for(int k = 0; k < K; ++k)
|
||||
{
|
||||
v += bfloat16_to_float(a(k, m)) * bfloat16_to_float(b(k, n));
|
||||
}
|
||||
|
||||
c(m, n) = float_to_bfloat16(v);
|
||||
};
|
||||
|
||||
make_ParallelTensorFunctor(f_km_kn_mn, c.mDesc.GetLengths()[0], c.mDesc.GetLengths()[1])(
|
||||
std::thread::hardware_concurrency());
|
||||
}
|
||||
else if(layout == GemmMatrixLayout::KM_NK_MN)
|
||||
{
|
||||
auto f_km_nk_mn = [&](auto m, auto n) {
|
||||
const int K = a.mDesc.GetLengths()[0];
|
||||
|
||||
double v = 0;
|
||||
|
||||
for(int k = 0; k < K; ++k)
|
||||
{
|
||||
v += bfloat16_to_float(a(k, m)) * bfloat16_to_float(b(n, k));
|
||||
}
|
||||
|
||||
c(m, n) = float_to_bfloat16(v);
|
||||
};
|
||||
|
||||
make_ParallelTensorFunctor(f_km_nk_mn, c.mDesc.GetLengths()[0], c.mDesc.GetLengths()[1])(
|
||||
std::thread::hardware_concurrency());
|
||||
}
|
||||
else if(layout == GemmMatrixLayout::MK_KN_NM)
|
||||
{
|
||||
auto f_mk_kn_nm = [&](auto n, auto m) {
|
||||
const int K = a.mDesc.GetLengths()[1];
|
||||
|
||||
double v = 0;
|
||||
|
||||
for(int k = 0; k < K; ++k)
|
||||
{
|
||||
v += bfloat16_to_float(a(m, k)) * bfloat16_to_float(b(k, n));
|
||||
}
|
||||
|
||||
c(n, m) = float_to_bfloat16(v);
|
||||
};
|
||||
|
||||
make_ParallelTensorFunctor(f_mk_kn_nm, c.mDesc.GetLengths()[0], c.mDesc.GetLengths()[1])(
|
||||
std::thread::hardware_concurrency());
|
||||
}
|
||||
else if(layout == GemmMatrixLayout::MK_NK_NM)
|
||||
{
|
||||
auto f_mk_nk_nm = [&](auto n, auto m) {
|
||||
const int K = a.mDesc.GetLengths()[1];
|
||||
|
||||
double v = 0;
|
||||
|
||||
for(int k = 0; k < K; ++k)
|
||||
{
|
||||
v += bfloat16_to_float(a(m, k)) * bfloat16_to_float(b(n, k));
|
||||
}
|
||||
|
||||
c(n, m) = float_to_bfloat16(v);
|
||||
};
|
||||
|
||||
make_ParallelTensorFunctor(f_mk_nk_nm, c.mDesc.GetLengths()[0], c.mDesc.GetLengths()[1])(
|
||||
std::thread::hardware_concurrency());
|
||||
}
|
||||
else if(layout == GemmMatrixLayout::KM_KN_NM)
|
||||
{
|
||||
auto f_km_kn_nm = [&](auto n, auto m) {
|
||||
const int K = a.mDesc.GetLengths()[0];
|
||||
|
||||
double v = 0;
|
||||
|
||||
for(int k = 0; k < K; ++k)
|
||||
{
|
||||
v += bfloat16_to_float(a(k, m)) * bfloat16_to_float(b(k, n));
|
||||
}
|
||||
|
||||
c(n, m) = float_to_bfloat16(v);
|
||||
};
|
||||
|
||||
make_ParallelTensorFunctor(f_km_kn_nm, c.mDesc.GetLengths()[0], c.mDesc.GetLengths()[1])(
|
||||
std::thread::hardware_concurrency());
|
||||
}
|
||||
else if(layout == GemmMatrixLayout::KM_NK_NM)
|
||||
{
|
||||
auto f_km_nk_nm = [&](auto n, auto m) {
|
||||
const int K = a.mDesc.GetLengths()[0];
|
||||
|
||||
double v = 0;
|
||||
|
||||
for(int k = 0; k < K; ++k)
|
||||
{
|
||||
v += bfloat16_to_float(a(k, m)) * bfloat16_to_float(b(n, k));
|
||||
}
|
||||
|
||||
c(n, m) = float_to_bfloat16(v);
|
||||
};
|
||||
|
||||
make_ParallelTensorFunctor(f_km_nk_nm, c.mDesc.GetLengths()[0], c.mDesc.GetLengths()[1])(
|
||||
std::thread::hardware_concurrency());
|
||||
}
|
||||
else
|
||||
{
|
||||
throw std::runtime_error("wrong! not supported layout");
|
||||
}
|
||||
}
|
||||
|
||||
template <typename AType, typename BType, typename CType>
|
||||
void host_gemm_mk_kn_mn(const Tensor<AType>& a_m_k,
|
||||
const Tensor<BType>& b_k_n,
|
||||
|
||||
@@ -321,4 +321,41 @@ void check_error(const Tensor<T>& ref, const Tensor<T>& result)
|
||||
std::cout << "max_diff: " << max_diff << ", " << ref_value << ", " << result_value << std::endl;
|
||||
}
|
||||
|
||||
/// Reinterpret a bfloat16 bit pattern (carried in a ushort) as a float.
///
/// bfloat16 is the upper 16 bits of an IEEE-754 binary32 value, so placing the
/// payload in the high half of a 32-bit word and zero-filling the low half
/// reproduces the represented float exactly.
///
/// BUG FIX: the previous implementation used `union { ushort x, y; float f32; }`.
/// In a union, `x` and `y` are BOTH 16-bit members at offset 0, so
/// `v.x = 0; v.y = src_val;` wrote the same low 16 bits twice and left the
/// high half of `f32` indeterminate — the result was garbage, not the bf16
/// value. The shift + memcpy below is the well-defined equivalent of the
/// intended struct-of-two-halves punning.
float bf16_to_f32(ushort src_val)
{
    const uint32_t bits = static_cast<uint32_t>(src_val) << 16;

    float f32;
    std::memcpy(&f32, &bits, sizeof(f32)); // well-defined type punning
    return f32;
}
|
||||
|
||||
template <>
|
||||
void check_error<ushort>(const Tensor<ushort>& ref, const Tensor<ushort>& result)
|
||||
{
|
||||
float error = 0;
|
||||
float max_diff = -1;
|
||||
float ref_value = 0, result_value = 0;
|
||||
for(int i = 0; i < ref.mData.size(); ++i)
|
||||
{
|
||||
error += std::abs(bf16_to_f32(ref.mData[i]) - bf16_to_f32(result.mData[i]));
|
||||
float diff = std::abs(bf16_to_f32(ref.mData[i]) - bf16_to_f32(result.mData[i]));
|
||||
if(max_diff < diff)
|
||||
{
|
||||
max_diff = diff;
|
||||
ref_value = bf16_to_f32(ref.mData[i]);
|
||||
result_value = bf16_to_f32(result.mData[i]);
|
||||
}
|
||||
}
|
||||
|
||||
std::cout << "error: " << error << std::endl;
|
||||
std::cout << "max_diff: " << max_diff << ", ref: " << ref_value << ", res: " << result_value
|
||||
<< std::endl;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
#include <cmath>
|
||||
#include "config.hpp"
|
||||
|
||||
template <typename T>
|
||||
struct GeneratorTensor_1
|
||||
{
|
||||
int value = 1;
|
||||
@@ -15,6 +16,30 @@ struct GeneratorTensor_1
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
struct GeneratorTensor_1<ushort>
|
||||
{
|
||||
float value = 1.0;
|
||||
|
||||
template <typename... Is>
|
||||
ushort operator()(Is...)
|
||||
{
|
||||
return float_to_bfloat16(value);
|
||||
}
|
||||
};
|
||||
|
||||
// Primary template is defined earlier in this file; this re-declaration is a
// no-op that keeps the specialization below readable in isolation.
template <typename T>
struct GeneratorTensor_1;

// Fills an int8 tensor with a single constant value.
template <>
struct GeneratorTensor_1<int8_t>
{
    int8_t value = 1;

    // The index arguments are ignored: every element receives the same constant.
    template <typename... Is>
    int8_t operator()(Is...)
    {
        const int8_t fill_value = value;
        return fill_value;
    }
};
|
||||
|
||||
struct GeneratorTensor_0
|
||||
{
|
||||
int value = 0;
|
||||
@@ -26,6 +51,7 @@ struct GeneratorTensor_0
|
||||
}
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
struct GeneratorTensor_2
|
||||
{
|
||||
int min_value = 0;
|
||||
@@ -38,6 +64,33 @@ struct GeneratorTensor_2
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
struct GeneratorTensor_2<ushort>
|
||||
{
|
||||
int min_value = 0;
|
||||
int max_value = 1;
|
||||
|
||||
template <typename... Is>
|
||||
ushort operator()(Is...)
|
||||
{
|
||||
float tmp = (std::rand() % (max_value - min_value)) + min_value;
|
||||
return float_to_bfloat16(tmp);
|
||||
}
|
||||
};
|
||||
|
||||
// Primary template is defined earlier in this file; this re-declaration is a
// no-op that keeps the specialization below readable in isolation.
template <typename T>
struct GeneratorTensor_2;

// Fills an int8 tensor with random integers drawn from [min_value, max_value).
template <>
struct GeneratorTensor_2<int8_t>
{
    int min_value = 0;
    int max_value = 1;

    template <typename... Is>
    int8_t operator()(Is...)
    {
        // Guard the degenerate range: `std::rand() % 0` is undefined behavior
        // when max_value == min_value; fall back to min_value in that case.
        const int range = max_value - min_value;
        const int tmp   = (range > 0) ? (std::rand() % range) + min_value : min_value;
        return static_cast<int8_t>(tmp);
    }
};
|
||||
|
||||
template <typename T>
|
||||
struct GeneratorTensor_3
|
||||
{
|
||||
@@ -53,6 +106,39 @@ struct GeneratorTensor_3
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
struct GeneratorTensor_3<ushort>
|
||||
{
|
||||
float min_value = 0;
|
||||
float max_value = 1;
|
||||
|
||||
template <typename... Is>
|
||||
ushort operator()(Is...)
|
||||
{
|
||||
float tmp = float(std::rand()) / float(RAND_MAX);
|
||||
|
||||
float fp32_tmp = min_value + tmp * (max_value - min_value);
|
||||
|
||||
return float_to_bfloat16(fp32_tmp);
|
||||
}
|
||||
};
|
||||
|
||||
// Primary template is defined earlier in this file; this re-declaration is a
// no-op that keeps the specialization below readable in isolation.
template <typename T>
struct GeneratorTensor_3;

// Fills an int8 tensor with random integers drawn from [min_tmp, max_tmp),
// where the float bounds are truncated to int8 before sampling.
template <>
struct GeneratorTensor_3<int8_t>
{
    float min_value = 0;
    float max_value = 1;

    template <typename... Is>
    int8_t operator()(Is...)
    {
        const int8_t min_tmp = static_cast<int8_t>(min_value);
        const int8_t max_tmp = static_cast<int8_t>(max_value);

        // Guard the degenerate range: `std::rand() % 0` is undefined behavior
        // when the truncated bounds coincide; fall back to the lower bound.
        const int range = max_tmp - min_tmp;
        if(range <= 0)
        {
            return min_tmp;
        }

        return static_cast<int8_t>((std::rand() % range) + min_tmp);
    }
};
|
||||
|
||||
struct GeneratorTensor_Checkboard
|
||||
{
|
||||
template <typename... Ts>
|
||||
|
||||
Reference in New Issue
Block a user