Mirror of https://github.com/ROCm/composable_kernel.git, synced 2026-05-14 18:17:44 +00:00
* add intrin_mfma_f64_16x16x4f64
* add example
* gemm reference add double data type
* change init data
* fix M N PerXdlops
* fix ifdef
* add comparison config
* add conv fwd example
* format log output
* change rc matrix register layout
* reorganize example
* reorganize example 2
* format, because of merging develop
* fix call impl adding acc data type
* fix lost ;
* add compiler warning
* change example tuning parameters
* add test for fp64
* add instance
* add test/gemm/gemm_fp64.cpp
* fix get name issue
* remove some tuning parameters
* fix conflict
* format
* use integer value for GEMM test
* add acc data type
* remove typeid because fp16
* fix streamconfig etc bug from merging develop
* format
* remove test_gemm_xdl_fp64
* add AccDataType
* AccDataType problem
Co-authored-by: qinletao <letaoqin@amd.com>
Co-authored-by: Chao Liu <chao.liu2@amd.com>
[ROCm/composable_kernel commit: 3e6c2610ae]
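The first entry in the log above adds intrin_mfma_f64_16x16x4f64. As a rough illustration only (not the repository's actual code), such a wrapper would forward to the gfx90a fp64 MFMA compiler builtin from a HIP translation unit; the struct layout and member names below are assumptions taken from the commit title:

#include <hip/hip_runtime.h>

// One MFMA step: a wavefront multiplies a 16x16 fp64 tile with K = 4.
// With 64 lanes, each lane supplies one A value and one B value and holds
// 16*16/64 = 4 fp64 accumulator values.
using double4_t = double __attribute__((ext_vector_type(4)));

struct intrin_mfma_f64_16x16x4f64 // illustrative; name taken from the commit title
{
    __device__ static void Run(const double& reg_a, const double& reg_b, double4_t& reg_c)
    {
        // cbsz/abid/blgp modifiers are left at 0 (no broadcast or lane-group swizzle).
        reg_c = __builtin_amdgcn_mfma_f64_16x16x4f64(reg_a, reg_b, reg_c, 0, 0, 0);
    }
};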
162 lines
8.0 KiB
C++
#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>
#include <sstream> // std::istringstream, used by argToIntArray
#include <string>  // std::string, std::getline, std::stoi
#include <vector>  // std::vector

#include <half.hpp>

#include "profile_grouped_gemm_impl.hpp"

enum struct GemmMatrixLayout
{
    MK_KN_MN, // 0
    MK_NK_MN, // 1
    KM_KN_MN, // 2
    KM_NK_MN, // 3
    MK_KN_NM, // 4
    MK_NK_NM, // 5
    KM_KN_NM, // 6
    KM_NK_NM, // 7
};

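// Layout names read A_B_C: MK_KN_MN means A is [m, k], B is [k, n], and
// C is [m, n] (matching the usage text below); KM and NK are the transposed
// variants. Only the *_MN layouts (0-3) are dispatched in this file.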
enum struct GemmDataType
{
    F32_F32_F32,    // 0
    F16_F16_F16,    // 1
    BF16_BF16_BF16, // 2
    INT8_INT8_INT8, // 3
};

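// Parses a comma-separated list such as "256,256" into {256, 256}; used for
// the per-group M/N/K and stride arguments.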
std::vector<int> argToIntArray(char* input)
{
    std::vector<int> out;

    std::istringstream in(input);

    std::string item;

    while(std::getline(in, item, ','))
    {
        out.push_back(std::stoi(item));
    }

    return out;
}

int profile_grouped_gemm(int argc, char* argv[])
{
    if(argc != 14)
    {
        printf("arg1: tensor operation (grouped_gemm: Grouped GEMM)\n");
        printf("arg2: data type (0: fp32; 1: fp16; 2: bf16; 3: int8)\n");
        printf("arg3: matrix layout (0: A[m, k] * B[k, n] = C[m, n];\n");
        printf("                     1: A[m, k] * B[n, k] = C[m, n];\n");
        printf("                     2: A[k, m] * B[k, n] = C[m, n];\n");
        printf("                     3: A[k, m] * B[n, k] = C[m, n])\n");
        printf("arg4: verification (0: no; 1: yes)\n");
        printf("arg5: initialization (0: no init; 1: integer value; 2: decimal value)\n");
        printf("arg6: print tensor value (0: no; 1: yes)\n");
        printf("arg7: time kernel (0: no; 1: yes)\n");
        printf("arg8 to 13: Ms, Ns, Ks, StrideAs, StrideBs, StrideCs (e.g., 256,256 128,128 64,64 "
               "64,64 64,64 128,128)\n");
        exit(1);
    }

    const auto data_type       = static_cast<GemmDataType>(std::stoi(argv[2]));
    const auto layout          = static_cast<GemmMatrixLayout>(std::stoi(argv[3]));
    const bool do_verification = std::stoi(argv[4]);
    const int init_method      = std::stoi(argv[5]);
    const bool do_log          = std::stoi(argv[6]);
    const bool time_kernel     = std::stoi(argv[7]);

    const auto Ms = argToIntArray(argv[8]);
    const auto Ns = argToIntArray(argv[9]);
    const auto Ks = argToIntArray(argv[10]);

    const auto StrideAs = argToIntArray(argv[11]);
    const auto StrideBs = argToIntArray(argv[12]);
    const auto StrideCs = argToIntArray(argv[13]);

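    // Only fp16 inputs with a row-major [m, n] output (layouts 0-3) are
    // instantiated below; any other data type / layout combination falls
    // through to the runtime_error at the end.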
    if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::MK_KN_MN)
    {
        ck::profiler::profile_grouped_gemm_impl<ck::half_t,
                                                ck::half_t,
                                                ck::half_t,
                                                ck::half_t,
                                                ck::tensor_layout::gemm::RowMajor,
                                                ck::tensor_layout::gemm::RowMajor,
                                                ck::tensor_layout::gemm::RowMajor>(do_verification,
                                                                                   init_method,
                                                                                   do_log,
                                                                                   time_kernel,
                                                                                   Ms,
                                                                                   Ns,
                                                                                   Ks,
                                                                                   StrideAs,
                                                                                   StrideBs,
                                                                                   StrideCs);
    }
    else if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::MK_NK_MN)
    {
        ck::profiler::profile_grouped_gemm_impl<ck::half_t,
                                                ck::half_t,
                                                ck::half_t,
                                                ck::half_t,
                                                ck::tensor_layout::gemm::RowMajor,
                                                ck::tensor_layout::gemm::ColumnMajor,
                                                ck::tensor_layout::gemm::RowMajor>(do_verification,
                                                                                   init_method,
                                                                                   do_log,
                                                                                   time_kernel,
                                                                                   Ms,
                                                                                   Ns,
                                                                                   Ks,
                                                                                   StrideAs,
                                                                                   StrideBs,
                                                                                   StrideCs);
    }
    else if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::KM_KN_MN)
    {
        ck::profiler::profile_grouped_gemm_impl<ck::half_t,
                                                ck::half_t,
                                                ck::half_t,
                                                ck::half_t,
                                                ck::tensor_layout::gemm::ColumnMajor,
                                                ck::tensor_layout::gemm::RowMajor,
                                                ck::tensor_layout::gemm::RowMajor>(do_verification,
                                                                                   init_method,
                                                                                   do_log,
                                                                                   time_kernel,
                                                                                   Ms,
                                                                                   Ns,
                                                                                   Ks,
                                                                                   StrideAs,
                                                                                   StrideBs,
                                                                                   StrideCs);
    }
    else if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::KM_NK_MN)
    {
        ck::profiler::profile_grouped_gemm_impl<ck::half_t,
                                                ck::half_t,
                                                ck::half_t,
                                                ck::half_t,
                                                ck::tensor_layout::gemm::ColumnMajor,
                                                ck::tensor_layout::gemm::ColumnMajor,
                                                ck::tensor_layout::gemm::RowMajor>(do_verification,
                                                                                   init_method,
                                                                                   do_log,
                                                                                   time_kernel,
                                                                                   Ms,
                                                                                   Ns,
                                                                                   Ks,
                                                                                   StrideAs,
                                                                                   StrideBs,
                                                                                   StrideCs);
    }
    else
    {
        throw std::runtime_error("wrong! this GEMM data_type & layout is not implemented");
    }

    return 0;
}
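For reference, an invocation that matches the usage text above (and the fp16 branches actually implemented) could look like the following. The ckProfiler executable name is an assumption about how this file is linked into a driver binary; the list values are the ones suggested by the help text, describing two GEMM groups:

./ckProfiler grouped_gemm 1 0 1 1 0 1 256,256 128,128 64,64 64,64 64,64 128,128

Here arg2=1 selects fp16, arg3=0 selects A[m, k] * B[k, n] = C[m, n], verification and integer initialization are on, tensor printing is off, kernel timing is on, and the six trailing lists give the per-group Ms, Ns, Ks, StrideAs, StrideBs, and StrideCs.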