// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck_tile/core.hpp"
#include "ck_tile/host/kernel_launch.hpp"
#include "ck_tile/ops/fmha.hpp"
#include "ck_tile/ops/epilogue.hpp"
#include "mask.hpp"
#include "bias.hpp"

#include <cassert> // assert() is used by the karg/grid helpers below
#include <type_traits>

template <typename DataType>
struct FmhaFwdTypeConfig;

template <>
struct FmhaFwdTypeConfig<ck_tile::half_t>
{
    using QDataType             = ck_tile::half_t;
    using KDataType             = ck_tile::half_t;
    using VDataType             = ck_tile::half_t;
    using BiasDataType          = ck_tile::half_t;
    using RandValOutputDataType = uint8_t;
    using LSEDataType           = float; // data type for lse (logsumexp L_j = max_j + log(l_j))
    using SaccDataType          = float; // data type for first gemm accumulation
    using SMPLComputeDataType   = float; // data type for reduction, softmax
    using PDataType             = ck_tile::half_t; // data type for A matrix of second gemm
    using OaccDataType          = float; // data type for second gemm accumulation
    using ODataType             = ck_tile::half_t;
};

template <>
struct FmhaFwdTypeConfig<ck_tile::bf16_t>
{
    using QDataType             = ck_tile::bf16_t;
    using KDataType             = ck_tile::bf16_t;
    using VDataType             = ck_tile::bf16_t;
    using BiasDataType          = ck_tile::bf16_t;
    using RandValOutputDataType = uint8_t;
    using LSEDataType           = float; // data type for lse (logsumexp L_j = max_j + log(l_j))
    using SaccDataType          = float; // data type for first gemm accumulation
    using SMPLComputeDataType   = float; // data type for reduction, softmax
    using PDataType             = ck_tile::bf16_t; // data type for A matrix of second gemm
    using OaccDataType          = float; // data type for second gemm accumulation
    using ODataType             = ck_tile::bf16_t;
};

template <>
struct FmhaFwdTypeConfig<ck_tile::fp8_t>
{
    using QDataType             = ck_tile::fp8_t;
    using KDataType             = ck_tile::fp8_t;
    using VDataType             = ck_tile::fp8_t;
    using BiasDataType          = float;
    using RandValOutputDataType = uint8_t;
    using LSEDataType           = float; // data type for lse (logsumexp L_j = max_j + log(l_j))
    using SaccDataType          = float; // data type for first gemm accumulation
    using SMPLComputeDataType   = float; // data type for reduction, softmax
    using PDataType             = ck_tile::fp8_t; // data type for A matrix of second gemm
    using OaccDataType          = float; // data type for second gemm accumulation
    using ODataType             = ck_tile::fp8_t;
};

template <>
struct FmhaFwdTypeConfig<ck_tile::bf8_t>
{
    using QDataType             = ck_tile::bf8_t;
    using KDataType             = ck_tile::bf8_t;
    using VDataType             = ck_tile::bf8_t;
    using BiasDataType          = ck_tile::bf8_t;
    using RandValOutputDataType = uint8_t;
    using LSEDataType           = float; // data type for lse (logsumexp L_j = max_j + log(l_j))
    using SaccDataType          = float; // data type for first gemm accumulation
    using SMPLComputeDataType   = float; // data type for reduction, softmax
    using PDataType             = ck_tile::bf8_t; // data type for A matrix of second gemm
    using OaccDataType          = float; // data type for second gemm accumulation
    using ODataType             = ck_tile::bf8_t;
};
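
// The configs above pin the mixed-precision policy in one place: Q/K/V/P/O
// follow the kernel data type (bias is widened to float for fp8), while both
// gemm accumulators and the softmax statistics always stay in fp32:
static_assert(std::is_same_v<FmhaFwdTypeConfig<ck_tile::half_t>::OaccDataType, float> &&
              std::is_same_v<FmhaFwdTypeConfig<ck_tile::fp8_t>::SaccDataType, float>);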

struct FmhaMasks
{
    using NoMask      = ck_tile::GenericAttentionMask<false>;       // no masking
    using GenericMask = ck_tile::GenericAttentionMask<true, true>;  // masking with local (sliding-window) support
    using CausalMask  = ck_tile::GenericAttentionMask<true, false>; // masking without local support (e.g. causal)
};

// runtime args; some are passed to the kernel args (kargs), others are used to compute grids/blocks
struct fmha_fwd_args
{
    const void* q_ptr;
    const void* k_ptr;
    const void* v_ptr;
    const void* bias_ptr; // bias or alibi_slope pointer
    void* rand_val_ptr;
    void* lse_acc_ptr;
    void* o_acc_ptr;
    void* lse_ptr;
    void* o_ptr;
    const void* seqstart_q_ptr;
    const void* seqstart_k_ptr;
    const void* seqlen_k_ptr;
    ck_tile::index_t seqlen_q;
    ck_tile::index_t seqlen_k;
    ck_tile::index_t batch;
    ck_tile::index_t max_seqlen_q;
    ck_tile::index_t hdim_q;
    ck_tile::index_t hdim_v;
    ck_tile::index_t nhead_q;
    ck_tile::index_t nhead_k;
    ck_tile::index_t num_splits;
    float scale_s;
    float scale_p;
    float scale_o;
    ck_tile::index_t stride_q;
    ck_tile::index_t stride_k;
    ck_tile::index_t stride_v;
    ck_tile::index_t stride_bias; // for alibi: set to h when slopes have shape (b*h), or 0 when shape (1*h)
    ck_tile::index_t stride_randval;
    ck_tile::index_t stride_o_acc;
    ck_tile::index_t stride_o;
    ck_tile::index_t nhead_stride_q;
    ck_tile::index_t nhead_stride_k;
    ck_tile::index_t nhead_stride_v;
    ck_tile::index_t nhead_stride_bias;
    ck_tile::index_t nhead_stride_randval;
    ck_tile::index_t nhead_stride_lse;
    ck_tile::index_t nhead_stride_lse_acc;
    ck_tile::index_t nhead_stride_o_acc;
    ck_tile::index_t nhead_stride_o;
    ck_tile::index_t batch_stride_q;
    ck_tile::index_t batch_stride_k;
    ck_tile::index_t batch_stride_v;
    ck_tile::index_t batch_stride_bias;
    ck_tile::index_t batch_stride_randval;
    ck_tile::index_t batch_stride_lse;
    ck_tile::index_t batch_stride_lse_acc;
    ck_tile::index_t batch_stride_o_acc;
    ck_tile::index_t batch_stride_o;
    ck_tile::index_t split_stride_lse_acc;
    ck_tile::index_t split_stride_o_acc;
    ck_tile::index_t window_size_left;
    ck_tile::index_t window_size_right;
    ck_tile::index_t mask_type;
    float p_drop;
    bool s_randval;
    std::tuple<uint64_t, uint64_t> drop_seed_offset;
};
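
/*
  Example (illustrative sketch, not part of this header's API): filling
  fmha_fwd_args for a batch-mode half_t problem with a contiguous
  [batch, seqlen, nhead, hdim] layout and no bias/dropout/split-kv.
  `q_dev`/`k_dev`/`v_dev`/`lse_dev`/`o_dev` are hypothetical device buffers;
  the window/mask sentinel values follow this example's conventions as I
  understand them.

  fmha_fwd_args args{};
  args.q_ptr        = q_dev;
  args.k_ptr        = k_dev;
  args.v_ptr        = v_dev;
  args.lse_ptr      = lse_dev;
  args.o_ptr        = o_dev;
  args.batch        = 2;
  args.seqlen_q     = 1024;
  args.seqlen_k     = 1024;
  args.max_seqlen_q = 1024;
  args.hdim_q       = 128;
  args.hdim_v       = 128;
  args.nhead_q      = 8;
  args.nhead_k      = 8; // nhead_q % nhead_k == 0 (MQA/GQA supported)
  args.num_splits   = 1;
  args.scale_s      = 1.0f / std::sqrt(128.0f); // typically 1/sqrt(hdim_q), std::sqrt from <cmath>
  args.scale_p      = 1.0f; // only meaningful for fp8 static quantization
  args.scale_o      = 1.0f;
  // strides for a packed [b, s, h, d] tensor:
  args.stride_q       = args.nhead_q * args.hdim_q;                 // step along seqlen
  args.nhead_stride_q = args.hdim_q;                                // step along nhead
  args.batch_stride_q = args.seqlen_q * args.nhead_q * args.hdim_q; // step along batch
  // ... k/v/o (and lse) strides follow the same pattern; unused pointers stay
  // null and unused strides zero.
  args.window_size_left  = -1; // assumed sentinel: no left window limit
  args.window_size_right = -1;
  args.mask_type         = 0;  // assumed: no mask
  args.p_drop            = 0.0f;
*/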

template <typename FmhaKernel>
auto fmha_fwd_create_kargs_and_grids(fmha_fwd_args args)
{
    assert(args.nhead_q % args.nhead_k == 0);
    auto kargs = [&] {
        // create group mode kernel arguments
        if constexpr(FmhaKernel::kIsGroupMode)
        {
            return FmhaKernel::MakeKargs(args.q_ptr,
                                         args.k_ptr,
                                         args.v_ptr,
                                         args.bias_ptr,
                                         args.rand_val_ptr,
                                         args.lse_ptr,
                                         args.o_ptr,
                                         args.seqstart_q_ptr,
                                         args.seqstart_k_ptr,
                                         args.seqlen_k_ptr,
                                         args.hdim_q,
                                         args.hdim_v,
                                         args.nhead_q,
                                         args.nhead_q / args.nhead_k,
                                         args.scale_s,
                                         args.scale_p,
                                         args.scale_o,
                                         args.stride_q,
                                         args.stride_k,
                                         args.stride_v,
                                         args.stride_bias,
                                         args.stride_randval,
                                         args.stride_o,
                                         args.nhead_stride_q,
                                         args.nhead_stride_k,
                                         args.nhead_stride_v,
                                         args.nhead_stride_bias,
                                         args.nhead_stride_randval,
                                         args.nhead_stride_lse,
                                         args.nhead_stride_o,
                                         args.batch_stride_lse,
                                         args.window_size_left,
                                         args.window_size_right,
                                         args.mask_type,
                                         args.p_drop,
                                         args.s_randval,
                                         args.drop_seed_offset);
        }
        else
        { // create batch mode kernel arguments
            return FmhaKernel::MakeKargs(args.q_ptr,
                                         args.k_ptr,
                                         args.v_ptr,
                                         args.bias_ptr,
                                         args.rand_val_ptr,
                                         args.lse_ptr,
                                         args.o_ptr,
                                         args.seqlen_q,
                                         args.seqlen_k,
                                         args.hdim_q,
                                         args.hdim_v,
                                         args.nhead_q,
                                         args.nhead_q / args.nhead_k,
                                         args.scale_s,
                                         args.scale_p,
                                         args.scale_o,
                                         args.stride_q,
                                         args.stride_k,
                                         args.stride_v,
                                         args.stride_bias,
                                         args.stride_randval,
                                         args.stride_o,
                                         args.nhead_stride_q,
                                         args.nhead_stride_k,
                                         args.nhead_stride_v,
                                         args.nhead_stride_bias,
                                         args.nhead_stride_randval,
                                         args.nhead_stride_lse,
                                         args.nhead_stride_o,
                                         args.batch_stride_q,
                                         args.batch_stride_k,
                                         args.batch_stride_v,
                                         args.batch_stride_bias,
                                         args.batch_stride_randval,
                                         args.batch_stride_lse,
                                         args.batch_stride_o,
                                         args.window_size_left,
                                         args.window_size_right,
                                         args.mask_type,
                                         args.p_drop,
                                         args.s_randval,
                                         args.drop_seed_offset);
        }
    }();

    dim3 grids = FmhaKernel::GridSize(args.batch, args.nhead_q, args.max_seqlen_q, args.hdim_v);
    return ck_tile::make_tuple(kargs, grids);
}
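
/*
  Usage sketch (hedged): `FmhaKernel` stands for one of the concrete kernel
  types instantiated by the codegen script, and the launch helpers are assumed
  to come from ck_tile/host/kernel_launch.hpp with the signatures used by the
  generated fmha_fwd_() definitions.

  auto [kargs, grids]   = fmha_fwd_create_kargs_and_grids<FmhaKernel>(args);
  constexpr dim3 blocks = FmhaKernel::BlockSize();
  float ave_time        = ck_tile::launch_kernel(
      stream_config,
      ck_tile::make_kernel<blocks.x, FmhaKernel::kBlockPerCu>(
          FmhaKernel{}, grids, blocks, 0, kargs));
*/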

template <typename Kernel>
auto fmha_fwd_splitkv_create_kargs_and_grids(fmha_fwd_args args)
{
    assert(args.nhead_q % args.nhead_k == 0);
    auto kargs = [&] {
        // create group mode kernel arguments
        if constexpr(Kernel::kIsGroupMode)
        {
            return Kernel::MakeKargs(args.q_ptr,
                                     args.k_ptr,
                                     args.v_ptr,
                                     args.bias_ptr,
                                     args.rand_val_ptr,
                                     args.lse_acc_ptr,
                                     args.o_acc_ptr,
                                     args.batch,
                                     args.max_seqlen_q,
                                     args.seqstart_q_ptr,
                                     args.seqstart_k_ptr,
                                     args.seqlen_k_ptr,
                                     args.hdim_q,
                                     args.hdim_v,
                                     args.nhead_q,
                                     args.nhead_q / args.nhead_k,
                                     args.num_splits,
                                     args.scale_s,
                                     args.scale_p,
                                     args.stride_q,
                                     args.stride_k,
                                     args.stride_v,
                                     args.stride_bias,
                                     args.stride_randval,
                                     args.stride_o_acc,
                                     args.nhead_stride_q,
                                     args.nhead_stride_k,
                                     args.nhead_stride_v,
                                     args.nhead_stride_bias,
                                     args.nhead_stride_randval,
                                     args.nhead_stride_lse_acc,
                                     args.nhead_stride_o_acc,
                                     args.batch_stride_lse_acc,
                                     args.batch_stride_o_acc,
                                     args.split_stride_lse_acc,
                                     args.split_stride_o_acc,
                                     args.window_size_left,
                                     args.window_size_right,
                                     args.mask_type,
                                     args.p_drop,
                                     args.s_randval,
                                     args.drop_seed_offset);
        }
        else
        { // create batch mode kernel arguments
            return Kernel::MakeKargs(args.q_ptr,
                                     args.k_ptr,
                                     args.v_ptr,
                                     args.bias_ptr,
                                     args.rand_val_ptr,
                                     args.lse_acc_ptr,
                                     args.o_acc_ptr,
                                     args.batch,
                                     args.max_seqlen_q,
                                     args.seqlen_q,
                                     args.seqlen_k,
                                     args.hdim_q,
                                     args.hdim_v,
                                     args.nhead_q,
                                     args.nhead_q / args.nhead_k,
                                     args.num_splits,
                                     args.scale_s,
                                     args.scale_p,
                                     args.stride_q,
                                     args.stride_k,
                                     args.stride_v,
                                     args.stride_bias,
                                     args.stride_randval,
                                     args.stride_o_acc,
                                     args.nhead_stride_q,
                                     args.nhead_stride_k,
                                     args.nhead_stride_v,
                                     args.nhead_stride_bias,
                                     args.nhead_stride_randval,
                                     args.nhead_stride_lse_acc,
                                     args.nhead_stride_o_acc,
                                     args.batch_stride_q,
                                     args.batch_stride_k,
                                     args.batch_stride_v,
                                     args.batch_stride_bias,
                                     args.batch_stride_randval,
                                     args.batch_stride_lse_acc,
                                     args.batch_stride_o_acc,
                                     args.split_stride_lse_acc,
                                     args.split_stride_o_acc,
                                     args.window_size_left,
                                     args.window_size_right,
                                     args.mask_type,
                                     args.p_drop,
                                     args.s_randval,
                                     args.drop_seed_offset);
        }
    }();

    dim3 grids =
        Kernel::GridSize(args.batch, args.nhead_q, args.max_seqlen_q, args.hdim_v, args.num_splits);

    return ck_tile::make_tuple(kargs, grids);
}

template <typename Kernel>
auto fmha_fwd_splitkv_combine_create_kargs_and_grids(fmha_fwd_args args)
{
    assert(args.nhead_q % args.nhead_k == 0);
    auto kargs = [&] {
        // create group mode kernel arguments
        if constexpr(Kernel::kIsGroupMode)
        {
            return Kernel::MakeKargs(args.lse_acc_ptr,
                                     args.o_acc_ptr,
                                     args.lse_ptr,
                                     args.o_ptr,
                                     args.batch,
                                     args.max_seqlen_q,
                                     args.seqstart_q_ptr,
                                     args.hdim_v,
                                     args.num_splits,
                                     args.scale_o,
                                     args.stride_o_acc,
                                     args.stride_o,
                                     args.nhead_stride_lse_acc,
                                     args.nhead_stride_o_acc,
                                     args.nhead_stride_lse,
                                     args.nhead_stride_o,
                                     args.batch_stride_lse_acc,
                                     args.batch_stride_o_acc,
                                     args.batch_stride_lse,
                                     args.split_stride_lse_acc,
                                     args.split_stride_o_acc);
        }
        else
        { // create batch mode kernel arguments
            return Kernel::MakeKargs(args.lse_acc_ptr,
                                     args.o_acc_ptr,
                                     args.lse_ptr,
                                     args.o_ptr,
                                     args.batch,
                                     args.max_seqlen_q,
                                     args.seqlen_q,
                                     args.hdim_v,
                                     args.num_splits,
                                     args.scale_o,
                                     args.stride_o_acc,
                                     args.stride_o,
                                     args.nhead_stride_lse_acc,
                                     args.nhead_stride_o_acc,
                                     args.nhead_stride_lse,
                                     args.nhead_stride_o,
                                     args.batch_stride_lse_acc,
                                     args.batch_stride_o_acc,
                                     args.batch_stride_lse,
                                     args.batch_stride_o,
                                     args.split_stride_lse_acc,
                                     args.split_stride_o_acc);
        }
    }();

    dim3 grids = Kernel::GridSize(args.batch, args.nhead_q, args.max_seqlen_q, args.hdim_v);

    return ck_tile::make_tuple(kargs, grids);
}
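
/*
  Split-KV flow sketch (hedged): the forward pass runs in two phases.
  `SplitKVKernel` and `CombineKernel` stand for concrete generated kernel
  types; launching mirrors the non-split path above.

  Phase 1: each of the num_splits partitions of the K/V sequence produces a
  partial output and partial LSE into o_acc/lse_acc:
    auto [kargs0, grids0] = fmha_fwd_splitkv_create_kargs_and_grids<SplitKVKernel>(args);
    // ... launch SplitKVKernel with kargs0/grids0 ...

  Phase 2: the combine kernel reduces the per-split partials across the split
  dimension (a numerically stable log-sum-exp merge) and writes the final o/lse:
    auto [kargs1, grids1] =
        fmha_fwd_splitkv_combine_create_kargs_and_grids<CombineKernel>(args);
    // ... launch CombineKernel with kargs1/grids1 ...
*/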

// this is used to pattern-match internal kernel implementations, not to instantiate kernels
template <ck_tile::index_t HDim_,
          typename DataType_,
          bool kIsGroupMode_,
          ck_tile::index_t kM0_,
          ck_tile::index_t kN0_,
          ck_tile::index_t kK0_,
          ck_tile::index_t kN1_,
          ck_tile::index_t kK1_,
          ck_tile::index_t kK0BlockLength_,
          bool kIsVLayoutRowMajor_,
          ck_tile::BlockFmhaPipelineEnum FmhaPipelineEnum_,
          typename FmhaMask_,
          ck_tile::BlockAttentionBiasEnum BiasEnum_,
          bool kStoreLse_,
          bool kHasDropout_,
          bool kDoFp8StaticQuant_,
          bool kPadS_,
          bool kPadSK_,
          bool kPadD_,
          bool kPadDv_>
struct fmha_fwd_traits_
{
    static constexpr ck_tile::index_t HDim           = HDim_;
    using DataType                                   = ck_tile::remove_cvref_t<DataType_>;
    static constexpr bool kIsGroupMode               = kIsGroupMode_;
    static constexpr ck_tile::index_t kM0            = kM0_;
    static constexpr ck_tile::index_t kN0            = kN0_;
    static constexpr ck_tile::index_t kK0            = kK0_;
    static constexpr ck_tile::index_t kN1            = kN1_;
    static constexpr ck_tile::index_t kK1            = kK1_;
    static constexpr ck_tile::index_t kK0BlockLength = kK0BlockLength_;
    static constexpr bool kIsVLayoutRowMajor         = kIsVLayoutRowMajor_;
    static constexpr auto FmhaPipelineEnum           = FmhaPipelineEnum_;
    using FmhaMask                                   = ck_tile::remove_cvref_t<FmhaMask_>;
    static constexpr auto BiasEnum                   = BiasEnum_;
    static constexpr bool kStoreLse                  = kStoreLse_;
    static constexpr bool kHasDropout                = kHasDropout_;
    static constexpr bool kDoFp8StaticQuant          = kDoFp8StaticQuant_;
    static constexpr bool kPadS                      = kPadS_;
    static constexpr bool kPadSK                     = kPadSK_;
    static constexpr bool kPadD                      = kPadD_;
    static constexpr bool kPadDv                     = kPadDv_;
};

template <typename Traits_>
float fmha_fwd_(const ck_tile::stream_config&, fmha_fwd_args);
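
/*
  Example (hedged sketch): selecting one generated instance directly. The
  template argument order follows fmha_fwd_traits_ above; the concrete tile
  sizes and the enum values (QRKSVS pipeline, NO_BIAS) are illustrative
  assumptions, and only combinations actually emitted by the codegen script
  will link against a fmha_fwd_() definition.

  using fmha_trait_example = fmha_fwd_traits_<128,                 // HDim
                                              ck_tile::half_t,
                                              false,               // batch mode
                                              128, 128, 32, 128, 32, 128, // kM0..kK0BlockLength
                                              true,                // V is row-major
                                              ck_tile::BlockFmhaPipelineEnum::QRKSVS,
                                              FmhaMasks::NoMask,
                                              ck_tile::BlockAttentionBiasEnum::NO_BIAS,
                                              false, false, false, // no LSE/dropout/fp8-quant
                                              false, false, false, false>; // no padding
  float ave_time = fmha_fwd_<fmha_trait_example>(stream_config, args);
*/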

template <typename Traits_>
void fmha_fwd_splitkv_oneshot_(const ck_tile::stream_config&, fmha_fwd_args);

template <typename Traits_>
std::string fmha_fwd_splitkv_get_name_();

template <ck_tile::index_t HDim_,
          typename DataType_,
          bool kIsGroupMode_,
          ck_tile::index_t kM0_,
          ck_tile::index_t kN1_,
          bool kStoreLse_,
          bool kDoFp8StaticQuant_,
          bool kPadS_,
          bool kPadDv_>
struct fmha_fwd_splitkv_combine_traits_
{
    static constexpr ck_tile::index_t HDim  = HDim_;
    using DataType                          = ck_tile::remove_cvref_t<DataType_>;
    static constexpr bool kIsGroupMode      = kIsGroupMode_;
    static constexpr ck_tile::index_t kM0   = kM0_;
    static constexpr ck_tile::index_t kN1   = kN1_;
    static constexpr bool kStoreLse         = kStoreLse_;
    static constexpr bool kDoFp8StaticQuant = kDoFp8StaticQuant_;
    static constexpr bool kPadS             = kPadS_;
    static constexpr bool kPadDv            = kPadDv_;
};

template <typename Traits_>
void fmha_fwd_splitkv_combine_oneshot_(const ck_tile::stream_config&, fmha_fwd_args);

template <typename Traits_>
std::string fmha_fwd_splitkv_combine_get_name_();

// This is the public API; its implementation is generated by script
struct fmha_fwd_traits
{
    int hdim_q;
    int hdim_v;
    std::string data_type;
    bool is_group_mode;
    bool is_v_rowmajor;
    mask_enum mask_type;
    bias_enum bias_type; // 0: no bias, 1: elementwise bias, 2: alibi; keep in sync with BlockAttentionBiasEnum
    bool has_lse;
    bool has_dropout;
    bool do_fp8_static_quant;
    // TODO: padding check is inside this api
};

float fmha_fwd(fmha_fwd_traits, fmha_fwd_args, const ck_tile::stream_config&);
float fmha_fwd_splitkv(fmha_fwd_traits, fmha_fwd_args, const ck_tile::stream_config&);
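
/*
  End-to-end sketch (hedged): dispatching through the public API. Field values
  are illustrative; `args` is a populated fmha_fwd_args as in the earlier
  sketch, and the stream_config constructor arguments (stream handle, enable
  kernel timing) are an assumption about ck_tile::stream_config.

  fmha_fwd_traits traits{128,     // hdim_q
                         128,     // hdim_v
                         "fp16",  // data_type
                         false,   // is_group_mode
                         true,    // is_v_rowmajor
                         mask_enum::no_mask,
                         bias_enum::no_bias,
                         false,   // has_lse
                         false,   // has_dropout
                         false};  // do_fp8_static_quant
  float ave_time = fmha_fwd(traits, args, ck_tile::stream_config{nullptr, true});
  // By the example's convention (assumption), a negative return value means no
  // generated instance matched the given traits.
*/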