mirror of
https://github.com/ROCm/composable_kernel.git
synced 2026-05-16 19:09:59 +00:00
CK Tile FA Training kernels (#1286)
* FA fwd dropout
* FA bwd
* epilogue reuse
* CMakeLists update
* [CK_TILE] support alibi (#1269)
* add alibi support
* fix code
* update code based on comment
* Support more hdim
* fix fp8 bias
* support seqlen_k=0 case
* remove unused printf
* fix format
---------
Co-authored-by: rocking <ChunYu.Lai@amd.com>
* now fwd/bwd can build
* bwd alibi
* add bwd validation stream_config
* update generated filenames
* update bwd kernel launch
* CK_TILE_HOST_DEVICE in philox
* Transpose -> transpose
* format
* format
* format
* Generate the instance for FA required
* format
* fix error in WarpGemm
---------
Co-authored-by: danyao12 <danyao12>
Co-authored-by: carlushuang <carlus.huang@amd.com>
Co-authored-by: rocking <ChunYu.Lai@amd.com>
Co-authored-by: Po Yen Chen <PoYen.Chen@amd.com>
Co-authored-by: Jing Zhang <jizhan@amd.com>
[ROCm/composable_kernel commit: 2cab8d39e3]
This commit is contained in:
33
include/ck_tile/host/reference/reference_batched_dropout.hpp
Normal file
@@ -0,0 +1,33 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck_tile/core.hpp"
#include "ck_tile/host/host_tensor.hpp"

#include <thread>

namespace ck_tile {
|
||||
|
||||
template <typename DataType, typename RandValOutputDataType>
|
||||
CK_TILE_HOST void reference_batched_dropout(HostTensor<DataType>& in_out_b_m_n,
|
||||
const HostTensor<RandValOutputDataType>& randval_b_m_n,
|
||||
const uint8_t& p_undrop_in_uint8_t,
|
||||
const float scale)
|
||||
{
|
||||
const int N = in_out_b_m_n.mDesc.get_lengths()[2];
|
||||
auto f = [&](auto batch, auto m) {
|
||||
for(int n = 0; n < N; ++n)
|
||||
{
|
||||
float tmp = ck_tile::type_convert<float>(in_out_b_m_n(batch, m, n)) * scale;
|
||||
in_out_b_m_n(batch, m, n) = randval_b_m_n(batch, m, n) <= p_undrop_in_uint8_t
|
||||
? ck_tile::type_convert<DataType>(tmp)
|
||||
: DataType(0);
|
||||
}
|
||||
};
|
||||
|
||||
make_ParallelTensorFunctor(
|
||||
f, randval_b_m_n.mDesc.get_lengths()[0], randval_b_m_n.mDesc.get_lengths()[1])(
|
||||
std::thread::hardware_concurrency());
|
||||
}
|
||||
} // namespace ck_tile
|
||||
Reference in New Issue
Block a user