mirror of
https://github.com/ROCm/composable_kernel.git
synced 2026-04-19 22:39:03 +00:00
* Use dictionary to config all the functions * Add init codegen logic for fmha fwd appendkv * Call HIP_CHECK_ERROR() macro to get real source info * Setup meaningfull arguments * Sync kernel name with the codegen * Add knew/vnew tensors to the kernel argument * Fix wrong K values after appending * Fix vnew append errro * Extract common logics * Fix Vnew tile dstr for row major case * Conditionally add fwd_splitkv API in fmha_fwd example * Conditionally add call to fmha_fwd_splitkv() * Remove "EXAMPLE_" prefix of cmake variables * Regsiter API handlers automatically * Early return if 0 < s_k_new is not supported * Show message if we are ignoring option * Unify CMakeLists.txt coding style * Set num_splits=1 if split-kv is not supported * Add length/stride getters for HostTensor * Add RoPE example utilities * Add reference_rotary_position_embedding() (not implemented) * Finish reference_rotary_position_embedding() impl * Fix typo of HostTensor<>::get_length() * Fix compilation errors * Fix wrong answer when interleaved=false * Fix wrong answer when interleaved=true * Append K/V in the host verification code * Simplify K appending logics * Simplify v_host_ref definition * Reduce input/output dimensions * Rename function: add "batched" prefix * Apply RoPE on host side * Rename RoPE utility function * Fix wrong tensor size * Avoid invoking deprecated method 'find_module' * Pass RoPE kernel args * Create Rotary Cos/Sin tile windows in kernel * Add compute data type alias for RoPE * Randomly generate seqlen_knew if needed * Fix seqlen_knew enabling check logic * Add minimum seqlen_k to generate compliance kvcache * Fix compilation error in debug mode * Fix wrong boundaries * Fix wrong seqlen_k for kvcache * Rename variables used in distributio encoding * Fix rotary cos/sin tensor/tile size * Add constraint to the rotary_dim option * Remove unused inner namespace * Add dram distribution for rotary_cos/rotary_sin (interleaved) * Only apply interleaved RoPE on Knew for now * 
Fix wrong thread starting offset * Instantiate multiple kernels for RoPE approaches * Clean-up pipeline * Fix error in RoPE host reference * Handle RoPE half-rotated logics * Support 8x rotary_dim under half-rotated RoPE * Add comment * Apply elementwise function to the loaded tiles * Unify parameter/variable naming style * Remove constness from q_ptr * Add code blocks for q_tile * Apply RoPE to q_tile * Remove debug print code in kernel * Fix wrong knew/vnew appending positions * Use better naming for tile indices * Add make_tile_window() for adding distribution only * Skip code if # of block is more than needed * Move thread locating logics into policy * Remove always true static_assert() * Rename header * Rename RotaryEmbeddingEnum * Extract rotary embedding logic out * Re-order parameters * Align naming of some tile size constants * Rename more tile size constants * Fix wrong grid size * Fix wrong shape of knew_host/vnew_host * Fix wrong index into knew_host/vnew_host * Fix wrong rotary_cos/rotary_sin memory size for Q * Extract Q/Knew vector size to helper methods * Use different rotary_cos/rotary_sin distr for Q/Knew * Update host/device specifiers * Fix wrong data type for Q rotary_cos/rotary_sin * Remove RoPEComputeDataType type alias * Shift rotary_cos/rotary_sin by cache_seqlen_k * Add comment for why I just 't' for all padding flags * Align commit message to the real comment * Fix wrong pipeline * Rename utility function * Disable host verification if API not exist * Fix wrong rope key for fp8 pipeline * Allow only apply RoPE on Q (without append KV) * Add append-kv smoke tests * Remove debug statements * Remove more debug statements * Re-arrange the 'set +x' command * Remove no-longer used method in pipeline * Add missing init code * Refine pipeline padding settings * Enlarge rotary_dim limit (8 -> 16) * Enlarge KPerThread for rotary_interleaved=false * Update rotary_dim range in smoke_test_fwd.sh * Add template argument 'kIsPagedKV' for splitkv kernels 
* Launch splitkv kernel if given page_block_size * Fix wrong kernel name * Fix seqlen_k_min for pre-fill case (1 -> 0) * Add copy_const<> type trait * Add another make_tile_window() * Introduce 'TileWindowNavigator' types * Simplify TileWindowNavigator interfaces * Fix tile window navigation bugs * Disable calling fmha_fwd() * Remove ununnecessary data members * Simplify more make_tile_window() overloads * Move V tile through TileWindowNavigator * Fix uneven split checking logic * Move code after decide seqlen_q/seqlen_k * Make sure we always start reading complete tile * Use 128 as minimus page_block_size * Fix wrong origin for bias * Add batch_stride_k/batch_stride_v in group mode * Unify origin * Add missing kernel arguments for group mode * Add paged-kv codegen logic for appendkv kernels * Add block_table kernel args for appendkv kernel * Add tile navigators to the appendkv kernel * Fix wrong tensor descriptor lengths * Pass re-created tile window to pipeline * Fix wrong strides for appendkv kernel * Allow transit tile_window to another page-block * Handle cross-page-block write * Donot perform write again if already in last page-block * Always add fmha_fwd() api * Add missing group mode argument * Remove debug macro usages * Rename option s_k_new to s_knew * Separate splitkv/non-splitkv args/traits * Remove fmha_fwd_dispatch() * Fix compilation errors * Remove dropout code in splitkv kernel * Allow problem types without define kHasDropout attr * Use generic lambda to init traits objects * Separate more non-splitkv & splitkv traits/args * Display more info for specific kernels * Show more detailed warning message * Rename 'max_num_blocks' to 'max_num_page_blocks' * Remove no-longer used pipeline files * Wrap code by #if directives * Move functors to the begining of validation code * Use generic lambda to init all the api traits/args * Fix wrong seqlen for kvcache * Add missing comment * Rename TileWindowNavigator to PageBlockNavigator * Only expose necessary 
methods (not attributes) * Re-order pipeline paremeters * Refine smoke_test_fwd.sh * Fix wrong arugment count * Make tile window directly via PageBlockNavigator * Remove unused template paremeter * Remove group mode from appendkv kernel * Fix skcheck logic * Fix wrong syntax in skcheck expr * Use meaningful options in smoke test * Remove options * Fix formatting * Fix more format * Re-organize bash functions * Pass cache_batch_idx to kernels * Support cache_batch_idx in example * Fix compilation error * Add more appendkv test * Add more case for appendkv * Fix unexisted attribute * Remove 0 < seqlen_knew constraint * Clarify the case in warning message * Remove macro checking * Force batch mode when invoking appendkv & splitkv apis * Fix mode overriding logics * Fix wrong parameter name * Randomize seqlen_k if use kvcache * Use randomized seqlen_k for kvcache * Avoid using too small rotary_cos & rotary_sin * Rename parameter * Add seqlen_q & seqlen_k rules * Add comment * Add more comments * Fix compilation errors * Fix typo in comment * Remove type argument * Avoid seqlen_k=0 for kvcache * Revert "Avoid seqlen_k=0 for kvcache" This reverts commit21c4df89e4. * Fix wrong uneven split checking logics * Only randomize kvcache seqlen_k if 1 < batch * Return earlier if split is empty * Revert "Only randomize kvcache seqlen_k if 1 < batch" This reverts commitb9a4ab0d7e. * Re-order seqlen_k_start adjustment logics * Fix compilation errors * Re-format script * Find executable from folder automatically * Fix kvcache seqlen_k generating logic * Make comment more clear * Fix wrong knew/vew appending logic on host * Add s_barrier to sync threads * Revert "Add s_barrier to sync threads" This reverts commitd3f550f30c. 
* Support only using 1 row of rotary_cos/rotary_sin * Rotate Q in different way * Unify tensor view creation logics * Fix wrong argument * Add mask to switch how we use the rotary_cos/sin * Move attr from traits to problem * Move has_mask to fmha_fwd_appendkv_args * Support use uint32_t as SAD operand in Alibi<> * Use sad_u32() in splitkv kernels * Store tensor views in PageBlockNavigator * Use stored tensor view to update tile windows * Enlarge tensor view size * Remove debug code * Fix wrong tensor view size * Wrap tensor view into PageBlockNavigator * Add DataType member to PageBlockNavigator * Remove unnecessary member functions * Refind macro use * Fix typo * Add blank line between directives and actual code * Re-format files * Remove type in comment --------- Co-authored-by: carlushuang <carlus.huang@amd.com> Co-authored-by: rocking <ChunYu.Lai@amd.com>
561 lines
17 KiB
C++
561 lines
17 KiB
C++
// SPDX-License-Identifier: MIT
|
|
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.
|
|
|
|
#pragma once
|
|
|
|
#include <algorithm>
#include <array>
#include <cassert>
#include <functional>
#include <initializer_list>
#include <iomanip>
#include <iostream>
#include <numeric>
#include <stdexcept>
#include <string>
#include <thread>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>

#include "ck_tile/core.hpp"
#include "ck_tile/host/ranges.hpp"
|
|
|
|
namespace ck_tile {
|
|
|
|
template <typename Range>
|
|
CK_TILE_HOST std::ostream& LogRange(std::ostream& os,
|
|
Range&& range,
|
|
std::string delim,
|
|
int precision = std::cout.precision(),
|
|
int width = 0)
|
|
{
|
|
bool first = true;
|
|
for(auto&& v : range)
|
|
{
|
|
if(first)
|
|
first = false;
|
|
else
|
|
os << delim;
|
|
os << std::setw(width) << std::setprecision(precision) << v;
|
|
}
|
|
return os;
|
|
}
|
|
|
|
template <typename T, typename Range>
|
|
CK_TILE_HOST std::ostream& LogRangeAsType(std::ostream& os,
|
|
Range&& range,
|
|
std::string delim,
|
|
int precision = std::cout.precision(),
|
|
int width = 0)
|
|
{
|
|
bool first = true;
|
|
for(auto&& v : range)
|
|
{
|
|
if(first)
|
|
first = false;
|
|
else
|
|
os << delim;
|
|
os << std::setw(width) << std::setprecision(precision) << static_cast<T>(v);
|
|
}
|
|
return os;
|
|
}
|
|
|
|
// Helper for call_f_unpack_args(): expands each element of the tuple-like
// 'args' (via std::get<Is>) into a separate argument and invokes f with them.
template <typename F, typename T, std::size_t... Is>
CK_TILE_HOST auto call_f_unpack_args_impl(F f, T args, std::index_sequence<Is...>)
{
    return f(std::get<Is>(args)...);
}
|
|
|
|
template <typename F, typename T>
|
|
CK_TILE_HOST auto call_f_unpack_args(F f, T args)
|
|
{
|
|
constexpr std::size_t N = std::tuple_size<T>{};
|
|
|
|
return call_f_unpack_args_impl(f, args, std::make_index_sequence<N>{});
|
|
}
|
|
|
|
// Helper for construct_f_unpack_args(): constructs an F from the elements of
// the tuple-like 'args', expanded via the index sequence.
template <typename F, typename T, std::size_t... Is>
CK_TILE_HOST auto construct_f_unpack_args_impl(T args, std::index_sequence<Is...>)
{
    return F(std::get<Is>(args)...);
}
|
|
|
|
template <typename F, typename T>
|
|
CK_TILE_HOST auto construct_f_unpack_args(F, T args)
|
|
{
|
|
constexpr std::size_t N = std::tuple_size<T>{};
|
|
|
|
return construct_f_unpack_args_impl<F>(args, std::make_index_sequence<N>{});
|
|
}
|
|
|
|
struct HostTensorDescriptor
|
|
{
|
|
HostTensorDescriptor() = default;
|
|
|
|
void CalculateStrides()
|
|
{
|
|
mStrides.clear();
|
|
mStrides.resize(mLens.size(), 0);
|
|
if(mStrides.empty())
|
|
return;
|
|
|
|
mStrides.back() = 1;
|
|
std::partial_sum(mLens.rbegin(),
|
|
mLens.rend() - 1,
|
|
mStrides.rbegin() + 1,
|
|
std::multiplies<std::size_t>());
|
|
}
|
|
|
|
template <typename X, typename = std::enable_if_t<std::is_convertible_v<X, std::size_t>>>
|
|
HostTensorDescriptor(const std::initializer_list<X>& lens) : mLens(lens.begin(), lens.end())
|
|
{
|
|
this->CalculateStrides();
|
|
}
|
|
|
|
template <typename Lengths,
|
|
typename = std::enable_if_t<
|
|
std::is_convertible_v<ck_tile::ranges::range_value_t<Lengths>, std::size_t>>>
|
|
HostTensorDescriptor(const Lengths& lens) : mLens(lens.begin(), lens.end())
|
|
{
|
|
this->CalculateStrides();
|
|
}
|
|
|
|
template <typename X,
|
|
typename Y,
|
|
typename = std::enable_if_t<std::is_convertible_v<X, std::size_t> &&
|
|
std::is_convertible_v<Y, std::size_t>>>
|
|
HostTensorDescriptor(const std::initializer_list<X>& lens,
|
|
const std::initializer_list<Y>& strides)
|
|
: mLens(lens.begin(), lens.end()), mStrides(strides.begin(), strides.end())
|
|
{
|
|
}
|
|
|
|
template <typename Lengths,
|
|
typename Strides,
|
|
typename = std::enable_if_t<
|
|
std::is_convertible_v<ck_tile::ranges::range_value_t<Lengths>, std::size_t> &&
|
|
std::is_convertible_v<ck_tile::ranges::range_value_t<Strides>, std::size_t>>>
|
|
HostTensorDescriptor(const Lengths& lens, const Strides& strides)
|
|
: mLens(lens.begin(), lens.end()), mStrides(strides.begin(), strides.end())
|
|
{
|
|
}
|
|
|
|
std::size_t get_num_of_dimension() const { return mLens.size(); }
|
|
std::size_t get_element_size() const
|
|
{
|
|
assert(mLens.size() == mStrides.size());
|
|
return std::accumulate(
|
|
mLens.begin(), mLens.end(), std::size_t{1}, std::multiplies<std::size_t>());
|
|
}
|
|
std::size_t get_element_space_size() const
|
|
{
|
|
std::size_t space = 1;
|
|
for(std::size_t i = 0; i < mLens.size(); ++i)
|
|
{
|
|
if(mLens[i] == 0)
|
|
continue;
|
|
|
|
space += (mLens[i] - 1) * mStrides[i];
|
|
}
|
|
return space;
|
|
}
|
|
|
|
std::size_t get_length(std::size_t dim) const { return mLens[dim]; }
|
|
|
|
const std::vector<std::size_t>& get_lengths() const { return mLens; }
|
|
|
|
std::size_t get_stride(std::size_t dim) const { return mStrides[dim]; }
|
|
|
|
const std::vector<std::size_t>& get_strides() const { return mStrides; }
|
|
|
|
template <typename... Is>
|
|
std::size_t GetOffsetFromMultiIndex(Is... is) const
|
|
{
|
|
assert(sizeof...(Is) == this->get_num_of_dimension());
|
|
std::initializer_list<std::size_t> iss{static_cast<std::size_t>(is)...};
|
|
return std::inner_product(iss.begin(), iss.end(), mStrides.begin(), std::size_t{0});
|
|
}
|
|
|
|
std::size_t GetOffsetFromMultiIndex(std::vector<std::size_t> iss) const
|
|
{
|
|
return std::inner_product(iss.begin(), iss.end(), mStrides.begin(), std::size_t{0});
|
|
}
|
|
|
|
friend std::ostream& operator<<(std::ostream& os, const HostTensorDescriptor& desc);
|
|
|
|
private:
|
|
std::vector<std::size_t> mLens;
|
|
std::vector<std::size_t> mStrides;
|
|
};
|
|
|
|
template <typename New2Old>
|
|
CK_TILE_HOST HostTensorDescriptor transpose_host_tensor_descriptor_given_new2old(
|
|
const HostTensorDescriptor& a, const New2Old& new2old)
|
|
{
|
|
std::vector<std::size_t> new_lengths(a.get_num_of_dimension());
|
|
std::vector<std::size_t> new_strides(a.get_num_of_dimension());
|
|
|
|
for(std::size_t i = 0; i < a.get_num_of_dimension(); i++)
|
|
{
|
|
new_lengths[i] = a.get_lengths()[new2old[i]];
|
|
new_strides[i] = a.get_strides()[new2old[i]];
|
|
}
|
|
|
|
return HostTensorDescriptor(new_lengths, new_strides);
|
|
}
|
|
|
|
/// @brief std::thread that joins itself on destruction (RAII), so a running
///        thread is never leaked by an early return or exception.
struct joinable_thread : std::thread
{
    joinable_thread() = default;

    /// Forward any std::thread constructor arguments. The template is
    /// constrained so this greedy overload does not hijack copy/move
    /// construction when the first argument is itself a joinable_thread.
    template <typename X,
              typename... Xs,
              typename = std::enable_if_t<
                  !std::is_same_v<std::remove_cv_t<std::remove_reference_t<X>>, joinable_thread>>>
    joinable_thread(X&& x, Xs&&... xs)
        : std::thread(std::forward<X>(x), std::forward<Xs>(xs)...)
    {
    }

    joinable_thread(joinable_thread&&) = default;
    joinable_thread& operator=(joinable_thread&&) = default;

    /// Join (if joinable) before ~std::thread() runs; destroying a joinable
    /// std::thread would otherwise call std::terminate().
    ~joinable_thread()
    {
        if(this->joinable())
            this->join();
    }
};
|
|
|
|
template <typename F, typename... Xs>
|
|
struct ParallelTensorFunctor
|
|
{
|
|
F mF;
|
|
static constexpr std::size_t NDIM = sizeof...(Xs);
|
|
std::array<std::size_t, NDIM> mLens;
|
|
std::array<std::size_t, NDIM> mStrides;
|
|
std::size_t mN1d;
|
|
|
|
ParallelTensorFunctor(F f, Xs... xs) : mF(f), mLens({static_cast<std::size_t>(xs)...})
|
|
{
|
|
mStrides.back() = 1;
|
|
std::partial_sum(mLens.rbegin(),
|
|
mLens.rend() - 1,
|
|
mStrides.rbegin() + 1,
|
|
std::multiplies<std::size_t>());
|
|
mN1d = mStrides[0] * mLens[0];
|
|
}
|
|
|
|
std::array<std::size_t, NDIM> GetNdIndices(std::size_t i) const
|
|
{
|
|
std::array<std::size_t, NDIM> indices;
|
|
|
|
for(std::size_t idim = 0; idim < NDIM; ++idim)
|
|
{
|
|
indices[idim] = i / mStrides[idim];
|
|
i -= indices[idim] * mStrides[idim];
|
|
}
|
|
|
|
return indices;
|
|
}
|
|
|
|
void operator()(std::size_t num_thread = 1) const
|
|
{
|
|
std::size_t work_per_thread = (mN1d + num_thread - 1) / num_thread;
|
|
|
|
std::vector<joinable_thread> threads(num_thread);
|
|
|
|
for(std::size_t it = 0; it < num_thread; ++it)
|
|
{
|
|
std::size_t iw_begin = it * work_per_thread;
|
|
std::size_t iw_end = std::min((it + 1) * work_per_thread, mN1d);
|
|
|
|
auto f = [this, iw_begin, iw_end] {
|
|
for(std::size_t iw = iw_begin; iw < iw_end; ++iw)
|
|
{
|
|
call_f_unpack_args(this->mF, this->GetNdIndices(iw));
|
|
}
|
|
};
|
|
threads[it] = joinable_thread(f);
|
|
}
|
|
}
|
|
};
|
|
|
|
template <typename F, typename... Xs>
|
|
CK_TILE_HOST auto make_ParallelTensorFunctor(F f, Xs... xs)
|
|
{
|
|
return ParallelTensorFunctor<F, Xs...>(f, xs...);
|
|
}
|
|
|
|
template <typename T>
|
|
struct HostTensor
|
|
{
|
|
using Descriptor = HostTensorDescriptor;
|
|
using Data = std::vector<T>;
|
|
|
|
template <typename X>
|
|
HostTensor(std::initializer_list<X> lens) : mDesc(lens), mData(mDesc.get_element_space_size())
|
|
{
|
|
}
|
|
|
|
template <typename X, typename Y>
|
|
HostTensor(std::initializer_list<X> lens, std::initializer_list<Y> strides)
|
|
: mDesc(lens, strides), mData(mDesc.get_element_space_size())
|
|
{
|
|
}
|
|
|
|
template <typename Lengths>
|
|
HostTensor(const Lengths& lens) : mDesc(lens), mData(mDesc.get_element_space_size())
|
|
{
|
|
}
|
|
|
|
template <typename Lengths, typename Strides>
|
|
HostTensor(const Lengths& lens, const Strides& strides)
|
|
: mDesc(lens, strides), mData(get_element_space_size())
|
|
{
|
|
}
|
|
|
|
HostTensor(const Descriptor& desc) : mDesc(desc), mData(mDesc.get_element_space_size()) {}
|
|
|
|
template <typename OutT>
|
|
HostTensor<OutT> CopyAsType() const
|
|
{
|
|
HostTensor<OutT> ret(mDesc);
|
|
std::transform(mData.cbegin(), mData.cend(), ret.mData.begin(), [](auto value) {
|
|
return ck_tile::type_convert<OutT>(value);
|
|
});
|
|
return ret;
|
|
}
|
|
|
|
HostTensor() = delete;
|
|
HostTensor(const HostTensor&) = default;
|
|
HostTensor(HostTensor&&) = default;
|
|
|
|
~HostTensor() = default;
|
|
|
|
HostTensor& operator=(const HostTensor&) = default;
|
|
HostTensor& operator=(HostTensor&&) = default;
|
|
|
|
template <typename FromT>
|
|
explicit HostTensor(const HostTensor<FromT>& other) : HostTensor(other.template CopyAsType<T>())
|
|
{
|
|
}
|
|
|
|
std::size_t get_length(std::size_t dim) const { return mDesc.get_length(dim); }
|
|
|
|
decltype(auto) get_lengths() const { return mDesc.get_lengths(); }
|
|
|
|
std::size_t get_stride(std::size_t dim) const { return mDesc.get_stride(dim); }
|
|
|
|
decltype(auto) get_strides() const { return mDesc.get_strides(); }
|
|
|
|
std::size_t get_num_of_dimension() const { return mDesc.get_num_of_dimension(); }
|
|
|
|
std::size_t get_element_size() const { return mDesc.get_element_size(); }
|
|
|
|
std::size_t get_element_space_size() const { return mDesc.get_element_space_size(); }
|
|
|
|
std::size_t get_element_space_size_in_bytes() const
|
|
{
|
|
return sizeof(T) * get_element_space_size();
|
|
}
|
|
|
|
// void SetZero() { ck_tile::ranges::fill<T>(mData, 0); }
|
|
void SetZero() { std::fill(mData.begin(), mData.end(), 0); }
|
|
|
|
template <typename F>
|
|
void ForEach_impl(F&& f, std::vector<size_t>& idx, size_t rank)
|
|
{
|
|
if(rank == mDesc.get_num_of_dimension())
|
|
{
|
|
f(*this, idx);
|
|
return;
|
|
}
|
|
// else
|
|
for(size_t i = 0; i < mDesc.get_lengths()[rank]; i++)
|
|
{
|
|
idx[rank] = i;
|
|
ForEach_impl(std::forward<F>(f), idx, rank + 1);
|
|
}
|
|
}
|
|
|
|
template <typename F>
|
|
void ForEach(F&& f)
|
|
{
|
|
std::vector<size_t> idx(mDesc.get_num_of_dimension(), 0);
|
|
ForEach_impl(std::forward<F>(f), idx, size_t(0));
|
|
}
|
|
|
|
template <typename F>
|
|
void ForEach_impl(const F&& f, std::vector<size_t>& idx, size_t rank) const
|
|
{
|
|
if(rank == mDesc.get_num_of_dimension())
|
|
{
|
|
f(*this, idx);
|
|
return;
|
|
}
|
|
// else
|
|
for(size_t i = 0; i < mDesc.get_lengths()[rank]; i++)
|
|
{
|
|
idx[rank] = i;
|
|
ForEach_impl(std::forward<const F>(f), idx, rank + 1);
|
|
}
|
|
}
|
|
|
|
template <typename F>
|
|
void ForEach(const F&& f) const
|
|
{
|
|
std::vector<size_t> idx(mDesc.get_num_of_dimension(), 0);
|
|
ForEach_impl(std::forward<const F>(f), idx, size_t(0));
|
|
}
|
|
|
|
template <typename G>
|
|
void GenerateTensorValue(G g, std::size_t num_thread = 1)
|
|
{
|
|
switch(mDesc.get_num_of_dimension())
|
|
{
|
|
case 1: {
|
|
auto f = [&](auto i) { (*this)(i) = g(i); };
|
|
make_ParallelTensorFunctor(f, mDesc.get_lengths()[0])(num_thread);
|
|
break;
|
|
}
|
|
case 2: {
|
|
auto f = [&](auto i0, auto i1) { (*this)(i0, i1) = g(i0, i1); };
|
|
make_ParallelTensorFunctor(f, mDesc.get_lengths()[0], mDesc.get_lengths()[1])(
|
|
num_thread);
|
|
break;
|
|
}
|
|
case 3: {
|
|
auto f = [&](auto i0, auto i1, auto i2) { (*this)(i0, i1, i2) = g(i0, i1, i2); };
|
|
make_ParallelTensorFunctor(f,
|
|
mDesc.get_lengths()[0],
|
|
mDesc.get_lengths()[1],
|
|
mDesc.get_lengths()[2])(num_thread);
|
|
break;
|
|
}
|
|
case 4: {
|
|
auto f = [&](auto i0, auto i1, auto i2, auto i3) {
|
|
(*this)(i0, i1, i2, i3) = g(i0, i1, i2, i3);
|
|
};
|
|
make_ParallelTensorFunctor(f,
|
|
mDesc.get_lengths()[0],
|
|
mDesc.get_lengths()[1],
|
|
mDesc.get_lengths()[2],
|
|
mDesc.get_lengths()[3])(num_thread);
|
|
break;
|
|
}
|
|
case 5: {
|
|
auto f = [&](auto i0, auto i1, auto i2, auto i3, auto i4) {
|
|
(*this)(i0, i1, i2, i3, i4) = g(i0, i1, i2, i3, i4);
|
|
};
|
|
make_ParallelTensorFunctor(f,
|
|
mDesc.get_lengths()[0],
|
|
mDesc.get_lengths()[1],
|
|
mDesc.get_lengths()[2],
|
|
mDesc.get_lengths()[3],
|
|
mDesc.get_lengths()[4])(num_thread);
|
|
break;
|
|
}
|
|
case 6: {
|
|
auto f = [&](auto i0, auto i1, auto i2, auto i3, auto i4, auto i5) {
|
|
(*this)(i0, i1, i2, i3, i4, i5) = g(i0, i1, i2, i3, i4, i5);
|
|
};
|
|
make_ParallelTensorFunctor(f,
|
|
mDesc.get_lengths()[0],
|
|
mDesc.get_lengths()[1],
|
|
mDesc.get_lengths()[2],
|
|
mDesc.get_lengths()[3],
|
|
mDesc.get_lengths()[4],
|
|
mDesc.get_lengths()[5])(num_thread);
|
|
break;
|
|
}
|
|
default: throw std::runtime_error("unspported dimension");
|
|
}
|
|
}
|
|
|
|
template <typename... Is>
|
|
std::size_t GetOffsetFromMultiIndex(Is... is) const
|
|
{
|
|
return mDesc.GetOffsetFromMultiIndex(is...);
|
|
}
|
|
|
|
template <typename... Is>
|
|
T& operator()(Is... is)
|
|
{
|
|
return mData[mDesc.GetOffsetFromMultiIndex(is...)];
|
|
}
|
|
|
|
template <typename... Is>
|
|
const T& operator()(Is... is) const
|
|
{
|
|
return mData[mDesc.GetOffsetFromMultiIndex(is...)];
|
|
}
|
|
|
|
T& operator()(std::vector<std::size_t> idx)
|
|
{
|
|
return mData[mDesc.GetOffsetFromMultiIndex(idx)];
|
|
}
|
|
|
|
const T& operator()(std::vector<std::size_t> idx) const
|
|
{
|
|
return mData[mDesc.GetOffsetFromMultiIndex(idx)];
|
|
}
|
|
|
|
HostTensor<T> transpose(std::vector<size_t> axes = {}) const
|
|
{
|
|
if(axes.empty())
|
|
{
|
|
axes.resize(this->get_num_of_dimension());
|
|
std::iota(axes.rbegin(), axes.rend(), 0);
|
|
}
|
|
if(axes.size() != mDesc.get_num_of_dimension())
|
|
{
|
|
throw std::runtime_error(
|
|
"HostTensor::transpose(): size of axes must match tensor dimension");
|
|
}
|
|
std::vector<size_t> tlengths, tstrides;
|
|
for(const auto& axis : axes)
|
|
{
|
|
tlengths.push_back(get_lengths()[axis]);
|
|
tstrides.push_back(get_strides()[axis]);
|
|
}
|
|
HostTensor<T> ret(*this);
|
|
ret.mDesc = HostTensorDescriptor(tlengths, tstrides);
|
|
return ret;
|
|
}
|
|
|
|
HostTensor<T> transpose(std::vector<size_t> axes = {})
|
|
{
|
|
return const_cast<HostTensor<T> const*>(this)->transpose(axes);
|
|
}
|
|
|
|
typename Data::iterator begin() { return mData.begin(); }
|
|
|
|
typename Data::iterator end() { return mData.end(); }
|
|
|
|
typename Data::pointer data() { return mData.data(); }
|
|
|
|
typename Data::const_iterator begin() const { return mData.begin(); }
|
|
|
|
typename Data::const_iterator end() const { return mData.end(); }
|
|
|
|
typename Data::const_pointer data() const { return mData.data(); }
|
|
|
|
typename Data::size_type size() const { return mData.size(); }
|
|
|
|
template <typename U = T>
|
|
auto AsSpan() const
|
|
{
|
|
constexpr std::size_t FromSize = sizeof(T);
|
|
constexpr std::size_t ToSize = sizeof(U);
|
|
|
|
using Element = std::add_const_t<std::remove_reference_t<U>>;
|
|
return ck_tile::span<Element>{reinterpret_cast<Element*>(data()),
|
|
size() * FromSize / ToSize};
|
|
}
|
|
|
|
template <typename U = T>
|
|
auto AsSpan()
|
|
{
|
|
constexpr std::size_t FromSize = sizeof(T);
|
|
constexpr std::size_t ToSize = sizeof(U);
|
|
|
|
using Element = std::remove_reference_t<U>;
|
|
return ck_tile::span<Element>{reinterpret_cast<Element*>(data()),
|
|
size() * FromSize / ToSize};
|
|
}
|
|
|
|
Descriptor mDesc;
|
|
Data mData;
|
|
};
|
|
} // namespace ck_tile
|