Mirror of https://github.com/ROCm/composable_kernel.git
synced 2026-05-17 11:30:02 +00:00
* [fix] align v3 gufusion pipeline
* fix device kernel selection.
* Add .co direct asm support by CK_USE_ASM_MOE_STAGE2_BLOCKSCALE
* experimental optimization for scale load in blkscale gemm
* Add asm for no-loop v3_128x128x128
* fix bugs
* tune fp8 example
* Update v1_128x128x128 to 2x2 instead of 4x1
* wip
* add warmup to asm launch
* wip2
* 16x16 function merged to moe
* temp save, a performant version.
* wip3
* Update .co binary to 16x16
* 16x16x128 correct; 64x64x128 failed
* update
* use mem_op::set when topk=1
* add mx fp8 b_preshuffle support, function not yet tested.
* Split the fp4 target. Fix the known bugs. 128x128x128 sanity checked; remove prints
* some fixes
* fix update
* remove some unnecessary hacks; enable 256x256x256 tile size
* update for function debug
* Add pipeline v3. Has a runtime issue and register spill
* Fix pipe v3 correctness issue
* remove unnecessary hacks
* clang format
* fix a bug
* fix the bug, functional test passed
* tempsave; buggy when passing 4 e8m0 values to scaled mfma
* added fp4_bpreshuffle example, build failures
* fixed some bugs
* implement shuffled scale mxfp4 gemm; blocker: opsel has no effect
* hotfix
* fix bugs, build passed
* (M, N, K)=(128, 128, 128) function failed.
* temp save for gemm1. Function not ready
* fix compile error. Gemm2 pass. Gemm1 WIP
* fix bug for a lds read
* update moe
* Compile pass. Gemm1 function WIP
* update moe
* fix fp8; fix even/odd
* tempsave
* update moe
* Revert "update"
This reverts commit c7d79dcb672616d9bc0fd9958f714fc80e7c84fd.
* Revert "use mem_op::set when topk=1"
This reverts commit 8c7772860735001a51421e7b6d0a28f6676d6c40.
* Add v3 128x128x128_4x4_16x16.co for gfx950
* temp cmake flag suppression for aiter test
* add code for mxfp4 gemm, blockscale not supported yet
* gemm1 up-only pass. GU WIP
* function pass with inline asm hack
* revert unexpected file change
* updated and build passed
* update CE elementOP
* added code for debug
* Gemm1 GUFusion function pass. Perf WIP
* Fix fp8/bf8; remove duplicated code
* disable the scheduler in v3; bring it back when the compiler feature is ready.
* update moe v1 pipeline
* Add gemm1 v1 32x128x128
* remove schedule barrier
* updated
* Fix fp8/bf8 B-row
* mfma using asm, device result correct, host result needs checking
* gemm1 v3 64x128x128 debug
* fix cpu ref
* a/b thread_desc stride fix
* Use random scale for init1
* 16x16x128 input size blockscale function passed
* fix blockscale gemm bug
* tempsave. Almost all instances passed.
* v1 fix for mi350.
* temp save
* debug save
* update debug
* fix the bug, 128x128x256 tile function passed
* v3
* rename moe block selector and pipeline
* Add gemm1 v1
* Add gemm1 v1 to selector
* added mx moe block v3 support, function passed
* compile error fix
* Improve the pipeline
* Pack e8m0 as int32_t
* v1 compile pass. Function not ready
* debug synchronize issue over different GPU/ROCm
* minor fix
* Add profiler filter
* Add f4 ckProfiler
* Fix example compile error
* Add f4 profiler examples
* tempsave
* v1 function pass.
* v3 function pass
* align file and function name
* mx_moe_fp4 ready for aiter with clang-format.
* modify the way we represent fp4
* generalize the pipeline scheduling.
* init moe mx f4 scale shuffle
* CMakeLists: disable compiler-bound flags
* mx_fp4 default parameter change
* Moe blockscale gemm1 & gemm2 asm support for aiter. Suppress cmake flag until new compiler.
* update code
* tempsave; modify the way we represent fp4
* generalize the pipeline scheduling.
* Add gemm1 gfx942 .co support
* updated code, build passed.
* Update gemm2 asm with latest compiler flag
* Fix mx f4 ckProfiler
* Fix blockwise gemm mx v1
* lds conflict free + buffer load lds
* Add gemm2 v3 64x128x128
* fix a/b scale loading bugs; a/b scales now load correctly
* Add gemm2 v3 64x128x128
* commit with debug info
* fix fp4 profiler
* Add mx fp4 pipeline v1 instances
* Fix v2 topk_weight calculation. Add silu asm.
* v2 topk_weight WIP
* init mx fp4 B no preshuffle version
* tempsave. compile pass, function wrong
* enable fp4 moe no weight preshuffle, function pass
* update the TFlops calculation in the example
* Add gemm2 64x128x128 asm. Fix BF16 ref.
* fix 2 typos in fp4_preshuffle
* Better kernel selection in device classes
* correct preShuffleBuffer
we should use packed K to do the shuffle.
* lds conflict free + buffer load lds
* optimize offset math in dma
* Fix fp4 ckProfiler
* Fix MX MFMA tests
* fix f4 pipeline issues
* gemm1 func pass
* update mx moe gemm1_bns tile size to 64x128x256
* update mx moe gemm1 gemm2 TF and BW calculation
* fix typo
* temp save
* Fix example_gemm_mx build
* rename the block pipeline
* correct a typo in tail
* Add rotating to mx examples
* fix the correctness issue
* Fix v1; use M padding
* Add NT flag to B/BScale buffer
* Merge gemm_mx_common.hpp
* temp save, 4.4~4.5
* Fix 'Merge gemm_mx_common.hpp'
* refactor the pipeline
* Pad the M for scale buffer unconditionally
* update MX moe GEMM1 hotloopscheduling
* change the gemm1 tile from 64x128x128 to 128x64x128
* Unconditional Ascale padding
* Pad shuffled a scale only
* pad ascale
* add vmcnt guard for async copy
* Profiler add f4 wp
* Merge preshuffle device
* Add more fp4 wp instances
* Fix do_weight in gemm1. Fix cshuffle_datatype. Clang-format
* Clang-format after 2 merges
* Remove rocm6.3 workaround flags and macro
* Fix fp8 config
* Fix bf8 config
* flag and barrier fix for compiler branch MainOpSelV3
* Add fp8 profiler instances
* Remove debug infos; Enable flags for blockscale f8
* No asm ver. for merging moe blockscale fp8 into mainline
* update the flag name for f8blockscale
* recover example
* fix performance bug of bpreshuffle f8 gemm
* clang format, remove single rate mfma restriction for f8
* remove single rate mfma restriction for f8 blockscale gemm
* Fix moe blockscale gemm1 barrier 0x800 for new compiler
* add pipeline v1 for MOE Gemm2
* Use v1 pipeline for example_moe_gemm2_xdl_mx_fp4_bns
* Fix OOB; add MB96 instances
* remove unnecessary files
* fix the cmake issue
* Enable splitk for mxfp4; clang format;
* Generate random tensor values with multiple threads
* Use packed_size_v for A/BPackedSize
* Fix warning
* Fix target_compile_options for disabled target on gfx942
* fix moe pki4 on gfx950
* doc the kGroup definition
* Fix ThreadwiseTensorSliceTransfer_v4::Run (Fuse scale)
* Refactor thread_copy_lds_direct_load; fix gfx942 direct lds load example; fix f16_pki4 example
* Fix unknown compiler flag
* fix two failed examples.
* fix some failing tile sizes in gfx950 universal gemm. fix test_gemm_fp16
* workaround fix for test_gemm_f32; we have very limited support for lds direct load if the input matrix is not K-major
* fix test_gemm_splitk;
* Fix compile for mx_mfma_op
* add mfma selection logic for multipled_v3
* Clean up
* Fix device gemm mx link error
* improve the global atomic pattern
* Revert unnecessary copyright updates
* restore minimum_occupancy logic
* Avoid data race in moe gemm2 ref
* Build fp8 gemm_multiply_multiply and moe only on gfx94/95
* update the instance in device_mx_gemm
* Resolve comments
* Copyright 2025
* Remove unused code
* fix library linking issue
---------
Co-authored-by: OscarXu <huaiguxu@amd.com>
Co-authored-by: lalala-sh <Jiaxing.Wen@amd.com>
Co-authored-by: mtgu0705 <mtgu@amd.com>
Co-authored-by: aska-0096 <haocwang@amd.com>
Co-authored-by: Your Name <you@example.com>
Co-authored-by: valarLip <340077269@qq.com>
Co-authored-by: feifei14119 <feiw@amd.com>
Co-authored-by: Lin, Qun <qlin@amd.com>
Co-authored-by: Andriy Roshchenko <andriy.roshchenko@amd.com>
Co-authored-by: joye <joye@amd.com>
Co-authored-by: asleepzzz <hanwen.chang@amd.com>
[ROCm/composable_kernel commit: 37554c31e8]
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2025, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include <algorithm>
#include <cassert>
#include <iostream>
#include <fstream>
#include <numeric>
#include <random>
#include <thread>
#include <utility>
#include <vector>

#include "ck/utility/data_type.hpp"
#include "ck/utility/span.hpp"
#include "ck/utility/type_convert.hpp"

#include "ck/library/utility/algorithm.hpp"
#include "ck/library/utility/ranges.hpp"
#include "ck/library/utility/thread.hpp"

// Stream every element of a range to 'os', separated by 'delim'.
template <typename Range>
std::ostream& LogRange(std::ostream& os, Range&& range, std::string delim)
{
    bool first = true;
    for(auto&& v : range)
    {
        if(first)
            first = false;
        else
            os << delim;
        os << v;
    }
    return os;
}

// Stream a range with each element converted to T first; small float types are
// widened to float, and packed types (pk_i4_t, f4x2_pk_t) are unpacked into two floats.
template <typename T, typename Range>
std::ostream& LogRangeAsType(std::ostream& os, Range&& range, std::string delim)
{
    bool first = true;
    for(auto&& v : range)
    {
        if(first)
            first = false;
        else
            os << delim;

        using RangeType = ck::remove_cvref_t<decltype(v)>;
        if constexpr(std::is_same_v<RangeType, ck::f8_t> || std::is_same_v<RangeType, ck::bf8_t> ||
                     std::is_same_v<RangeType, ck::bhalf_t>)
        {
            os << ck::type_convert<float>(v);
        }
        else if constexpr(std::is_same_v<RangeType, ck::pk_i4_t> ||
                          std::is_same_v<RangeType, ck::f4x2_pk_t>)
        {
            const auto packed_floats = ck::type_convert<ck::float2_t>(v);
            const ck::vector_type<float, 2> vector_of_floats{packed_floats};
            os << vector_of_floats.template AsType<float>()[ck::Number<0>{}] << delim
               << vector_of_floats.template AsType<float>()[ck::Number<1>{}];
        }
        else
        {
            os << static_cast<T>(v);
        }
    }
    return os;
}

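// Usage sketch (illustrative, not part of the original header): printing host
// buffers with the helpers above; 'buf' is a hypothetical example variable.
//
//     std::vector<int> buf{1, 2, 3};
//     LogRange(std::cout, buf, ", ") << std::endl;              // prints "1, 2, 3"
//     LogRangeAsType<float>(std::cout, buf, ", ") << std::endl; // same, cast to float
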
// Helpers that expand a std::tuple into an argument list: one invokes a
// callable, the other invokes a constructor of F.
template <typename F, typename T, std::size_t... Is>
auto call_f_unpack_args_impl(F f, T args, std::index_sequence<Is...>)
{
    return f(std::get<Is>(args)...);
}

template <typename F, typename T>
auto call_f_unpack_args(F f, T args)
{
    constexpr std::size_t N = std::tuple_size<T>{};

    return call_f_unpack_args_impl(f, args, std::make_index_sequence<N>{});
}

template <typename F, typename T, std::size_t... Is>
auto construct_f_unpack_args_impl(T args, std::index_sequence<Is...>)
{
    return F(std::get<Is>(args)...);
}

template <typename F, typename T>
auto construct_f_unpack_args(F, T args)
{
    constexpr std::size_t N = std::tuple_size<T>{};

    return construct_f_unpack_args_impl<F>(args, std::make_index_sequence<N>{});
}

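// Usage sketch (illustrative, not part of the original header):
//
//     auto add = [](int a, int b) { return a + b; };
//     int s    = call_f_unpack_args(add, std::make_tuple(1, 2));      // s == 3
//     auto p   = construct_f_unpack_args(std::pair<int, int>{},
//                                        std::make_tuple(3, 4));      // p == {3, 4}
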
// Descriptor for a host tensor: per-dimension lengths and strides.
struct HostTensorDescriptor
{
    HostTensorDescriptor() = default;

    // Compute dense strides from mLens (defined out of line).
    void CalculateStrides();

    template <typename X, typename = std::enable_if_t<std::is_convertible_v<X, std::size_t>>>
    HostTensorDescriptor(const std::initializer_list<X>& lens) : mLens(lens.begin(), lens.end())
    {
        this->CalculateStrides();
    }

    HostTensorDescriptor(const std::initializer_list<ck::long_index_t>& lens)
        : mLens(lens.begin(), lens.end())
    {
        this->CalculateStrides();
    }

    template <typename Lengths,
              typename = std::enable_if_t<
                  std::is_convertible_v<ck::ranges::range_value_t<Lengths>, std::size_t> ||
                  std::is_convertible_v<ck::ranges::range_value_t<Lengths>, ck::long_index_t>>>
    HostTensorDescriptor(const Lengths& lens) : mLens(lens.begin(), lens.end())
    {
        this->CalculateStrides();
    }

    template <typename X,
              typename Y,
              typename = std::enable_if_t<std::is_convertible_v<X, std::size_t> &&
                                          std::is_convertible_v<Y, std::size_t>>>
    HostTensorDescriptor(const std::initializer_list<X>& lens,
                         const std::initializer_list<Y>& strides)
        : mLens(lens.begin(), lens.end()), mStrides(strides.begin(), strides.end())
    {
    }

    HostTensorDescriptor(const std::initializer_list<ck::long_index_t>& lens,
                         const std::initializer_list<ck::long_index_t>& strides)
        : mLens(lens.begin(), lens.end()), mStrides(strides.begin(), strides.end())
    {
    }

    template <typename Lengths,
              typename Strides,
              typename = std::enable_if_t<
                  (std::is_convertible_v<ck::ranges::range_value_t<Lengths>, std::size_t> &&
                   std::is_convertible_v<ck::ranges::range_value_t<Strides>, std::size_t>) ||
                  (std::is_convertible_v<ck::ranges::range_value_t<Lengths>, ck::long_index_t> &&
                   std::is_convertible_v<ck::ranges::range_value_t<Strides>, ck::long_index_t>)>>
    HostTensorDescriptor(const Lengths& lens, const Strides& strides)
        : mLens(lens.begin(), lens.end()), mStrides(strides.begin(), strides.end())
    {
    }

    std::size_t GetNumOfDimension() const;
    std::size_t GetElementSize() const;
    std::size_t GetElementSpaceSize() const;

    const std::vector<std::size_t>& GetLengths() const;
    const std::vector<std::size_t>& GetStrides() const;

    // Linear offset = dot product of the multi-index with the strides.
    template <typename... Is>
    std::size_t GetOffsetFromMultiIndex(Is... is) const
    {
        assert(sizeof...(Is) == this->GetNumOfDimension());
        std::initializer_list<std::size_t> iss{static_cast<std::size_t>(is)...};
        return std::inner_product(iss.begin(), iss.end(), mStrides.begin(), std::size_t{0});
    }

    std::size_t GetOffsetFromMultiIndex(std::vector<std::size_t> iss) const
    {
        return std::inner_product(iss.begin(), iss.end(), mStrides.begin(), std::size_t{0});
    }

    friend std::ostream& operator<<(std::ostream& os, const HostTensorDescriptor& desc);

    private:
    std::vector<std::size_t> mLens;
    std::vector<std::size_t> mStrides;
};

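// Usage sketch (illustrative, assuming CalculateStrides produces dense
// row-major strides): a 2x3 descriptor gets strides {3, 1}, so element (1, 2)
// lives at linear offset 1*3 + 2*1 = 5.
//
//     HostTensorDescriptor desc({2, 3});
//     std::size_t off = desc.GetOffsetFromMultiIndex(1, 2); // off == 5
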
// Permute a descriptor: dimension i of the result is dimension new2old[i] of 'a'.
template <typename New2Old>
HostTensorDescriptor transpose_host_tensor_descriptor_given_new2old(const HostTensorDescriptor& a,
                                                                    const New2Old& new2old)
{
    std::vector<std::size_t> new_lengths(a.GetNumOfDimension());
    std::vector<std::size_t> new_strides(a.GetNumOfDimension());

    for(std::size_t i = 0; i < a.GetNumOfDimension(); i++)
    {
        new_lengths[i] = a.GetLengths()[new2old[i]];
        new_strides[i] = a.GetStrides()[new2old[i]];
    }

    return HostTensorDescriptor(new_lengths, new_strides);
}

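// Usage sketch (illustrative): swapping the two dimensions of a 2x3 descriptor
// yields a 3x2 view over the same memory, because strides travel with their
// dimensions.
//
//     HostTensorDescriptor a({2, 3});
//     auto at = transpose_host_tensor_descriptor_given_new2old(
//         a, std::array<std::size_t, 2>{1, 0});
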
// std::thread that joins automatically on destruction (RAII), so partially
// constructed thread pools unwind safely.
struct joinable_thread : std::thread
{
    template <typename... Xs>
    joinable_thread(Xs&&... xs) : std::thread(std::forward<Xs>(xs)...)
    {
    }

    joinable_thread(joinable_thread&&) = default;
    joinable_thread& operator=(joinable_thread&&) = default;

    ~joinable_thread()
    {
        if(this->joinable())
            this->join();
    }
};

// Apply functor mF over an NDIM-dimensional index space, optionally splitting
// the flattened space across threads.
template <typename F, typename... Xs>
struct ParallelTensorFunctor
{
    F mF;
    static constexpr std::size_t NDIM = sizeof...(Xs);
    std::array<std::size_t, NDIM> mLens;
    std::array<std::size_t, NDIM> mStrides;
    std::size_t mN1d;

    ParallelTensorFunctor(F f, Xs... xs) : mF(f), mLens({static_cast<std::size_t>(xs)...})
    {
        // Dense row-major strides; mN1d is the total number of index tuples.
        mStrides.back() = 1;
        std::partial_sum(mLens.rbegin(),
                         mLens.rend() - 1,
                         mStrides.rbegin() + 1,
                         std::multiplies<std::size_t>());
        mN1d = mStrides[0] * mLens[0];
    }

    // Decompose a flat index back into an NDIM-dimensional index tuple.
    std::array<std::size_t, NDIM> GetNdIndices(std::size_t i) const
    {
        std::array<std::size_t, NDIM> indices;

        for(std::size_t idim = 0; idim < NDIM; ++idim)
        {
            indices[idim] = i / mStrides[idim];
            i -= indices[idim] * mStrides[idim];
        }

        return indices;
    }

    void operator()(std::size_t num_thread = 1) const
    {
        std::size_t work_per_thread = (mN1d + num_thread - 1) / num_thread;

        std::vector<joinable_thread> threads(num_thread);

        for(std::size_t it = 0; it < num_thread; ++it)
        {
            std::size_t iw_begin = it * work_per_thread;
            std::size_t iw_end   = std::min((it + 1) * work_per_thread, mN1d);

            auto f = [=, *this] {
                for(std::size_t iw = iw_begin; iw < iw_end; ++iw)
                {
                    call_f_unpack_args(mF, GetNdIndices(iw));
                }
            };
            threads[it] = joinable_thread(f);
        }
    }
};

template <typename F, typename... Xs>
auto make_ParallelTensorFunctor(F f, Xs... xs)
{
    return ParallelTensorFunctor<F, Xs...>(f, xs...);
}

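// Usage sketch (illustrative): run a functor over a 2x3 index space on two
// threads; each thread receives a contiguous slice of the flattened space.
//
//     auto print_ij = [](std::size_t i, std::size_t j) { std::printf("%zu %zu\n", i, j); };
//     make_ParallelTensorFunctor(print_ij, 2, 3)(2);
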
// Host-side tensor: descriptor plus std::vector storage. For packed element
// types (e.g. two fp4 values per byte) mData holds packed elements.
template <typename T>
struct Tensor
{
    using Descriptor = HostTensorDescriptor;
    using Data       = std::vector<T>;

    template <typename X>
    Tensor(std::initializer_list<X> lens) : mDesc(lens), mData(GetElementSpaceSize())
    {
    }

    template <typename X, typename Y>
    Tensor(std::initializer_list<X> lens, std::initializer_list<Y> strides)
        : mDesc(lens, strides), mData(GetElementSpaceSize())
    {
    }

    template <typename Lengths>
    Tensor(const Lengths& lens) : mDesc(lens), mData(GetElementSpaceSize())
    {
    }

    template <typename Lengths, typename Strides>
    Tensor(const Lengths& lens, const Strides& strides)
        : mDesc(lens, strides), mData(GetElementSpaceSize())
    {
    }

    Tensor(const Descriptor& desc) : mDesc(desc), mData(GetElementSpaceSize()) {}

    template <typename OutT>
    Tensor<OutT> CopyAsType() const
    {
        Tensor<OutT> ret(mDesc);

        ck::ranges::transform(
            mData, ret.mData.begin(), [](auto value) { return ck::type_convert<OutT>(value); });

        return ret;
    }

    Tensor() = delete;
    Tensor(const Tensor&) = default;
    Tensor(Tensor&&)      = default;

    ~Tensor() = default;

    Tensor& operator=(const Tensor&) = default;
    Tensor& operator=(Tensor&&) = default;

    template <typename FromT>
    explicit Tensor(const Tensor<FromT>& other) : Tensor(other.template CopyAsType<T>())
    {
    }

    // Dump all elements to a text file, one per line, converted to the requested dtype.
    void savetxt(std::string file_name, std::string dtype = "float")
    {
        std::ofstream file(file_name);

        if(file.is_open())
        {
            for(auto& itm : mData)
            {
                if(dtype == "float")
                    file << ck::type_convert<float>(itm) << std::endl;
                else if(dtype == "int")
                    file << ck::type_convert<int>(itm) << std::endl;
                else
                    // TODO: we haven't implemented operator<< for all custom
                    // data types; fall back to float to avoid compile errors
                    file << ck::type_convert<float>(itm) << std::endl;
            }
            file.close();
        }
        else
        {
            // Fail loudly if the file cannot be opened.
            throw std::runtime_error(std::string("unable to open file:") + file_name);
        }
    }

    decltype(auto) GetLengths() const { return mDesc.GetLengths(); }

    decltype(auto) GetStrides() const { return mDesc.GetStrides(); }

    std::size_t GetNumOfDimension() const { return mDesc.GetNumOfDimension(); }

    std::size_t GetElementSize() const { return mDesc.GetElementSize(); }

    std::size_t GetElementSpaceSize() const
    {
        if constexpr(ck::is_packed_type_v<ck::remove_cvref_t<T>>)
        {
            // Packed types store packed_size_v logical elements per T, so
            // divide the descriptor's element space by the pack factor.
            return (mDesc.GetElementSpaceSize() + 1) / ck::packed_size_v<ck::remove_cvref_t<T>>;
        }
        else
        {
            return mDesc.GetElementSpaceSize();
        }
    }

    std::size_t GetElementSpaceSizeInBytes() const { return sizeof(T) * GetElementSpaceSize(); }

    void SetZero() { ck::ranges::fill<T>(mData, T{0}); }

    // Recursively visit every multi-index; f receives (*this, idx).
    template <typename F>
    void ForEach_impl(F&& f, std::vector<size_t>& idx, size_t rank)
    {
        if(rank == mDesc.GetNumOfDimension())
        {
            f(*this, idx);
            return;
        }
        // else
        for(size_t i = 0; i < mDesc.GetLengths()[rank]; i++)
        {
            idx[rank] = i;
            ForEach_impl(std::forward<F>(f), idx, rank + 1);
        }
    }

    template <typename F>
    void ForEach(F&& f)
    {
        std::vector<size_t> idx(mDesc.GetNumOfDimension(), 0);
        ForEach_impl(std::forward<F>(f), idx, size_t(0));
    }

    template <typename F>
    void ForEach_impl(const F&& f, std::vector<size_t>& idx, size_t rank) const
    {
        if(rank == mDesc.GetNumOfDimension())
        {
            f(*this, idx);
            return;
        }
        // else
        for(size_t i = 0; i < mDesc.GetLengths()[rank]; i++)
        {
            idx[rank] = i;
            ForEach_impl(std::forward<const F>(f), idx, rank + 1);
        }
    }

    template <typename F>
    void ForEach(const F&& f) const
    {
        std::vector<size_t> idx(mDesc.GetNumOfDimension(), 0);
        ForEach_impl(std::forward<const F>(f), idx, size_t(0));
    }

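    // Usage sketch (illustrative): ForEach visits every multi-index, e.g.
    // zeroing the diagonal of a square 2-D tensor 't':
    //
    //     t.ForEach([](auto& self, const auto& idx) {
    //         if(idx[0] == idx[1])
    //             self(idx) = 0;
    //     });
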
    // Fill the tensor by calling generator g(i0, ..., iN) for every index,
    // dispatched on rank and optionally parallelized over num_thread threads.
    template <typename G>
    void GenerateTensorValue(G g, std::size_t num_thread = 1)
    {
        switch(mDesc.GetNumOfDimension())
        {
        case 1: {
            auto f = [&](auto i) { (*this)(i) = g(i); };
            make_ParallelTensorFunctor(f, mDesc.GetLengths()[0])(num_thread);
            break;
        }
        case 2: {
            auto f = [&](auto i0, auto i1) { (*this)(i0, i1) = g(i0, i1); };
            make_ParallelTensorFunctor(f, mDesc.GetLengths()[0], mDesc.GetLengths()[1])(num_thread);
            break;
        }
        case 3: {
            auto f = [&](auto i0, auto i1, auto i2) { (*this)(i0, i1, i2) = g(i0, i1, i2); };
            make_ParallelTensorFunctor(
                f, mDesc.GetLengths()[0], mDesc.GetLengths()[1], mDesc.GetLengths()[2])(num_thread);
            break;
        }
        case 4: {
            auto f = [&](auto i0, auto i1, auto i2, auto i3) {
                (*this)(i0, i1, i2, i3) = g(i0, i1, i2, i3);
            };
            make_ParallelTensorFunctor(f,
                                       mDesc.GetLengths()[0],
                                       mDesc.GetLengths()[1],
                                       mDesc.GetLengths()[2],
                                       mDesc.GetLengths()[3])(num_thread);
            break;
        }
        case 5: {
            auto f = [&](auto i0, auto i1, auto i2, auto i3, auto i4) {
                (*this)(i0, i1, i2, i3, i4) = g(i0, i1, i2, i3, i4);
            };
            make_ParallelTensorFunctor(f,
                                       mDesc.GetLengths()[0],
                                       mDesc.GetLengths()[1],
                                       mDesc.GetLengths()[2],
                                       mDesc.GetLengths()[3],
                                       mDesc.GetLengths()[4])(num_thread);
            break;
        }
        case 6: {
            auto f = [&](auto i0, auto i1, auto i2, auto i3, auto i4, auto i5) {
                (*this)(i0, i1, i2, i3, i4, i5) = g(i0, i1, i2, i3, i4, i5);
            };
            make_ParallelTensorFunctor(f,
                                       mDesc.GetLengths()[0],
                                       mDesc.GetLengths()[1],
                                       mDesc.GetLengths()[2],
                                       mDesc.GetLengths()[3],
                                       mDesc.GetLengths()[4],
                                       mDesc.GetLengths()[5])(num_thread);
            break;
        }
        case 12: {
            auto f = [&](auto i0,
                         auto i1,
                         auto i2,
                         auto i3,
                         auto i4,
                         auto i5,
                         auto i6,
                         auto i7,
                         auto i8,
                         auto i9,
                         auto i10,
                         auto i11) {
                (*this)(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11) =
                    g(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11);
            };
            make_ParallelTensorFunctor(f,
                                       mDesc.GetLengths()[0],
                                       mDesc.GetLengths()[1],
                                       mDesc.GetLengths()[2],
                                       mDesc.GetLengths()[3],
                                       mDesc.GetLengths()[4],
                                       mDesc.GetLengths()[5],
                                       mDesc.GetLengths()[6],
                                       mDesc.GetLengths()[7],
                                       mDesc.GetLengths()[8],
                                       mDesc.GetLengths()[9],
                                       mDesc.GetLengths()[10],
                                       mDesc.GetLengths()[11])(num_thread);
            break;
        }
        default: throw std::runtime_error("unsupported dimension");
        }
    }

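    // Usage sketch (illustrative): fill a 2-D tensor from an index-based
    // generator using four threads.
    //
    //     Tensor<float> t({4, 8});
    //     t.GenerateTensorValue([](auto i, auto j) { return float(i) - float(j); }, 4);
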
    // Generate random values with multiple threads. Guaranteed to give the same
    // sequence with any number of threads provided.
    template <typename Distribution = std::uniform_real_distribution<float>,
              typename Mapping      = ck::identity,
              typename Generator    = std::minstd_rand>
    void GenerateTensorDistr(Distribution dis = {0.f, 1.f},
                             Mapping fn       = {},
                             const Generator g = Generator(0), // default seed 0
                             std::size_t num_thread = -1)
    {
        using ck::math::integer_divide_ceil;
        using ck::math::min;
        if(num_thread == -1ULL)
            num_thread = min(ck::get_available_cpu_cores(), 80U); // max 80 threads
        // At least 2MB per thread
        num_thread = min(num_thread, integer_divide_ceil(this->GetElementSpaceSize(), 0x200000));
        constexpr std::size_t BLOCK_BYTES = 64;
        constexpr std::size_t BLOCK_SIZE  = BLOCK_BYTES / sizeof(T);

        const std::size_t num_blocks = integer_divide_ceil(this->GetElementSpaceSize(), BLOCK_SIZE);
        const std::size_t blocks_per_thread = integer_divide_ceil(num_blocks, num_thread);

        std::vector<std::thread> threads;
        threads.reserve(num_thread - 1);
        const auto dst                = const_cast<T*>(this->mData.data());
        const auto element_space_size = this->GetElementSpaceSize();
        for(int it = num_thread - 1; it >= 0; --it)
        {
            std::size_t ib_begin = it * blocks_per_thread;
            std::size_t ib_end   = min(ib_begin + blocks_per_thread, num_blocks);

            auto job = [=]() {
                auto g_   = g;   // copy
                auto dis_ = dis; // copy
                // Skip ahead to this thread's first block so every thread draws
                // from the same global sequence regardless of thread count.
                g_.discard(ib_begin * BLOCK_SIZE * ck::packed_size_v<T>);
                auto t_fn = [&]() {
                    if constexpr(ck::packed_size_v<T> == 1)
                        return ck::type_convert<T>(fn(dis_(g_)));
                    else if constexpr(ck::is_same_v<T, ck::f4x2_pk_t>)
                        return ck::f4x2_pk_t{ck::type_convert<ck::f4x2_t>(
                            ck::float2_t{ck::type_convert<float>(fn(dis_(g_))),
                                         ck::type_convert<float>(fn(dis_(g_)))})};
                    else
                        static_assert(false, "Unsupported packed size for T");
                };

                std::size_t ib = ib_begin;
                // Full blocks first, then a bounds-checked tail for the last block.
                for(; ib < ib_end - 1; ++ib)
                    ck::static_for<0, BLOCK_SIZE, 1>{}([&](auto iw_) {
                        constexpr size_t iw       = iw_.value;
                        dst[ib * BLOCK_SIZE + iw] = t_fn();
                    });
                for(std::size_t iw = 0; iw < BLOCK_SIZE; ++iw)
                    if(ib * BLOCK_SIZE + iw < element_space_size)
                        dst[ib * BLOCK_SIZE + iw] = t_fn();
            };

            if(it > 0)
                threads.emplace_back(std::move(job));
            else
                job(); // last job run in the main thread
        }
        for(auto& t : threads)
            t.join();
    }

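    // Usage sketch (illustrative): deterministic random init; the same values
    // result for any thread count because each worker skips ahead in one
    // shared generator sequence.
    //
    //     Tensor<float> w({1024, 1024});
    //     w.GenerateTensorDistr(std::normal_distribution<float>{0.f, 1.f});
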
    template <typename... Is>
    std::size_t GetOffsetFromMultiIndex(Is... is) const
    {
        // Offsets are in logical elements; divide by the pack factor to index mData.
        return mDesc.GetOffsetFromMultiIndex(is...) / ck::packed_size_v<ck::remove_cvref_t<T>>;
    }

    template <typename... Is>
    T& operator()(Is... is)
    {
        return mData[mDesc.GetOffsetFromMultiIndex(is...) /
                     ck::packed_size_v<ck::remove_cvref_t<T>>];
    }

    template <typename... Is>
    const T& operator()(Is... is) const
    {
        return mData[mDesc.GetOffsetFromMultiIndex(is...) /
                     ck::packed_size_v<ck::remove_cvref_t<T>>];
    }

    T& operator()(std::vector<std::size_t> idx)
    {
        return mData[mDesc.GetOffsetFromMultiIndex(idx) / ck::packed_size_v<ck::remove_cvref_t<T>>];
    }

    const T& operator()(std::vector<std::size_t> idx) const
    {
        return mData[mDesc.GetOffsetFromMultiIndex(idx) / ck::packed_size_v<ck::remove_cvref_t<T>>];
    }

    typename Data::iterator begin() { return mData.begin(); }

    typename Data::iterator end() { return mData.end(); }

    typename Data::pointer data() { return mData.data(); }

    typename Data::const_iterator begin() const { return mData.begin(); }

    typename Data::const_iterator end() const { return mData.end(); }

    typename Data::const_pointer data() const { return mData.data(); }

    typename Data::size_type size() const { return mData.size(); }

    // Reinterpret the storage as a span of U; the length is rescaled by the
    // ratio of the element sizes.
    template <typename U = T>
    auto AsSpan() const
    {
        constexpr std::size_t FromSize = sizeof(T);
        constexpr std::size_t ToSize   = sizeof(U);

        using Element = std::add_const_t<std::remove_reference_t<U>>;
        return ck::span<Element>{reinterpret_cast<Element*>(data()), size() * FromSize / ToSize};
    }

    template <typename U = T>
    auto AsSpan()
    {
        constexpr std::size_t FromSize = sizeof(T);
        constexpr std::size_t ToSize   = sizeof(U);

        using Element = std::remove_reference_t<U>;
        return ck::span<Element>{reinterpret_cast<Element*>(data()), size() * FromSize / ToSize};
    }

    Descriptor mDesc;
    Data mData;
};
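// Usage sketch (illustrative, not part of the original header): end-to-end use
// of the host tensor utilities above.
//
//     Tensor<float> a({2, 3});                   // dense 2x3, strides {3, 1}
//     a.GenerateTensorValue([](auto i, auto j) { return float(i * 3 + j); });
//     a(1, 2) = 42.f;                            // multi-index access
//     LogRange(std::cout, a, ", ") << std::endl; // Tensor exposes begin()/end()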