Mirror of https://github.com/ROCm/composable_kernel.git (synced 2026-05-16 19:09:59 +00:00)
* Convolution ND
* Code unification across dimensions for generating tensor descriptors.
* Example
* Instances
* Move convnd f32 instance file to comply with repo structure.
* Conv 1D tensor layouts.
* Formatting and use ReferenceConv
* Reference ConvFwd supporting 1D and 2D convolution.
* Debug printing TensorLayout name.
* Conv fwd 1D instance f32
* Refactor conv ND example.
Needed to support various conv dimensions.
* Rename conv nd example directory to prevent conflicts.
* Refactor some common utilities into a single file.
Plus some tests.
* Refactor GetHostTensorDescriptor + UT.
* Add 1D test case.
* Test reference convolution 1d/2d
* Remove some leftovers.
* Fix convolution example error for 1D
* Refactor test check errors utility function.
* Test Conv2D Fwd XDL
* More UT for 1D case.
* Parameterize input & weight initializers.
* Rename example to prevent conflicts.
* Split convnd instance into separate files for 1d/2d
* Address review comments.
* Fix data type for flops/gbytes calculations.
* Assign example number 11.
* 3D cases for convolution utility functions.
* 3D reference convolution.
* Add support for 3D convolution.
* Check for inputs bigger than 2GB.
* Formatting
* Support for bf16/f16/f32/i8 - conv instances + UT.
* Use check_err from test_util.hpp.
* Split convnd test into separate files for each dim.
* Fix data generation and use proper instances.
* Formatting
* Skip tensor initialization if not necessary.
* Fix CMakefiles.
* Remove redundant conv2d_fwd test.
* Lower problem size for conv3D UT.
* 3D case for convnd example.
* Remove leftovers after merge.
* Add Conv Specialization string to GetTypeString
* Skip instance causing numerical errors.
* Small fixes.
* Remove redundant includes.
* Fix namespace name error.
* Script for automatic testing and logging convolution fwd UTs
* Comment out numactl cmd.
* Refine weights initialization and relax rtol for fp16
* Move test_util.hpp to check_err.hpp
* Refine weights initialization and relax rtol for fp16
* Refactor common part of test conv utils.
* Move utility functions to a single common place.
* Add additional common functions to utility.
* Refactor convnd_fwd_xdl examples.
* Remove redundant files.
* Unify structure.
* Add constructor to ConvParams.
* Add input parameter validation.
* Modify conv examples to use single utility file.
* Remove check_error from host_tensor.hpp
* Get rid of check_indices function.
* Remove bf16_to_f32 function overload for scalars.
* Fix namespace.
* Add half_float::half for check_err.
* Fix conv params size in UT.
* Fix weights initialization for int8.
* Fix weights initialization for int8.
* Add type_convert when store output in ref conv 1D.
* Get back old conv2d_fwd_xdl operation.
* Silence conv debug print.
* format
* clean
* clean
* Fix merge.
* Fix namespace for check_err
* Formatting.
* Fix merge artifacts.
* Remove deleted header.
* Fix some includes and use ck::utils::check_err.
* Remove unused check_indices restored by previous merge.
* Fix namespaces after merge.
* Fix compilation error.
* Small fixes.
* Use common functions.
* Fix filename
* Fix namespaces.
* Fix merge artifact - restore accidentally removed function.
* Fix ConvForwardSpecialization.
* Adhere to coding style rules.
* Fix merge artifacts.
Co-authored-by: Adam Osewski <aosewski@amd.com>
Co-authored-by: Chao Liu <chao.liu2@amd.com>
[ROCm/composable_kernel commit: abf4bdb9a9]
151 lines · 4.5 KiB · C++
#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>
#include <half.hpp>

#include "check_err.hpp"
#include "config.hpp"
#include "magic_division.hpp"
#include "device.hpp"
#include "host_tensor.hpp"
#include "host_tensor_generator.hpp"
#include "device_tensor.hpp"

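// GPU kernel: divides each element of p_dividend by the divisor encoded as
// (magic_multiplier, magic_shift), using a grid-stride loop over num elements.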
__global__ void gpu_magic_number_division(uint32_t magic_multiplier,
                                          uint32_t magic_shift,
                                          const int32_t* p_dividend,
                                          int32_t* p_result,
                                          uint64_t num)
{
    uint64_t global_thread_num = blockDim.x * gridDim.x;

    uint64_t global_thread_id = blockIdx.x * blockDim.x + threadIdx.x;

    for(uint64_t data_id = global_thread_id; data_id < num; data_id += global_thread_num)
    {
        p_result[data_id] =
            ck::MagicDivision::DoMagicDivision(p_dividend[data_id], magic_multiplier, magic_shift);
    }
}

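// GPU kernel: reference path that divides each element with the hardware's
// native integer division, for comparison against the magic-number result.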
__global__ void
gpu_naive_division(int32_t divisor, const int32_t* p_dividend, int32_t* p_result, uint64_t num)
{
    uint64_t global_thread_num = blockDim.x * gridDim.x;

    uint64_t global_thread_id = blockIdx.x * blockDim.x + threadIdx.x;

    for(uint64_t data_id = global_thread_id; data_id < num; data_id += global_thread_num)
    {
        p_result[data_id] = p_dividend[data_id] / divisor;
    }
}

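// CPU reference: applies the same magic-number division on the host, so the
// GPU magic-division result can be cross-checked against a host computation.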
__host__ void cpu_magic_number_division(uint32_t magic_multiplier,
                                        uint32_t magic_shift,
                                        const int32_t* p_dividend,
                                        int32_t* p_result,
                                        uint64_t num)
{
    for(uint64_t data_id = 0; data_id < num; ++data_id)
    {
        p_result[data_id] =
            ck::MagicDivision::DoMagicDivision(p_dividend[data_id], magic_multiplier, magic_shift);
    }
}

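// Test driver: for every divisor in [1, 4096], divides all dividends with (a)
// naive GPU division, (b) magic-number GPU division, and (c) magic-number CPU
// division, and checks that (b) and (c) each agree with (a).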
int main(int, char*[])
{
    uint64_t num_divisor  = 4096;
    uint64_t num_dividend = 1L << 16;

    std::vector<int32_t> divisors_host(num_divisor);
    std::vector<int32_t> dividends_host(num_dividend);

    // generate divisors 1..num_divisor
    for(uint64_t i = 0; i < num_divisor; ++i)
    {
        divisors_host[i] = i + 1;
    }

    // generate dividends 0..num_dividend-1
    for(uint64_t i = 0; i < num_dividend; ++i)
    {
        dividends_host[i] = i;
    }

    DeviceMem dividends_dev_buf(sizeof(int32_t) * num_dividend);
    DeviceMem naive_result_dev_buf(sizeof(int32_t) * num_dividend);
    DeviceMem magic_result_dev_buf(sizeof(int32_t) * num_dividend);

    std::vector<int32_t> naive_result_host(num_dividend);
    std::vector<int32_t> magic_result_host(num_dividend);
    std::vector<int32_t> magic_result_host2(num_dividend);

    dividends_dev_buf.ToDevice(dividends_host.data());

    bool pass = true;

    for(std::size_t i = 0; i < num_divisor; ++i)
    {
        // run naive division on GPU
        gpu_naive_division<<<1024, 256>>>(
            divisors_host[i],
            static_cast<const int32_t*>(dividends_dev_buf.GetDeviceBuffer()),
            static_cast<int32_t*>(naive_result_dev_buf.GetDeviceBuffer()),
            num_dividend);

        // calculate magic number
        uint32_t magic_multiplier, magic_shift;

        ck::tie(magic_multiplier, magic_shift) =
            ck::MagicDivision::CalculateMagicNumbers(divisors_host[i]);

        // run magic division on GPU
        gpu_magic_number_division<<<1024, 256>>>(
            magic_multiplier,
            magic_shift,
            static_cast<const int32_t*>(dividends_dev_buf.GetDeviceBuffer()),
            static_cast<int32_t*>(magic_result_dev_buf.GetDeviceBuffer()),
            num_dividend);

        naive_result_dev_buf.FromDevice(naive_result_host.data());
        magic_result_dev_buf.FromDevice(magic_result_host.data());

        bool res = ck::utils::check_err(magic_result_host, naive_result_host);

        if(!res)
        {
            pass = false;
            continue;
        }

        // cross-check: run the same magic division on the CPU
        cpu_magic_number_division(magic_multiplier,
                                  magic_shift,
                                  dividends_host.data(),
                                  magic_result_host2.data(),
                                  num_dividend);

        res = ck::utils::check_err(magic_result_host2, naive_result_host);

        if(!res)
        {
            pass = false;
            continue;
        }
    }

    if(pass)
    {
        std::cout << "test magic number division: Pass" << std::endl;
        return 0;
    }
    else
    {
        std::cout << "test magic number division: Fail" << std::endl;
        return -1;
    }
}
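
For reference, the transformation this test exercises replaces integer division by a fixed divisor with a multiply and a shift. The contract, sketched from the calls used above rather than from ck::MagicDivision's internals: for a divisor d, CalculateMagicNumbers picks a pair (magic_multiplier, magic_shift) such that, for every dividend n in the supported range,

    DoMagicDivision(n, magic_multiplier, magic_shift) == n / d   // truncating division

Internally this is a widening multiply by the magic multiplier followed by a right shift (some magic-division variants also need a small fix-up add), so no hardware divide instruction is issued. A minimal host-side usage sketch, using only calls that appear in the test; the expected quotient in the comment is an assumption for illustration, not taken from the source:

    uint32_t magic_multiplier, magic_shift;
    ck::tie(magic_multiplier, magic_shift) = ck::MagicDivision::CalculateMagicNumbers(7);
    // 20 / 7 truncates to 2, so q should be 2 if the magic pair is valid here.
    int32_t q = ck::MagicDivision::DoMagicDivision(20, magic_multiplier, magic_shift);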