Mirror of https://github.com/ROCm/composable_kernel.git, synced 2026-05-12 09:16:52 +00:00
Convolution ND

* Code unification across dimensions for generating tensor descriptors.
* Example
* Instances
* Move convnd f32 instance file to comply with repo structure.
* Conv 1D tensor layouts.
* Formatting and use ReferenceConv
* Reference ConvFwd supporting 1D and 2D convolution.
* Debug printing TensorLayout name.
* Conv fwd 1D instance f32
* Refactor conv ND example to support various conv dimensions.
* Rename conv nd example directory to prevent conflicts.
* Refactor some common utility to a single file, plus some tests.
* Refactor GetHostTensorDescriptor + UT.
* Add 1D test case.
* Test reference convolution 1d/2d
* Remove some leftovers.
* Fix convolution example error for 1D
* Refactor test check-errors utility function.
* Test Conv2D Fwd XDL
* More UT for 1D case.
* Parameterize input & weight initializers.
* Rename example to prevent conflicts.
* Split convnd instance into separate files for 1d/2d
* Address review comments.
* Fix data type for flops/gbytes calculations.
* Assign example number 11.
* 3D cases for convolution utility functions.
* 3D reference convolution.
* Add support for 3D convolution.
* Check for inputs bigger than 2GB.
* Formatting
* Support for bf16/f16/f32/i8 - conv instances + UT.
* Use check_err from test_util.hpp.
* Split convnd test into separate files for each dim.
* Fix data generation and use proper instances.
* Formatting
* Skip tensor initialization if not necessary.
* Fix CMakefiles.
* Remove redundant conv2d_fwd test.
* Lower problem size for conv3D UT.
* 3D case for convnd example.
* Remove leftovers after merge.
* Add Conv Specialization string to GetTypeString
* Skip instance causing numerical errors.
* Small fixes.
* Remove redundant includes.
* Fix namespace name error.
* Script for automatic testing and logging convolution fwd UTs
* Comment out numactl cmd.
* Refine weights initialization and relax rtol for fp16
* Move test_util.hpp to check_err.hpp
* Refine weights initialization and relax rtol for fp16
* Refactor common part of test conv utils.
* Move utility function to a single common place.
* Add additional common functions to utility.
* Refactor convnd_fwd_xdl examples.
* Remove redundant files.
* Unify structure.
* Add constructor to ConvParams.
* Add input parameters validation.
* Modify conv examples to use single utility file.
* Remove check_error from host_tensor.hpp
* Get rid of check_indices function.
* Remove bf16_to_f32 function overload for scalars.
* Fix namespace.
* Add half_float::half for check_err.
* Fix conv params size in UT.
* Fix weights initialization for int8.
* Fix weights initialization for int8.
* Add type_convert when storing output in ref conv 1D.
* Get back old conv2d_fwd_xdl operation.
* Silence conv debug print.
* Format
* Clean
* Clean
* Fix merge.
* Fix namespace for check_err
* Formatting.
* Fix merge artifacts.
* Remove deleted header.
* Fix some includes and use ck::utils::check_err.
* Remove unused check_indices restored by previous merge.
* Fix namespaces after merge.
* Fix compilation error.
* Small fixes.
* Use common functions.
* Fix filename
* Fix namespaces.
* Fix merge artifact - restore function removed by accident.
* Fix ConvForwardSpecialization.
* Working example of OpInstanceRunEngine for conv2dfwd UT.
* Adhere to coding style rules.
* Formatting and adhere to coding style rules.
* Fix merge artifacts.
* Utility for collecting conv fwd instances, plus common part for parsing cmdline params.
* Refactor FillUniform because of segfault for int8_t.
* Naming convention.
* Elegant version of device mem allocation.
* Use OpInstanceRunEngine in conv fwd nd tests.
* Multiple refinements.
* Conditional init.
* Don't run reference op if not provided.
* Use OpInstanceRunEngine for ckProfiler conv_fwd
* Refactor common tensor fill function to separate file.
* Clean up unused functions.
* Support different init methods.
* Create CMake target for conv_fwd_util.
* Add header for profile_convnd_fwd.cpp
* Fix CMakefiles to link with conv_fwd_util where needed.
* Fix some clutter.

Co-authored-by: Adam Osewski <aosewski@amd.com>
Co-authored-by: Chao Liu <chao.liu2@amd.com>
116 lines
4.0 KiB
C++
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <initializer_list>
#include <iostream>
#include <numeric>

#include "profile_convnd_fwd.hpp"
// Forward declarations of the per-operation profiler entry points,
// each defined in its own translation unit.
int profile_gemm(int, char*[]);
int profile_gemm_bias_2d(int, char*[]);
int profile_gemm_bias_relu(int, char*[]);
int profile_gemm_bias_relu_add(int, char*[]);
int profile_gemm_reduce(int, char*[]);
int profile_batched_gemm(int, char*[]);
int profile_grouped_gemm(int, char*[]);
int profile_conv_fwd_bias_relu(int, char*[]);
int profile_conv_fwd_bias_relu_add(int, char*[]);
int profile_conv_fwd_bias_relu_atomic_add(int, char*[]);
int profile_convnd_bwd_data(int, char*[], int); // last arg: number of spatial dims (1/2/3)
int profile_reduce(int, char*[]);
int profile_conv_bwd_weight(int, char*[]);
int profile_batched_gemm_reduce(int, char*[]);
int main(int argc, char* argv[])
{
    // Guard against a missing operation name before touching argv[1];
    // an empty string falls through to the usage message below.
    const char* op = (argc > 1) ? argv[1] : "";

    if(strcmp(op, "gemm") == 0)
    {
        return profile_gemm(argc, argv);
    }
    else if(strcmp(op, "gemm_bias_2d") == 0)
    {
        return profile_gemm_bias_2d(argc, argv);
    }
    else if(strcmp(op, "gemm_bias_relu") == 0)
    {
        return profile_gemm_bias_relu(argc, argv);
    }
    else if(strcmp(op, "gemm_bias_relu_add") == 0)
    {
        return profile_gemm_bias_relu_add(argc, argv);
    }
    else if(strcmp(op, "gemm_reduce") == 0)
    {
        return profile_gemm_reduce(argc, argv);
    }
    else if(strcmp(op, "batched_gemm") == 0)
    {
        return profile_batched_gemm(argc, argv);
    }
    else if(strcmp(op, "batched_gemm_reduce") == 0)
    {
        return profile_batched_gemm_reduce(argc, argv);
    }
    else if(strcmp(op, "grouped_gemm") == 0)
    {
        // Return the result like the other branches instead of falling through.
        return profile_grouped_gemm(argc, argv);
    }
else if(strcmp(argv[1], "conv_fwd") == 0)
|
|
{
|
|
return ck::profiler::profile_convnd_fwd(argc, argv);
|
|
}
|
|
else if(strcmp(argv[1], "conv_fwd_bias_relu") == 0)
|
|
{
|
|
return profile_conv_fwd_bias_relu(argc, argv);
|
|
}
|
|
else if(strcmp(argv[1], "conv_fwd_bias_relu_add") == 0)
|
|
{
|
|
return profile_conv_fwd_bias_relu_add(argc, argv);
|
|
}
|
|
else if(strcmp(argv[1], "conv_fwd_bias_relu_atomic_add") == 0)
|
|
{
|
|
return profile_conv_fwd_bias_relu_atomic_add(argc, argv);
|
|
}
|
|
else if(strcmp(argv[1], "conv1d_bwd_data") == 0)
|
|
{
|
|
return profile_convnd_bwd_data(argc, argv, 1);
|
|
}
|
|
else if(strcmp(argv[1], "conv2d_bwd_data") == 0)
|
|
{
|
|
return profile_convnd_bwd_data(argc, argv, 2);
|
|
}
|
|
else if(strcmp(argv[1], "conv3d_bwd_data") == 0)
|
|
{
|
|
return profile_convnd_bwd_data(argc, argv, 3);
|
|
}
|
|
else if(strcmp(argv[1], "reduce") == 0)
|
|
{
|
|
return profile_reduce(argc, argv);
|
|
}
|
|
else if(strcmp(argv[1], "conv2d_bwd_weight") == 0)
|
|
{
|
|
return profile_conv_bwd_weight(argc, argv);
|
|
}
|
|
else
|
|
{
|
|
        // clang-format off
        printf("arg1: tensor operation (gemm: GEMM\n"
               "                        gemm_bias_2d: GEMM+Bias(2D)\n"
               "                        gemm_bias_relu: GEMM+Bias+ReLU\n"
               "                        gemm_bias_relu_add: GEMM+Bias+ReLU+Add\n"
               "                        gemm_reduce: GEMM+Reduce\n"
               "                        batched_gemm: Batched GEMM\n"
               "                        batched_gemm_reduce: Batched GEMM+Reduce\n"
               "                        grouped_gemm: Grouped GEMM\n"
               "                        conv_fwd: ForwardConvolution\n"
               "                        conv_fwd_bias_relu: ForwardConvolution+Bias+ReLU\n"
               "                        conv_fwd_bias_relu_add: ForwardConvolution+Bias+ReLU+Add\n"
               "                        conv_fwd_bias_relu_atomic_add: ForwardConvolution+Bias+ReLU+AtomicAdd\n"
               "                        conv1d_bwd_data: BackwardConvolution data 1 dim\n"
               "                        conv2d_bwd_data: BackwardConvolution data 2 dim\n"
               "                        conv3d_bwd_data: BackwardConvolution data 3 dim\n"
               "                        reduce: REDUCE\n"
               "                        conv2d_bwd_weight: Backward Weight Convolution 2d)\n");
        // clang-format on
    }

    return 0;
}
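The if/else chain above grows by several lines per operation, and the usage text must be kept in sync with it by hand. Below is a minimal, self-contained sketch of a table-driven alternative that derives the usage listing from the same table. The OpEntry struct, the ops table, and the stub profile_* bodies are hypothetical illustrations, not CK code; the real entry points live in their own translation units.

// Sketch only: table-driven dispatch with usage derived from the table.
#include <cstdio>
#include <cstring>

// Hypothetical stubs standing in for the real profile_* entry points.
static int profile_gemm(int, char*[]) { return 0; }
static int profile_reduce(int, char*[]) { return 0; }

struct OpEntry
{
    const char* name;         // value expected in argv[1]
    int (*run)(int, char*[]); // profiler entry point
};

static const OpEntry ops[] = {
    {"gemm", profile_gemm},
    {"reduce", profile_reduce},
};

int main(int argc, char* argv[])
{
    if(argc > 1)
    {
        for(const OpEntry& entry : ops)
        {
            if(strcmp(argv[1], entry.name) == 0)
            {
                return entry.run(argc, argv);
            }
        }
    }
    // Unknown or missing operation: list what the table supports.
    printf("usage: ckProfiler <op> [op-specific args]\nsupported ops:\n");
    for(const OpEntry& entry : ops)
    {
        printf("  %s\n", entry.name);
    }
    return 1;
}

One trade-off of this design: operations taking extra parameters, such as the spatial-dim count of profile_convnd_bwd_data, need thin wrappers (or one capture-less lambda per dimension, which converts to a plain function pointer) to fit the common signature.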