mirror of
https://github.com/ROCm/composable_kernel.git
synced 2026-05-11 17:00:18 +00:00
* Convolution ND * Code unification across dimensions for generating tensor descriptors. * Example * Instances * Move convnd f32 instance file to comply with repo structure. * Conv 1D tensor layouts. * Formatting and use ReferenceConv * Reference ConvFwd supporting 1D and 2D convolution. * Debug printing TensorLayout name. * Conv fwd 1D instance f32 * Refactor conv ND example. Needed to support various conv dimensions. * Rename conv nd example directory to prevent conflicts. * Refactor some common utility to single file. Plus some tests. * Refactor GetHostTensorDescriptor + UT. * Add 1D test case. * Test reference convolution 1d/2d * Remove some leftovers. * Fix convolution example error for 1D * Refactor test check errors utility function. * Test Conv2D Fwd XDL * More UT for 1D case. * Parameterize input & weight initializers. * Rename example to prevent conflicts. * Split convnd instance into separate files for 1d/2d * Address review comments. * Fix data type for flops/gbytes calculations. * Assign example number 11. Co-authored-by: Adam Osewski <aosewski@amd.com> Co-authored-by: Chao Liu <chao.liu2@amd.com>
51 lines
2.9 KiB
CMake
# Make the project's public headers visible to every example in this
# directory. BEFORE prepends these paths so in-tree headers win over any
# identically-named installed ones.
#
# NOTE(review): directory-scoped include_directories leaks into every target
# below; consider migrating to target_include_directories (or an INTERFACE
# library carrying these paths) — confirm no subdirectory relies on
# inheriting them first.
include_directories(BEFORE
    ${PROJECT_SOURCE_DIR}
    ${PROJECT_SOURCE_DIR}/host/host_tensor/include
    ${PROJECT_SOURCE_DIR}/host/device/include
    ${PROJECT_SOURCE_DIR}/device_operation/include
    ${PROJECT_SOURCE_DIR}/reference_operation/include
    ${PROJECT_SOURCE_DIR}/composable_kernel/include
    ${PROJECT_SOURCE_DIR}/composable_kernel/include/utility
    ${PROJECT_SOURCE_DIR}/composable_kernel/include/tensor_description
    ${PROJECT_SOURCE_DIR}/composable_kernel/include/tensor_operation
    ${PROJECT_SOURCE_DIR}/composable_kernel/include/problem_transform
    ${PROJECT_SOURCE_DIR}/external/rocm/include
    ${PROJECT_SOURCE_DIR}/device_operation_reference/include
)
# Each example lives in a numbered subdirectory and builds one executable
# that links against the host_tensor library.
#
# add_example_executable(<target> <source>...)
#   Creates example executable <target> from <source>... and links it
#   PRIVATE against host_tensor. Centralizes the create+link boilerplate so
#   a new example cannot be added without its link dependency.
#   (Replaces the previous pattern of one single-use <NAME>_SOURCE variable
#   plus separate add_executable/target_link_libraries calls per example.)
function(add_example_executable EXAMPLE_TARGET)
    add_executable(${EXAMPLE_TARGET} ${ARGN})
    target_link_libraries(${EXAMPLE_TARGET} PRIVATE host_tensor)
endfunction()

add_example_executable(gemm_xdl 1_gemm_xdl/gemm_xdl.cpp)
add_example_executable(gemm_xdl_bias_relu 2_gemm_xdl_bias_relu/gemm_xdl_bias_relu.cpp)
add_example_executable(gemm_xdl_bias_relu_add 3_gemm_xdl_bias_relu_add/gemm_xdl_bias_relu_add.cpp)
add_example_executable(conv2d_fwd_xdl 4_conv2d_fwd_xdl/conv2d_fwd_xdl.cpp)
add_example_executable(conv2d_fwd_xdl_bias_relu 5_conv2d_fwd_xdl_bias_relu/conv2d_fwd_xdl_bias_relu.cpp)
add_example_executable(conv2d_fwd_xdl_bias_relu_add 6_conv2d_fwd_xdl_bias_relu_add/conv2d_fwd_xdl_bias_relu_add.cpp)
add_example_executable(conv2d_fwd_xdl_bias_relu_atomic_add 7_conv2d_fwd_xdl_bias_relu_atomic_add/conv2d_fwd_xdl_bias_relu_atomic_add.cpp)
add_example_executable(gemm_xdl_alpha_beta 8_gemm_xdl_alpha_beta/gemm_xdl_alpha_beta.cpp)
add_example_executable(conv2d_fwd_xdl_int8 9_conv2d_fwd_xdl_int8/conv2d_fwd_xdl_int8.cpp)
add_example_executable(conv3d_fwd_xdl 10_conv3d_fwd_xdl/conv3d_fwd_xdl.cpp)
add_example_executable(convnd_fwd_xdl 11_convnd_fwd_xdl/convnd_fwd_xdl.cpp)