mirror of
https://github.com/ROCm/composable_kernel.git
synced 2026-05-15 18:42:06 +00:00
* start conv2d bwd api
* kernel running
* add bwd reference
* change to no shuffle
* fix bwd reference
* pass verification
* add Filter1x1Stride1Pad0 and start testing
* change some tuning parameter
* fix test error
* add fp16 tuning parameter
* add bf16 tuning parameter
* add int8 tuning parameters
* change fp32 tuning parameter
* add bwd to profiler
* fix bug for bwd profiler
* fix ckProfiler bug
* change conv2d_bwd_xdl to fp16
* fix bug in comments
* fix precompile id
* fix enum conv name
* change _bwd_ to _bwd_data_
* change conv2d_bwd example id
* bwd to bwd data
* fix prehead
* fix MakeDefaultBlock2CTileMap, imported from merge of develop
* format bwd instance
* bwd to bwd data
* change name bwd to bwd data
* change name bwd to bwd data in example
* format code
* change conv2d bwd data id in example
* rewrite readme for example
* fix CalculateMagicNumbers about div zero
* add workaround CK_WORKAROUND_SWDEV_325164
* change test_conf2d_bwd_data show info
* format
* fix bug for workaround:CK_WORKAROUND_SWDEV_325164
* format tuning parameters
* format tuning parameters again
* format tuning parameters 3
* format tuning parameters 4
* remove add function template
* format
* update comment
Co-authored-by: ltqin <letaoqin@amd.com>
Co-authored-by: Chao Liu <chao.liu2@amd.com>
[ROCm/composable_kernel commit: c254e5abd2]
55 lines
3.1 KiB
CMake
55 lines
3.1 KiB
CMake
# Project-wide header search paths, prepended (BEFORE) so in-tree headers
# shadow any identically named headers installed on the system.
#
# NOTE(review): directory-scoped include_directories() leaks these paths to
# every target defined below; modern CMake prefers per-target
# target_include_directories(). Kept as-is here because all executables in
# this file depend on these paths being globally visible.
include_directories(BEFORE
    ${PROJECT_SOURCE_DIR}
    ${PROJECT_SOURCE_DIR}/host/host_tensor/include
    ${PROJECT_SOURCE_DIR}/host/device/include
    ${PROJECT_SOURCE_DIR}/device_operation/include
    ${PROJECT_SOURCE_DIR}/reference_operation/include
    ${PROJECT_SOURCE_DIR}/composable_kernel/include
    ${PROJECT_SOURCE_DIR}/composable_kernel/include/utility
    ${PROJECT_SOURCE_DIR}/composable_kernel/include/tensor_description
    ${PROJECT_SOURCE_DIR}/composable_kernel/include/tensor_operation
    ${PROJECT_SOURCE_DIR}/composable_kernel/include/problem_transform
    ${PROJECT_SOURCE_DIR}/external/rocm/include
    ${PROJECT_SOURCE_DIR}/device_operation_reference/include
)
# Source file for each example executable, one variable per example.
# Paths are relative to this directory; the numeric prefix on each
# subdirectory matches the example's ID used elsewhere in the project.
set(GEMM_XDL_SOURCE 1_gemm_xdl/gemm_xdl.cpp)
set(GEMM_XDL_BIAS_RELU_SOURCE 2_gemm_xdl_bias_relu/gemm_xdl_bias_relu.cpp)
set(GEMM_XDL_BIAS_RELU_ADD_SOURCE 3_gemm_xdl_bias_relu_add/gemm_xdl_bias_relu_add.cpp)
set(CONV2D_FWD_XDL_SOURCE 4_conv2d_fwd_xdl/conv2d_fwd_xdl.cpp)
set(CONV2D_FWD_XDL_BIAS_RELU_SOURCE 5_conv2d_fwd_xdl_bias_relu/conv2d_fwd_xdl_bias_relu.cpp)
set(CONV2D_FWD_XDL_BIAS_RELU_ADD_SOURCE 6_conv2d_fwd_xdl_bias_relu_add/conv2d_fwd_xdl_bias_relu_add.cpp)
set(CONV2D_FWD_XDL_BIAS_RELU_ATOMIC_ADD_SOURCE 7_conv2d_fwd_xdl_bias_relu_atomic_add/conv2d_fwd_xdl_bias_relu_atomic_add.cpp)
set(GEMM_XDL_ALPHA_BETA_SOURCE 8_gemm_xdl_alpha_beta/gemm_xdl_alpha_beta.cpp)
set(CONV2D_FWD_XDL_INT8_SOURCE 9_conv2d_fwd_xdl_int8/conv2d_fwd_xdl_int8.cpp)
set(CONV3D_FWD_XDL_SOURCE 10_conv3d_fwd_xdl/conv3d_fwd_xdl.cpp)
set(CONVND_FWD_XDL_SOURCE 11_convnd_fwd_xdl/convnd_fwd_xdl.cpp)
set(CONV2D_BWD_DATA_XDL_SOURCE 12_conv2d_bwd_data_xdl/conv2d_bwd_data_xdl.cpp)
# One executable per example, built from the *_SOURCE variables above.
add_executable(gemm_xdl ${GEMM_XDL_SOURCE})
add_executable(gemm_xdl_bias_relu ${GEMM_XDL_BIAS_RELU_SOURCE})
add_executable(gemm_xdl_bias_relu_add ${GEMM_XDL_BIAS_RELU_ADD_SOURCE})
add_executable(conv2d_fwd_xdl ${CONV2D_FWD_XDL_SOURCE})
add_executable(conv2d_fwd_xdl_bias_relu ${CONV2D_FWD_XDL_BIAS_RELU_SOURCE})
add_executable(conv2d_fwd_xdl_bias_relu_add ${CONV2D_FWD_XDL_BIAS_RELU_ADD_SOURCE})
add_executable(conv2d_fwd_xdl_bias_relu_atomic_add ${CONV2D_FWD_XDL_BIAS_RELU_ATOMIC_ADD_SOURCE})
add_executable(gemm_xdl_alpha_beta ${GEMM_XDL_ALPHA_BETA_SOURCE})
add_executable(conv2d_fwd_xdl_int8 ${CONV2D_FWD_XDL_INT8_SOURCE})
add_executable(conv3d_fwd_xdl ${CONV3D_FWD_XDL_SOURCE})
add_executable(convnd_fwd_xdl ${CONVND_FWD_XDL_SOURCE})
add_executable(conv2d_bwd_data_xdl ${CONV2D_BWD_DATA_XDL_SOURCE})
# Every example links the project's host_tensor library. PRIVATE is correct:
# these are leaf executables with no consumers, so nothing propagates.
target_link_libraries(gemm_xdl PRIVATE host_tensor)
target_link_libraries(gemm_xdl_bias_relu PRIVATE host_tensor)
target_link_libraries(gemm_xdl_bias_relu_add PRIVATE host_tensor)
target_link_libraries(conv2d_fwd_xdl PRIVATE host_tensor)
target_link_libraries(conv2d_fwd_xdl_bias_relu PRIVATE host_tensor)
target_link_libraries(conv2d_fwd_xdl_bias_relu_add PRIVATE host_tensor)
target_link_libraries(conv2d_fwd_xdl_bias_relu_atomic_add PRIVATE host_tensor)
target_link_libraries(gemm_xdl_alpha_beta PRIVATE host_tensor)
target_link_libraries(conv2d_fwd_xdl_int8 PRIVATE host_tensor)
target_link_libraries(conv3d_fwd_xdl PRIVATE host_tensor)
target_link_libraries(convnd_fwd_xdl PRIVATE host_tensor)
target_link_libraries(conv2d_bwd_data_xdl PRIVATE host_tensor)