mirror of
https://github.com/ROCm/composable_kernel.git
synced 2026-05-11 08:50:17 +00:00
* Use thread cluster descriptor and explicit M_K 2d descriptor to simply Blockwise Reduction * Change by replacing ReduceDims by NumReduceDims as Device Reduce interface template parameter * Rename the folder name for the pool2d and reduce examples * Update to reduction test scripts * Add Readme for pool2d_fwd and reduce_blockwise examples * Add support for int8_t reduction (ADD/AVG, MIN/MAX/AMAX) * Tiny fix in reduce profiler and tiny update in reduce testing scripts * Tiny fix in testing script profile_reduce_no_index.sh * Tiny fix in testing script profile_reduce_no_index.sh * Add support for bfp16 reduction (using bhalf_t = ushort) * Tiny fix in amd_buffer_addressing.hpp * Tiny change in script/profile_reduce_with_index.sh * Use AccDataType for Beta value and use element_wise::PassThrough * Use type_convert for type converting in host layer reduction * Renaming and refining in Reduction profiler/device layer/examples * Renaming and refining in Reduction profiler/device layer/examples * Renaming all NumReduceDims to NumReduceDim * Fix the leaked type_convert in ThreadwiseTensorSliceTransfer_v2 * Update to testing scripts to add bf16 support * added more static_assert * Remove buggy tunable configurations defined in device_reduce_instance_xxx.hpp * Add static_assert to give compile-time warning for incorrect thread slice-size/vector-size configurations * minor change * Refine and fix (in GetWorkspaceSizeInBytes of MultiBlockPartialReduce) to make int8 completely pass * Tiny renaming in gridwise_2d_reduction_multiblock_partial_reduce.hpp * Tiny fix in script/profile_reduce_no_index.sh * Refine in DeviceReduce layer with regard to using NumInvariantDim/NumReduceDim or InvariantDims/ReduceDims * Generic renaming in host reduction and DeviceReduce layer * Add support for 4-d all dimension reduction in the profiler and add_device_reduce_xxx instances * Use multi-thread and simplification for host Reduction implementation * Add ctest for reduction * Update to clarify 
the using of data init method in produce_reduce/example_reduce/test_reduce/ * Update to the reduce CTest executables to enable default testing behavior when no command argument * Renaming Co-authored-by: Jianfeng yan <jfyan008@gmail.com>
44 lines
1.9 KiB
CMake
# Directory-scoped include paths, inherited by every test subdirectory added
# below. NOTE(review): modern CMake prefers target_include_directories(), but
# the test subdirectories depend on these propagating, so the directory-scoped
# form is kept; converting would require touching each subdirectory's targets.
include_directories(BEFORE
    ${PROJECT_SOURCE_DIR}/include/ck
    ${PROJECT_SOURCE_DIR}/include/ck/utility
    ${PROJECT_SOURCE_DIR}/include/ck/tensor_description
    ${PROJECT_SOURCE_DIR}/include/ck/tensor
    ${PROJECT_SOURCE_DIR}/include/ck/problem_transform
    ${PROJECT_SOURCE_DIR}/include/ck/tensor_operation/gpu/device
    ${PROJECT_SOURCE_DIR}/include/ck/tensor_operation/gpu/grid
    ${PROJECT_SOURCE_DIR}/include/ck/tensor_operation/gpu/block
    ${PROJECT_SOURCE_DIR}/include/ck/tensor_operation/gpu/warp
    ${PROJECT_SOURCE_DIR}/include/ck/tensor_operation/gpu/thread
    ${PROJECT_SOURCE_DIR}/include/ck/tensor_operation/gpu/element
    ${PROJECT_SOURCE_DIR}/library/include/ck/library/host_tensor
    ${PROJECT_SOURCE_DIR}/library/include/ck/library/tensor_operation_instance
    ${PROJECT_SOURCE_DIR}/library/include/ck/library/tensor_operation_instance/gpu/reduce
    ${PROJECT_SOURCE_DIR}/library/include/ck/library/reference_tensor_operation/cpu
    ${PROJECT_SOURCE_DIR}/library/include/ck/library/reference_tensor_operation/gpu
    ${PROJECT_SOURCE_DIR}/test/include
    ${PROJECT_SOURCE_DIR}/external/include/half
)
# "check" builds its dependents and then runs the full suite through CTest,
# printing output only for failing tests.
add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND} --output-on-failure -C ${CMAKE_CFG_INTDIR})

# "tests" builds every registered test executable without running anything.
add_custom_target(tests)
# add_test_executable(<TEST_NAME> <source>...)
#
# Builds an executable named TEST_NAME from the remaining arguments (ARGN),
# registers it with CTest, and attaches it to the aggregate "tests" and
# "check" targets so it is built by either one.
function(add_test_executable TEST_NAME)
    message("adding test ${TEST_NAME}")
    add_executable(${TEST_NAME} ${ARGN})
    # Use $<TARGET_FILE:...> so CTest finds the binary for any generator/config.
    add_test(NAME ${TEST_NAME} COMMAND $<TARGET_FILE:${TEST_NAME}>)
    add_dependencies(tests ${TEST_NAME})
    add_dependencies(check ${TEST_NAME})
endfunction()
# Individual test suites; each subdirectory registers its tests via
# add_test_executable() defined above.
add_subdirectory(magic_number_division)
add_subdirectory(space_filling_curve)
add_subdirectory(conv_util)
add_subdirectory(reference_conv_fwd)
add_subdirectory(gemm)
add_subdirectory(gemm_split_k)
add_subdirectory(conv2d_fwd)
add_subdirectory(convnd_fwd)
add_subdirectory(conv2d_bwd_data)
add_subdirectory(batched_gemm)
add_subdirectory(reduce)