mirror of https://github.com/ROCm/composable_kernel.git

merge from public repo
@@ -1,3 +1,3 @@
 CheckOptions:
   - key: bugprone-reserved-identifier.AllowedIdentifiers
-    value: '__HIP_PLATFORM_HCC__;__HIP_ROCclr__'
+    value: '__HIP_PLATFORM_HCC__;__HIP_PLATFORM_AMD__;__HIP_ROCclr__'
.github/CODEOWNERS
@@ -1,3 +1,4 @@
 * @zjing14 @asroy @junliume @illsilin @carlushuang @aosewski
+# Documentation files
 docs/* @saadrahim @LisaDelaney
 *.md @saadrahim @LisaDelaney
.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1 @@
+blank_issues_enabled: true
.github/ISSUE_TEMPLATE/issue_report.yml
@@ -0,0 +1,221 @@
name: Issue Report
description: File a report for ROCm related issues on Linux and Windows. For issues pertaining to documentation or non-bug related, please open a blank issue located below.
title: "[Issue]: "

body:
  - type: markdown
    attributes:
      value: |
        Thank you for taking the time to fill out this report!

        You can acquire your OS, CPU, GPU (for filling out this report) with the following commands:

        Linux:
          echo "OS:" && cat /etc/os-release | grep -E "^(NAME=|VERSION=)";
          echo "CPU: " && cat /proc/cpuinfo | grep "model name" | sort --unique;
          echo "GPU:" && /opt/rocm/bin/rocminfo | grep -E "^\s*(Name|Marketing Name)";

        Windows:
          (Get-WmiObject Win32_OperatingSystem).Version
          (Get-WmiObject win32_Processor).Name
          (Get-WmiObject win32_VideoController).Name
  - type: textarea
    attributes:
      label: Problem Description
      description: Describe the issue you encountered.
    validations:
      required: true
  - type: input
    attributes:
      label: Operating System
      description: What is the name and version number of the OS?
      placeholder: "e.g. Ubuntu 22.04.3 LTS (Jammy Jellyfish)"
    validations:
      required: true
  - type: input
    attributes:
      label: CPU
      description: What CPU did you encounter the issue on?
      placeholder: "e.g. AMD Ryzen 9 5900HX with Radeon Graphics"
    validations:
      required: true
  - type: dropdown
    attributes:
      label: GPU
      description: What GPU(s) did you encounter the issue on (you can select multiple GPUs from the list)
      multiple: true
      options:
        - AMD Instinct MI300X
        - AMD Instinct MI300A
        - AMD Instinct MI300
        - AMD Instinct MI250X
        - AMD Instinct MI250
        - AMD Instinct MI210
        - AMD Instinct MI100
        - AMD Instinct MI50
        - AMD Instinct MI25
        - AMD Radeon Pro V620
        - AMD Radeon Pro VII
        - AMD Radeon RX 7900 XTX
        - AMD Radeon VII
        - AMD Radeon Pro W7900
        - AMD Radeon Pro W7800
        - AMD Radeon Pro W6800
        - AMD Radeon Pro W6600
        - AMD Radeon Pro W5500
        - AMD Radeon RX 7900 XT
        - AMD Radeon RX 7600
        - AMD Radeon RX 6950 XT
        - AMD Radeon RX 6900 XT
        - AMD Radeon RX 6800 XT
        - AMD Radeon RX 6800
        - AMD Radeon RX 6750
        - AMD Radeon RX 6700 XT
        - AMD Radeon RX 6700
        - AMD Radeon RX 6650 XT
        - AMD Radeon RX 6600 XT
        - AMD Radeon RX 6600
        - Other
    validations:
      required: true
  - type: input
    attributes:
      label: Other
      description: If you selected Other, please specify
  - type: dropdown
    attributes:
      label: ROCm Version
      description: What version(s) of ROCm did you encounter the issue on?
      multiple: true
      options:
        - ROCm 6.0.0
        - ROCm 5.7.1
        - ROCm 5.7.0
        - ROCm 5.6.1
        - ROCm 5.6.0
        - ROCm 5.5.1
        - ROCm 5.5.0
    validations:
      required: true
  - type: dropdown
    attributes:
      label: ROCm Component
      description: (Optional) If this issue relates to a specific ROCm component, it can be mentioned here.
      multiple: true
      options:
        - Other
        - AMD Common Language Runtime
        - AMD MIGraphX
        - AMD System Management Interface
        - amdgpu KCL/autoconf
        - amdgpu Kernel-mode GPU Driver
        - amdgpu-install
        - AOMP
        - AOMP Extras
        - AqlProfile
        - build-infra
        - chelsio
        - clang-ocl
        - Composable Kernel
        - dkms
        - docker / ROCm-docker
        - flang
        - gpuburn
        - half
        - HIP
        - HIP Examples
        - hipBLAS
        - hipBLASLt
        - HIPCC
        - hipCUB
        - hip-examples-private
        - hipFFT
        - hipfort
        - HIPIFY
        - hipRAND
        - hipSOLVER
        - hipSPARSE
        - hipSPARSELt
        - hipTensor
        - hip-tests
        - HSA Runtime
        - infrastructure
        - jenkins-utils
        - libdrm
        - Linux BPI packaging framework
        - llvm-project
        - Mesa
        - meta
        - MIOpen
        - MIVisionX
        - ml-framework-ci
        - MLSEQA_TestRepo
        - OpenCL API C++ Bindings
        - OpenCL API Headers
        - OpenCL Conformance Test Suite
        - OpenCL ICD Loader
        - perftest-p2p
        - prototype
        - RCCL
        - rccl-rdma-sharp-plugins
        - rocALUTION
        - rocBLAS
        - ROCdbgapi
        - ROCdebug-agent
        - rocFFT
        - ROCgdb
        - ROCK
        - ROCm Documentation/Website
        - ROCm Data Center Tool
        - ROCm Examples
        - ROCm for Windows
        - ROCm Performance Primitives
        - ROCm System Management Interface Library
        - ROCm Thrust
        - ROCm Validation Suite
        - rocm_bandwidth_test
        - rocm-cmake
        - rocm-core
        - rocm-docs-core
        - rocminfo
        - rocMLIR
        - rocmtools
        - rocPRIM
        - rocprofiler
        - rocRAND
        - ROCR-Runtime
        - rocSOLVER
        - rocSPARSE
        - roctracer
        - ROCT-Thunk-Interface
        - rocWMMA
        - Tensile
        - umr
        - ibv_rc_pingpong-amd
        - mellanox
        - mpitest
        - Pytorch
        - Tensorflow
        - APEX
        - torchvision
        - Magma
  - type: textarea
    attributes:
      label: Steps to Reproduce
      description: (Optional) Detailed steps to reproduce the issue.
    validations:
      required: false

  - type: textarea
    attributes:
      label: (Optional for Linux users) Output of /opt/rocm/bin/rocminfo --support
      description: The output of rocminfo --support could help to better address the problem.
    validations:
      required: false

  - type: textarea
    attributes:
      label: Additional Information
      description: (Optional) Any additional information that is relevant, e.g. relevant environment variables, dockerfiles, log files, dmesg output (on Linux), etc.
    validations:
      required: false
.github/dependabot.yml
@@ -10,3 +10,9 @@ updates:
     open-pull-requests-limit: 10
     schedule:
       interval: "daily"
+    labels:
+      - "documentation"
+      - "dependencies"
+      - "ci:docs-only"
+    reviewers:
+      - "samjwu"
.gitignore
@@ -54,5 +54,13 @@ _images/
 _static/
 _templates/
 _toc.yml
 docBin/
 _doxygen/
+
+# JetBrains IDE
+.idea/
+cmake-build*/
+build*/
+
+# Python virtualenv
+.venv/
@@ -3,11 +3,6 @@

 version: 2

-build:
-  os: ubuntu-22.04
-  tools:
-    python: "3.8"
-
 sphinx:
   configuration: docs/conf.py

@@ -16,3 +11,8 @@ formats: [htmlzip, pdf, epub]
 python:
   install:
     - requirements: docs/sphinx/requirements.txt
+
+build:
+  os: ubuntu-22.04
+  tools:
+    python: "3.8"
CHANGELOG.md
@@ -2,52 +2,66 @@

 Full documentation for Composable Kernel is not yet available.

-## (Unreleased) CK for ROCm 6.0.0
+## (Unreleased) CK

 ### Fixes
-- Fixed a hazard associated with inline v_dot (#808)
-- Fixed two bugs in grouped convolution backward data without K padding (#848 #876)
+None

 ### Optimizations
 None

 ### Additions
-- Added an image to a column kernel (#867)
-- Added a column to an image kernel (#930)
-- Support for 3D grouped convolution on RDNA 3 GPUs (#935, #950, #985)
-- Grouped convolution support for small K and C (#822 #879 #897)
-- Support for NHWGC (2D and 3D) grouped convolution backward weight (#769 #804)
-- Support for bf16/f32/f16 and NHWGC (2D and 3D) grouped convolution backward data (#757 #799)
-- Support for Batched Gemm DL (#732)
+* Introduced wrapper sublibrary (limited functionality). (#1071, #1098, #1108, #1126, #1139)

 ### Changes
-- Changed the grouped convolution API to maintain consistency with other convolution kernels (#817)
+None
+
+## CK for ROCm 6.0.0
+
+### Fixes
+* Fixed a hazard associated with inline v_dot (#808)
+* Fixed two bugs in grouped convolution backward data without K padding (#848 #876)
+
+### Optimizations
+None
+
+### Additions
+* Added an image to a column kernel (#867)
+* Added a column to an image kernel (#930)
+* Support for 3D grouped convolution on RDNA 3 GPUs (#935, #950, #985)
+* Grouped convolution support for small K and C (#822 #879 #897)
+* Support for NHWGC (2D and 3D) grouped convolution backward weight (#769 #804)
+* Support for bf16/f32/f16 and NHWGC (2D and 3D) grouped convolution backward data (#757 #799)
+* Support for Batched Gemm DL (#732)
+
+### Changes
+* Changed the grouped convolution API to maintain consistency with other convolution kernels (#817)

 ## CK 0.2.0 for ROCm 5.7.0

 ### Fixes
-- Fixed a bug in 6-dimensional kernels (#555)
-- Fixed a test case failure with grouped convolution backward weight (#524)
+* Fixed a bug in 6-dimensional kernels (#555)
+* Fixed a test case failure with grouped convolution backward weight (#524)

 ### Optimizations
-- Improved the performance of the normalization kernel
+* Improved the performance of the normalization kernel

 ### Additions
-- New CMake flags:
-  - "DL_KERNELS" -- Must be set to "ON" in order to build the gemm_dl and batched_gemm_multi_d_dl instances
-  - "DTYPES" -- Can be set to any subset of "fp64;fp32;fp16;fp8;bf16;int8" to build an instance of the specified data types
-  - "INSTANCES_ONLY" -- Only builds CK library and instances without tests, examples, or profiler
-- New feature: if GPU_TARGETS is not set in the CMake command line, CK will be built for all targets supported by the compiler
-- Support for MI300A/MI300X
-- Support for AMD RDNA 3
-- New user tutorial (#563)
-- Additional instances for irregular GEMM sizes (#560)
-- New inter-wave consumer-producer programming model for GEMM kernels (#310)
-- GEMM with support multiple elementwise fusions (multi-D) (#534)
-- Multi-embeddings support (#542)
-- AMD RDNA 3 blockwise GEMM and real GEMM support (#541)
-- AMD RDNA grouped convolution backward weight support (#505)
-- MaxPool and AvgPool forward (#815); MaxPool backward (#750)
+* New CMake flags:
+  * "DL_KERNELS" -- Must be set to "ON" in order to build the gemm_dl and batched_gemm_multi_d_dl instances
+  * "DTYPES" -- Can be set to any subset of "fp64;fp32;fp16;fp8;bf16;int8" to build an instance of the specified data types
+  * "INSTANCES_ONLY" -- Only builds CK library and instances without tests, examples, or profiler
+* New feature: if GPU_TARGETS is not set in the CMake command line, CK will be built for all targets supported by the compiler
+* Support for MI300A/MI300X
+* Support for AMD RDNA 3
+* New user tutorial (#563)
+* Additional instances for irregular GEMM sizes (#560)
+* New inter-wave consumer-producer programming model for GEMM kernels (#310)
+* GEMM with support multiple elementwise fusions (multi-D) (#534)
+* Multi-embeddings support (#542)
+* AMD RDNA 3 blockwise GEMM and real GEMM support (#541)
+* AMD RDNA grouped convolution backward weight support (#505)
+* MaxPool and AvgPool forward (#815); MaxPool backward (#750)

 ### Changes
 None
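As a rough illustration of the CMake flags listed in the changelog above, a configure invocation might combine them as follows (the compiler path and GPU target are arbitrary examples, not recommendations; if GPU_TARGETS is omitted, CK is built for all targets the compiler supports):

```bash
cmake -D CMAKE_CXX_COMPILER=/opt/rocm/bin/hipcc \
      -D GPU_TARGETS="gfx90a" \
      -D DTYPES="fp16;fp32" \
      -D DL_KERNELS=ON \
      -D INSTANCES_ONLY=ON ..
```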
@@ -59,9 +59,9 @@ authors:
     family-names: Zhou
   - given-names: Jianfeng
     family-names: Yan
-repository-code: 'https://github.com/ROCmSoftwarePlatform/composable_kernel'
+repository-code: 'https://github.com/ROCm/composable_kernel'
 abstract: Composable Kernel (CK) library aims to provide a programming model for writing performance critical kernels for Machine Learning workloads across multiple architectures including GPUs, CPUs, etc, through general purpose kernel progarmming languages, like HIP C++.
 keywords:
   - 'CK, Composable Kernel, Tensor Coordinate Transformation'
 license: MIT
-license-url: https://github.com/ROCmSoftwarePlatform/composable_kernel/blob/7fc3ed761aa35709d87c8fbbe41dd368648b3541/LICENSE
+license-url: https://github.com/ROCm/composable_kernel/blob/7fc3ed761aa35709d87c8fbbe41dd368648b3541/LICENSE
@@ -4,22 +4,27 @@ if(POLICY CMP0140)
     cmake_policy(SET CMP0140 NEW)
 endif()

+get_property(_GENERATOR_IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
+
 # This has to be initialized before the project() command appears
 # Set the default of CMAKE_BUILD_TYPE to be release, unless user specifies with -D. MSVC_IDE does not use CMAKE_BUILD_TYPE
-if( NOT MSVC_IDE AND NOT CMAKE_BUILD_TYPE )
-    set( CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel." )
+if(_GENERATOR_IS_MULTI_CONFIG)
+    set(CMAKE_CONFIGURATION_TYPES "Debug;Release;RelWithDebInfo;MinSizeRel" CACHE STRING
+        "Available build types (configurations) on multi-config generators")
+else()
+    set(CMAKE_BUILD_TYPE Release CACHE STRING
+        "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel.")
 endif()

 # Default installation path
-if(WIN32)
-    set(CMAKE_INSTALL_PREFIX "/opt/rocm/x86_64-w64-mingw32" CACHE PATH "")
-else()
+if(NOT WIN32)
     set(CMAKE_INSTALL_PREFIX "/opt/rocm" CACHE PATH "")
 endif()

 set(version 1.1.0)
 # Check support for CUDA/HIP in Cmake
-project(composable_kernel VERSION ${version})
+project(composable_kernel VERSION ${version} LANGUAGES CXX)
 include(CTest)

 list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake")
@@ -61,6 +66,8 @@ endif()

 #for f8/bf8_t type
 add_compile_options(-Wno-bit-int-extension)
+add_compile_options(-Wno-pass-failed)
+add_compile_options(-Wno-switch-default)

 if(DL_KERNELS)
     add_definitions(-DDL_KERNELS)
@@ -72,15 +79,15 @@ if(INSTANCES_ONLY)
     set(CK_ENABLE_INSTANCES_ONLY "ON")
 endif()

 include(getopt)

 # CK config file to record supported datatypes, etc.
-configure_file("${PROJECT_SOURCE_DIR}/include/ck/config.h.in" "${PROJECT_BINARY_DIR}/include/ck/config.h")
+configure_file(include/ck/config.h.in ${CMAKE_CURRENT_BINARY_DIR}/include/ck/config.h)

 # CK version file to record release version as well as git commit hash
 find_package(Git REQUIRED)
 execute_process(COMMAND "${GIT_EXECUTABLE}" rev-parse HEAD OUTPUT_VARIABLE COMMIT_ID OUTPUT_STRIP_TRAILING_WHITESPACE)
-configure_file("${PROJECT_SOURCE_DIR}/include/ck/version.h.in" "${PROJECT_BINARY_DIR}/include/ck/version.h")
-
-enable_testing()
+configure_file(include/ck/version.h.in ${CMAKE_CURRENT_BINARY_DIR}/include/ck/version.h)

 set(ROCM_SYMLINK_LIBS OFF)
 find_package(ROCM REQUIRED PATHS /opt/rocm)
@@ -96,7 +103,7 @@ include(TargetFlags)

 rocm_setup_version(VERSION ${version})

-list(APPEND CMAKE_PREFIX_PATH ${CMAKE_INSTALL_PREFIX} ${CMAKE_INSTALL_PREFIX}/llvm ${CMAKE_INSTALL_PREFIX}/hip /opt/rocm /opt/rocm/llvm /opt/rocm/hip)
+list(APPEND CMAKE_PREFIX_PATH ${CMAKE_INSTALL_PREFIX} ${CMAKE_INSTALL_PREFIX}/llvm ${CMAKE_INSTALL_PREFIX}/hip /opt/rocm /opt/rocm/llvm /opt/rocm/hip "$ENV{ROCM_PATH}" "$ENV{HIP_PATH}")

 message("GPU_TARGETS= ${GPU_TARGETS}")
@@ -141,13 +148,40 @@ find_package(hip)
 # SWDEV-413293 and https://reviews.llvm.org/D155213
 math(EXPR hip_VERSION_FLAT "(${hip_VERSION_MAJOR} * 1000 + ${hip_VERSION_MINOR}) * 100000 + ${hip_VERSION_PATCH}")
 message("hip_version_flat=${hip_VERSION_FLAT}")
-if(${hip_VERSION_FLAT} GREATER 500723302)
+if(NOT WIN32 AND ${hip_VERSION_FLAT} GREATER 500723302)
     message("Adding the fno-offload-uniform-block compiler flag")
     add_compile_options(-fno-offload-uniform-block)
 endif()

-option(USE_BITINT_EXTENSION_INT4, "Whether to enable clang's BitInt extension to provide int4 data type." OFF)
-option(USE_OPT_NAVI3X, "Whether to enable LDS cumode and Wavefront32 mode for NAVI3X silicons." OFF)
+#
+# Seperate linking jobs from compiling
+# Too many concurrent linking jobs can break the build
+# Copied from LLVM
+set(CK_PARALLEL_LINK_JOBS "" CACHE STRING
+    "Define the maximum number of concurrent link jobs (Ninja only).")
+if(CMAKE_GENERATOR MATCHES "Ninja")
+    if(CK_PARALLEL_LINK_JOBS)
+        set_property(GLOBAL APPEND PROPERTY JOB_POOLS link_job_pool=${CK_PARALLEL_LINK_JOBS})
+        set(CMAKE_JOB_POOL_LINK link_job_pool)
+    endif()
+elseif(CK_PARALLEL_LINK_JOBS)
+    message(WARNING "Job pooling is only available with Ninja generators.")
+endif()
+# Similar for compiling
+set(CK_PARALLEL_COMPILE_JOBS "" CACHE STRING
+    "Define the maximum number of concurrent compile jobs (Ninja only).")
+if(CMAKE_GENERATOR MATCHES "Ninja")
+    if(CK_PARALLEL_COMPILE_JOBS)
+        set_property(GLOBAL APPEND PROPERTY JOB_POOLS compile_job_pool=${CK_PARALLEL_COMPILE_JOBS})
+        set(CMAKE_JOB_POOL_COMPILE compile_job_pool)
+    endif()
+elseif(CK_PARALLEL_COMPILE_JOBS)
+    message(WARNING "Job pooling is only available with Ninja generators.")
+endif()
+
+option(USE_BITINT_EXTENSION_INT4 "Whether to enable clang's BitInt extension to provide int4 data type." OFF)
+option(USE_OPT_NAVI3X "Whether to enable LDS cumode and Wavefront32 mode for NAVI3X silicons." OFF)

 if(USE_BITINT_EXTENSION_INT4)
     add_compile_definitions(CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4)
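The CK_PARALLEL_LINK_JOBS and CK_PARALLEL_COMPILE_JOBS cache variables introduced above only take effect with the Ninja generator; a hedged usage sketch (the job counts are arbitrary examples):

```bash
cmake -G Ninja \
      -D CK_PARALLEL_COMPILE_JOBS=$(nproc) \
      -D CK_PARALLEL_LINK_JOBS=2 ..
```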
@@ -167,7 +201,6 @@ find_package(Threads REQUIRED)
 link_libraries(Threads::Threads)

 ## C++
-enable_language(CXX)
 set(CMAKE_CXX_STANDARD 17)
 set(CMAKE_CXX_STANDARD_REQUIRED ON)
 set(CMAKE_CXX_EXTENSIONS OFF)
@@ -216,7 +249,11 @@ if( DEFINED CK_OVERRIDE_HIP_VERSION_PATCH )
 endif()
 message(STATUS "Build with HIP ${HIP_VERSION}")
 link_libraries(hip::device)
-add_compile_definitions(__HIP_PLATFORM_HCC__=1)
+if(CK_hip_VERSION VERSION_GREATER_EQUAL 6.0.23494)
+    add_compile_definitions(__HIP_PLATFORM_AMD__=1)
+else()
+    add_compile_definitions(__HIP_PLATFORM_HCC__=1)
+endif()

 ## tidy
 include(EnableCompilerWarnings)
@@ -373,10 +410,9 @@ include_directories(BEFORE

 SET(BUILD_DEV ON CACHE BOOL "BUILD_DEV")
 if(BUILD_DEV)
-    add_compile_options(-Werror -Weverything)
+    add_compile_options(-Werror)
+    add_compile_options(-Weverything)
 endif()
-#add flags to reduce the size of binaries
-add_compile_options(-Oz -flto=thin)
 message("CMAKE_CXX_FLAGS: ${CMAKE_CXX_FLAGS}")

 add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND} --output-on-failure -C ${CMAKE_CFG_INTDIR})
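Note that BUILD_DEV defaults to ON and adds -Werror and -Weverything; users who only want to consume the library can switch it off at configure time. A sketch, not a required setting:

```bash
cmake -D BUILD_DEV=OFF ..
```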
@@ -435,7 +471,9 @@ if(NOT DEFINED INSTANCES_ONLY)
         PACKAGE_NAME examples
     )
     add_subdirectory(example)
-    add_subdirectory(test)
+    if(BUILD_TESTING)
+        add_subdirectory(test)
+    endif()

     rocm_package_setup_component(profiler
         LIBRARY_NAME composablekernel
Dockerfile
@@ -1,6 +1,6 @@
 FROM ubuntu:20.04
 ARG DEBIAN_FRONTEND=noninteractive
-ARG ROCMVERSION=5.7
+ARG ROCMVERSION=6.0
 ARG compiler_version=""
 ARG compiler_commit=""
@@ -16,12 +16,18 @@ RUN apt-get install -y --allow-unauthenticated apt-utils wget gnupg2 curl
 ENV APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn
 RUN curl -fsSL https://repo.radeon.com/rocm/rocm.gpg.key | gpg --dearmor -o /etc/apt/trusted.gpg.d/rocm-keyring.gpg

-RUN wget https://repo.radeon.com/amdgpu-install/5.7/ubuntu/focal/amdgpu-install_5.7.50700-1_all.deb --no-check-certificate
-RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated ./amdgpu-install_5.7.50700-1_all.deb
-
-RUN wget -qO - http://repo.radeon.com/rocm/rocm.gpg.key | apt-key add - && \
-    sh -c "echo deb [arch=amd64 signed-by=/etc/apt/trusted.gpg.d/rocm-keyring.gpg] $DEB_ROCM_REPO focal main > /etc/apt/sources.list.d/rocm.list" && \
-    sh -c 'echo deb [arch=amd64 signed-by=/etc/apt/trusted.gpg.d/rocm-keyring.gpg] https://repo.radeon.com/amdgpu/$ROCMVERSION/ubuntu focal main > /etc/apt/sources.list.d/amdgpu.list'
+RUN if [ "$ROCMVERSION" != "6.0.1" ]; then \
+        sh -c "wget https://repo.radeon.com/amdgpu-install/6.0/ubuntu/focal/amdgpu-install_6.0.60000-1_all.deb --no-check-certificate" && \
+        apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated ./amdgpu-install_6.0.60000-1_all.deb && \
+        wget -qO - http://repo.radeon.com/rocm/rocm.gpg.key | apt-key add - && \
+        sh -c "echo deb [arch=amd64 signed-by=/etc/apt/trusted.gpg.d/rocm-keyring.gpg] $DEB_ROCM_REPO focal main > /etc/apt/sources.list.d/rocm.list" && \
+        sh -c 'echo deb [arch=amd64 signed-by=/etc/apt/trusted.gpg.d/rocm-keyring.gpg] https://repo.radeon.com/amdgpu/$ROCMVERSION/ubuntu focal main > /etc/apt/sources.list.d/amdgpu.list'; \
+    elif [ "$ROCMVERSION" = "6.0.1" ] && [ "$compiler_version" = "rc1" ]; then \
+        sh -c "wget http://artifactory-cdn.amd.com/artifactory/list/amdgpu-deb/amdgpu-install-internal_6.0-20.04-1_all.deb --no-check-certificate" && \
+        apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install dialog && DEBIAN_FRONTEND=noninteractive apt-get install ./amdgpu-install-internal_6.0-20.04-1_all.deb && \
+        sh -c 'echo deb [arch=amd64 trusted=yes] http://compute-artifactory.amd.com/artifactory/list/rocm-release-archive-20.04-deb/ 6.0.1 rel-95 > /etc/apt/sources.list.d/rocm-build.list' && \
+        amdgpu-repo --amdgpu-build=1704947; \
+    fi

 RUN sh -c "echo deb http://mirrors.kernel.org/ubuntu focal main universe | tee -a /etc/apt/sources.list"
 RUN amdgpu-install -y --usecase=rocm --no-dkms
@@ -68,7 +74,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-
     apt-get clean && \
     rm -rf /var/lib/apt/lists/*

-#Install latest version of cmake
+#Install ninja build tracing tools
 RUN wget -qO /usr/local/bin/ninja.gz https://github.com/ninja-build/ninja/releases/latest/download/ninja-linux.zip
 RUN gunzip /usr/local/bin/ninja.gz
 RUN chmod a+x /usr/local/bin/ninja
@@ -76,6 +82,11 @@ RUN git clone https://github.com/nico/ninjatracing.git
 # Update the cmake to the latest version
 RUN pip install --upgrade cmake==3.27.5

+#Install latest cppcheck
+RUN git clone https://github.com/danmar/cppcheck.git && \
+    cd cppcheck && mkdir build && cd build && cmake .. && cmake --build .
+WORKDIR /
+
 # Setup ubsan environment to printstacktrace
 RUN ln -s /usr/bin/llvm-symbolizer-3.8 /usr/local/bin/llvm-symbolizer
 ENV UBSAN_OPTIONS=print_stacktrace=1
@@ -111,7 +122,7 @@ ENV compiler_commit=$compiler_commit
 RUN sh -c "echo compiler version = '$compiler_version'"
 RUN sh -c "echo compiler commit = '$compiler_commit'"

-RUN if [ "$compiler_version" = "amd-stg-open" ] && [ "$compiler_commit" = "" ]; then \
+RUN if ( [ "$compiler_version" = "amd-staging" ] || [ "$compiler_version" = "amd-mainline-open" ] ) && [ "$compiler_commit" = "" ]; then \
     git clone -b "$compiler_version" https://github.com/RadeonOpenCompute/llvm-project.git && \
     cd llvm-project && mkdir build && cd build && \
     cmake -DCMAKE_INSTALL_PREFIX=/opt/rocm/llvm -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_ASSERTIONS=1 -DLLVM_TARGETS_TO_BUILD="AMDGPU;X86" -DLLVM_ENABLE_PROJECTS="clang;lld" -DLLVM_ENABLE_RUNTIMES="compiler-rt" ../llvm && \
@@ -119,7 +130,7 @@ RUN if [ "$compiler_version" = "amd-stg-open" ] && [ "$compiler_commit" = "" ];
     else echo "using the release compiler"; \
     fi

-RUN if [ "$compiler_version" = "amd-stg-open" ] && [ "$compiler_commit" != "" ]; then \
+RUN if ( [ "$compiler_version" = "amd-staging" ] || [ "$compiler_version" = "amd-mainline-open" ] ) && [ "$compiler_commit" != "" ]; then \
     git clone -b "$compiler_version" https://github.com/RadeonOpenCompute/llvm-project.git && \
     cd llvm-project && git checkout "$compiler_commit" && echo "checking out commit $compiler_commit" && mkdir build && cd build && \
     cmake -DCMAKE_INSTALL_PREFIX=/opt/rocm/llvm -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_ASSERTIONS=1 -DLLVM_TARGETS_TO_BUILD="AMDGPU;X86" -DLLVM_ENABLE_PROJECTS="clang;lld" -DLLVM_ENABLE_RUNTIMES="compiler-rt" ../llvm && \
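For context, the build arguments declared at the top of this Dockerfile (ROCMVERSION, compiler_version, compiler_commit) are supplied at image build time; a hedged sketch of a local build (the image tag is an arbitrary example):

```bash
docker build -t ck-dev:rocm6.0 \
  --build-arg ROCMVERSION=6.0 \
  --build-arg compiler_version="" \
  --build-arg compiler_commit="" .
```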
Jenkinsfile
@@ -1,5 +1,5 @@
 def rocmnode(name) {
-    return 'rocmtest && miopen && ' + name
+    return '(rocmtest || miopen) && ' + name
 }

 def show_node_info() {
@@ -33,7 +33,7 @@ def runShell(String command){

 def getDockerImageName(){
     def img
-    if (params.ROCMVERSION != "6.0"){
+    if (params.ROCMVERSION != "6.0.1"){
         if (params.COMPILER_VERSION == "") {
             img = "${env.CK_DOCKERHUB}:ck_ub20.04_rocm${params.ROCMVERSION}"
         }
@@ -84,7 +84,7 @@ def build_compiler(){
         compiler = '/opt/rocm/bin/hipcc'
     }
     else{
-        if (params.COMPILER_VERSION == "amd-stg-open" || params.COMPILER_COMMIT != ""){
+        if (params.COMPILER_VERSION == "amd-staging" || params.COMPILER_VERSION == "amd-mainline-open" || params.COMPILER_COMMIT != ""){
            compiler = "/llvm-project/build/bin/clang++"
        }
        else{
@@ -135,6 +135,7 @@ def buildDocker(install_prefix){
         echo "Building image: ${image_name}"
         retimage = docker.build("${image_name}", dockerArgs + ' .')
+        retimage.push()
         sh 'docker images -q -f dangling=true | xargs --no-run-if-empty docker rmi'
     }
     else{
         echo "Checking for image: ${image_name}"
@@ -293,7 +294,7 @@ def buildHipClangJob(Map conf=[:]){
         dockerOpts = dockerOpts + " --env HSA_XNACK=1 "
     }
     def dockerArgs = "--build-arg PREFIX=${prefixpath} --build-arg compiler_version='${params.COMPILER_VERSION}' --build-arg compiler_commit='${params.COMPILER_COMMIT}' --build-arg ROCMVERSION='${params.ROCMVERSION}' "
-    if (params.COMPILER_VERSION == "amd-stg-open" || params.COMPILER_COMMIT != ""){
+    if (params.COMPILER_VERSION == "amd-staging" || params.COMPILER_VERSION == "amd-mainline-open" || params.COMPILER_COMMIT != ""){
        dockerOpts = dockerOpts + " --env HIP_CLANG_PATH='/llvm-project/build/bin' "
    }
@@ -302,9 +303,9 @@ def buildHipClangJob(Map conf=[:]){
     def retimage
     (retimage, image) = getDockerImage(conf)

-    gitStatusWrapper(credentialsId: "${status_wrapper_creds}", gitHubContext: "Jenkins - ${variant}", account: 'ROCmSoftwarePlatform', repo: 'composable_kernel-internal') {
+    gitStatusWrapper(credentialsId: "${status_wrapper_creds}", gitHubContext: "Jenkins - ${variant}", account: 'ROCm', repo: 'composable_kernel-internal') {
         withDockerContainer(image: image, args: dockerOpts + ' -v=/var/jenkins/:/var/jenkins') {
-            timeout(time: 5, unit: 'HOURS')
+            timeout(time: 48, unit: 'HOURS')
             {
                 cmake_build(conf)
             }
@@ -348,14 +349,14 @@ def runCKProfiler(Map conf=[:]){
         dockerOpts = dockerOpts + " --env HSA_XNACK=1 "
     }
     def dockerArgs = "--build-arg PREFIX=${prefixpath} --build-arg compiler_version='${params.COMPILER_VERSION}' --build-arg compiler_commit='${params.COMPILER_COMMIT}' --build-arg ROCMVERSION='${params.ROCMVERSION}' "
-    if (params.COMPILER_VERSION == "amd-stg-open" || params.COMPILER_COMMIT != ""){
+    if (params.COMPILER_VERSION == "amd-staging" || params.COMPILER_VERSION == "amd-mainline-open" || params.COMPILER_COMMIT != ""){
        dockerOpts = dockerOpts + " --env HIP_CLANG_PATH='/llvm-project/build/bin' "
    }

    def variant = env.STAGE_NAME
    def retimage

-    gitStatusWrapper(credentialsId: "${status_wrapper_creds}", gitHubContext: "Jenkins - ${variant}", account: 'ROCmSoftwarePlatform', repo: 'composable_kernel-internal') {
+    gitStatusWrapper(credentialsId: "${status_wrapper_creds}", gitHubContext: "Jenkins - ${variant}", account: 'ROCm', repo: 'composable_kernel-internal') {
        try {
            (retimage, image) = getDockerImage(conf)
            withDockerContainer(image: image, args: dockerOpts) {
@@ -479,7 +480,7 @@ def Build_CK(Map conf=[:]){
         dockerOpts = dockerOpts + " --env HSA_XNACK=1 "
     }
     def dockerArgs = "--build-arg PREFIX=${prefixpath} --build-arg compiler_version='${params.COMPILER_VERSION}' --build-arg compiler_commit='${params.COMPILER_COMMIT}' --build-arg ROCMVERSION='${params.ROCMVERSION}' "
-    if (params.COMPILER_VERSION == "amd-stg-open" || params.COMPILER_COMMIT != ""){
+    if (params.COMPILER_VERSION == "amd-staging" || params.COMPILER_VERSION == "amd-mainline-open" || params.COMPILER_COMMIT != ""){
        dockerOpts = dockerOpts + " --env HIP_CLANG_PATH='/llvm-project/build/bin' "
    }
@@ -487,7 +488,7 @@ def Build_CK(Map conf=[:]){
     def retimage
     def navi_node = 0

-    gitStatusWrapper(credentialsId: "${status_wrapper_creds}", gitHubContext: "Jenkins - ${variant}", account: 'ROCmSoftwarePlatform', repo: 'composable_kernel-internal') {
+    gitStatusWrapper(credentialsId: "${status_wrapper_creds}", gitHubContext: "Jenkins - ${variant}", account: 'ROCm', repo: 'composable_kernel-internal') {
        try {
            (retimage, image) = getDockerImage(conf)
            withDockerContainer(image: image, args: dockerOpts) {
@@ -553,14 +554,14 @@ def Build_CK(Map conf=[:]){
         sh """#!/bin/bash
             rm -rf "${params.hipTensor_branch}".zip
             rm -rf hipTensor-"${params.hipTensor_branch}"
-            wget https://github.com/ROCmSoftwarePlatform/hipTensor/archive/refs/heads/"${params.hipTensor_branch}".zip
+            wget https://github.com/ROCm/hipTensor/archive/refs/heads/"${params.hipTensor_branch}".zip
             unzip -o "${params.hipTensor_branch}".zip
         """
         dir("hipTensor-${params.hipTensor_branch}"){
             sh """#!/bin/bash
                 mkdir -p build
                 ls -ltr
-                CC=hipcc CXX=hipcc cmake -Bbuild . -D CMAKE_PREFIX_PATH="/opt/rocm;${env.WORKSPACE}/install"
+                CC=hipcc CXX=hipcc cmake -Bbuild . -D CMAKE_PREFIX_PATH="${env.WORKSPACE}/install"
                 cmake --build build -- -j
             """
         }
@@ -605,7 +606,7 @@ def process_results(Map conf=[:]){
     def variant = env.STAGE_NAME
     def retimage

-    gitStatusWrapper(credentialsId: "${status_wrapper_creds}", gitHubContext: "Jenkins - ${variant}", account: 'ROCmSoftwarePlatform', repo: 'composable_kernel-internal') {
+    gitStatusWrapper(credentialsId: "${status_wrapper_creds}", gitHubContext: "Jenkins - ${variant}", account: 'ROCm', repo: 'composable_kernel-internal') {
        try {
            (retimage, image) = getDockerImage(conf)
        }
@@ -666,20 +667,20 @@ pipeline {
         description: "Force building docker image (default: false), set to true if docker image needs to be updated.")
         string(
             name: 'ROCMVERSION',
-            defaultValue: '5.7',
-            description: 'Specify which ROCM version to use: 5.7 (default).')
+            defaultValue: '6.0',
+            description: 'Specify which ROCM version to use: 6.0 (default).')
         string(
             name: 'COMPILER_VERSION',
             defaultValue: '',
-            description: 'Specify which version of compiler to use: release, amd-stg-open, or leave blank (default).')
+            description: 'Specify which version of compiler to use: release, amd-staging, amd-mainline-open, or leave blank (default).')
         string(
             name: 'COMPILER_COMMIT',
             defaultValue: '',
-            description: 'Specify which commit of compiler branch to use: leave blank to use the latest commit, or use 5541927df00eabd6a110180170eca7785d436ee3 (default) commit of amd-stg-open branch.')
+            description: 'Specify which commit of compiler branch to use: leave blank to use the latest commit (default), or use some specific commit of llvm-project branch.')
         string(
             name: 'BUILD_COMPILER',
-            defaultValue: 'hipcc',
-            description: 'Specify whether to build CK with hipcc (default) or with clang.')
+            defaultValue: 'clang',
+            description: 'Specify whether to build CK with hipcc or with clang (default).')
         booleanParam(
             name: "RUN_FULL_QA",
             defaultValue: false,
@@ -694,12 +695,20 @@ pipeline {
             description: "Use the CK build to verify hipTensor build and tests (default: ON)")
         string(
             name: 'hipTensor_branch',
-            defaultValue: 'develop',
-            description: 'Specify which branch of hipTensor to use (default: develop)')
+            defaultValue: 'mainline',
+            description: 'Specify which branch of hipTensor to use (default: mainline)')
         booleanParam(
             name: "USE_SCCACHE",
             defaultValue: true,
             description: "Use the sccache for building CK (default: ON)")
+        booleanParam(
+            name: "RUN_CPPCHECK",
+            defaultValue: false,
+            description: "Run the cppcheck static analysis (default: OFF)")
+        booleanParam(
+            name: "RUN_PERFORMANCE_TESTS",
+            defaultValue: false,
+            description: "Run the performance tests (default: OFF)")
     }
     environment{
         dbuser = "${dbuser}"
@@ -726,7 +735,39 @@ pipeline {
         }
         stage("Static checks") {
             parallel{
+                stage('Clang Format and Cppcheck') {
+                    when {
+                        beforeAgent true
+                        expression { params.RUN_CPPCHECK.toBoolean() }
+                    }
+                    agent{ label rocmnode("nogpu") }
+                    environment{
+                        execute_cmd = "find .. -not -path \'*.git*\' -iname \'*.h\' \
+                                        -o -not -path \'*.git*\' -iname \'*.hpp\' \
+                                        -o -not -path \'*.git*\' -iname \'*.cpp\' \
+                                        -o -iname \'*.h.in\' \
+                                        -o -iname \'*.hpp.in\' \
+                                        -o -iname \'*.cpp.in\' \
+                                        -o -iname \'*.cl\' \
+                                        | grep -v 'build/' \
+                                        | xargs -n 1 -P 1 -I{} -t sh -c \'clang-format-12 -style=file {} | diff - {}\' && \
+                                        /cppcheck/build/bin/cppcheck ../* -v -j \$(nproc) -I ../include -I ../profiler/include -I ../library/include \
+                                        -D CK_ENABLE_FP64 -D CK_ENABLE_FP32 -D CK_ENABLE_FP16 -D CK_ENABLE_FP8 -D CK_ENABLE_BF16 -D CK_ENABLE_BF8 -D CK_ENABLE_INT8 -D DL_KERNELS \
+                                        -D __gfx908__ -D __gfx90a__ -D __gfx940__ -D __gfx941__ -D __gfx942__ -D __gfx1030__ -D __gfx1100__ -D __gfx1101__ -D __gfx1102__ \
+                                        -U __gfx803__ -U __gfx900__ -U __gfx906__ -U CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4 \
+                                        --file-filter=*.cpp --force --enable=all --output-file=ck_cppcheck.log"
+                    }
+                    steps{
+                        buildHipClangJobAndReboot(setup_cmd: "", build_cmd: "", execute_cmd: execute_cmd, no_reboot:true)
+                        archiveArtifacts "build/ck_cppcheck.log"
+                        cleanWs()
+                    }
+                }
                 stage('Clang Format') {
+                    when {
+                        beforeAgent true
+                        expression { !params.RUN_CPPCHECK.toBoolean() }
+                    }
                     agent{ label rocmnode("nogpu") }
                     environment{
                         execute_cmd = "find .. -not -path \'*.git*\' -iname \'*.h\' \
@@ -759,8 +800,15 @@ pipeline {
                     }
                     agent{ label rocmnode("gfx908 || gfx90a") }
                     environment{
-                        setup_args = """ -DCMAKE_INSTALL_PREFIX=../install -DGPU_TARGETS="gfx908;gfx90a;gfx940;gfx941;gfx942" -DCMAKE_EXE_LINKER_FLAGS=" -L ${env.WORKSPACE}/script -T hip_fatbin_insert " """
-                        execute_args = """ cd ../client_example && rm -rf build && mkdir build && cd build && cmake -D CMAKE_PREFIX_PATH="${env.WORKSPACE}/install;/opt/rocm" -DGPU_TARGETS="gfx908;gfx90a;gfx940;gfx941;gfx942" -D CMAKE_CXX_COMPILER="${build_compiler()}" .. && make -j """
+                        setup_args = """ -DCMAKE_INSTALL_PREFIX=../install \
+                                         -DGPU_TARGETS="gfx908;gfx90a;gfx940;gfx941;gfx942" \
+                                         -DCMAKE_EXE_LINKER_FLAGS=" -L ${env.WORKSPACE}/script -T hip_fatbin_insert " \
+                                         -DCMAKE_CXX_FLAGS=" -O3 " """
+                        execute_args = """ cd ../client_example && rm -rf build && mkdir build && cd build && \
+                                           cmake -DCMAKE_PREFIX_PATH="${env.WORKSPACE}/install;/opt/rocm" \
+                                           -DGPU_TARGETS="gfx908;gfx90a;gfx940;gfx941;gfx942" \
+                                           -DCMAKE_CXX_COMPILER="${build_compiler()}" \
+                                           -DCMAKE_CXX_FLAGS=" -O3 " .. && make -j """
                     }
                     steps{
                         Build_CK_and_Reboot(setup_args: setup_args, config_targets: "install", no_reboot:true, build_type: 'Release', execute_cmd: execute_args, prefixpath: '/usr/local')
@@ -775,8 +823,12 @@ pipeline {
                     }
                     agent{ label rocmnode("gfx908 || gfx90a") }
                     environment{
-                        setup_args = """ -DCMAKE_INSTALL_PREFIX=../install -DGPU_TARGETS="gfx908;gfx90a" """
-                        execute_args = """ cd ../client_example && rm -rf build && mkdir build && cd build && cmake -D CMAKE_PREFIX_PATH="${env.WORKSPACE}/install;/opt/rocm" -DGPU_TARGETS="gfx908;gfx90a" -D CMAKE_CXX_COMPILER="${build_compiler()}" .. && make -j """
+                        setup_args = """ -DCMAKE_INSTALL_PREFIX=../install -DGPU_TARGETS="gfx908;gfx90a" -DCMAKE_CXX_FLAGS=" -O3 " """
+                        execute_args = """ cd ../client_example && rm -rf build && mkdir build && cd build && \
+                                           cmake -DCMAKE_PREFIX_PATH="${env.WORKSPACE}/install;/opt/rocm" \
+                                           -DGPU_TARGETS="gfx908;gfx90a" \
+                                           -DCMAKE_CXX_COMPILER="${build_compiler()}" \
+                                           -DCMAKE_CXX_FLAGS=" -O3 " .. && make -j """
                     }
                     steps{
                         Build_CK_and_Reboot(setup_args: setup_args, config_targets: "install", no_reboot:true, build_type: 'Release', execute_cmd: execute_args, prefixpath: '/usr/local')
@@ -791,8 +843,12 @@ pipeline {
                     }
                     agent{ label rocmnode("navi21") }
                     environment{
-                        setup_args = """ -DCMAKE_INSTALL_PREFIX=../install -DGPU_TARGETS="gfx1030" -DDL_KERNELS=ON """
-                        execute_args = """ cd ../client_example && rm -rf build && mkdir build && cd build && cmake -D CMAKE_PREFIX_PATH="${env.WORKSPACE}/install;/opt/rocm" -DGPU_TARGETS="gfx1030" -D CMAKE_CXX_COMPILER="${build_compiler()}" .. && make -j """
+                        setup_args = """ -DCMAKE_INSTALL_PREFIX=../install -DGPU_TARGETS="gfx1030" -DDL_KERNELS=ON -DCMAKE_CXX_FLAGS=" -O3 " """
+                        execute_args = """ cd ../client_example && rm -rf build && mkdir build && cd build && \
+                                           cmake -DCMAKE_PREFIX_PATH="${env.WORKSPACE}/install;/opt/rocm" \
+                                           -DGPU_TARGETS="gfx1030" \
+                                           -DCMAKE_CXX_COMPILER="${build_compiler()}" \
+                                           -DCMAKE_CXX_FLAGS=" -O3 " .. && make -j """
                     }
                     steps{
                         Build_CK_and_Reboot(setup_args: setup_args, config_targets: "install", no_reboot:true, build_type: 'Release', execute_cmd: execute_args, prefixpath: '/usr/local')
@@ -807,8 +863,12 @@ pipeline {
                     }
                     agent{ label rocmnode("navi32") }
                     environment{
-                        setup_args = """ -DCMAKE_INSTALL_PREFIX=../install -DGPU_TARGETS="gfx1101" -DDL_KERNELS=ON """
-                        execute_args = """ cd ../client_example && rm -rf build && mkdir build && cd build && cmake -D CMAKE_PREFIX_PATH="${env.WORKSPACE}/install;/opt/rocm" -DGPU_TARGETS="gfx1101" -DDL_KERNELS=ON -D CMAKE_CXX_COMPILER="${build_compiler()}" .. && make -j """
+                        setup_args = """ -DCMAKE_INSTALL_PREFIX=../install -DGPU_TARGETS="gfx1101" -DDL_KERNELS=ON -DCMAKE_CXX_FLAGS=" -O3 " """
+                        execute_args = """ cd ../client_example && rm -rf build && mkdir build && cd build && \
+                                           cmake -DCMAKE_PREFIX_PATH="${env.WORKSPACE}/install;/opt/rocm" \
+                                           -DGPU_TARGETS="gfx1101" \
+                                           -DCMAKE_CXX_COMPILER="${build_compiler()}" \
+                                           -DCMAKE_CXX_FLAGS=" -O3 " .. && make -j """
                     }
                     steps{
                         Build_CK_and_Reboot(setup_args: setup_args, config_targets: "install", no_reboot:true, build_type: 'Release', execute_cmd: execute_args, prefixpath: '/usr/local')
@@ -826,7 +886,7 @@ pipeline {
                 {
                     when {
                         beforeAgent true
-                        expression { !params.RUN_FULL_QA.toBoolean() }
+                        expression { !params.RUN_FULL_QA.toBoolean() && params.RUN_PERFORMANCE_TESTS.toBoolean() }
                     }
                     options { retry(2) }
                     agent{ label rocmnode("gfx908 || gfx90a")}
@@ -842,7 +902,7 @@ pipeline {
                 {
                     when {
                         beforeAgent true
-                        expression { params.RUN_FULL_QA.toBoolean() }
+                        expression { params.RUN_FULL_QA.toBoolean() && params.RUN_PERFORMANCE_TESTS.toBoolean() }
                     }
                     options { retry(2) }
                     agent{ label rocmnode("gfx90a")}
@@ -861,6 +921,10 @@ pipeline {
         parallel
         {
             stage("Process results"){
+                when {
+                    beforeAgent true
+                    expression { params.RUN_PERFORMANCE_TESTS.toBoolean() }
+                }
                 agent { label 'mici' }
                 steps{
                     process_results()
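The static-check stages above run clang-format-12 and cppcheck inside the CI container. A rough local approximation of the formatting check, assuming clang-format-12 is on the PATH and the command is run from the repository root:

```bash
find . -not -path '*.git*' \( -iname '*.h' -o -iname '*.hpp' -o -iname '*.cpp' \) \
  | grep -v 'build/' \
  | xargs -n 1 -P 1 -I{} -t sh -c 'clang-format-12 -style=file {} | diff - {}'
```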
LICENSE
@@ -7,7 +7,7 @@ Copyright (c) 2020 , Advanced Micro Devices, Inc. (Xiaoyan Zhou)
 Copyright (c) 2021-2022, Advanced Micro Devices, Inc. (Jianfeng Yan)

 SPDX-License-Identifier: MIT
-Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
+Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -32,7 +32,6 @@ python3 -m sphinx -T -E -b html -d _build/doctrees -D language=en . _build/html
 ```

 You can find a list of our developers and contributors on our [Contributors](/CONTRIBUTORS.md) page.
-page.

 ```note
 If you use CK, cite us as follows:
@@ -71,7 +70,7 @@ Docker images are available on [DockerHub](https://hub.docker.com/r/rocm/composa
 3. Clone CK source code from the GitHub repository and start the build:

 ```bash
-git clone https://github.com/ROCmSoftwarePlatform/composable_kernel.git && \
+git clone https://github.com/ROCm/composable_kernel.git && \
 cd composable_kernel && \
 mkdir build && \
 cd build
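Continuing from the clone step above, a minimal configure-and-build sketch (the GPU target is an arbitrary example; adjust it to the hardware in use):

```bash
cmake -D CMAKE_PREFIX_PATH=/opt/rocm \
      -D CMAKE_CXX_COMPILER=/opt/rocm/bin/hipcc \
      -D GPU_TARGETS="gfx90a" ..
make -j
```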
@@ -185,6 +185,7 @@ int main(int argc, char* argv[])
               << best_gb_per_sec << " GB/s, " << best_op_name << std::endl;

     // run the best intance
+    if(found)
     {
         auto& op_ptr = op_ptrs[best_op_id];

@@ -204,6 +204,7 @@ int main(int argc, char* argv[])
               << best_gb_per_sec << " GB/s, " << best_op_name << std::endl;

     // run the best intance
+    if(found)
     {
         auto& op_ptr = op_ptrs[best_op_id];

@@ -197,6 +197,7 @@ int main(int argc, char* argv[])
               << best_gb_per_sec << " GB/s, " << best_op_name << std::endl;

     // run the best intance
+    if(found)
     {
         auto& op_ptr = op_ptrs[best_op_id];

@@ -190,6 +190,7 @@ int main(int argc, char* argv[])
               << best_gb_per_sec << " GB/s, " << best_op_name << std::endl;

     // run the best intance
+    if(found)
     {
         auto& op_ptr = op_ptrs[best_op_id];

@@ -200,6 +200,7 @@ int main(int argc, char* argv[])
               << best_op_name << std::endl;

     // run the best intance
+    if(found)
     {
         auto& op_ptr = op_ptrs[best_op_id];
@@ -1,3 +1,9 @@
+add_executable(client_layernorm2d_bwd_data layernorm2d_bwd_data.cpp)
+target_link_libraries(client_layernorm2d_bwd_data PRIVATE composable_kernel::device_other_operations)
+
+add_executable(client_layernorm2d_bwd_gamma_beta layernorm2d_bwd_gamma_beta.cpp)
+target_link_libraries(client_layernorm2d_bwd_gamma_beta PRIVATE composable_kernel::device_other_operations)
+
 add_executable(client_layernorm2d_fwd layernorm2d_fwd.cpp)
 target_link_libraries(client_layernorm2d_fwd PRIVATE composable_kernel::device_other_operations)
client_example/05_layernorm/layernorm2d_bwd_data.cpp
@@ -0,0 +1,170 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include <iomanip>
#include <vector>
#include <iostream>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/device/device_normalization_bwd_data.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

#include "ck/library/tensor_operation_instance/gpu/layernorm_bwd_data.hpp"

using DYDataType         = float;
using XDataType          = float;
using GammaDataType      = float;
using MeanInvStdDataType = float;
using DXDataType         = float;

constexpr int Rank         = 2;
constexpr int NumReduceDim = 1;

struct SimpleDeviceMem
{
    SimpleDeviceMem() = delete;

    SimpleDeviceMem(std::size_t mem_size) : p_mem_{}
    {
        (void)hipMalloc(static_cast<void**>(&p_mem_), mem_size);
    }

    void* GetDeviceBuffer() { return p_mem_; }

    ~SimpleDeviceMem() { (void)hipFree(p_mem_); }

    void* p_mem_;
};

int main(int argc, char* argv[])
{
    ck::index_t M = 1024;
    ck::index_t N = 1024;

    SimpleDeviceMem dy_dev(sizeof(DYDataType) * M * N);
    SimpleDeviceMem x_dev(sizeof(XDataType) * M * N);
    SimpleDeviceMem gamma_dev(sizeof(GammaDataType) * N);
    SimpleDeviceMem mean_dev(sizeof(MeanInvStdDataType) * M);
    SimpleDeviceMem inv_std_dev(sizeof(MeanInvStdDataType) * M);
    SimpleDeviceMem dx_dev(sizeof(DXDataType) * M * N);

    using DeviceOp = ck::tensor_operation::device::DeviceNormalizationBwdData<DYDataType,
                                                                              XDataType,
                                                                              GammaDataType,
                                                                              MeanInvStdDataType,
                                                                              DXDataType,
                                                                              Rank,
                                                                              NumReduceDim>;

    // get device op instances
    const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
        DeviceOp>::GetInstances();

    std::cout << "found " << op_ptrs.size() << " instances" << std::endl;

    std::string best_op_name;
    bool found            = false;
    int best_op_id        = -1;
    float best_ave_time   = std::numeric_limits<float>::max();
    float best_gb_per_sec = 0;

    // profile device operation instances
    std::cout << "Run all instances and do timing" << std::endl;

    for(int i = 0; i < op_ptrs.size(); ++i)
    {
        auto& op_ptr = op_ptrs[i];

        auto argument_ptr = op_ptr->MakeArgumentPointer({M, N}, // lengths
                                                        {N, 1}, // dyStrides
                                                        {N, 1}, // xStrides
                                                        {0, 1}, // gammaStrides
                                                        {1, 0}, // meanStrides
                                                        {1, 0}, // invStdStrides
                                                        {N, 1}, // dxStrides
                                                        {1},    // reduceDims
                                                        dy_dev.GetDeviceBuffer(),
                                                        x_dev.GetDeviceBuffer(),
                                                        gamma_dev.GetDeviceBuffer(),
                                                        mean_dev.GetDeviceBuffer(),
                                                        inv_std_dev.GetDeviceBuffer(),
                                                        dx_dev.GetDeviceBuffer());

        auto invoker_ptr = op_ptr->MakeInvokerPointer();

        std::string op_name = op_ptr->GetTypeString();

        if(op_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            size_t workspace_sz = op_ptr->GetWorkSpaceSize(argument_ptr.get());
            SimpleDeviceMem workspace(workspace_sz);
            op_ptr->SetWorkSpacePointer(argument_ptr.get(), workspace.GetDeviceBuffer());

            float ave_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, true});

            std::size_t num_byte = sizeof(DYDataType) * M * N + sizeof(XDataType) * M * N +
                                   sizeof(GammaDataType) * N + sizeof(MeanInvStdDataType) * M * 2 +
                                   sizeof(DXDataType) * M * N;

            float gb_per_sec = num_byte / 1.E6 / ave_time;

            std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << gb_per_sec << " GB/s, "
                      << op_name << std::endl;

            if(ave_time < best_ave_time)
            {
                found           = true;
                best_op_id      = i;
                best_op_name    = op_name;
                best_ave_time   = ave_time;
                best_gb_per_sec = gb_per_sec;
            }
        }
        else
        {
            std::cout << op_name << " does not support this problem" << std::endl;
        }
    }

    std::cout << "Best Perf: " << best_ave_time << " ms, " << best_gb_per_sec << " GB/s, "
              << best_op_name << std::endl;

    // run the best intance
    if(found)
    {
        auto& op_ptr = op_ptrs[best_op_id];
        std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString()
                  << std::endl;

        auto argument_ptr = op_ptr->MakeArgumentPointer({M, N}, // lengths
                                                        {N, 1}, // dyStrides
                                                        {N, 1}, // xStrides
                                                        {0, 1}, // gammaStrides
                                                        {1, 0}, // meanStrides
                                                        {1, 0}, // invStdStrides
                                                        {N, 1}, // dxStrides
                                                        {1},    // reduceDims
                                                        dy_dev.GetDeviceBuffer(),
                                                        x_dev.GetDeviceBuffer(),
                                                        gamma_dev.GetDeviceBuffer(),
                                                        mean_dev.GetDeviceBuffer(),
                                                        inv_std_dev.GetDeviceBuffer(),
                                                        dx_dev.GetDeviceBuffer());

        auto invoker_ptr = op_ptr->MakeInvokerPointer();

        if(op_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            size_t workspace_sz = op_ptr->GetWorkSpaceSize(argument_ptr.get());
            SimpleDeviceMem workspace(workspace_sz);
            op_ptr->SetWorkSpacePointer(argument_ptr.get(), workspace.GetDeviceBuffer());

            invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, false});
        }

        std::cout << "Done" << std::endl;
    }

    return 0;
}
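For reference, with the default M = N = 1024 and float data, num_byte in this example is 3*M*N*4 + N*4 + 2*M*4 ≈ 12.6 MB; the reported bandwidth is num_byte / 1.E6 divided by the kernel time in milliseconds, so an instance that ran in, say, 0.05 ms would report roughly 252 GB/s.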
client_example/05_layernorm/layernorm2d_bwd_gamma_beta.cpp
@@ -0,0 +1,171 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include <iomanip>
#include <vector>
#include <iostream>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/device_normalization_bwd_gamma_beta.hpp"

#include "ck/library/tensor_operation_instance/gpu/layernorm_bwd_gamma_beta.hpp"

using DYDataType         = float;
using XDataType          = float;
using GammaDataType      = float;
using MeanInvStdDataType = float;
using DGammaDataType     = float;
using DBetaDataType      = float;

constexpr int Rank         = 2;
constexpr int NumReduceDim = 1;

struct SimpleDeviceMem
{
    SimpleDeviceMem() = delete;

    SimpleDeviceMem(std::size_t mem_size) : p_mem_{}
    {
        (void)hipMalloc(static_cast<void**>(&p_mem_), mem_size);
    }

    void* GetDeviceBuffer() { return p_mem_; }

    ~SimpleDeviceMem() { (void)hipFree(p_mem_); }

    void* p_mem_;
};

int main(int argc, char* argv[])
{
    ck::index_t M = 1024;
    ck::index_t N = 1024;

    SimpleDeviceMem dy_dev(sizeof(DYDataType) * M * N);
    SimpleDeviceMem x_dev(sizeof(XDataType) * M * N);
    SimpleDeviceMem mean_dev(sizeof(MeanInvStdDataType) * M);
    SimpleDeviceMem inv_std_dev(sizeof(MeanInvStdDataType) * M);
    SimpleDeviceMem dgamma_dev(sizeof(DGammaDataType) * N);
    SimpleDeviceMem dbeta_dev(sizeof(DBetaDataType) * N);

    using DeviceOp =
        ck::tensor_operation::device::DeviceNormalizationBwdGammaBeta<DYDataType,
                                                                      XDataType,
                                                                      MeanInvStdDataType,
                                                                      DGammaDataType,
                                                                      DBetaDataType,
                                                                      Rank,
                                                                      NumReduceDim>;

    // get device op instances
    const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
        DeviceOp>::GetInstances();

    std::cout << "found " << op_ptrs.size() << " instances" << std::endl;

    std::string best_op_name;
    bool found            = false;
    int best_op_id        = -1;
    float best_ave_time   = std::numeric_limits<float>::max();
    float best_gb_per_sec = 0;

    // profile device operation instances
    std::cout << "Run all instances and do timing" << std::endl;

    std::size_t num_bytes = sizeof(DYDataType) * M * N + sizeof(XDataType) * M * N +
                            sizeof(MeanInvStdDataType) * M * 2 + sizeof(DGammaDataType) * N +
                            sizeof(DBetaDataType) * N;

    for(int i = 0; i < op_ptrs.size(); ++i)
    {
        auto& op_ptr = op_ptrs[i];

        auto argument_ptr = op_ptr->MakeArgumentPointer({M, N}, // inLengths
                                                        {N, 1}, // dyStrides
                                                        {N, 1}, // xStrides
                                                        {1, 0}, // meanStrides
                                                        {1, 0}, // invStdStrides
                                                        {N},    // outLengths
                                                        {1},    // dgammaStrides
                                                        {1},    // dbetaStrides
                                                        {0},    // reduceDims
                                                        dy_dev.GetDeviceBuffer(),
                                                        x_dev.GetDeviceBuffer(),
                                                        mean_dev.GetDeviceBuffer(),
                                                        inv_std_dev.GetDeviceBuffer(),
                                                        dgamma_dev.GetDeviceBuffer(),
                                                        dbeta_dev.GetDeviceBuffer());

        auto invoker_ptr = op_ptr->MakeInvokerPointer();

        std::string op_name = op_ptr->GetTypeString();

        if(op_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            size_t workspace_sz = op_ptr->GetWorkSpaceSize(argument_ptr.get());
            SimpleDeviceMem workspace(workspace_sz);
            op_ptr->SetWorkSpacePointer(argument_ptr.get(), workspace.GetDeviceBuffer());

            float ave_time   = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, true});
            float gb_per_sec = num_bytes / 1.E6 / ave_time;

            std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << gb_per_sec << " GB/s, "
                      << op_name << std::endl;

            if(ave_time < best_ave_time)
            {
                found           = true;
                best_op_id      = i;
                best_op_name    = op_name;
                best_ave_time   = ave_time;
                best_gb_per_sec = gb_per_sec;
            }
        }
        else
        {
            std::cout << op_name << " does not support this problem" << std::endl;
        }
    }

    std::cout << "Best Perf: " << best_ave_time << " ms, " << best_gb_per_sec << " GB/s, "
              << best_op_name << std::endl;

    // run the best intance
    if(found)
    {
        auto& op_ptr = op_ptrs[best_op_id];
        std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString()
                  << std::endl;

        auto argument_ptr = op_ptr->MakeArgumentPointer({M, N}, // inLengths
                                                        {N, 1}, // dyStrides
                                                        {N, 1}, // xStrides
                                                        {1, 0}, // meanStrides
                                                        {1, 0}, // invStdStrides
                                                        {N},    // outLengths
                                                        {1},    // dgammaStrides
                                                        {1},    // dbetaStrides
                                                        {0},    // reduceDims
                                                        dy_dev.GetDeviceBuffer(),
                                                        x_dev.GetDeviceBuffer(),
                                                        mean_dev.GetDeviceBuffer(),
                                                        inv_std_dev.GetDeviceBuffer(),
                                                        dgamma_dev.GetDeviceBuffer(),
                                                        dbeta_dev.GetDeviceBuffer());

        auto invoker_ptr = op_ptr->MakeInvokerPointer();

        if(op_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            size_t workspace_sz = op_ptr->GetWorkSpaceSize(argument_ptr.get());
            SimpleDeviceMem workspace(workspace_sz);
            op_ptr->SetWorkSpacePointer(argument_ptr.get(), workspace.GetDeviceBuffer());

            invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, false});
        }

        std::cout << "Done" << std::endl;
    }

    return 0;
}
@@ -16,7 +16,7 @@ using XDataType = ck::half_t;
using GammaDataType = ck::half_t;
using BetaDataType = ck::half_t;
using YDataType = ck::half_t;
using SaveMeanInvStdDataType = float;
using SaveMeanInvStdDataType = ck::half_t;
using PassThrough = ck::tensor_operation::element_wise::PassThrough;

#define SAVE_MEAN_INV_STD

@@ -150,6 +150,7 @@ int main(int argc, char* argv[])
              << best_op_name << std::endl;

    // run the best instance
    if(found)
    {
        auto& op_ptr = op_ptrs[best_op_id];
        std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString()

@@ -16,7 +16,7 @@ using XDataType = ck::half_t;
using GammaDataType = ck::half_t;
using BetaDataType = ck::half_t;
using YDataType = ck::half_t;
using SaveMeanInvStdDataType = float;
using SaveMeanInvStdDataType = ck::half_t;
using PassThrough = ck::tensor_operation::element_wise::PassThrough;

#define SAVE_MEAN_INV_STD

@@ -155,6 +155,7 @@ int main(int argc, char* argv[])
              << best_op_name << std::endl;

    // run the best instance
    if(found)
    {
        auto& op_ptr = op_ptrs[best_op_id];
        std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString()

@@ -140,6 +140,7 @@ int main(int argc, char* argv[])
              << best_op_name << std::endl;

    // run the best instance
    if(found)
    {
        auto& op_ptr = op_ptrs[best_op_id];
        std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString()

@@ -142,6 +142,7 @@ int main()
              << best_op_name << std::endl;

    // run the best instance
    if(found)
    {
        auto& op_ptr = op_ptrs[best_op_id];
        std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString()

@@ -204,6 +204,7 @@ int main(int argc, char* argv[])
              << best_gb_per_sec << " GB/s, " << best_op_name << std::endl;

    // run the best instance
    if(found)
    {
        auto& op_ptr = op_ptrs[best_op_id];

@@ -1,2 +1,8 @@
add_executable(client_groupnorm_swish groupnorm_swish.cpp)
target_link_libraries(client_groupnorm_swish PRIVATE composable_kernel::device_other_operations)
add_executable(client_groupnorm_bwd_data groupnorm_bwd_data.cpp)
target_link_libraries(client_groupnorm_bwd_data PRIVATE composable_kernel::device_other_operations)

add_executable(client_groupnorm_bwd_gamma_beta groupnorm_bwd_gamma_beta.cpp)
target_link_libraries(client_groupnorm_bwd_gamma_beta PRIVATE composable_kernel::device_other_operations)

add_executable(client_groupnorm_swish_fwd groupnorm_swish_fwd.cpp)
target_link_libraries(client_groupnorm_swish_fwd PRIVATE composable_kernel::device_other_operations)
client_example/18_groupnorm/groupnorm_bwd_data.cpp (new file, 182 lines)
@@ -0,0 +1,182 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include <iomanip>
#include <vector>
#include <iostream>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/device/device_normalization_bwd_data.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

#include "ck/library/tensor_operation_instance/gpu/groupnorm_bwd_data.hpp"

using DYDataType = float;
using XDataType = float;
using GammaDataType = float;
using MeanInvStdDataType = float;
using DXDataType = float;

constexpr int Rank = 5;
constexpr int NumReduceDim = 3;

struct SimpleDeviceMem
{
    SimpleDeviceMem() = delete;

    SimpleDeviceMem(std::size_t mem_size) : p_mem_{}
    {
        (void)hipMalloc(static_cast<void**>(&p_mem_), mem_size);
    }

    void* GetDeviceBuffer() { return p_mem_; }

    ~SimpleDeviceMem() { (void)hipFree(p_mem_); }

    void* p_mem_;
};

int main(int argc, char* argv[])
{
    ck::index_t N = 32;
    ck::index_t H = 16;
    ck::index_t W = 16;
    ck::index_t G = 64;
    ck::index_t C = 128;

    std::size_t length = N * H * W * G * C;

    std::vector<ck::index_t> strideDy = {H * W * G * C, W * G * C, G * C, C, 1};
    std::vector<ck::index_t> strideX = strideDy;
    std::vector<ck::index_t> strideDx = strideDy;

    std::vector<ck::index_t> strideGamma = {0, 0, 0, C, 1};
    std::vector<ck::index_t> strideMeanInvStd = {G, 0, 0, 1, 0};

    SimpleDeviceMem dy_dev(sizeof(DYDataType) * length);
    SimpleDeviceMem x_dev(sizeof(XDataType) * length);
    SimpleDeviceMem gamma_dev(sizeof(GammaDataType) * G * C);
    SimpleDeviceMem mean_dev(sizeof(MeanInvStdDataType) * N * G);
    SimpleDeviceMem inv_std_dev(sizeof(MeanInvStdDataType) * N * G);
    SimpleDeviceMem dx_dev(sizeof(DXDataType) * length);

    using DeviceOp = ck::tensor_operation::device::DeviceNormalizationBwdData<DYDataType,
                                                                              XDataType,
                                                                              GammaDataType,
                                                                              MeanInvStdDataType,
                                                                              DXDataType,
                                                                              Rank,
                                                                              NumReduceDim>;

    // get device op instances
    const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
        DeviceOp>::GetInstances();

    std::cout << "found " << op_ptrs.size() << " instances" << std::endl;

    std::string best_op_name;
    bool found = false;
    int best_op_id = -1;
    float best_ave_time = std::numeric_limits<float>::max();
    float best_gb_per_sec = 0;

    // profile device operation instances
    std::cout << "Run all instances and do timing" << std::endl;

    for(int i = 0; i < op_ptrs.size(); ++i)
    {
        auto& op_ptr = op_ptrs[i];
        auto argument_ptr = op_ptr->MakeArgumentPointer({N, H, W, G, C},
                                                        strideDy,
                                                        strideX,
                                                        strideGamma,
                                                        strideMeanInvStd,
                                                        strideMeanInvStd,
                                                        strideDx,
                                                        {1, 2, 4}, // reduceDims
                                                        dy_dev.GetDeviceBuffer(),
                                                        x_dev.GetDeviceBuffer(),
                                                        gamma_dev.GetDeviceBuffer(),
                                                        mean_dev.GetDeviceBuffer(),
                                                        inv_std_dev.GetDeviceBuffer(),
                                                        dx_dev.GetDeviceBuffer());

        auto invoker_ptr = op_ptr->MakeInvokerPointer();

        std::string op_name = op_ptr->GetTypeString();

        if(op_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            size_t workspace_sz = op_ptr->GetWorkSpaceSize(argument_ptr.get());
            SimpleDeviceMem workspace(workspace_sz);
            op_ptr->SetWorkSpacePointer(argument_ptr.get(), workspace.GetDeviceBuffer());

            float ave_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, true});

            std::size_t num_byte = sizeof(DYDataType) * length + sizeof(XDataType) * length +
                                   sizeof(GammaDataType) * G * C +
                                   sizeof(MeanInvStdDataType) * N * G * 2 +
                                   sizeof(DXDataType) * length;

            float gb_per_sec = num_byte / 1.E6 / ave_time;

            std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << gb_per_sec << " GB/s, "
                      << op_name << std::endl;

            if(ave_time < best_ave_time)
            {
                found = true;
                best_op_id = i;
                best_op_name = op_name;
                best_ave_time = ave_time;
                best_gb_per_sec = gb_per_sec;
            }
        }
        else
        {
            std::cout << op_name << " does not support this problem" << std::endl;
        }
    }

    // run the best instance
    if(found)
    {
        std::cout << "Best Perf: " << best_ave_time << " ms, " << best_gb_per_sec << " GB/s, "
                  << best_op_name << std::endl;

        auto& op_ptr = op_ptrs[best_op_id];
        std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString()
                  << std::endl;

        auto argument_ptr = op_ptr->MakeArgumentPointer({N, H, W, G, C},
                                                        strideDy,
                                                        strideX,
                                                        strideGamma,
                                                        strideMeanInvStd,
                                                        strideMeanInvStd,
                                                        strideDx,
                                                        {1, 2, 4}, // reduceDims
                                                        dy_dev.GetDeviceBuffer(),
                                                        x_dev.GetDeviceBuffer(),
                                                        gamma_dev.GetDeviceBuffer(),
                                                        mean_dev.GetDeviceBuffer(),
                                                        inv_std_dev.GetDeviceBuffer(),
                                                        dx_dev.GetDeviceBuffer());

        auto invoker_ptr = op_ptr->MakeInvokerPointer();

        if(op_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            size_t workspace_sz = op_ptr->GetWorkSpaceSize(argument_ptr.get());
            SimpleDeviceMem workspace(workspace_sz);
            op_ptr->SetWorkSpacePointer(argument_ptr.get(), workspace.GetDeviceBuffer());

            invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, false});
        }

        std::cout << "Done" << std::endl;
    }

    return 0;
}
client_example/18_groupnorm/groupnorm_bwd_gamma_beta.cpp (new file, 180 lines)
@@ -0,0 +1,180 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include <iomanip>
#include <vector>
#include <iostream>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/device_normalization_bwd_gamma_beta.hpp"

#include "ck/library/tensor_operation_instance/gpu/groupnorm_bwd_gamma_beta.hpp"

using DYDataType = float;
using XDataType = float;
using GammaDataType = float;
using MeanInvStdDataType = float;
using DGammaDataType = float;
using DBetaDataType = float;

constexpr int Rank = 5;
constexpr int NumReduceDim = 3;

struct SimpleDeviceMem
{
    SimpleDeviceMem() = delete;

    SimpleDeviceMem(std::size_t mem_size) : p_mem_{}
    {
        (void)hipMalloc(static_cast<void**>(&p_mem_), mem_size);
    }

    void* GetDeviceBuffer() { return p_mem_; }

    ~SimpleDeviceMem() { (void)hipFree(p_mem_); }

    void* p_mem_;
};

int main(int argc, char* argv[])
{
    ck::index_t N = 32;
    ck::index_t H = 16;
    ck::index_t W = 16;
    ck::index_t G = 64;
    ck::index_t C = 128;

    std::size_t length = N * H * W * G * C;

    std::vector<ck::index_t> strideDy = {H * W * G * C, W * G * C, G * C, C, 1};
    std::vector<ck::index_t> strideX = strideDy;
    std::vector<ck::index_t> strideMeanInvStd = {G, 0, 0, 1, 0};
    std::vector<ck::index_t> strideDGammaBeta = {C, 1};

    SimpleDeviceMem dy_dev(sizeof(DYDataType) * length);
    SimpleDeviceMem x_dev(sizeof(XDataType) * length);
    SimpleDeviceMem mean_dev(sizeof(MeanInvStdDataType) * N * G);
    SimpleDeviceMem inv_std_dev(sizeof(MeanInvStdDataType) * N * G);
    SimpleDeviceMem dgamma_dev(sizeof(DGammaDataType) * G * C);
    SimpleDeviceMem dbeta_dev(sizeof(DBetaDataType) * G * C);

    using DeviceOp =
        ck::tensor_operation::device::DeviceNormalizationBwdGammaBeta<DYDataType,
                                                                      XDataType,
                                                                      MeanInvStdDataType,
                                                                      DGammaDataType,
                                                                      DBetaDataType,
                                                                      Rank,
                                                                      NumReduceDim>;

    // get device op instances
    const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
        DeviceOp>::GetInstances();

    std::cout << "found " << op_ptrs.size() << " instances" << std::endl;

    std::string best_op_name;
    bool found = false;
    int best_op_id = -1;
    float best_ave_time = std::numeric_limits<float>::max();
    float best_gb_per_sec = 0;

    // profile device operation instances
    std::cout << "Run all instances and do timing" << std::endl;

    std::size_t num_bytes = sizeof(DYDataType) * length + sizeof(XDataType) * length +
                            sizeof(GammaDataType) * G * C + sizeof(MeanInvStdDataType) * N * G * 2 +
                            sizeof(DGammaDataType) * G * C + sizeof(DBetaDataType) * G * C;

    for(int i = 0; i < op_ptrs.size(); ++i)
    {
        auto& op_ptr = op_ptrs[i];
        auto argument_ptr = op_ptr->MakeArgumentPointer({N, H, W, G, C},
                                                        strideDy,
                                                        strideX,
                                                        strideMeanInvStd,
                                                        strideMeanInvStd,
                                                        {G, C},
                                                        strideDGammaBeta,
                                                        strideDGammaBeta,
                                                        {0, 1, 2}, // reduceDims
                                                        dy_dev.GetDeviceBuffer(),
                                                        x_dev.GetDeviceBuffer(),
                                                        mean_dev.GetDeviceBuffer(),
                                                        inv_std_dev.GetDeviceBuffer(),
                                                        dgamma_dev.GetDeviceBuffer(),
                                                        dbeta_dev.GetDeviceBuffer());

        auto invoker_ptr = op_ptr->MakeInvokerPointer();

        std::string op_name = op_ptr->GetTypeString();

        if(op_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            size_t workspace_sz = op_ptr->GetWorkSpaceSize(argument_ptr.get());
            SimpleDeviceMem workspace(workspace_sz);
            op_ptr->SetWorkSpacePointer(argument_ptr.get(), workspace.GetDeviceBuffer());

            float ave_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, true});
            float gb_per_sec = num_bytes / 1.E6 / ave_time;

            std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << gb_per_sec << " GB/s, "
                      << op_name << std::endl;

            if(ave_time < best_ave_time)
            {
                found = true;
                best_op_id = i;
                best_op_name = op_name;
                best_ave_time = ave_time;
                best_gb_per_sec = gb_per_sec;
            }
        }
        else
        {
            std::cout << op_name << " does not support this problem" << std::endl;
        }
    }

    // run the best instance
    if(found)
    {
        std::cout << "Best Perf: " << best_ave_time << " ms, " << best_gb_per_sec << " GB/s, "
                  << best_op_name << std::endl;

        auto& op_ptr = op_ptrs[best_op_id];
        std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString()
                  << std::endl;

        auto argument_ptr = op_ptr->MakeArgumentPointer({N, H, W, G, C},
                                                        strideDy,
                                                        strideX,
                                                        strideMeanInvStd,
                                                        strideMeanInvStd,
                                                        {G, C},
                                                        strideDGammaBeta,
                                                        strideDGammaBeta,
                                                        {0, 1, 2}, // reduceDims
                                                        dy_dev.GetDeviceBuffer(),
                                                        x_dev.GetDeviceBuffer(),
                                                        mean_dev.GetDeviceBuffer(),
                                                        inv_std_dev.GetDeviceBuffer(),
                                                        dgamma_dev.GetDeviceBuffer(),
                                                        dbeta_dev.GetDeviceBuffer());

        auto invoker_ptr = op_ptr->MakeInvokerPointer();

        if(op_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            size_t workspace_sz = op_ptr->GetWorkSpaceSize(argument_ptr.get());
            SimpleDeviceMem workspace(workspace_sz);
            op_ptr->SetWorkSpacePointer(argument_ptr.get(), workspace.GetDeviceBuffer());

            invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, false});
        }

        std::cout << "Done" << std::endl;
    }

    return 0;
}
@@ -94,7 +94,6 @@ int main(int argc, char* argv[])

    SimpleDeviceMem in_device_buf(sizeof(InDataType) * in_tensor_size);
    SimpleDeviceMem out_device_buf(sizeof(OutDataType) * out_tensor_size);
    SimpleDeviceMem out_indices_device_buf(sizeof(IndexDataType) * out_tensor_size);

    using DeviceOp = ck::tensor_operation::device::DevicePoolFwd<InOutRank,
                                                                 WindowRank,

@@ -123,22 +122,22 @@ int main(int argc, char* argv[])

    for(int i = 0; i < op_ptrs.size(); ++i)
    {
        auto& op_ptr = op_ptrs[i];
        auto argument_ptr = op_ptr->MakeArgumentPointer(
            static_cast<InDataType*>(in_device_buf.GetDeviceBuffer()),
            static_cast<OutDataType*>(out_device_buf.GetDeviceBuffer()),
            static_cast<IndexDataType*>(out_indices_device_buf.GetDeviceBuffer()),
            in_length,
            window_spatial_lengths,
            out_length,
            in_tensor_stride,
            out_tensor_stride,
            out_tensor_stride,
            window_strides,
            window_dilations,
            input_left_pads,
            input_right_pads,
            {2, 3, 4});
        auto& op_ptr = op_ptrs[i];
        auto argument_ptr =
            op_ptr->MakeArgumentPointer(static_cast<InDataType*>(in_device_buf.GetDeviceBuffer()),
                                        static_cast<OutDataType*>(out_device_buf.GetDeviceBuffer()),
                                        nullptr,
                                        in_length,
                                        window_spatial_lengths,
                                        out_length,
                                        in_tensor_stride,
                                        out_tensor_stride,
                                        out_tensor_stride,
                                        window_strides,
                                        window_dilations,
                                        input_left_pads,
                                        input_right_pads,
                                        {2, 3, 4});

        auto invoker_ptr = op_ptr->MakeInvokerPointer();

@@ -184,21 +183,21 @@ int main(int argc, char* argv[])
        std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString()
                  << std::endl;

        auto argument_ptr = op_ptr->MakeArgumentPointer(
            static_cast<InDataType*>(in_device_buf.GetDeviceBuffer()),
            static_cast<OutDataType*>(out_device_buf.GetDeviceBuffer()),
            static_cast<IndexDataType*>(out_indices_device_buf.GetDeviceBuffer()),
            in_length,
            window_spatial_lengths,
            out_length,
            in_tensor_stride,
            out_tensor_stride,
            out_tensor_stride,
            window_strides,
            window_dilations,
            input_left_pads,
            input_right_pads,
            {2, 3, 4});
        auto argument_ptr =
            op_ptr->MakeArgumentPointer(static_cast<InDataType*>(in_device_buf.GetDeviceBuffer()),
                                        static_cast<OutDataType*>(out_device_buf.GetDeviceBuffer()),
                                        nullptr,
                                        in_length,
                                        window_spatial_lengths,
                                        out_length,
                                        in_tensor_stride,
                                        out_tensor_stride,
                                        out_tensor_stride,
                                        window_strides,
                                        window_dilations,
                                        input_left_pads,
                                        input_right_pads,
                                        {2, 3, 4});

        auto invoker_ptr = op_ptr->MakeInvokerPointer();

@@ -191,6 +191,7 @@ int main(int argc, char* argv[])
              << best_gb_per_sec << " GB/s, " << best_op_name << std::endl;

    // run the best instance
    if(found)
    {
        auto& op_ptr = op_ptrs[best_op_id];

@@ -117,6 +117,7 @@ int main()
              << best_op_name << std::endl;

    // run the best instance
    if(found)
    {
        auto& op_ptr = op_ptrs[best_op_id];
        std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString()

@@ -16,6 +16,7 @@
using InLayout = ck::tensor_layout::convolution::NDHWGC;
using WeiLayout = ck::tensor_layout::convolution::GKZYXC;
using OutLayout = ck::tensor_layout::convolution::NDHWGK;
using BiasLayout = ck::tensor_layout::convolution::G_K;
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using ScaleAddScaleAddRelu = ck::tensor_operation::element_wise::ScaleAddScaleAddRelu;

@@ -64,6 +65,9 @@ int execute_conv_fwd_scaleadd_scaleadd_relu()
    std::array<ck::index_t, 6> out_lengths{G, N, K, Do, Ho, Wo};
    std::array<ck::index_t, 6> out_strides{
        K, Do * Ho * Wo * G * K, 1, Ho * Wo * G * K, Wo * G * K, G * K};
    // Logical broadcast bias (we have to pass bias lengths in the same format as output - GNKDHW)
    std::array<ck::index_t, 6> bias_lengths{G, 1, K, 1, 1, 1};
    std::array<ck::index_t, 6> bias_strides{K, 0, 1, 0, 0, 0};

    std::array<ck::index_t, NumDimSpatial> filter_strides{1, 1, 1};
    std::array<ck::index_t, NumDimSpatial> filter_dilations{1, 1, 1};

@@ -74,13 +78,13 @@ int execute_conv_fwd_scaleadd_scaleadd_relu()
    SimpleDeviceMem wei(sizeof(WeiDataType) * G * K * Z * Y * X * C);
    SimpleDeviceMem out(sizeof(OutDataType) * N * Do * Ho * Wo * G * K);
    SimpleDeviceMem d0(sizeof(std::tuple_element_t<0, DDataTypes>) * N * Do * Ho * Wo * G * K);
    SimpleDeviceMem d1(sizeof(std::tuple_element_t<1, DDataTypes>) * N * Do * Ho * Wo * G * K);
    SimpleDeviceMem d1(sizeof(std::tuple_element_t<1, DDataTypes>) * G * K);

    using DeviceOp = ck::tensor_operation::device::DeviceGroupedConvFwdMultipleABD<
        NumDimSpatial,
        InLayout,
        WeiLayout,
        ck::Tuple<OutLayout, OutLayout>,
        ck::Tuple<OutLayout, BiasLayout>,
        OutLayout,
        InDataType,
        WeiDataType,

@@ -117,8 +121,8 @@ int execute_conv_fwd_scaleadd_scaleadd_relu()
        in_strides,
        wei_lengths,
        wei_strides,
        {out_lengths, out_lengths},
        {out_strides, out_strides},
        {out_lengths, bias_lengths},
        {out_strides, bias_strides},
        out_lengths,
        out_strides,
        filter_strides,

@@ -187,8 +191,8 @@ int execute_conv_fwd_scaleadd_scaleadd_relu()
        in_strides,
        wei_lengths,
        wei_strides,
        {out_lengths, out_lengths},
        {out_strides, out_strides},
        {out_lengths, bias_lengths},
        {out_strides, bias_strides},
        out_lengths,
        out_strides,
        filter_strides,
client_example/25_wrapper/CMakeLists.txt (new file, 4 lines)
@@ -0,0 +1,4 @@
add_executable(client_tensor_transform_using_wrapper tensor_transform_using_wrapper.cpp)
target_link_libraries(client_tensor_transform_using_wrapper PRIVATE composable_kernel::device_other_operations)
add_executable(client_wrapper_img2col wrapper_img2col.cpp)
target_link_libraries(client_wrapper_img2col PRIVATE composable_kernel::device_other_operations)
client_example/25_wrapper/tensor_transform_using_wrapper.cpp (new file, 114 lines)
@@ -0,0 +1,114 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2023-2024, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>

#include "ck/ck.hpp"

#include "ck/utility/number.hpp"
#include "ck/utility/tuple.hpp"
#include "ck/utility/sequence.hpp"

#include "ck/wrapper/layout.hpp"

using DataType = int;

template <typename Layout>
void Print1d(const Layout& layout)
{
    std::cout << "Print1d" << std::endl;
    for(ck::index_t w = 0; w < ck::wrapper::size(layout); w++)
    {
        std::cout << layout(ck::make_tuple(w)) << " ";
    }
    std::cout << std::endl;
}

template <typename Layout>
void Print2d(const Layout& layout)
{
    std::cout << "Print2d" << std::endl;
    for(ck::index_t h = 0; h < ck::wrapper::size<0>(layout); h++)
    {
        for(ck::index_t w = 0; w < ck::wrapper::size<1>(layout); w++)
        {
            std::cout << layout(ck::make_tuple(h, w)) << " ";
        }
        std::cout << std::endl;
    }
}

// Print in (x,y),z pattern
template <typename Layout>
void Print3dCustom(const Layout& layout)
{
    std::cout << "Print3dCustom" << std::endl;
    for(ck::index_t d = 0; d < ck::wrapper::size<0>(ck::wrapper::get<0>(layout)); d++)
    {
        for(ck::index_t h = 0; h < ck::wrapper::size<1>(ck::wrapper::get<0>(layout)); h++)
        {
            for(ck::index_t w = 0; w < ck::wrapper::size<1>(layout); w++)
            {
                std::cout << layout(ck::make_tuple(ck::make_tuple(d, h), w)) << " ";
            }
            std::cout << std::endl;
        }
        std::cout << std::endl;
    }
}

int main()
{
    // Layout traverse in row-major
    std::cout << "Note: Layout traverse in column-major" << std::endl;
    // Basic descriptor 0, 1, 2, ... 30, 31 (compile-time descriptor)
    // (dims:4,8 strides:1,4)
    const auto shape_4x8 = ck::make_tuple(ck::Number<4>{}, ck::Number<8>{});
    const auto layout_4x8_s1x4 = ck::wrapper::make_layout(shape_4x8);
    std::cout << "dims:4,8 strides:1,4" << std::endl;
    Print2d(layout_4x8_s1x4);
    using Cord1x1Type = ck::Tuple<ck::Number<1>, ck::Number<1>>;
    constexpr ck::index_t offset_1x1 = layout_4x8_s1x4.template operator()<Cord1x1Type>();
    std::cout << "Constexpr calculated [1, 1] offset:" << offset_1x1 << std::endl;

    // Basic descriptor 0, 1, 8, 9, 16, 17, ... 30, 31 (runtime descriptor)
    // dims:4,(2,4) strides:2,(1,8)
    const auto shape_4x2x4 = ck::make_tuple(4, ck::make_tuple(2, 4));
    const auto strides_s2x1x8 = ck::make_tuple(2, ck::make_tuple(1, 8));
    const auto layout_4x2x4_s2x1x8 = ck::wrapper::make_layout(shape_4x2x4, strides_s2x1x8);

    std::cout << "dims:4,(2,4) strides:2,(1,8)" << std::endl;
    Print2d(layout_4x2x4_s2x1x8);

    // Basic descriptor 0, 1, 8, 9, 16, 17, ... 30, 31 (compile-time descriptor)
    // dims:(2,2),(2,4) strides:(1,4),(2,8)
    const auto shape_2x2x2x4 = ck::make_tuple(ck::make_tuple(ck::Number<2>{}, ck::Number<2>{}),
                                              ck::make_tuple(ck::Number<2>{}, ck::Number<4>{}));
    const auto strides_s1x4x2x8 = ck::make_tuple(ck::make_tuple(ck::Number<1>{}, ck::Number<4>{}),
                                                 ck::make_tuple(ck::Number<2>{}, ck::Number<8>{}));
    static const auto layout_2x2x2x4_s1x4x2x8 =
        ck::wrapper::make_layout(shape_2x2x2x4, strides_s1x4x2x8);

    std::cout << "dims:(2,2),(2,4) strides:(1,4),(2,8)" << std::endl;
    Print2d(layout_2x2x2x4_s1x4x2x8);
    Print3dCustom(layout_2x2x2x4_s1x4x2x8);

    // Basic descriptor 0, 1, 8, 9, 16, 17, ... 30, 31 (compile-time descriptor)
    // dims:((2,2),2),4 strides:((1,4),2),8
    // Transform to 2d
    const auto shape_2x2x2x4_nested = ck::make_tuple(
        ck::make_tuple(ck::make_tuple(ck::Number<2>{}, ck::Number<2>{}), ck::Number<2>{}),
        ck::Number<4>{});
    const auto strides_s1x4x2x8_nested = ck::make_tuple(
        ck::make_tuple(ck::make_tuple(ck::Number<1>{}, ck::Number<4>{}), ck::Number<2>{}),
        ck::Number<8>{});
    static const auto layout_2x2x2x4_s1x4x2x8_nested =
        ck::wrapper::make_layout(shape_2x2x2x4_nested, strides_s1x4x2x8_nested);

    std::cout << "dims:((2,2),2),4 strides:((1,4),2),8" << std::endl;
    Print1d(layout_2x2x2x4_s1x4x2x8_nested);
    Print2d(layout_2x2x2x4_s1x4x2x8_nested);
    Print3dCustom(layout_2x2x2x4_s1x4x2x8_nested);

    return 0;
}
client_example/25_wrapper/wrapper_img2col.cpp (new file, 180 lines)
@@ -0,0 +1,180 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#include <numeric>
#include <cstdlib>
#include <iomanip>
#include <iostream>
#include <initializer_list>
#include <vector>

#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"

#include "ck/host_utility/kernel_launch.hpp"
#include "ck/utility/common_header.hpp"
#include "ck/wrapper/layout.hpp"
#include "ck/wrapper/tensor.hpp"
#include "ck/wrapper/operations/copy.hpp"

static constexpr ck::index_t NumDimSpatial = 3;
using DataType = float;
using InputLayout = ck::tensor_layout::convolution::NDHWGC;

struct SimpleDeviceMem
{
    SimpleDeviceMem() = delete;

    SimpleDeviceMem(std::size_t mem_size) : p_mem_{}
    {
        (void)hipMalloc(static_cast<void**>(&p_mem_), mem_size);
    }

    void* GetDeviceBuffer() { return p_mem_; }

    ~SimpleDeviceMem() { (void)hipFree(p_mem_); }

    void* p_mem_;
};

// Test copy from Global to Global through LDS and VGPR
template <typename InputTensor,
          typename OutputTensor,
          typename BlockShape,
          typename ThreadLayoutShape>
__global__ void DeviceImageToColumnPad0(InputTensor input_tensor,
                                        OutputTensor output_tensor,
                                        const BlockShape tile_shape,
                                        const ThreadLayoutShape thread_layout)
{
    const ck::index_t block_idx = static_cast<ck::index_t>(blockIdx.x);

    // Get local tiles for global memory
    auto input_local_tile = ck::wrapper::make_local_tile(input_tensor, tile_shape, block_idx);
    auto output_local_tile = ck::wrapper::make_local_tile(output_tensor, tile_shape, block_idx);

    // Get partition per thread
    const auto input_local_partition =
        ck::wrapper::make_local_partition(input_local_tile, thread_layout, threadIdx.x);
    auto output_local_partition =
        ck::wrapper::make_local_partition(output_local_tile, thread_layout, threadIdx.x);

    // Perform copy
    using DimAccessOrder = ck::Tuple<ck::Number<0>, ck::Number<1>>;
    constexpr ck::index_t vector_dim = 1;
    constexpr ck::index_t scalar_per_vector = 4;
    ck::wrapper::copy<DimAccessOrder, vector_dim, scalar_per_vector>(input_local_partition,
                                                                     output_local_partition);
}

void PerformImageToColumnPad0(const ck::index_t G,
                              const ck::index_t N,
                              const ck::index_t Di,
                              const ck::index_t Hi,
                              const ck::index_t Wi,
                              const ck::index_t Do,
                              const ck::index_t Ho,
                              const ck::index_t Wo,
                              const ck::index_t C,
                              const ck::index_t Z,
                              const ck::index_t Y,
                              const ck::index_t X,
                              std::array<ck::index_t, NumDimSpatial> filter_strides,
                              std::array<ck::index_t, NumDimSpatial> filter_dilations)
{
    const ck::index_t ZYXC = Z * Y * X * C;
    const ck::index_t GC = G * C;

    // shape: ((G, (Wo, Ho, Do, N)), (C, X, Y, Z))
    const auto shape = ck::make_tuple(ck::make_tuple(G, ck::make_tuple(Wo, Ho, Do, N)),
                                      ck::make_tuple(C, X, Y, Z));
    const auto in_strides =
        ck::make_tuple(ck::make_tuple(C,
                                      ck::make_tuple(filter_strides[2] * GC,
                                                     filter_strides[1] * Wi * GC,
                                                     filter_strides[0] * Hi * Wi * GC,
                                                     Di * Hi * Wi * GC)),
                       ck::make_tuple(1,
                                      filter_dilations[2] * GC,
                                      filter_dilations[1] * Wi * GC,
                                      filter_dilations[0] * Hi * Wi * GC));
    const auto in_layout = ck::wrapper::make_layout(shape, in_strides);

    const auto out_strides = ck::make_tuple(
        ck::make_tuple(
            ZYXC,
            ck::make_tuple(ZYXC * G, Wo * ZYXC * G, Ho * Wo * ZYXC * G, Do * Ho * Wo * ZYXC * G)),
        ck::make_tuple(1, C, X * C, Y * X * C));
    const auto out_layout = ck::wrapper::make_layout(shape, out_strides);

    const ck::index_t input_size = N * Di * Hi * Wi * GC;
    // Global memory buffers
    SimpleDeviceMem in_buf(input_size * sizeof(DataType));
    SimpleDeviceMem out_buf(ck::wrapper::size(out_layout) * sizeof(DataType));

    // User can choose appropriate number of threads and sizes per block
    const auto thread_layout = ck::make_tuple(ck::Number<8>{}, ck::Number<16>{});
    // This example doesn't support padding, user should select tile sizes
    // which divide the shape completely
    const auto tile_shape = ck::make_tuple(ck::Number<32>{}, ck::Number<64>{});

    // Create buffers for global memory
    auto input_tensor_global = ck::wrapper::make_tensor<ck::wrapper::MemoryTypeEnum::Global>(
        static_cast<const DataType*>(in_buf.GetDeviceBuffer()), in_layout);
    auto output_tensor_global = ck::wrapper::make_tensor<ck::wrapper::MemoryTypeEnum::Global>(
        static_cast<DataType*>(out_buf.GetDeviceBuffer()), out_layout);

    const ck::index_t grid_size = ck::math::integer_divide_ceil(ck::wrapper::size<0>(in_layout),
                                                                ck::wrapper::size<0>(tile_shape)) *
                                  ck::math::integer_divide_ceil(ck::wrapper::size<1>(in_layout),
                                                                ck::wrapper::size<1>(tile_shape));

    const auto kernel = DeviceImageToColumnPad0<decltype(input_tensor_global),
                                                decltype(output_tensor_global),
                                                decltype(tile_shape),
                                                decltype(thread_layout)>;
    const float avg_time = launch_and_time_kernel(StreamConfig{nullptr, true},
                                                  kernel,
                                                  dim3(grid_size),
                                                  dim3(ck::wrapper::size(thread_layout)),
                                                  0,
                                                  input_tensor_global,
                                                  output_tensor_global,
                                                  tile_shape,
                                                  thread_layout);

    std::size_t num_btype = G * N * Do * Ho * Wo * ZYXC * 2 * sizeof(DataType);
    float gb_per_sec = num_btype / 1.E6 / avg_time;
    std::cout << "Perf: " << std::setw(10) << avg_time << " ms, " << gb_per_sec << " GB/s, "
              << std::endl;
}

int main(int argc, char* argv[])
{
    constexpr ck::index_t G = 4;  // number of groups
    constexpr ck::index_t N = 32; // batch
    constexpr ck::index_t C = 64; // input channel (per group)
    constexpr ck::index_t Z = 3;  // filter D
    constexpr ck::index_t Y = 3;  // filter H
    constexpr ck::index_t X = 3;  // filter W
    constexpr ck::index_t Di = 9; // input D
    constexpr ck::index_t Hi = 9; // input H
    constexpr ck::index_t Wi = 7; // input W
    constexpr ck::index_t Do = 7; // output D
    constexpr ck::index_t Ho = 7; // output H
    constexpr ck::index_t Wo = 5; // output W
    PerformImageToColumnPad0(G,
                             N,
                             Di,
                             Hi,
                             Wi,
                             Do,
                             Ho,
                             Wo,
                             C,
                             Z,
                             Y,
                             X,
                             {1, 1, 1} /*filter_strides*/,
                             {1, 1, 1} /*filter_dilations*/);
    return 0;
}
@@ -149,7 +149,7 @@ function(clang_tidy_check TARGET)
        add_custom_target(${tidy_target}
            # for some targets clang-tidy is not able to get information from .clang-tidy
            DEPENDS ${SOURCE}
            COMMAND ${CLANG_TIDY_COMMAND} "-config=\{CheckOptions: \[\{key: bugprone-reserved-identifier.AllowedIdentifiers,value: __HIP_PLATFORM_HCC__\; __HIP_ROCclr__\}\]\}" ${SOURCE} "-export-fixes=${CLANG_TIDY_FIXIT_DIR}/${TARGET}-${tidy_file}.yaml"
            COMMAND ${CLANG_TIDY_COMMAND} "-config=\{CheckOptions: \[\{key: bugprone-reserved-identifier.AllowedIdentifiers,value: __HIP_PLATFORM_HCC__\; __HIP_PLATFORM_AMD__\; __HIP_ROCclr__\}\]\}" ${SOURCE} "-export-fixes=${CLANG_TIDY_FIXIT_DIR}/${TARGET}-${tidy_file}.yaml"
            WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
            COMMENT "clang-tidy: Running clang-tidy on target ${SOURCE}..."
        )
cmake/getopt.cmake (new file, 28 lines)
@@ -0,0 +1,28 @@
# SPDX-License-Identifier: MIT
# Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

add_library(getopt::getopt INTERFACE IMPORTED GLOBAL)

if(WIN32)
    include(FetchContent)

    FetchContent_Declare(
        getopt
        GIT_REPOSITORY https://github.com/apwojcik/getopt.git
        GIT_TAG main
        SYSTEM
    )

    set(__build_shared_libs ${BUILD_SHARED_LIBS})
    set(BUILD_SHARED_LIBS OFF CACHE INTERNAL "")

    FetchContent_MakeAvailable(getopt)

    # Restore the old value of BUILD_SHARED_LIBS
    set(BUILD_SHARED_LIBS ${__build_shared_libs} CACHE BOOL "Type of libraries to build" FORCE)

    FetchContent_GetProperties(getopt)

    target_link_libraries(getopt::getopt INTERFACE wingetopt)
    target_include_directories(getopt::getopt INTERFACE ${getopt_SOURCE_DIR}/src)
endif()
@@ -1,50 +0,0 @@
include(FetchContent)

set(GOOGLETEST_DIR "" CACHE STRING "Location of local GoogleTest repo to build against")

if(GOOGLETEST_DIR)
    set(FETCHCONTENT_SOURCE_DIR_GOOGLETEST ${GOOGLETEST_DIR} CACHE STRING "GoogleTest source directory override")
endif()

message(STATUS "Fetching GoogleTest")

list(APPEND GTEST_CMAKE_CXX_FLAGS
    -Wno-undef
    -Wno-reserved-identifier
    -Wno-global-constructors
    -Wno-missing-noreturn
    -Wno-disabled-macro-expansion
    -Wno-used-but-marked-unused
    -Wno-switch-enum
    -Wno-zero-as-null-pointer-constant
    -Wno-unused-member-function
    -Wno-comma
    -Wno-old-style-cast
    -Wno-deprecated
    -Wno-unsafe-buffer-usage
)
message(STATUS "Suppressing googltest warnings with flags: ${GTEST_CMAKE_CXX_FLAGS}")

FetchContent_Declare(
    googletest
    GIT_REPOSITORY https://github.com/google/googletest.git
    GIT_TAG b85864c64758dec007208e56af933fc3f52044ee
)

# Will be necessary for windows build
# set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
FetchContent_GetProperties(googletest)
if(NOT googletest_POPULATED)
    FetchContent_Populate(googletest)
    add_subdirectory(${googletest_SOURCE_DIR} ${googletest_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()

target_compile_options(gtest PRIVATE ${GTEST_CMAKE_CXX_FLAGS})
target_compile_options(gtest_main PRIVATE ${GTEST_CMAKE_CXX_FLAGS})
target_compile_options(gmock PRIVATE ${GTEST_CMAKE_CXX_FLAGS})
target_compile_options(gmock_main PRIVATE ${GTEST_CMAKE_CXX_FLAGS})

set_target_properties(gtest PROPERTIES POSITION_INDEPENDENT_CODE ON)
set_target_properties(gtest_main PROPERTIES POSITION_INDEPENDENT_CODE ON)
set_target_properties(gmock PROPERTIES POSITION_INDEPENDENT_CODE ON)
set_target_properties(gmock_main PROPERTIES POSITION_INDEPENDENT_CODE ON)
cmake/gtest.cmake (new file, 70 lines)
@@ -0,0 +1,70 @@
include(FetchContent)

set(GOOGLETEST_DIR "" CACHE STRING "Location of local GoogleTest repo to build against")

if(GOOGLETEST_DIR)
    set(FETCHCONTENT_SOURCE_DIR_GOOGLETEST ${GOOGLETEST_DIR} CACHE STRING "GoogleTest source directory override")
endif()

FetchContent_Declare(
    GTest
    GIT_REPOSITORY https://github.com/google/googletest.git
    GIT_TAG f8d7d77c06936315286eb55f8de22cd23c188571
)

# Suppress ROCMChecks WARNING on GoogleTests
set(ROCM_DISABLE_CHECKS FALSE)
macro(rocm_check_toolchain_var var access value list_file)
    if(NOT ROCM_DISABLE_CHECKS)
        _rocm_check_toolchain_var("${var}" "${access}" "${value}" "${list_file}")
    endif()
endmacro()

if(WIN32)
    set(gtest_force_shared_crt ON CACHE INTERNAL "")
endif()

set(BUILD_GMOCK OFF CACHE INTERNAL "")
set(INSTALL_GTEST OFF CACHE INTERNAL "")

# Store the current value of BUILD_SHARED_LIBS
set(__build_shared_libs ${BUILD_SHARED_LIBS})
set(BUILD_SHARED_LIBS OFF CACHE INTERNAL "")

set(ROCM_DISABLE_CHECKS TRUE)
FetchContent_MakeAvailable(GTest)
set(ROCM_DISABLE_CHECKS FALSE)

# Restore the old value of BUILD_SHARED_LIBS
set(BUILD_SHARED_LIBS ${__build_shared_libs} CACHE BOOL "Type of libraries to build" FORCE)

set(BUILD_GMOCK OFF CACHE INTERNAL "")
set(INSTALL_GTEST OFF CACHE INTERNAL "")

set(GTEST_CXX_FLAGS
    -Wno-undef
    -Wno-reserved-identifier
    -Wno-global-constructors
    -Wno-missing-noreturn
    -Wno-disabled-macro-expansion
    -Wno-used-but-marked-unused
    -Wno-switch-enum
    -Wno-zero-as-null-pointer-constant
    -Wno-unused-member-function
    -Wno-comma
    -Wno-old-style-cast
    -Wno-deprecated
    -Wno-unsafe-buffer-usage
    -Wno-float-equal
)

if(WIN32)
    list(APPEND GTEST_CXX_FLAGS
        -Wno-suggest-destructor-override
        -Wno-suggest-override
        -Wno-nonportable-system-include-path
        -Wno-language-extension-token)
endif()

target_compile_options(gtest PRIVATE ${GTEST_CXX_FLAGS})
target_compile_options(gtest_main PRIVATE ${GTEST_CXX_FLAGS})
@@ -1,3 +1,3 @@
ROCmSoftwarePlatform/rocm-recipes
ROCm/rocm-recipes
RadeonOpenCompute/rocm-cmake@04f694df2a8dc9d7e35fa4dee4ba5fa407ec04f8 --build
danmar/cppcheck@2.9
danmar/cppcheck@2.9
@@ -1,11 +1,13 @@
.. meta::
  :description: Composable Kernel documentation and API reference library
  :keywords: composable kernel, CK, ROCm, API, documentation

*******************
API Reference Guide
*******************
.. _api-reference:

********************************************************************
API reference guide
********************************************************************

=================
Introduction
=================

This document contains details of the APIs for the Composable Kernel (CK) library and introduces
some of the key design principles that are used to write new classes that extend CK functionality.

@@ -30,7 +32,7 @@ DeviceMem
Kernels For Flashattention
---------------------------

The Flashattention algorithm is defined in :cite:t:`dao2022flashattention`. This sections lists
The Flashattention algorithm is defined in :cite:t:`dao2022flashattention`. This section lists
the classes that are used in the CK GPU implementation of Flashattention.

**Gridwise classes**
@@ -1,9 +1,14 @@
===================
Contributor's Guide
===================
.. meta::
  :description: Composable Kernel documentation and API reference library
  :keywords: composable kernel, CK, ROCm, API, documentation

This chapter explains how to get started contributing to the Composable Kernel project and what are
the contributing rules.
.. _contributing-to:

********************************************************************
Contributor's guide
********************************************************************

This chapter explains the rules for contributing to the Composable Kernel project, and how to contribute.

Getting started
===============

@@ -14,23 +19,21 @@ Getting started
   build the library. You can also find some of this information in the
   `README file <https://github.com/ROCmSoftwarePlatform/composable_kernel/blob/develop/README.md>`_
   on the project's GitHub page.
#. **Additional reading:** We also recommend reading a `blog post
#. **Additional reading:** The blog post `AMD Composable Kernel library: efficient fused kernels for AI apps with just a few lines of code <https://community.amd.com/t5/instinct-accelerators/amd-composable-kernel-library-efficient-fused-kernels-for-ai/ba-p/553224>`_ provides a deeper understanding of the CK library and showcases its performance capabilities.
   <https://community.amd.com/t5/instinct-accelerators/amd-composable-kernel-library-efficient-fused-kernels-for-ai/ba-p/553224>`_
   from the AMD Community portal. It offers a deeper understanding of the library's objectives and
   showcases its performance capabilities.
   from the AMD Community portal. It offers a deeper understanding of the library's objectives and showcases its performance capabilities.
#. **General information:** For broader information about AMD products, consider exploring the
   `AMD Developer Central portal <https://www.amd.com/en/developer.html>`_.

How do I contribute
How to contribute
===================

We deeply value contributions from our users. You can make an impact by reporting issues or
proposing code enhancements through pull requests.
You can make an impact by reporting issues or proposing code enhancements through pull requests.

Reporting issues
----------------

We use `Github issues <https://github.com/ROCmSoftwarePlatform/composable_kernel/issues>`_
Use `Github issues <https://github.com/ROCmSoftwarePlatform/composable_kernel/issues>`_
to track public bugs and enhancement requests.

If you encounter an issue with the library, please check if the problem has already been

@@ -59,7 +62,7 @@ issue. All reported issues must include:

* How frequently does this issue happen? Does it reproduce every time? Or is it a sporadic issue?

Before sumbitting any issue, ensure you have addressed all relevant questions from the checklist.
Before submitting any issue, ensure you have addressed all relevant questions from the checklist.

Creating Pull Requests
----------------------

@@ -68,7 +71,7 @@ You can submit `Pull Requests (PR) on GitHub
<https://github.com/ROCmSoftwarePlatform/composable_kernel/pulls>`_.

All contributors are required to develop their changes on a separate branch and then create a
pull requrest to merge their changes into the `develop` branch, which is the default
pull request to merge their changes into the `develop` branch, which is the default
development branch in the Composable Kernel project. All external contributors must use their own
forks of the project to develop their changes.

@@ -99,4 +102,4 @@ When submitting a Pull Request you should:

Following the above guidelines ensures a seamless review process and faster assistance from our
end.

Thank you for your commitment to enhancing the Composable Kernel project! We look forward to collaborating with you.
Thank you for your commitment to enhancing the Composable Kernel project!
@@ -1,16 +1,20 @@
==========================
Supported Primitives Guide
==========================
.. meta::
  :description: Composable Kernel documentation and API reference library
  :keywords: composable kernel, CK, ROCm, API, documentation

This document contains details of supported primitives in Composable Kernel (CK). In contrast to the
API Reference Guide, the Supported Primitives Guide is an introduction to the math which underpins
the algorithms implemented in CK.
.. _supported-primitives:

********************************************************************
Supported Primitives Guide
********************************************************************

This document contains details of supported primitives in Composable Kernel (CK). In contrast to the API Reference Guide, the Supported Primitives Guide is an introduction to the math which underpins the algorithms implemented in CK.

------------
Softmax
------------

For vectors :math:`x^{(1)}, x^{(2)}, \ldots, x^{(T)}` of size :math:`B` we can decompose the
For vectors :math:`x^{(1)}, x^{(2)}, \ldots, x^{(T)}` of size :math:`B` you can decompose the
softmax of concatenated :math:`x = [ x^{(1)}\ | \ \ldots \ | \ x^{(T)} ]` as,

.. math::

@@ -27,7 +31,7 @@ where :math:`f(x^{(j)}) = \exp( x^{(j)} - m(x^{(j)}) )` is of size :math:`B` and
:math:`z(x^{(j)}) = f(x_1^{(j)})+ \ldots+ f(x_B^{(j)})` is a scalar.

For a matrix :math:`X` composed of :math:`T_r \times T_c` tiles, :math:`X_{ij}`, of size
:math:`B_r \times B_c` we can compute the row-wise softmax as follows.
:math:`B_r \times B_c` you can compute the row-wise softmax as follows.

For :math:`j` from :math:`1` to :math:`T_c`, and :math:`i` from :math:`1` to :math:`T_r` calculate,
docs/conf.py
@@ -4,23 +4,34 @@
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

import subprocess
import re

from rocm_docs import ROCmDocs

html_theme_options = {"flavor": "list"}

name = "Composable Kernel"
get_version = r'sed -n -e "s/^rocm_setup_version(.* \([0-9\.]\{1,\}\).*/\1/p" ../CMakeLists.txt'
version = subprocess.getoutput(get_version)
if len(version) > 0:
    name = f"{name} {version}"
with open('../CMakeLists.txt', encoding='utf-8') as f:
    match = re.search(r'.*set\(version ([0-9.]+)[^0-9.]+', f.read())
    if not match:
        raise ValueError("VERSION not found!")
    version_number = match[1]
left_nav_title = f"Composable Kernel {version_number} Documentation"

# for PDF output on Read the Docs
project = "Composable Kernel Documentation"
author = "Advanced Micro Devices, Inc."
copyright = "Copyright (c) 2023 Advanced Micro Devices, Inc. All rights reserved."
version = version_number
release = version_number

external_toc_path = "./sphinx/_toc.yml"

docs_core = ROCmDocs(f"{name} Documentation")
docs_core.run_doxygen(doxygen_root="doxygen", doxygen_path="doxygen/docBin/xml")
docs_core = ROCmDocs(left_nav_title)
docs_core.run_doxygen(doxygen_root="doxygen", doxygen_path="doxygen/xml")
docs_core.setup()

external_projects_current_project = "composable_kernel"

mathjax3_config = {
    'tex': {
        'macros': {
@@ -1,28 +1,50 @@
|
||||
===================
|
||||
.. meta::
|
||||
:description: Composable Kernel documentation and API reference library
|
||||
:keywords: composable kernel, CK, ROCm, API, documentation
|
||||
|
||||
.. _docker-hub:
|
||||
|
||||
********************************************************************
|
||||
CK Docker Hub
|
||||
********************************************************************
|
||||
|
||||
Why do I need this?
|
||||
===================
|
||||
|
||||
-------------------------------------
|
||||
Why do I need this?
|
||||
-------------------------------------
|
||||
To make things simpler, and bring Composable Kernel and its dependencies together,
|
||||
docker images can be found on `Docker Hub <https://hub.docker.com/r/rocm/composable_kernel/tags>`_. Docker images provide a complete image of the OS, the Composable Kernel library, and its dependencies in a single downloadable file.
|
||||
|
||||
To make our lives easier and bring Composable Kernel dependencies together, we recommend using
|
||||
docker images that can be found on `Docker Hub <https://hub.docker.com/r/rocm/composable_kernel>`_.
|
||||
Refer to `Docker Overview <https://docs.docker.com/get-started/overview/>`_ for more information on Docker images and containers.
|
||||
|
||||
-------------------------------------
|
||||
So what is Composable Kernel?
|
||||
-------------------------------------
|
||||
Which image is right for me?
|
||||
============================
|
||||
|
||||
Composable Kernel (CK) library aims to provide a programming model for writing performance critical
|
||||
kernels for machine learning workloads across multiple architectures including GPUs, CPUs, etc,
|
||||
through general purpose kernel languages, like HIP C++.
|
||||
The image naming includes information related to the docker image.
|
||||
For example ``ck_ub20.04_rocm6.0`` indicates the following:
|
||||
|
||||
To get the CK library::
|
||||
* ``ck`` - made for running Composable Kernel;
|
||||
* ``ub20.04`` - based on Ubuntu 20.04;
|
||||
* ``rocm6.0`` - ROCm platform version 6.0.
|
||||
|
||||
git clone https://github.com/ROCmSoftwarePlatform/composable_kernel.git
|
||||
Download a docker image suitable for your OS and ROCm release, run or start the docker container, and then resume the tutorial from this point. Use the ``docker pull`` command to download the file::
|
||||
|
||||
docker pull rocm/composable_kernel:ck_ub20.04_rocm6.0
|
||||
|
||||
|
||||
run a docker container::
|
||||
What is inside the image?
|
||||
-------------------------
|
||||
|
||||
The docker images have everything you need for running CK including:
|
||||
|
||||
* `ROCm <https://www.amd.com/en/graphics/servers-solutions-rocm>`_
|
||||
* `CMake <https://cmake.org/getting-started/>`_
|
||||
* `Compiler <https://github.com/RadeonOpenCompute/llvm-project>`_
|
||||
* `Composable Kernel library <https://github.com/ROCm/composable_kernel>`_
|
||||
|
||||
Running the docker container
|
||||
============================
|
||||
|
||||
After downloading the docker image, you can start the container using one of a number of commands. Start with the ``docker run`` command as shown below::
|
||||
|
||||
docker run \
|
||||
-it \
|
||||
@@ -30,70 +52,50 @@ run a docker container::
|
||||
--group-add sudo \
|
||||
-w /root/workspace \
|
||||
-v ${PATH_TO_LOCAL_WORKSPACE}:/root/workspace \
|
||||
rocm/composable_kernel:ck_ub20.04_rocm5.6 \
|
||||
rocm/composable_kernel:ck_ub20.04_rocm6.0 \
|
||||
/bin/bash
|
||||
|
||||
and build the CK::
|
||||
After starting the bash shell, the docker container current folder is `~/workspace`. The library path is ``~/workspace/composable_kernel``. Navigate to the library to begin the tutorial as explained in :ref:`hello-world`:
|
||||
|
||||
mkdir build && cd build
|
||||
# Need to specify target ID, example below is for gfx908 and gfx90a
|
||||
cmake \
|
||||
-D CMAKE_PREFIX_PATH=/opt/rocm \
|
||||
-D CMAKE_CXX_COMPILER=/opt/rocm/bin/hipcc \
|
||||
-D CMAKE_CXX_FLAGS="-O3" \
|
||||
-D CMAKE_BUILD_TYPE=Release \
|
||||
-D GPU_TARGETS="gfx908;gfx90a" \
|
||||
..
|
||||
.. note::
|
||||
|
||||
and::
|
||||
If your current folder is different from `${HOME}`, adjust the line ``-v ${HOME}:/root/workspace`` in the ``docker run`` command to fit your folder structure.
|
||||
|
||||
make -j examples tests
|
||||
Stop and restart the docker image
|
||||
=================================
|
||||
|
||||
To run all the test cases including tests and examples run::
|
||||
After finishing the tutorial, or just when you have completed your work session, you can close the docker container, or stop the docker container to restart it at another time. Closing the docker container means that it is still in the active state, and can be resumed from where you left it. Stopping the container closes it, and returns the image to its initial state.
|
||||
|
||||
make test
|
||||
Use the ``Ctrl-D`` option to exit the container, while leaving it active, so you can return to the container in its current state to resume the tutorial, or pickup your project where you left off.
|
||||
|
||||
We can also run specific examples or tests like::
|
||||
To restart the active container use the ``docker exec`` command to specify the container name and options as follows::
|
||||
|
||||
./bin/example_gemm_xdl_fp16
|
||||
./bin/test_gemm_fp16
|
||||
docker exec -it <container_name> bash
|
||||
|
||||
For more details visit `CK github repository <https://github.com/ROCmSoftwarePlatform/composable_kernel>`_,
|
||||
`CK examples <https://github.com/ROCmSoftwarePlatform/composable_kernel/tree/develop/example)>`_,
|
||||
`even more CK examples <https://github.com/ROCmSoftwarePlatform/composable_kernel/tree/develop/client_example>`_.
|
||||
Where:
|
||||
|
||||
-------------------------------------
|
||||
And what is inside?
|
||||
-------------------------------------
|
||||
* `exec` is the docker command
|
||||
* `-it` is the interactive option for `exec`
|
||||
* `<container_name>` specifies an active container on the system
|
||||
* `bash` specifies the command to run in the interactive shell
|
||||
|
||||
The docker images have everything you need for running CK including:
|
||||
.. note::
|
||||
|
||||
* `ROCm <https://www.amd.com/en/graphics/servers-solutions-rocm>`_
|
||||
* `CMake <https://cmake.org/>`_
|
||||
* `Compiler <https://github.com/RadeonOpenCompute/llvm-project>`_
|
||||
You can use the ``docker container ls`` command to list the active containers on the system.
|
||||
|
||||
-------------------------------------
|
||||
Which image is right for me?
|
||||
-------------------------------------
|
||||
To start a container from the image, use the ``docker start`` command::
|
||||
|
||||
Let's take a look at the image naming, for example ``ck_ub20.04_rocm5.6``. The image specs are:
|
||||
docker start <container_name>
|
||||
|
||||
* ``ck`` - made for running Composable Kernel;
|
||||
* ``ub20.04`` - based on Ubuntu 20.04;
|
||||
* ``rocm5.6`` - ROCm platform version 5.6.
|
||||
Then use the docker exec command as shown above to start the bash shell.
|
||||
|
||||
So just pick the right image for your project dependencies and you're all set.
|
||||
Use the ``docker stop`` command to stop the container and restore the image to its initial state::
|
||||
|
||||
-------------------------------------
|
||||
DIY starts here
|
||||
-------------------------------------
|
||||
docker stop <container_name>
|
||||
|
||||
Editing the docker image
|
||||
========================
|
||||
|
||||
If you need to customize a docker image or just can't stop tinkering, feel free to adjust the
|
||||
If you want to customize the docker image, edit the
|
||||
`Dockerfile <https://github.com/ROCmSoftwarePlatform/composable_kernel/blob/develop/Dockerfile>`_
|
||||
for your needs.
|
||||
|
||||
-------------------------------------
|
||||
License
|
||||
-------------------------------------
|
||||
|
||||
CK is released under the MIT `license <https://github.com/ROCmSoftwarePlatform/composable_kernel/blob/develop/LICENSE>`_.
|
||||
from the GitHub repository to suit your needs.
|
||||
|
||||
@@ -58,7 +58,7 @@ PROJECT_LOGO =
|
||||
# entered, it will be relative to the location where doxygen was started. If
|
||||
# left blank the current directory will be used.
|
||||
|
||||
OUTPUT_DIRECTORY = docBin
|
||||
OUTPUT_DIRECTORY = .
|
||||
|
||||
# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
|
||||
# directories (in 2 levels) under the output directory of each output format and
|
||||
@@ -778,7 +778,9 @@ WARN_LOGFILE =
|
||||
INPUT = ../../include/ck/tensor_operation/gpu/grid \
|
||||
../../include/ck/tensor_operation/gpu/block \
|
||||
../../include/ck/tensor_operation/gpu/thread \
|
||||
../../library/include/ck/library/utility
|
||||
../../library/include/ck/library/utility \
|
||||
../../include/ck/wrapper
|
||||
|
||||
|
||||
# This tag can be used to specify the character encoding of the source files
|
||||
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
|
||||
|
||||
@@ -1,55 +1,39 @@
|
||||
============================
|
||||
.. meta::
|
||||
:description: Composable Kernel documentation and API reference library
|
||||
:keywords: composable kernel, CK, ROCm, API, documentation
|
||||
|
||||
.. _composable-kernel:
|
||||
|
||||
********************************************************************
|
||||
Composable Kernel User Guide
|
||||
============================
|
||||
********************************************************************
|
||||
|
||||
------------
|
||||
Introduction
|
||||
------------
|
||||
The Composable Kernel (CK) library provides a programming model for writing performance critical kernels for machine learning workloads across multiple architectures including GPUs and CPUs, through general purpose kernel languages like HIP C++. This document contains instructions for installing, using, and contributing to the Composable Kernel project. To learn more see :ref:`what-is-ck`.
|
||||
|
||||
This document contains instructions for installing, using, and contributing to Composable Kernel (CK).
|
||||
The CK documentation is structured as follows:
|
||||
|
||||
-----------
|
||||
Methodology
|
||||
-----------
|
||||
.. card:: Conceptual
|
||||
|
||||
Composable Kernel (CK) library aims to provide a programming model for writing performance critical
|
||||
kernels for machine learning workloads across multiple architectures including GPUs, CPUs, etc,
|
||||
through general purpose kernel languages, like HIP C++.
|
||||
* :ref:`what-is-ck`
|
||||
|
||||
CK utilizes two concepts to achieve performance portability and code maintainability:
|
||||
.. card:: Installation
|
||||
|
||||
* A tile-based programming model
|
||||
* Algorithm complexity reduction for complex ML operators, using innovative technique we call
|
||||
"Tensor Coordinate Transformation".
|
||||
* :ref:`docker-hub`
|
||||
|
||||
.. image:: data/ck_component.png
|
||||
:alt: CK Components
|
||||
.. card:: Tutorial
|
||||
|
||||
--------------
|
||||
Code Structure
|
||||
--------------
|
||||
* :ref:`hello-world`
|
||||
|
||||
Current CK library are structured into 4 layers:
|
||||
.. card:: API reference
|
||||
|
||||
* "Templated Tile Operators" layer
|
||||
* "Templated Kernel and Invoker" layer
|
||||
* "Instantiated Kernel and Invoker" layer
|
||||
* "Client API" layer
|
||||
* :ref:`supported-primitives`
|
||||
* :ref:`api-reference`
|
||||
* :ref:`wrapper`
|
||||
|
||||
.. image:: data/ck_layer.png
|
||||
:alt: CK Layers
|
||||
|
||||
Documentation Roadmap
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
The following is a list of CK documents in the suggested reading order:
|
||||
.. card:: Contributing to CK
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 5
|
||||
:caption: Contents:
|
||||
:numbered:
|
||||
* :ref:`contributing-to`
|
||||
|
||||
tutorial_hello_world
|
||||
dockerhub
|
||||
Supported_Primitives_Guide
|
||||
API_Reference_Guide
|
||||
Contributors_Guide
|
||||
To contribute to the documentation refer to `Contributing to ROCm <https://rocm.docs.amd.com/en/latest/contribute/index.html>`_.
|
||||
|
||||
You can find licensing information on the `Licensing <https://rocm.docs.amd.com/en/latest/about/license.html>`_ page.
|
||||
|
||||
2
docs/license.md
Normal file
2
docs/license.md
Normal file
@@ -0,0 +1,2 @@
|
||||
```{include} ../LICENSE.md
|
||||
```
|
||||
@@ -1,6 +0,0 @@
|
||||
=======
|
||||
License
|
||||
=======
|
||||
|
||||
.. include:: ../LICENSE
|
||||
:literal:
|
||||
@@ -1,10 +1,21 @@
|
||||
# Anywhere {branch} is used, the branch name will be substituted.
|
||||
# These comments will also be removed.
|
||||
defaults:
|
||||
numbered: False
|
||||
maxdepth: 6
|
||||
root: index
|
||||
subtrees:
|
||||
- caption: About
|
||||
entries:
|
||||
- file: license
|
||||
- entries:
|
||||
- file: what-is-ck.rst
|
||||
title: What is Composable Kernel?
|
||||
- file: dockerhub.rst
|
||||
title: Docker Hub
|
||||
- file: tutorial_hello_world.rst
|
||||
title: Hello World Tutorial
|
||||
- file: Supported_Primitives_Guide.rst
|
||||
title: Supported Primitives
|
||||
- file: API_Reference_Guide.rst
|
||||
title: API Reference
|
||||
- file: wrapper.rst
|
||||
title: Wrapper
|
||||
- file: Contributors_Guide.rst
|
||||
title: Contributing to CK
|
||||
- file: license.md
|
||||
title: License
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
rocm-docs-core>=0.20.0
|
||||
sphinxcontrib-bibtex==2.6.1
|
||||
rocm-docs-core==0.33.2
|
||||
sphinxcontrib-bibtex==2.6.2
|
||||
|
||||
@@ -16,7 +16,7 @@ beautifulsoup4==4.11.2
|
||||
# via pydata-sphinx-theme
|
||||
breathe==4.34.0
|
||||
# via rocm-docs-core
|
||||
certifi==2022.12.7
|
||||
certifi==2023.7.22
|
||||
# via requests
|
||||
cffi==1.15.1
|
||||
# via
|
||||
@@ -26,7 +26,7 @@ charset-normalizer==3.1.0
|
||||
# via requests
|
||||
click==8.1.3
|
||||
# via sphinx-external-toc
|
||||
cryptography==40.0.2
|
||||
cryptography==41.0.6
|
||||
# via pyjwt
|
||||
deprecated==1.2.13
|
||||
# via pygithub
|
||||
@@ -42,7 +42,7 @@ fastjsonschema==2.18.0
|
||||
# via rocm-docs-core
|
||||
gitdb==4.0.10
|
||||
# via gitpython
|
||||
gitpython==3.1.35
|
||||
gitpython==3.1.37
|
||||
# via rocm-docs-core
|
||||
idna==3.4
|
||||
# via requests
|
||||
@@ -88,9 +88,9 @@ pydata-sphinx-theme==0.13.3
|
||||
# via
|
||||
# rocm-docs-core
|
||||
# sphinx-book-theme
|
||||
pygithub==1.58.2
|
||||
pygithub==1.58.1
|
||||
# via rocm-docs-core
|
||||
pygments==2.14.0
|
||||
pygments==2.15.0
|
||||
# via
|
||||
# accessible-pygments
|
||||
# pydata-sphinx-theme
|
||||
@@ -109,11 +109,11 @@ pyyaml==6.0
|
||||
# pybtex
|
||||
# rocm-docs-core
|
||||
# sphinx-external-toc
|
||||
requests==2.28.2
|
||||
requests==2.31.0
|
||||
# via
|
||||
# pygithub
|
||||
# sphinx
|
||||
rocm-docs-core==0.29.0
|
||||
rocm-docs-core==0.33.2
|
||||
# via -r requirements.in
|
||||
six==1.16.0
|
||||
# via
|
||||
@@ -141,7 +141,7 @@ sphinx-book-theme==1.0.1
|
||||
# via rocm-docs-core
|
||||
sphinx-copybutton==0.5.1
|
||||
# via rocm-docs-core
|
||||
sphinx-design==0.3.0
|
||||
sphinx-design==0.4.1
|
||||
# via rocm-docs-core
|
||||
sphinx-external-toc==0.3.1
|
||||
# via rocm-docs-core
|
||||
@@ -149,7 +149,7 @@ sphinx-notfound-page==0.8.3
|
||||
# via rocm-docs-core
|
||||
sphinxcontrib-applehelp==1.0.4
|
||||
# via sphinx
|
||||
sphinxcontrib-bibtex==2.6.1
|
||||
sphinxcontrib-bibtex==2.6.2
|
||||
# via -r requirements.in
|
||||
sphinxcontrib-devhelp==1.0.2
|
||||
# via sphinx
|
||||
@@ -163,7 +163,7 @@ sphinxcontrib-serializinghtml==1.1.5
|
||||
# via sphinx
|
||||
typing-extensions==4.5.0
|
||||
# via pydata-sphinx-theme
|
||||
urllib3==1.26.15
|
||||
urllib3==1.26.18
|
||||
# via requests
|
||||
wrapt==1.15.0
|
||||
# via deprecated
|
||||
|
||||
@@ -1,52 +1,44 @@
|
||||
===============
|
||||
CK Hello world
|
||||
===============
|
||||
.. meta::
|
||||
:description: Composable Kernel documentation and API reference library
|
||||
:keywords: composable kernel, CK, ROCm, API, documentation
|
||||
|
||||
-------------------------------------
|
||||
Motivation
|
||||
-------------------------------------
|
||||
.. _hello-world:
|
||||
|
||||
This tutorial is aimed at engineers dealing with artificial intelligence and machine learning who
|
||||
would like to optimize their pipelines and squeeze every performance drop by adding Composable
|
||||
Kernel (CK) library to their projects. We would like to make the CK library approachable so
|
||||
the tutorial is not based on the latest release and doesn't have all the bleeding edge features,
|
||||
but it will be reproducible now and forever.
|
||||
********************************************************************
|
||||
Hello World Tutorial
|
||||
********************************************************************
|
||||
|
||||
During this tutorial we will have an introduction to the CK library, we will build it and run some
|
||||
examples and tests, so to say we will run a "Hello world" example. In future tutorials we will go
|
||||
in depth and breadth and get familiar with other tools and ways to integrate CK into your project.
|
||||
This tutorial is for engineers dealing with artificial intelligence and machine learning who
|
||||
would like to optimize pipelines and improve performance using the Composable
|
||||
Kernel (CK) library. It provides an introduction to CK: you will build the library and run a few examples and tests as a "Hello World" exercise.
|
||||
|
||||
-------------------------------------
|
||||
Description
|
||||
-------------------------------------
|
||||
===========
|
||||
|
||||
Modern AI technology solves more and more problems in all imaginable fields, but crafting fast and
|
||||
efficient workflows is still challenging. CK is one of the tools to make AI heavy lifting as fast
|
||||
and efficient as possible. CK is a collection of optimized AI operator kernels and tools to create
|
||||
new ones. The library has components required for majority of modern neural networks architectures
|
||||
including matrix multiplication, convolution, contraction, reduction, attention modules, variety of
|
||||
activation functions, fused operators and many more.
|
||||
Modern AI technology solves more and more problems in a variety of fields, but crafting fast and
|
||||
efficient workflows is still challenging. CK can make the AI workflow fast
|
||||
and efficient. CK is a collection of optimized AI operator kernels with tools to create
|
||||
new kernels. The library has components required for modern neural network architectures
|
||||
including matrix multiplication, convolution, contraction, reduction, attention modules, a variety of activation functions, and fused operators.
|
||||
|
||||
So how do we (almost) reach the speed of light? CK acceleration abilities are based on:
|
||||
CK library acceleration features are based on:
|
||||
|
||||
* Layered structure.
|
||||
* Tile-based computation model.
|
||||
* Tensor coordinate transformation.
|
||||
* Hardware acceleration use.
|
||||
* Support of low precision data types including fp16, bf16, int8 and int4.
|
||||
* Layered structure
|
||||
* Tile-based computation model
|
||||
* Tensor coordinate transformation
|
||||
* Hardware acceleration use
|
||||
* Support of low precision data types including fp16, bf16, int8 and int4
|
||||
|
||||
If you are excited and need more technical details and benchmarking results - read this awesome
|
||||
If you need more technical details and benchmarking results, read the following
|
||||
`blog post <https://community.amd.com/t5/instinct-accelerators/amd-composable-kernel-library-efficient-fused-kernels-for-ai/ba-p/553224>`_.
|
||||
|
||||
For more details visit our `github repository <https://github.com/ROCmSoftwarePlatform/composable_kernel>`_.
|
||||
To download the library visit the `composable_kernel repository <https://github.com/ROCmSoftwarePlatform/composable_kernel>`_.
|
||||
|
||||
-------------------------------------
|
||||
Hardware targets
|
||||
-------------------------------------
|
||||
================
|
||||
|
||||
CK library fully supports `gfx908` and `gfx90a` GPU architectures and only some operators are
|
||||
supported for `gfx1030`. Let's check the hardware you have at hand and decide on the target
|
||||
GPU architecture.
|
||||
CK library fully supports `gfx908` and `gfx90a` GPU architectures, while only some operators are
|
||||
supported for `gfx1030` devices. Check your hardware to determine the target GPU architecture.
|
||||
|
||||
========== =========
|
||||
GPU Target AMD GPU
|
||||
@@ -59,47 +51,24 @@ gfx1030 Radeon PRO V620, W6800, W6800X, W6800X Duo, W6900X, RX 6800, RX 6
|
||||
There are also `cloud options <https://aws.amazon.com/ec2/instance-types/g4/>`_ you can find if
|
||||
you don't have an AMD GPU at hand.
|
||||
|
||||
-------------------------------------
|
||||
Build the library
|
||||
-------------------------------------
|
||||
=================
|
||||
|
||||
First let's clone the library and rebase to the tested version::
|
||||
This tutorial is based on the use of docker images as explained in :ref:`docker-hub`. Download a docker image suitable for your OS and ROCm release, run or start the docker container, and then resume the tutorial from this point.
|
||||
|
||||
git clone https://github.com/ROCmSoftwarePlatform/composable_kernel.git
|
||||
cd composable_kernel/
|
||||
git checkout tutorial_hello_world
|
||||
.. note::
|
||||
|
||||
To make our lives easier we prepared
|
||||
`docker images <https://hub.docker.com/r/rocm/composable_kernel>`_ with all the necessary
|
||||
dependencies. Pick the right image and create a container. In this tutorial we use
|
||||
``rocm/composable_kernel:ck_ub20.04_rocm5.6`` image, it is based on Ubuntu 20.04 and
|
||||
ROCm v5.6.
|
||||
You can also `install ROCm <https://rocm.docs.amd.com/projects/install-on-linux/en/latest/>`_ on your system, clone the `Composable Kernel repository <https://github.com/ROCmSoftwarePlatform/composable_kernel.git>`_ on GitHub, and use that to build and run the examples using the commands described below.
|
||||
|
||||
If your current folder is ``${HOME}``, start the docker container with::
|
||||
|
||||
docker run \
|
||||
-it \
|
||||
--privileged \
|
||||
--group-add sudo \
|
||||
-w /root/workspace \
|
||||
-v ${HOME}:/root/workspace \
|
||||
rocm/composable_kernel:ck_ub20.04_rocm5.6 \
|
||||
/bin/bash
|
||||
|
||||
If your current folder is different from ``${HOME}``, adjust the line ``-v ${HOME}:/root/workspace``
|
||||
to fit your folder structure.
|
||||
|
||||
Inside the docker container current folder is ``~/workspace``, library path is
|
||||
``~/workspace/composable_kernel``, navigate to the library::
|
||||
Both the docker container and GitHub repository include the Composable Kernel library. Navigate to the library::
|
||||
|
||||
cd composable_kernel/
|
||||
|
||||
Create and go to the ``build`` directory::
|
||||
Create and change to a ``build`` directory::
|
||||
|
||||
mkdir build && cd build
|
||||
|
||||
In the previous section we talked about target GPU architecture. Once you decide which one is right
|
||||
for you, run CMake using the right ``GPU_TARGETS`` flag::
|
||||
The previous section discussed the supported GPU architectures. Once you decide which hardware targets you need, run CMake with the appropriate ``GPU_TARGETS`` flag::
|
||||
|
||||
cmake \
|
||||
-D CMAKE_PREFIX_PATH=/opt/rocm \
|
||||
@@ -109,26 +78,25 @@ for you, run CMake using the right ``GPU_TARGETS`` flag::
|
||||
-D BUILD_DEV=OFF \
|
||||
-D GPU_TARGETS="gfx908;gfx90a;gfx1030" ..
|
||||
|
||||
If everything went well the CMake run will end up with::
|
||||
If everything goes well the CMake command will return::
|
||||
|
||||
-- Configuring done
|
||||
-- Generating done
|
||||
-- Build files have been written to: "/root/workspace/composable_kernel/build"
|
||||
|
||||
Finally, we can build examples and tests::
|
||||
Finally, you can build examples and tests::
|
||||
|
||||
make -j examples tests
|
||||
|
||||
If everything is smooth, you'll see::
|
||||
When complete you should see::
|
||||
|
||||
Scanning dependencies of target tests
|
||||
[100%] Built target tests
|
||||
|
||||
---------------------------
|
||||
Run examples and tests
|
||||
---------------------------
|
||||
======================
|
||||
|
||||
Examples are listed as test cases as well, so we can run all examples and tests with::
|
||||
Examples are listed as test cases as well, so you can run all examples and tests with::
|
||||
|
||||
ctest
|
||||
|
||||
@@ -136,38 +104,32 @@ You can check the list of all tests by running::
|
||||
|
||||
ctest -N
|
||||
|
||||
We can also run them separately, here is a separate example execution::
|
||||
You can also run examples separately as shown in the following example execution::
|
||||
|
||||
./bin/example_gemm_xdl_fp16 1 1 1
|
||||
|
||||
The arguments ``1 1 1`` mean that we want to run this example in the mode: verify results with CPU,
|
||||
initialize matrices with integers and benchmark the kernel execution. You can play around with
|
||||
these parameters and see how output and execution results change.
|
||||
The arguments ``1 1 1`` mean that you want to run this example in the mode: verify results with CPU, initialize matrices with integers, and benchmark the kernel execution. You can play around with these parameters and see how output and execution results change.
|
||||
|
||||
If everything goes well and you have a device based on `gfx908` or `gfx90a` architecture you should see
|
||||
something like::
|
||||
If you have a device based on `gfx908` or `gfx90a` architecture, and if the example runs as expected, you should see something like::
|
||||
|
||||
a_m_k: dim 2, lengths {3840, 4096}, strides {4096, 1}
|
||||
b_k_n: dim 2, lengths {4096, 4096}, strides {1, 4096}
|
||||
b_k_n: dim 2, lengths {4096, 4096}, strides {4096, 1}
|
||||
c_m_n: dim 2, lengths {3840, 4096}, strides {4096, 1}
|
||||
launch_and_time_kernel: grid_dim {480, 1, 1}, block_dim {256, 1, 1}
|
||||
Warm up 1 time
|
||||
Start running 10 times...
|
||||
Perf: 1.10017 ms, 117.117 TFlops, 87.6854 GB/s, DeviceGemmXdl<256, 256, 128, 4, 8, 32, 32, 4, 2> NumPrefetch: 1, LoopScheduler: Default, PipelineVersion: v1
|
||||
Perf: 1.08153 ms, 119.136 TFlops, 89.1972 GB/s, DeviceGemm_Xdl_CShuffle<Default, 256, 256, 128, 32, 8, 2, 32, 32, 4, 2, 8, 4, 1, 2> LoopScheduler: Interwave, PipelineVersion: v1
|
||||
|
||||
Meanwhile, running it on a `gfx1030` device should result in::
|
||||
However, running it on a `gfx1030` device should result in the following::
|
||||
|
||||
a_m_k: dim 2, lengths {3840, 4096}, strides {4096, 1}
|
||||
b_k_n: dim 2, lengths {4096, 4096}, strides {1, 4096}
|
||||
c_m_n: dim 2, lengths {3840, 4096}, strides {4096, 1}
|
||||
DeviceGemmXdl<256, 256, 128, 4, 8, 32, 32, 4, 2> NumPrefetch: 1, LoopScheduler: Default, PipelineVersion: v1 does not support this problem
|
||||
|
||||
But don't panic, some of the operators are supported on `gfx1030` architecture, so you can run a
|
||||
Don't worry, some operators are supported on `gfx1030` architecture, so you can run a
|
||||
separate example like::
|
||||
|
||||
./bin/example_gemm_dl_fp16 1 1 1
|
||||
|
||||
and it should result in something nice similar to::
|
||||
and it should return something like::
|
||||
|
||||
a_m_k: dim 2, lengths {3840, 4096}, strides {1, 4096}
|
||||
b_k_n: dim 2, lengths {4096, 4096}, strides {4096, 1}
|
||||
@@ -182,12 +144,9 @@ and it should result in something nice similar to::
|
||||
|
||||
.. note::
|
||||
|
||||
There was a new CMake flag ``DL_KERNELS`` added in the latest versions of CK. If you use one of
|
||||
the newest versions of the library and do not see the above results when running
|
||||
``example_gemm_dl_fp16``, it might be necessary to add ``-D DL_KERNELS=ON`` to your CMake command
|
||||
in order to build the operators supported on the `gfx1030` architecture.
|
||||
A new CMake flag ``DL_KERNELS`` has been added to the latest versions of CK. If you do not see the above results when running ``example_gemm_dl_fp16``, you might need to add ``-D DL_KERNELS=ON`` to your CMake command to build the operators supported on the `gfx1030` architecture.
|
||||
|
||||
We can also run a separate test::
|
||||
You can also run a separate test::
|
||||
|
||||
ctest -R test_gemm_fp16
|
||||
|
||||
@@ -198,13 +157,9 @@ If everything goes well you should see something like::
|
||||
|
||||
100% tests passed, 0 tests failed out of 1
|
||||
|
||||
-----------
|
||||
Summary
|
||||
-----------
|
||||
=======
|
||||
|
||||
In this tutorial we took the first look at the Composable Kernel library, built it on your system
|
||||
and ran some examples and tests. Stay tuned, in the next tutorial we will run kernels with different
|
||||
configs to find out the best one for your hardware and task.
|
||||
In this tutorial you took the first look at the Composable Kernel library, built it on your system and ran some examples and tests. In the next tutorial you will run kernels with different configurations to find out the best one for your hardware and task.
|
||||
|
||||
P.S.: Don't forget to switch off the cloud instance if you have launched one, you can find better
|
||||
ways to spend your money for sure!
|
||||
P.S.: If you are running on a cloud instance, don't forget to switch it off when you are done.
|
||||
|
||||
41
docs/what-is-ck.rst
Normal file
41
docs/what-is-ck.rst
Normal file
@@ -0,0 +1,41 @@
|
||||
.. meta::
|
||||
:description: Composable Kernel documentation and API reference library
|
||||
:keywords: composable kernel, CK, ROCm, API, documentation
|
||||
|
||||
.. _what-is-ck:
|
||||
|
||||
********************************************************************
|
||||
What is the Composable Kernel library
|
||||
********************************************************************
|
||||
|
||||
|
||||
Methodology
|
||||
===========
|
||||
|
||||
The Composable Kernel (CK) library provides a programming model for writing performance critical kernels for machine learning workloads across multiple architectures including GPUs and CPUs, through general purpose kernel languages like HIP C++.
|
||||
|
||||
CK utilizes two concepts to achieve performance portability and code maintainability:
|
||||
|
||||
* A tile-based programming model
|
||||
* Algorithm complexity reduction for complex ML operators using an innovative technique called
|
||||
"Tensor Coordinate Transformation".
|
||||
|
||||
.. image:: data/ck_component.png
|
||||
:alt: CK Components
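
To make the idea of a tensor coordinate transformation concrete, the following is a minimal, self-contained sketch in plain C++ (not the CK API): a "merge" transform that exposes a strided 2-D view as a 1-D tensor and lowers each 1-D coordinate back to a memory offset. The struct and the concrete sizes are illustrative only.

.. code-block:: cpp

   // Plain C++ illustration (not the CK API) of a "merge" coordinate transform:
   // a 2-D view of shape (H, W) with strides (sH, sW) is exposed as a 1-D tensor
   // of length H * W, and every 1-D coordinate is lowered to a memory offset.
   #include <cstddef>
   #include <iostream>

   struct Merge2D
   {
       std::size_t H, W;   // logical 2-D lengths
       std::size_t sH, sW; // strides of the underlying buffer

       std::size_t operator()(std::size_t idx) const
       {
           const std::size_t h = idx / W;
           const std::size_t w = idx % W;
           return h * sH + w * sW;
       }
   };

   int main()
   {
       // a 4 x 8 row-major view over a padded buffer with row stride 10
       const Merge2D merge{4, 8, 10, 1};
       for(std::size_t i = 0; i < 4 * 8; ++i)
           std::cout << merge(i) << (i % 8 == 7 ? '\n' : ' ');
   }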
|
||||
|
||||
|
||||
Code Structure
|
||||
==============
|
||||
|
||||
The CK library is structured into 4 layers (a usage sketch of the kernel and invoker pattern follows the diagram below):
|
||||
|
||||
* "Templated Tile Operators" layer
|
||||
* "Templated Kernel and Invoker" layer
|
||||
* "Instantiated Kernel and Invoker" layer
|
||||
* "Client API" layer
|
||||
|
||||
It also includes a simple wrapper component used to perform tensor transform operations more easily and with fewer lines of code.
|
||||
|
||||
.. image:: data/ck_layer.png
|
||||
:alt: CK Layers
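
As a rough orientation to the "Instantiated Kernel and Invoker" layer, the sketch below mimics, in plain C++ with mock types, the instance/argument/invoker pattern used by the CK examples (``MakeArgument``, ``IsSupportedArgument``, ``MakeInvoker``, ``Run``). It is a schematic mock, not the CK API; real device operations take operation-specific argument lists.

.. code-block:: cpp

   // Schematic mock (plain C++, not the CK API) of the instance / argument /
   // invoker pattern used by the CK examples: a device operation is instantiated,
   // an argument object captures the problem description, and an invoker runs it.
   #include <iostream>

   struct Argument { int M; int N; const float* in; float* out; };

   struct Invoker
   {
       float Run(const Argument& arg) const
       {
           // a real invoker launches a GPU kernel and returns the elapsed time
           std::cout << "running a " << arg.M << " x " << arg.N << " problem\n";
           return 0.0f;
       }
   };

   struct DeviceOp
   {
       bool IsSupportedArgument(const Argument&) const { return true; }
       Argument MakeArgument(int M, int N, const float* in, float* out) const
       {
           return {M, N, in, out};
       }
       Invoker MakeInvoker() const { return {}; }
   };

   int main()
   {
       float in[16]  = {};
       float out[16] = {};

       DeviceOp op;
       const auto arg = op.MakeArgument(4, 4, in, out);
       if(!op.IsSupportedArgument(arg))
           return 1;
       op.MakeInvoker().Run(arg);
       return 0;
   }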
|
||||
|
||||
92
docs/wrapper.rst
Normal file
92
docs/wrapper.rst
Normal file
@@ -0,0 +1,92 @@
|
||||
.. meta::
|
||||
:description: Composable Kernel documentation and API reference library
|
||||
:keywords: composable kernel, CK, ROCm, API, documentation
|
||||
|
||||
.. _wrapper:
|
||||
|
||||
********************************************************************
|
||||
Wrapper
|
||||
********************************************************************
|
||||
|
||||
-------------------------------------
|
||||
Description
|
||||
-------------------------------------
|
||||
|
||||
.. note::
|
||||
|
||||
The wrapper is under development and its functionality is limited.
|
||||
|
||||
|
||||
The CK library provides a lightweight wrapper for more complex operations implemented in
|
||||
the library.
|
||||
|
||||
Example:
|
||||
|
||||
.. code-block:: cpp
|
||||
|
||||
const auto shape_4x2x4 = ck::make_tuple(4, ck::make_tuple(2, 4));
|
||||
const auto strides_s2x1x8 = ck::make_tuple(2, ck::make_tuple(1, 8));
|
||||
const auto layout = ck::wrapper::make_layout(shape_4x2x4, strides_s2x1x8);
|
||||
|
||||
std::array<ck::index_t, 32> data;
|
||||
auto tensor = ck::wrapper::make_tensor<ck::wrapper::MemoryTypeEnum::Generic>(&data[0], layout);
|
||||
|
||||
for(ck::index_t w = 0; w < size(tensor); w++) {
|
||||
tensor(w) = w;
|
||||
}
|
||||
|
||||
// slice() == slice(0, -1) (whole dimension)
|
||||
auto tensor_slice = tensor(ck::wrapper::slice(1, 3), ck::make_tuple(ck::wrapper::slice(), ck::wrapper::slice()));
|
||||
std::cout << "dims:2,(2,4) strides:2,(1,8)" << std::endl;
|
||||
for(ck::index_t h = 0; h < ck::wrapper::size<0>(tensor_slice); h++)
|
||||
{
|
||||
for(ck::index_t w = 0; w < ck::wrapper::size<1>(tensor_slice); w++)
|
||||
{
|
||||
std::cout << tensor_slice(h, w) << " ";
|
||||
}
|
||||
std::cout << std::endl;
|
||||
}
|
||||
|
||||
Output::
|
||||
|
||||
dims:2,(2,4) strides:2,(1,8)
|
||||
1 5 9 13 17 21 25 29
|
||||
2 6 10 14 18 22 26 30
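
The nested shape/stride pair above is easiest to read as a map from a logical index to a memory offset: with shape ``(4, (2, 4))`` and strides ``(2, (1, 8))``, the logical index ``(i, (j, k))`` lands at offset ``i*2 + j*1 + k*8``. The following plain C++ sketch (not the CK API) simply prints that mapping:

.. code-block:: cpp

   // Plain C++ sketch (not the CK API) of the offset computation implied by
   // shape (4, (2, 4)) with strides (2, (1, 8)): logical index (i, (j, k))
   // lands at offset i*2 + j*1 + k*8 in the underlying 32-element buffer.
   #include <cstddef>
   #include <iostream>

   int main()
   {
       const std::size_t stride_i = 2, stride_j = 1, stride_k = 8;
       for(std::size_t k = 0; k < 4; ++k)
           for(std::size_t j = 0; j < 2; ++j)
               for(std::size_t i = 0; i < 4; ++i)
               {
                   const std::size_t offset = i * stride_i + j * stride_j + k * stride_k;
                   std::cout << "(" << i << ",(" << j << "," << k << ")) -> " << offset << "\n";
               }
   }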
|
||||
|
||||
|
||||
Advanced examples:
|
||||
|
||||
* `Image to column <https://github.com/ROCm/composable_kernel/blob/develop/client_example/25_wrapper/wrapper_img2col.cpp>`_
|
||||
|
||||
-------------------------------------
|
||||
Layout
|
||||
-------------------------------------
|
||||
|
||||
.. doxygenstruct:: ck::wrapper::Layout
|
||||
|
||||
-------------------------------------
|
||||
Layout helpers
|
||||
-------------------------------------
|
||||
|
||||
.. doxygenfile:: layout_utils.hpp
|
||||
|
||||
-------------------------------------
|
||||
Tensor
|
||||
-------------------------------------
|
||||
|
||||
.. doxygenstruct:: ck::wrapper::Tensor
|
||||
|
||||
-------------------------------------
|
||||
Tensor helpers
|
||||
-------------------------------------
|
||||
|
||||
.. doxygenfile:: tensor_utils.hpp
|
||||
|
||||
.. doxygenfile:: tensor_partition.hpp
|
||||
|
||||
-------------------------------------
|
||||
Operations
|
||||
-------------------------------------
|
||||
|
||||
.. doxygenfile:: copy.hpp
|
||||
.. doxygenfile:: gemm.hpp
|
||||
@@ -19,6 +19,9 @@ add_custom_target(example_gemm_xdl)
|
||||
add_example_executable(example_gemm_xdl_fp16 gemm_xdl_fp16.cpp)
|
||||
add_example_dependencies(example_gemm_xdl example_gemm_xdl_fp16)
|
||||
|
||||
add_example_executable(example_gemm_xdl_fp16_v2 gemm_xdl_fp16_v2.cpp)
|
||||
add_example_dependencies(example_gemm_xdl example_gemm_xdl_fp16_v2)
|
||||
|
||||
add_example_executable(example_gemm_xdl_wavelet_fp16 gemm_xdl_wavelet_fp16.cpp)
|
||||
add_example_dependencies(example_gemm_xdl example_gemm_xdl_wavelet_fp16)
|
||||
|
||||
|
||||
@@ -1,9 +1,7 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
#ifndef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
|
||||
#error Should compile this file with ck::int4_t support
|
||||
#endif
|
||||
#ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
|
||||
|
||||
#include "common.hpp"
|
||||
|
||||
@@ -43,3 +41,4 @@ using ReferenceGemmInstance = ck::tensor_operation::host::
|
||||
#include "run_gemm_example.inc"
|
||||
|
||||
int main(int argc, char* argv[]) { return !run_gemm_example(argc, argv); }
|
||||
#endif
|
||||
51
example/01_gemm/gemm_xdl_fp16_v2.cpp
Normal file
51
example/01_gemm/gemm_xdl_fp16_v2.cpp
Normal file
@@ -0,0 +1,51 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
#include "common.hpp"
|
||||
|
||||
#include "ck/tensor_operation/gpu/device/impl/device_gemm_xdl_cshuffle_v2.hpp"
|
||||
|
||||
using ADataType = ck::half_t;
|
||||
using BDataType = ck::half_t;
|
||||
using AccDataType = float;
|
||||
using CShuffleDataType = ck::half_t;
|
||||
using CDataType = ck::half_t;
|
||||
|
||||
using F16 = ck::half_t;
|
||||
using F32 = float;
|
||||
|
||||
using ALayout = Row;
|
||||
using BLayout = Row;
|
||||
using CLayout = Row;
|
||||
|
||||
using AElementOp = PassThrough;
|
||||
using BElementOp = PassThrough;
|
||||
using CElementOp = PassThrough;
|
||||
|
||||
static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default;
|
||||
|
||||
// clang-format off
|
||||
using DeviceGemmInstance =
|
||||
ck::tensor_operation::device::DeviceGemm_Xdl_CShuffleV2<
|
||||
ALayout, BLayout, CLayout,
|
||||
F16, F16, F16, F32, F16,
|
||||
PassThrough, PassThrough, PassThrough, GemmDefault,
|
||||
2, 256,
|
||||
256, 256,
|
||||
32, 8, 4,
|
||||
32, 32,
|
||||
4, 4,
|
||||
S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>,
|
||||
2, 8, 8, 0,
|
||||
S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>,
|
||||
1, 8, 4, 0,
|
||||
1, 1, S<1, 32, 1, 8>, 8,
|
||||
ck::LoopScheduler::Default, ck::PipelineVersion::v1>;
|
||||
// clang-format on
|
||||
|
||||
using ReferenceGemmInstance = ck::tensor_operation::host::
|
||||
ReferenceGemm<ADataType, BDataType, CDataType, AccDataType, AElementOp, BElementOp, CElementOp>;
|
||||
|
||||
#include "run_gemm_example.inc"
|
||||
|
||||
int main(int argc, char* argv[]) { return !run_gemm_example(argc, argv); }
|
||||
@@ -1,9 +1,7 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
#ifndef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
|
||||
#error Should compile this file with ck::int4_t support
|
||||
#endif
|
||||
#ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
|
||||
|
||||
#include "common.hpp"
|
||||
|
||||
@@ -44,3 +42,4 @@ using ReferenceGemmInstance = ck::tensor_operation::host::
|
||||
#include "run_gemm_example.inc"
|
||||
|
||||
int main(int argc, char* argv[]) { return !run_gemm_example(argc, argv); }
|
||||
#endif
|
||||
@@ -1,9 +1,7 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
#ifndef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
|
||||
#error Should compile this file with ck::int4_t support
|
||||
#endif
|
||||
#ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
|
||||
|
||||
#include "common.hpp"
|
||||
|
||||
@@ -58,3 +56,4 @@ using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataTyp
|
||||
#include "run_gemm_add_add_fastgelu_example.inc"
|
||||
|
||||
int main(int argc, char* argv[]) { return !run_gemm_add_add_fastgelu_example(argc, argv); }
|
||||
#endif
|
||||
|
||||
@@ -1,9 +1,7 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
#ifndef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
|
||||
#error Should compile this file with ck::int4_t support
|
||||
#endif
|
||||
#ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
|
||||
|
||||
#define BUILD_INT4_EXAMPLE
|
||||
|
||||
@@ -24,3 +22,4 @@ using RsDataType = ck::Tuple<R0DataType>;
|
||||
#include "run_convnd_fwd_max_example.inc"
|
||||
|
||||
int main(int argc, char* argv[]) { return !run_convnd_fwd_max_example(argc, argv); }
|
||||
#endif
|
||||
|
||||
@@ -299,8 +299,8 @@ int main(int argc, char* argv[])
|
||||
for(int i = 0; i < problem_size.group_count; i++)
|
||||
{
|
||||
problem_size.Ms.push_back(256 + 256 * i);
|
||||
problem_size.Ns.push_back(128 + 128 * i);
|
||||
problem_size.Ks.push_back(128 + 64 * i);
|
||||
problem_size.Ns.push_back(256);
|
||||
problem_size.Ks.push_back(128);
|
||||
|
||||
problem_size.stride_As.push_back(problem_size.Ks[i]);
|
||||
problem_size.stride_Bs.push_back(problem_size.Ks[i]);
|
||||
|
||||
@@ -300,8 +300,8 @@ int main(int argc, char* argv[])
|
||||
for(int i = 0; i < problem_size.group_count; i++)
|
||||
{
|
||||
problem_size.Ms.push_back(256 + 256 * i);
|
||||
problem_size.Ns.push_back(128 + 128 * i);
|
||||
problem_size.Ks.push_back(128 + 64 * i);
|
||||
problem_size.Ns.push_back(256);
|
||||
problem_size.Ks.push_back(128);
|
||||
|
||||
problem_size.stride_As.push_back(problem_size.Ks[i]);
|
||||
problem_size.stride_Bs.push_back(problem_size.Ks[i]);
|
||||
|
||||
@@ -272,15 +272,14 @@ int main(int argc, char* argv[])
|
||||
{
|
||||
for(int m = 0; m < M; ++m)
|
||||
{
|
||||
auto reduce0_acc = reduce0_op.GetIdentityValue<ReduceAccDataType>();
|
||||
auto reduce1_acc = reduce1_op.GetIdentityValue<ReduceAccDataType>();
|
||||
|
||||
auto reduce0_acc = reduce0_op.GetIdentityValue<ReduceAccDataType>();
|
||||
auto reduce1_acc = reduce1_op.GetIdentityValue<ReduceAccDataType>();
|
||||
ReduceAccDataType d0_val = 0;
|
||||
ReduceAccDataType d1_val = 0;
|
||||
for(int n = 0; n < N; ++n)
|
||||
{
|
||||
auto c_val =
|
||||
ck::type_convert<ReduceAccDataType>(c_g_m_n_host_result(batch, m, n));
|
||||
ReduceAccDataType d0_val;
|
||||
ReduceAccDataType d1_val;
|
||||
|
||||
UnaryIdenticElementOp{}(d0_val, c_val);
|
||||
UnarySquareElementOp{}(d1_val, c_val);
|
||||
|
||||
@@ -1,9 +1,7 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
#ifndef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
|
||||
#error Should compile this file with ck::int4_t support
|
||||
#endif
|
||||
#ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
|
||||
|
||||
#include "common.hpp"
|
||||
|
||||
@@ -29,3 +27,4 @@ using OutElementOp = ck::tensor_operation::element_wise::AddReluAdd;
|
||||
#include "run_grouped_conv_fwd_bias_relu_add_example.inc"
|
||||
|
||||
int main(int argc, char* argv[]) { return !run_grouped_conv_fwd_bias_relu_add_example(argc, argv); }
|
||||
#endif
|
||||
|
||||
@@ -9,9 +9,7 @@ Gemm + Gemm fused operation. Computes C_m_o = A_m_k * B0_k_n * B1_n_o
|
||||
Gemm1
|
||||
*/
|
||||
|
||||
#ifndef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
|
||||
#error Should compile this file with ck::int4_t support
|
||||
#endif
|
||||
#ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
|
||||
|
||||
#include <iostream>
|
||||
#include <numeric>
|
||||
@@ -144,3 +142,4 @@ static_assert(sizeof(ck::int4_t) == sizeof(int8_t));
|
||||
#endif
|
||||
|
||||
int main(int argc, char* argv[]) { return run_batched_gemm_gemm_example(argc, argv) ? 0 : 1; }
|
||||
#endif
|
||||
|
||||
@@ -10,6 +10,9 @@ foreach(gpu IN LISTS GPU_TARGETS)
|
||||
add_example_executable(example_splitK_gemm_xdl_fp16 splitK_gemm_xdl_fp16.cpp)
|
||||
add_example_dependencies(example_splitK_gemm_xdl example_splitK_gemm_xdl_fp16)
|
||||
|
||||
add_example_executable(example_splitK_gemm_xdl_lds_direct_load_fp16 splitK_gemm_xdl_lds_direct_load_fp16.cpp)
|
||||
add_example_dependencies(example_splitK_gemm_xdl example_splitK_gemm_xdl_lds_direct_load_fp16)
|
||||
|
||||
add_example_executable(example_splitK_gemm_xdl_bf16 splitK_gemm_xdl_bf16.cpp)
|
||||
add_example_dependencies(example_splitK_gemm_xdl example_splitK_gemm_xdl_bf16)
|
||||
|
||||
|
||||
@@ -157,7 +157,7 @@ bool run_splitK_gemm(const ProblemSize& problem_size, const ExecutionConfig& con
|
||||
|
||||
if(config.time_kernel)
|
||||
{
|
||||
float ave_time = invoker.Run(argument, StreamConfig{nullptr, config.time_kernel});
|
||||
float ave_time = invoker.Run(argument, StreamConfig{nullptr, config.time_kernel, 1});
|
||||
|
||||
std::size_t flop = std::size_t(2) * M * N * K;
|
||||
std::size_t num_btype =
|
||||
|
||||
@@ -42,7 +42,7 @@ using AElementOp = PassThrough;
|
||||
using BElementOp = PassThrough;
|
||||
using CElementOp = PassThrough;
|
||||
|
||||
static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default;
|
||||
static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::KPadding;
|
||||
|
||||
using DeviceGemmInstance = ck::tensor_operation::device::DeviceGemmXdlSplitKCShuffle
|
||||
// clang-format off
|
||||
|
||||
@@ -0,0 +1,82 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
#include <iostream>
|
||||
#include <numeric>
|
||||
#include <initializer_list>
|
||||
#include <cstdlib>
|
||||
|
||||
#include "ck/ck.hpp"
|
||||
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
|
||||
|
||||
#define DIRECT_LOAD 1
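// DIRECT_LOAD selects the device instance used below: 1 builds the LDS direct-load
// split-K GEMM variant, 0 builds the regular DeviceGemmXdlSplitKCShuffle variant.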
|
||||
|
||||
#if DIRECT_LOAD
|
||||
#include "ck/tensor_operation/gpu/device/impl/device_gemm_xdl_splitk_c_shuffle_lds_direct_load.hpp"
|
||||
#else
|
||||
#include "ck/tensor_operation/gpu/device/impl/device_gemm_xdl_splitk_c_shuffle.hpp"
|
||||
#endif
|
||||
|
||||
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
||||
|
||||
#include "ck/library/utility/check_err.hpp"
|
||||
#include "ck/library/utility/device_memory.hpp"
|
||||
#include "ck/library/utility/host_tensor.hpp"
|
||||
#include "ck/library/utility/host_tensor_generator.hpp"
|
||||
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
|
||||
#include "ck/library/utility/literals.hpp"
|
||||
|
||||
template <ck::index_t... Is>
|
||||
using S = ck::Sequence<Is...>;
|
||||
|
||||
using F16 = ck::half_t;
|
||||
using F32 = float;
|
||||
|
||||
using Row = ck::tensor_layout::gemm::RowMajor;
|
||||
using Col = ck::tensor_layout::gemm::ColumnMajor;
|
||||
|
||||
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
|
||||
|
||||
using ADataType = F16;
|
||||
using BDataType = F16;
|
||||
using AccDataType = F32;
|
||||
using CDataType = F16;
|
||||
|
||||
using ALayout = Row;
|
||||
using BLayout = Col;
|
||||
using CLayout = Row;
|
||||
|
||||
using AElementOp = PassThrough;
|
||||
using BElementOp = PassThrough;
|
||||
using CElementOp = PassThrough;
|
||||
|
||||
#if DIRECT_LOAD
|
||||
|
||||
static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::MNKPadding;
|
||||
|
||||
using DeviceGemmInstance = ck::tensor_operation::device::DeviceGemmXdlSplitKCShuffle_LdsDirectLoad
|
||||
// clang-format off
|
||||
//######| AData| BData| CData| AccData| ALayout| BLayout| CLayout| A| B| C| GEMM| NumGemmK| Block| MPer| NPer| KPer| K1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer|
|
||||
//######| Type| Type| Type| Type| | | | Elementwise| Elementwise| Elementwise| Spacialization| Prefetch| Size| Block| Block| Block| | XDL| XDL| Per| Per| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| AddExtraM| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MXdlPerWave_MWaveMPerXdl| ScalarPerVector|
|
||||
//######| | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | Wave| Wave| Lengths_KBatch_K0_M_K1| | | PerVector| | Lengths_KBatch_K0_N_K1| | | PerVector| | PerShuffle| PerShuffle| _NBlock_NXdlPerWave_NWaveNPerXdl| _NWaveNPerXdl|
|
||||
//######| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
|
||||
< ADataType, BDataType, CDataType, AccDataType, ALayout, BLayout, CLayout, AElementOp, BElementOp, CElementOp, GemmDefault, 2, 128, 32, 16, 4, 16, 16, 16, 1, 1, S<1, 2, 8, 8>, S<0, 2, 1, 3>, 3, 2, true, S<1, 2, 8, 8>, S<0, 2, 1, 3>, 3, 2, true, 1, 1, S<1, 32, 1, 4>, 4>;
|
||||
// clang-format on
|
||||
|
||||
#else
|
||||
|
||||
static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default;
|
||||
using DeviceGemmInstance = ck::tensor_operation::device::DeviceGemmXdlSplitKCShuffle
|
||||
// clang-format off
|
||||
//######| AData| BData| CData| AccData| ALayout| BLayout| CLayout| A| B| C| GEMM| Block| MPer| NPer| KPer| K1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer|
|
||||
//######| Type| Type| Type| Type| | | | Elementwise| Elementwise| Elementwise| Spacialization| Size| Block| Block| Block| | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MXdlPerWave_MWaveMPerXdl| ScalarPerVector|
|
||||
//######| | | | | | | | Operation| Operation| Operation| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NXdlPerWave_NWaveNPerXdl| _NWaveNPerXdl|
|
||||
//######| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
|
||||
< ADataType, BDataType, CDataType, AccDataType, ALayout, BLayout, CLayout, AElementOp, BElementOp, CElementOp, GemmDefault, 256, 256, 128, 4, 8, 32, 32, 4, 2, S<1, 4, 64, 1>, S<0, 2, 1, 3>, S<0, 2, 1, 3>, 3, 8, 8, true, S<1, 4, 64, 1>, S<0, 1, 3, 2>, S<0, 1, 3, 2>, 3, 8, 8, true, 1, 1, S<1, 32, 1, 8>, 8>;
|
||||
// clang-format on
|
||||
|
||||
#endif
|
||||
|
||||
#include "run_splitK_gemm_example.inc"
|
||||
|
||||
int main(int argc, char* argv[]) { return !run_splitK_gemm_example(argc, argv); }
|
||||
@@ -1,9 +1,7 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
#ifndef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
|
||||
#error Should compile this file with ck::int4_t support
|
||||
#endif
|
||||
#ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
|
||||
|
||||
#include <cstdlib>
|
||||
#include <iostream>
|
||||
@@ -120,3 +118,4 @@ static_assert(sizeof(ck::int4_t) == sizeof(int8_t));
|
||||
#endif
|
||||
|
||||
int main(int argc, char* argv[]) { return run_grouped_conv_conv_fwd_example(argc, argv) ? 0 : 1; }
|
||||
#endif
|
||||
|
||||
@@ -5,4 +5,6 @@ add_example_executable(example_elementwise_permute_4D_fp16_row elementwise_permu
|
||||
add_example_executable(example_elementwise_permute_4D_fp32_col elementwise_permute_4D_fp32_col.cpp)
|
||||
add_example_executable(example_elementwise_permute_4D_fp16_col elementwise_permute_4D_fp16_col.cpp)
|
||||
add_example_executable(example_elementwise_permute elementwise_permute.cpp)
|
||||
add_example_executable(example_elementwise_permute_3d elementwise_permute_3d.cpp)
|
||||
if((NOT GPU_TARGETS MATCHES "gfx940") AND (NOT GPU_TARGETS MATCHES "gfx941") AND (NOT GPU_TARGETS MATCHES "gfx942"))
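# the 3d permute example is only built when none of gfx940/gfx941/gfx942 are targeted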
|
||||
add_example_executable(example_elementwise_permute_3d elementwise_permute_3d.cpp)
|
||||
endif()
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
#include <iostream>
|
||||
#include <cstdlib>
|
||||
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
#include <iostream>
|
||||
#include <cstdlib>
|
||||
|
||||
@@ -14,8 +17,8 @@
|
||||
using F16 = ck::half_t;
|
||||
using F32 = float;
|
||||
|
||||
using ADataType = F16;
|
||||
using BDataType = F16;
|
||||
using ADataType = F32;
|
||||
using BDataType = F32;
|
||||
|
||||
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
|
||||
using DeviceElementwisePermuteInstance =
|
||||
@@ -25,10 +28,10 @@ using DeviceElementwisePermuteInstance =
|
||||
2, // NumDim_m, {N, C}
|
||||
2, // NumDim_n, {H, W}
|
||||
1, // NumDim_k, {D}
|
||||
8, // MPerThread
|
||||
8, // NPerThread
|
||||
8, // KPerThread
|
||||
ck::Sequence<8>, // InScalarPerVectorSeq
|
||||
4, // MPerThread
|
||||
4, // NPerThread
|
||||
4, // KPerThread
|
||||
ck::Sequence<4>, // InScalarPerVectorSeq
|
||||
ck::Sequence<4>>; // OutScalarPerVectorSeq
|
||||
|
||||
template <typename HostTensorA, typename HostTensorB, typename Functor>
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
#include <iostream>
|
||||
#include <cstdlib>
|
||||
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
#include <iostream>
|
||||
#include <cstdlib>
|
||||
|
||||
|
||||
@@ -1,5 +1,9 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
#include <iostream>
|
||||
#include <cstdlib>
|
||||
#include <random>
|
||||
|
||||
#include "ck/ck.hpp"
|
||||
#include "ck/tensor_operation/gpu/element/binary_element_wise_operation.hpp"
|
||||
@@ -48,10 +52,8 @@ void host_elementwise4D(HostTensorB& B_nhwc,
|
||||
for(std::size_t n = 0; n < N; ++n)
|
||||
{
|
||||
ADataType tmp_val;
|
||||
// auto a_val = A_nchw(n, c, h, w);
|
||||
auto a_val = A_nchw.mData[(n) + (c * N) + (h * C * N) + (w * H * C * N)];
|
||||
functor_b(tmp_val, a_val);
|
||||
// functor_a(B_nhwc(n, h, w, c), scale * tmp_val);
|
||||
functor_a(B_nhwc.mData[(n) + (c * W * H * N) + (h * N) + (w * H * N)],
|
||||
scale * tmp_val);
|
||||
}
|
||||
@@ -62,12 +64,14 @@ int main()
|
||||
bool do_verification = true;
|
||||
bool time_kernel = true;
|
||||
|
||||
std::vector<std::size_t> nchw = {4, 2, 1, 8};
|
||||
std::vector<std::size_t> nhwc = {4, 1, 8, 2};
|
||||
std::vector<std::size_t> nchw = {16, 8, 32, 64};
|
||||
std::vector<std::size_t> nhwc = {16, 32, 64, 8};
|
||||
Tensor<ADataType> a(nchw);
|
||||
Tensor<BDataType> b(nhwc);
|
||||
float scale = 1.f;
|
||||
auto i = 0;
|
||||
std::mt19937 gen(11939);
|
||||
std::uniform_int_distribution<int> dis(0, 1);
|
||||
for(std::size_t w = 0; w < a.mDesc.GetLengths()[3]; ++w)
|
||||
for(std::size_t h = 0; h < a.mDesc.GetLengths()[2]; ++h)
|
||||
for(std::size_t c = 0; c < a.mDesc.GetLengths()[1]; ++c)
|
||||
@@ -75,7 +79,7 @@ int main()
|
||||
{
|
||||
a.mData[(n * nchw[1] * nchw[2] * nchw[3]) + (c * nchw[2] * nchw[3]) +
|
||||
(h * nchw[3]) + w] = i;
|
||||
i++;
|
||||
i = dis(gen);
|
||||
}
|
||||
|
||||
DeviceMem a_device_buf(sizeof(ADataType) * a.mDesc.GetElementSpaceSize());
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
#include <iostream>
|
||||
#include <cstdlib>
|
||||
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
#include <iostream>
|
||||
#include <cstdlib>
|
||||
|
||||
@@ -67,6 +70,8 @@ int main()
|
||||
|
||||
float scale = 1.f;
|
||||
auto i = 0;
|
||||
std::mt19937 gen(11939);
|
||||
std::uniform_int_distribution<int> dis(0, 1);
|
||||
for(std::size_t w = 0; w < a.mDesc.GetLengths()[3]; ++w)
|
||||
for(std::size_t h = 0; h < a.mDesc.GetLengths()[2]; ++h)
|
||||
for(std::size_t c = 0; c < a.mDesc.GetLengths()[1]; ++c)
|
||||
@@ -74,7 +79,7 @@ int main()
|
||||
{
|
||||
a.mData[(n * nchw[1] * nchw[2] * nchw[3]) + (c * nchw[2] * nchw[3]) +
|
||||
(h * nchw[3]) + w] = i;
|
||||
i++;
|
||||
i = dis(gen);
|
||||
}
|
||||
|
||||
DeviceMem a_device_buf(sizeof(ADataType) * a.mDesc.GetElementSpaceSize());
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
#include <iostream>
|
||||
#include <cstdlib>
|
||||
|
||||
|
||||
@@ -32,6 +32,8 @@ std::vector<ck::index_t> f_tensor_strides_ncdhw(ck::index_t N_,
|
||||
return {C_ * D * H * W, D * H * W, H * W, W, 1_uz};
|
||||
else if constexpr(ck::is_same<decltype(layout), ck::tensor_layout::convolution::NDHWC>::value)
|
||||
return {D * C_ * H * W, 1_uz, C_ * H * W, W * C_, C_};
|
||||
throw std::runtime_error("Pool3d_fwd: problem with layout. ");
|
||||
return {0, 0, 0, 0, 0};
|
||||
};
|
||||
|
||||
template <typename TensorLayout>
|
||||
@@ -53,6 +55,8 @@ HostTensorDescriptor f_host_tensor_descriptor(std::size_t N_,
|
||||
return HostTensorDescriptor({N_, C_, D, H, W},
|
||||
{D * C_ * H * W, 1_uz, C_ * H * W, W * C_, C_});
|
||||
}
|
||||
throw std::runtime_error("Pool3d_fwd: problem with layout. ");
|
||||
return HostTensorDescriptor({0, 0, 0, 0, 0}, {0, 0, 0, 0, 0});
|
||||
};
|
||||
|
||||
template <typename DevicePoolFwdInstance,
|
||||
|
||||
@@ -26,6 +26,8 @@ std::vector<ck::index_t> f_tensor_strides_ncdhw(ck::index_t N_,
|
||||
return {C_ * D * H * W, D * H * W, H * W, W, 1_uz};
|
||||
else if constexpr(ck::is_same<decltype(layout), ck::tensor_layout::convolution::NDHWC>::value)
|
||||
return {D * C_ * H * W, 1_uz, C_ * H * W, W * C_, C_};
|
||||
throw std::runtime_error("Avgpool3d_bwd: problem with layout. ");
|
||||
return {0, 0, 0, 0, 0};
|
||||
};
|
||||
|
||||
template <typename TensorLayout>
|
||||
@@ -47,6 +49,8 @@ HostTensorDescriptor f_host_tensor_descriptor(std::size_t N_,
|
||||
return HostTensorDescriptor({N_, C_, D, H, W},
|
||||
{D * C_ * H * W, 1_uz, C_ * H * W, W * C_, C_});
|
||||
}
|
||||
throw std::runtime_error("Avgpool3d_bwd: problem with layout. ");
|
||||
return HostTensorDescriptor({0, 0, 0, 0, 0}, {0, 0, 0, 0, 0});
|
||||
};
|
||||
|
||||
template <typename DevicePoolBwdInstance,
|
||||
|
||||
1
example/53_layernorm2d_bwd/CMakeLists.txt
Normal file
1
example/53_layernorm2d_bwd/CMakeLists.txt
Normal file
@@ -0,0 +1 @@
|
||||
add_example_executable(example_layernorm2d_bwd_fp32 layernorm2d_bwd_fp32.cpp)
|
||||
@@ -15,16 +15,17 @@
|
||||
#include "ck/library/utility/literals.hpp"
|
||||
|
||||
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
||||
#include "ck/tensor_operation/gpu/device/impl/device_normalization_bwd_data_impl.hpp"
|
||||
#include "ck/tensor_operation/gpu/device/impl/device_normalization_bwd_gamma_beta_impl.hpp"
|
||||
#include "ck/library/reference_tensor_operation/cpu/reference_layernorm_bwd.hpp"
|
||||
|
||||
using DYDataType = ck::half_t;
|
||||
using XDataType = ck::half_t;
|
||||
using GammaDataType = ck::half_t;
|
||||
using DYDataType = float;
|
||||
using XDataType = float;
|
||||
using GammaDataType = float;
|
||||
using MeanInvStdDataType = float;
|
||||
using DGammaDataType = ck::half_t;
|
||||
using DBetaDataType = ck::half_t;
|
||||
using DXDataType = ck::half_t;
|
||||
using DGammaDataType = float;
|
||||
using DBetaDataType = float;
|
||||
using DXDataType = float;
|
||||
using ComputeDataType = float;
|
||||
|
||||
constexpr int Rank = 2;
|
||||
@@ -39,6 +40,7 @@ constexpr int NumReduceDim = 1;
|
||||
// inv_std: [M, 1]
|
||||
|
||||
// Output shape
|
||||
// dx: [M, N]
|
||||
// dgamma: [1, N]
|
||||
// dbeta: [1, N]
|
||||
|
||||
@@ -46,8 +48,34 @@ constexpr int NumReduceDim = 1;
|
||||
// dbeta = reduce_sum(dy, axis=0)
|
||||
|
||||
// [CAUTION]
|
||||
// In DeviceNormalizationBwdGammaBetaImpl, M is invarient dimension, K is reduced dimension
|
||||
// Hence, M in this example and DeviceNormalizationBwdGammaBetaImpl is different
|
||||
// In DeviceNormalizationBwdDataImpl & DeviceNormalizationBwdGammaBetaImpl, M is Invariant
|
||||
// dimension, K is the reduced dimension. Hence, M in this example and
|
||||
// DeviceNormalizationBwdGammaBetaImpl is different
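// (for the gamma/beta kernel the invariant length corresponds to this example's N,
// while the reduced length corresponds to this example's M)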
|
||||
using XDeviceInstance = ck::tensor_operation::device::DeviceNormalizationBwdDataImpl<
|
||||
DYDataType,
|
||||
XDataType,
|
||||
GammaDataType,
|
||||
MeanInvStdDataType,
|
||||
ComputeDataType,
|
||||
DXDataType,
|
||||
Rank,
|
||||
NumReduceDim,
|
||||
256, // BlockSize
|
||||
8, // MThreadClusterSize
|
||||
32, // KThreadClusterSize
|
||||
1, // MThreadSliceSize
|
||||
4, // KThreadSliceSize
|
||||
true, // IsDYFastestDimReduced
|
||||
4, // DYSrcVectorSize
|
||||
true, // IsXFastestDimReduced
|
||||
4, // XSrcVectorSize
|
||||
true, // IsGammaFastestDimReduced
|
||||
4, // GammaSrcVectorSize
|
||||
false, // IsMeanInvStdFastestDimReduced
|
||||
1, // MeanInvStdSrcVectorSize
|
||||
true, // IsDXFastestDimReduced
|
||||
4>; // DXDstVectorSize
|
||||
|
||||
using GammaBetaDeviceInstance = ck::tensor_operation::device::DeviceNormalizationBwdGammaBetaImpl<
|
||||
DYDataType,
|
||||
XDataType,
|
||||
@@ -58,18 +86,18 @@ using GammaBetaDeviceInstance = ck::tensor_operation::device::DeviceNormalizatio
|
||||
Rank,
|
||||
NumReduceDim,
|
||||
256, // BlockSize
|
||||
8, // ClusterInvarient
|
||||
32, // ClusterReduce
|
||||
8, // SliceInvarient
|
||||
1, // SliceReduce
|
||||
8, // MThreadClusterSize
|
||||
32, // KThreadClusterSize
|
||||
4, // MThreadSliceSize
|
||||
1, // KThreadSliceSize
|
||||
false, // IsDYFastestDimReduced
|
||||
8, // DYSrcVectorSize
|
||||
4, // DYSrcVectorSize
|
||||
false, // IsXFastestDimReduced
|
||||
8, // XSrcVectorSize
|
||||
4, // XSrcVectorSize
|
||||
true, // IsMeanInvStdFastestDimReduced
|
||||
1, // MeanInvStdSrcVectorSize
|
||||
1, // DGammaDstVectorSize
|
||||
1>; // DBetaDstVectorSize
|
||||
4, // DGammaDstVectorSize
|
||||
4>; // DBetaDstVectorSize
|
||||
|
||||
int main()
|
||||
{
|
||||
@@ -96,16 +124,48 @@ int main()
|
||||
|
||||
DeviceMem dy_dev(sizeof(DYDataType) * dy.mDesc.GetElementSpaceSize());
|
||||
DeviceMem x_dev(sizeof(XDataType) * x.mDesc.GetElementSpaceSize());
|
||||
DeviceMem gamma_dev(sizeof(GammaDataType) * gamma.mDesc.GetElementSpaceSize());
|
||||
DeviceMem mean_dev(sizeof(MeanInvStdDataType) * mean.mDesc.GetElementSpaceSize());
|
||||
DeviceMem inv_std_dev(sizeof(MeanInvStdDataType) * inv_std.mDesc.GetElementSpaceSize());
|
||||
DeviceMem dx_dev(sizeof(DXDataType) * dx.mDesc.GetElementSpaceSize());
|
||||
DeviceMem dgamma_dev(sizeof(DGammaDataType) * dgamma.mDesc.GetElementSpaceSize());
|
||||
DeviceMem dbeta_dev(sizeof(DBetaDataType) * dbeta.mDesc.GetElementSpaceSize());
|
||||
|
||||
dy_dev.ToDevice(dy.mData.data());
|
||||
x_dev.ToDevice(x.mData.data());
|
||||
gamma_dev.ToDevice(gamma.mData.data());
|
||||
mean_dev.ToDevice(mean.mData.data());
|
||||
inv_std_dev.ToDevice(inv_std.mData.data());
|
||||
|
||||
// backward x
|
||||
auto x_device_instance = XDeviceInstance{};
|
||||
|
||||
auto x_argument_ptr = x_device_instance.MakeArgumentPointer({M, N}, // lengths
|
||||
{N, 1}, // dyStrides
|
||||
{N, 1}, // xStrides
|
||||
{0, 1}, // gammaStrides
|
||||
{1, 0}, // meanStrides
|
||||
{1, 0}, // invStdStrides
|
||||
{N, 1}, // dxStrides
|
||||
{1}, // reduceDims
|
||||
dy_dev.GetDeviceBuffer(),
|
||||
x_dev.GetDeviceBuffer(),
|
||||
gamma_dev.GetDeviceBuffer(),
|
||||
mean_dev.GetDeviceBuffer(),
|
||||
inv_std_dev.GetDeviceBuffer(),
|
||||
dx_dev.GetDeviceBuffer());
|
||||
|
||||
if(!x_device_instance.IsSupportedArgument(x_argument_ptr.get()))
|
||||
{
|
||||
std::cout << "The runtime parameters are not supported." << __FILE__ << ":" << __LINE__
|
||||
<< std::endl;
|
||||
return 1;
|
||||
};
|
||||
|
||||
auto x_invoker_ptr = x_device_instance.MakeInvokerPointer();
|
||||
x_invoker_ptr->Run(x_argument_ptr.get(), StreamConfig{nullptr, time_kernel});
|
||||
|
||||
// backward gamma & beta
|
||||
auto gamma_beta_device_instance = GammaBetaDeviceInstance{};
|
||||
auto gamma_beta_argument_ptr =
|
||||
gamma_beta_device_instance.MakeArgumentPointer({M, N}, // inLengths
|
||||
@@ -126,7 +186,8 @@ int main()
|
||||
|
||||
if(!gamma_beta_device_instance.IsSupportedArgument(gamma_beta_argument_ptr.get()))
|
||||
{
|
||||
std::cout << "The runtime parameters are not supported" << std::endl;
|
||||
std::cout << "The runtime parameters are not supported." << __FILE__ << ":" << __LINE__
|
||||
<< std::endl;
|
||||
return 1;
|
||||
};
|
||||
|
||||
@@ -156,9 +217,11 @@ int main()
|
||||
|
||||
dgamma_dev.FromDevice(dgamma.mData.data());
|
||||
dbeta_dev.FromDevice(dbeta.mData.data());
|
||||
dx_dev.FromDevice(dx.mData.data());
|
||||
|
||||
pass &= ck::utils::check_err(dgamma, host_dgamma, "Error: Incorrect dgamma", 1e-3, 1e-3);
|
||||
pass &= ck::utils::check_err(dbeta, host_dbeta, "Error: Incorrect dbeta", 1e-3, 1e-3);
|
||||
pass &= ck::utils::check_err(dx, host_dx, "Error: Incorrect dx", 1e-3, 1e-3);
|
||||
}
|
||||
|
||||
return (pass ? 0 : 1);
|
||||
@@ -1 +0,0 @@
|
||||
add_example_executable(example_layernorm2d_bwd_fp16 layernorm2d_bwd_fp16.cpp)
|
||||
@@ -1 +1 @@
|
||||
add_example_executable(example_groupnorm_bwd_fp16 groupnorm_bwd_fp16.cpp)
|
||||
add_example_executable(example_groupnorm_bwd_fp32 groupnorm_bwd_fp32.cpp)
|
||||
|
||||
@@ -15,23 +15,58 @@
#include "ck/library/utility/literals.hpp"

#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_normalization_bwd_data_impl.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_normalization_bwd_gamma_beta_impl.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_groupnorm_bwd.hpp"

using DYDataType = ck::half_t;
using XDataType = ck::half_t;
using GammaDataType = ck::half_t;
using DYDataType = float;
using XDataType = float;
using GammaDataType = float;
using MeanInvStdDataType = float;
using DGammaDataType = ck::half_t;
using DBetaDataType = ck::half_t;
using DXDataType = ck::half_t;
using DGammaDataType = float;
using DBetaDataType = float;
using DXDataType = float;
using ComputeDataType = float;

constexpr int Rank = 5;
constexpr int NumReduceDim = 3;

// Groupnorm
// kernel: M , K
// kernel 1: M , K
// dy: N, H, W, G, C -> N * G, H * W * C
// x: N, H, W, G, C -> N * G, H * W * C
// gamma: 1, 1, 1, G, C -> 1 * G, 1 * 1 * C
// mean: N, 1, 1, G, 1 -> N * G, 1 * 1 * 1
// rstd: N, 1, 1, G, 1 -> N * G, 1 * 1 * 1

// dx: N, H, W, G, C -> N * G, H * W * C

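// For illustration, with hypothetical sizes N = 2, H = W = 4, G = 8, C = 16 the
// packed-layout strides below give exactly this M x K view; zero strides broadcast
// gamma over N, H, W and mean/rstd over H, W, C (the same vectors are built in main()):
//
//   std::vector<ck::index_t> xStrides     = {H * W * G * C, W * G * C, G * C, C, 1};
//   std::vector<ck::index_t> gammaStrides = {0, 0, 0, C, 1};
//   std::vector<ck::index_t> meanStrides  = {G, 0, 0, 1, 0};
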
using XDeviceInstance = ck::tensor_operation::device::DeviceNormalizationBwdDataImpl<
DYDataType,
XDataType,
GammaDataType,
MeanInvStdDataType,
ComputeDataType,
DXDataType,
Rank,
NumReduceDim,
256, // BlockSize
8, // MThreadClusterSize
32, // KThreadClusterSize
1, // MThreadSliceSize
4, // KThreadSliceSize
true, // IsDYFastestDimReduced
4, // DYSrcVectorSize
true, // IsXFastestDimReduced
4, // XSrcVectorSize
true, // IsGammaFastestDimReduced
4, // GammaSrcVectorSize
false, // IsMeanInvStdFastestDimReduced
1, // MeanInvStdSrcVectorSize
true, // IsDXFastestDimReduced
4>; // DXDstVectorSize

// kernel 2: M , K
// dy: N, H, W, G, C -> G * C, N * H * W
// x: N, H, W, G, C -> G * C, N * H * W
// mean: N, 1, 1, G, 1 -> G * 1, N * 1 * 1
@@ -52,18 +87,18 @@ using GammaBetaDeviceInstance = ck::tensor_operation::device::DeviceNormalizatio
Rank,
NumReduceDim,
256, // BlockSize
8, // ClusterInvarient
8, // ClusterInvariant
32, // ClusterReduce
8, // SliceInvarient
4, // SliceInvariant
1, // SliceReduce
false, // IsDYFastestDimReduced
8, // DYSrcVectorSize
4, // DYSrcVectorSize
false, // IsXFastestDimReduced
8, // XSrcVectorSize
4, // XSrcVectorSize
false, // IsMeanInvStdFastestDimReduced
1, // MeanInvStdSrcVectorSize
1, // DGammaDstVectorSize
1>; // DBetaDstVectorSize
4, // DGammaDstVectorSize
4>; // DBetaDstVectorSize

int main()
{
@@ -93,20 +128,55 @@ int main()

DeviceMem dy_dev(sizeof(DYDataType) * dy.mDesc.GetElementSpaceSize());
DeviceMem x_dev(sizeof(XDataType) * x.mDesc.GetElementSpaceSize());
DeviceMem gamma_dev(sizeof(GammaDataType) * gamma.mDesc.GetElementSpaceSize());
DeviceMem mean_dev(sizeof(MeanInvStdDataType) * mean.mDesc.GetElementSpaceSize());
DeviceMem inv_std_dev(sizeof(MeanInvStdDataType) * inv_std.mDesc.GetElementSpaceSize());
DeviceMem dx_dev(sizeof(DXDataType) * dx.mDesc.GetElementSpaceSize());
DeviceMem dgamma_dev(sizeof(DGammaDataType) * dgamma.mDesc.GetElementSpaceSize());
DeviceMem dbeta_dev(sizeof(DBetaDataType) * dbeta.mDesc.GetElementSpaceSize());

dy_dev.ToDevice(dy.mData.data());
x_dev.ToDevice(x.mData.data());
gamma_dev.ToDevice(gamma.mData.data());
mean_dev.ToDevice(mean.mData.data());
inv_std_dev.ToDevice(inv_std.mData.data());

std::vector<ck::index_t> dyStrides{dy.mDesc.GetStrides().begin(), dy.mDesc.GetStrides().end()};
std::vector<ck::index_t> xStrides{x.mDesc.GetStrides().begin(), x.mDesc.GetStrides().end()};
std::vector<ck::index_t> gammaStrides = {0, 0, 0, C, 1};
std::vector<ck::index_t> meanStrides = {G, 0, 0, 1, 0};
std::vector<ck::index_t> invStdStrides = {G, 0, 0, 1, 0};
std::vector<ck::index_t> dxStrides{dx.mDesc.GetStrides().begin(), dx.mDesc.GetStrides().end()};

// backward x
auto x_device_instance = XDeviceInstance{};

auto x_argument_ptr = x_device_instance.MakeArgumentPointer({N, H, W, G, C}, // lengths
dyStrides, // dyStrides
xStrides, // xStrides
gammaStrides, // gammaStrides
meanStrides, // meanStrides
invStdStrides, // invStdStrides
dxStrides, // dxStrides
{1, 2, 4}, // reduceDims
dy_dev.GetDeviceBuffer(),
x_dev.GetDeviceBuffer(),
gamma_dev.GetDeviceBuffer(),
mean_dev.GetDeviceBuffer(),
inv_std_dev.GetDeviceBuffer(),
dx_dev.GetDeviceBuffer());

if(!x_device_instance.IsSupportedArgument(x_argument_ptr.get()))
{
std::cout << "The runtime parameters are not supported." << __FILE__ << ":" << __LINE__
<< std::endl;
return 1;
};

auto x_invoker_ptr = x_device_instance.MakeInvokerPointer();
x_invoker_ptr->Run(x_argument_ptr.get(), StreamConfig{nullptr, time_kernel});

// backward gamma & beta

auto gamma_beta_device_instance = GammaBetaDeviceInstance{};
auto gamma_beta_argument_ptr =
@@ -128,7 +198,8 @@ int main()

if(!gamma_beta_device_instance.IsSupportedArgument(gamma_beta_argument_ptr.get()))
{
std::cout << "The runtime parameters are not supported" << std::endl;
std::cout << "The runtime parameters are not supported." << __FILE__ << ":" << __LINE__
<< std::endl;
return 1;
};

@@ -158,9 +229,11 @@ int main()

dgamma_dev.FromDevice(dgamma.mData.data());
dbeta_dev.FromDevice(dbeta.mData.data());
dx_dev.FromDevice(dx.mData.data());

pass &= ck::utils::check_err(dgamma, host_dgamma, "Error: Incorrect dgamma", 1e-3, 1e-3);
pass &= ck::utils::check_err(dbeta, host_dbeta, "Error: Incorrect dbeta", 1e-3, 1e-3);
pass &= ck::utils::check_err(dx, host_dx, "Error: Incorrect dx", 1e-3, 1e-3);
}

return (pass ? 0 : 1);
@@ -42,6 +42,8 @@ foreach(gpu IN LISTS GPU_TARGETS)
# ScaleAdd ScaleAdd Relu
add_example_executable(example_convnd_fwd_xdl_scaleadd_scaleadd_relu_fp16 convnd_fwd_xdl_scaleadd_scaleadd_relu_fp16.cpp)
add_example_dependencies(example_convnd_fwd_activ_xdl example_convnd_fwd_xdl_scaleadd_scaleadd_relu_fp16)
add_example_executable(example_convnd_fwd_xdl_scaleadd_scaleadd_relu_bcasted_bias_fp16 convnd_fwd_xdl_scaleadd_scaleadd_relu_bcasted_bias_fp16.cpp)
add_example_dependencies(example_convnd_fwd_activ_xdl example_convnd_fwd_xdl_scaleadd_scaleadd_relu_bcasted_bias_fp16)
set(target 1)
endif()
endforeach()

@@ -0,0 +1,294 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
// Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
#include <algorithm>
|
||||
#include <cstdlib>
|
||||
#include <iostream>
|
||||
#include <numeric>
|
||||
#include <type_traits>
|
||||
|
||||
#include "ck/ck.hpp"
|
||||
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
||||
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
||||
#include "ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_abd_xdl_cshuffle.hpp"
|
||||
|
||||
#include "ck/library/utility/algorithm.hpp"
|
||||
#include "ck/library/utility/check_err.hpp"
|
||||
#include "ck/library/utility/device_memory.hpp"
|
||||
#include "ck/library/utility/host_tensor.hpp"
|
||||
#include "ck/library/utility/host_tensor_generator.hpp"
|
||||
#include "ck/library/utility/convolution_parameter.hpp"
|
||||
#include "ck/library/utility/convolution_host_tensor_descriptor_helper.hpp"
|
||||
#include "ck/library/reference_tensor_operation/cpu/reference_conv_fwd.hpp"
|
||||
#include "ck/library/utility/convolution_host_tensor_descriptor_helper.hpp"
|
||||
|
||||
constexpr ck::index_t NDimSpatial = 3;
|
||||
using InDataType = ck::half_t;
|
||||
using WeiDataType = ck::half_t;
|
||||
using AccDataType = float;
|
||||
using CShuffleDataType = ck::half_t;
|
||||
using OutDataType = ck::half_t;
|
||||
|
||||
template <ck::index_t... Is>
|
||||
using S = ck::Sequence<Is...>;
|
||||
|
||||
using InLayout = ck::tensor_layout::convolution::NDHWGC;
|
||||
using WeiLayout = ck::tensor_layout::convolution::GKZYXC;
|
||||
using OutLayout = ck::tensor_layout::convolution::NDHWGK;
|
||||
|
||||
using BiasLayout = ck::tensor_layout::convolution::G_K;
|
||||
|
||||
using InElementOp = ck::tensor_operation::element_wise::PassThrough;
|
||||
using WeiElementOp = ck::tensor_operation::element_wise::PassThrough;
|
||||
|
||||
using OutElementOp = ck::tensor_operation::element_wise::ScaleAddScaleAddRelu;
|
||||
|
||||
static constexpr auto ConvSpec =
|
||||
ck::tensor_operation::device::ConvolutionForwardSpecialization::Default;
|
||||
|
||||
static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding;
|
||||
|
||||
template <typename OutElementOp>
|
||||
using DeviceGroupedConvNDFwdInstance =
|
||||
ck::tensor_operation::device::DeviceGroupedConvFwdMultipleABD_Xdl_CShuffle<
|
||||
NDimSpatial,
|
||||
InLayout,
|
||||
WeiLayout,
|
||||
ck::Tuple<OutLayout, BiasLayout>,
|
||||
OutLayout,
|
||||
InDataType,
|
||||
WeiDataType,
|
||||
AccDataType,
|
||||
CShuffleDataType,
|
||||
ck::Tuple<OutDataType, OutDataType>,
|
||||
OutDataType,
|
||||
InElementOp,
|
||||
WeiElementOp,
|
||||
OutElementOp,
|
||||
ConvSpec, // ConvForwardSpecialization
|
||||
GemmSpec, // GemmSpecialization
|
||||
1, //
|
||||
256, // BlockSize
|
||||
128, // MPerBlock
|
||||
256, // NPerBlock
|
||||
32, // KPerBlock
|
||||
8, // AK1
|
||||
8, // BK1
|
||||
32, // MPerXdl
|
||||
32, // NPerXdl
|
||||
2, // MXdlPerWave
|
||||
4, // NXdlPerWave
|
||||
S<4, 64, 1>, // ABlockTransferThreadClusterLengths_AK0_M_AK1
|
||||
S<1, 0, 2>, // ABlockTransferThreadClusterArrangeOrder
|
||||
S<1, 0, 2>, // ABlockTransferSrcAccessOrder
|
||||
2, // ABlockTransferSrcVectorDim
|
||||
8, // ABlockTransferSrcScalarPerVector
|
||||
8, // ABlockTransferDstScalarPerVector_AK1
|
||||
1, // ABlockLdsExtraM
|
||||
S<4, 64, 1>, // BBlockTransferThreadClusterLengths_BK0_N_BK1
|
||||
S<1, 0, 2>, // BBlockTransferThreadClusterArrangeOrder
|
||||
S<1, 0, 2>, // BBlockTransferSrcAccessOrder
|
||||
2, // BBlockTransferSrcVectorDim
|
||||
8, // BBlockTransferSrcScalarPerVector
|
||||
8, // BBlockTransferDstScalarPerVector_BK1
|
||||
1, // BBlockLdsExtraN
|
||||
1,
|
||||
1,
|
||||
S<1, 32, 1, 8>,
|
||||
8>;
|
||||
|
||||
using DeviceGroupedConvNDFwdActivInstance = DeviceGroupedConvNDFwdInstance<OutElementOp>;
|
||||
|
||||
namespace {
|
||||
// Use custom implementation to pass two more tensors for post op
|
||||
template <ck::index_t NDimSpatial,
|
||||
typename InDataType,
|
||||
typename WeiDataType,
|
||||
typename OutDataType,
|
||||
typename InElementOp,
|
||||
typename WeiElementOp,
|
||||
typename OutElementOp,
|
||||
typename DeviceConvNDFwdInstance>
|
||||
bool run_grouped_conv_fwd(bool do_verification,
|
||||
int init_method,
|
||||
bool time_kernel,
|
||||
const ck::utils::conv::ConvParam& conv_param,
|
||||
const HostTensorDescriptor& in_g_n_c_wis_desc,
|
||||
const HostTensorDescriptor& wei_g_k_c_xs_desc,
|
||||
const HostTensorDescriptor& out_g_n_k_wos_desc,
|
||||
const InElementOp& in_element_op,
|
||||
const WeiElementOp& wei_element_op,
|
||||
const OutElementOp& out_element_op)
|
||||
{
|
||||
constexpr ck::index_t NumDs = 2;
|
||||
const ck::index_t G = out_g_n_k_wos_desc.GetLengths()[0];
|
||||
const ck::index_t K = out_g_n_k_wos_desc.GetLengths()[2];

// Logical broadcast bias (we have to pass bias lengths in the same format as output - GNKDHW)
std::array<ck::index_t, NDimSpatial + 3> bias_g_k_lengths;
std::array<ck::index_t, NDimSpatial + 3> bias_g_k_strides;
// Fill lengths other than G,K with 1 and strides with 0
bias_g_k_lengths.fill(1);
bias_g_k_strides.fill(0);
bias_g_k_lengths[0] = G;
bias_g_k_lengths[2] = K;
bias_g_k_strides[0] = K; // stride to G
bias_g_k_strides[2] = 1; // stride to K
const auto broadcasted_bias_desc = HostTensorDescriptor(bias_g_k_lengths, bias_g_k_strides);

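// For illustration, with hypothetical G = 2, K = 8 and NDimSpatial = 3 the arrays
// above become lengths = {2, 1, 8, 1, 1, 1} and strides = {8, 0, 1, 0, 0, 0}, so every
// (n, d, h, w) index reads the same bias element for a given (g, k) pair:
//
//   assert(bias_g_k_lengths == (std::array<ck::index_t, 6>{2, 1, 8, 1, 1, 1}));
//   assert(bias_g_k_strides == (std::array<ck::index_t, 6>{8, 0, 1, 0, 0, 0}));
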
// y = relu ( alpha1 * conv(x) + alpha2 * z + bias )
|
||||
Tensor<InDataType> in(in_g_n_c_wis_desc);
|
||||
Tensor<WeiDataType> wei(wei_g_k_c_xs_desc);
|
||||
Tensor<OutDataType> out_host(out_g_n_k_wos_desc);
|
||||
Tensor<OutDataType> out_device(out_g_n_k_wos_desc);
|
||||
std::array<Tensor<OutDataType>, NumDs> d_tensors = {Tensor<OutDataType>(out_g_n_k_wos_desc),
|
||||
Tensor<OutDataType>(broadcasted_bias_desc)};
|
||||
|
||||
std::cout << "in: " << in.mDesc << std::endl;
|
||||
std::cout << "wei: " << wei.mDesc << std::endl;
|
||||
std::cout << "out: " << out_host.mDesc << std::endl;
|
||||
std::cout << "z_tensor: " << d_tensors[0].mDesc << std::endl;
|
||||
std::cout << "bias_tensor: " << d_tensors[1].mDesc << std::endl;
|
||||
|
||||
// Make sure that we allocated only G * K values for bias
|
||||
assert(static_cast<ck::index_t>(d_tensors[1].mData.size()) == G * K);
|
||||
|
||||
switch(init_method)
|
||||
{
|
||||
case 0: break;
|
||||
case 1:
|
||||
in.GenerateTensorValue(GeneratorTensor_2<InDataType>{-2, 2});
|
||||
wei.GenerateTensorValue(GeneratorTensor_2<WeiDataType>{-2, 2});
|
||||
d_tensors[0].GenerateTensorValue(GeneratorTensor_2<OutDataType>{-2, 2});
|
||||
d_tensors[1].GenerateTensorValue(GeneratorTensor_2<OutDataType>{-2, 2});
|
||||
break;
|
||||
default:
|
||||
in.GenerateTensorValue(GeneratorTensor_3<InDataType>{-1.0, 1.0});
|
||||
wei.GenerateTensorValue(GeneratorTensor_3<WeiDataType>{-0.05, 0.05});
|
||||
d_tensors[0].GenerateTensorValue(GeneratorTensor_3<OutDataType>{-0.05, 0.05});
|
||||
d_tensors[1].GenerateTensorValue(GeneratorTensor_3<OutDataType>{-0.05, 0.05});
|
||||
}
|
||||
|
||||
DeviceMem in_device_buf(sizeof(InDataType) * in.mDesc.GetElementSpaceSize());
|
||||
DeviceMem wei_device_buf(sizeof(WeiDataType) * wei.mDesc.GetElementSpaceSize());
|
||||
DeviceMem z_buf(sizeof(OutDataType) * d_tensors[0].mDesc.GetElementSpaceSize());
|
||||
DeviceMem bias_buf(sizeof(OutDataType) * d_tensors[1].mDesc.GetElementSpaceSize());
|
||||
DeviceMem out_device_buf(sizeof(OutDataType) * out_device.mDesc.GetElementSpaceSize());
|
||||
|
||||
in_device_buf.ToDevice(in.mData.data());
|
||||
wei_device_buf.ToDevice(wei.mData.data());
|
||||
z_buf.ToDevice(d_tensors[0].mData.data());
|
||||
bias_buf.ToDevice(d_tensors[1].mData.data());
|
||||
|
||||
std::array<ck::index_t, NDimSpatial + 3> a_g_n_c_wis_lengths{};
|
||||
std::array<ck::index_t, NDimSpatial + 3> a_g_n_c_wis_strides{};
|
||||
std::array<ck::index_t, NDimSpatial + 3> b_g_k_c_xs_lengths{};
|
||||
std::array<ck::index_t, NDimSpatial + 3> b_g_k_c_xs_strides{};
|
||||
std::array<ck::index_t, NDimSpatial + 3> e_g_n_k_wos_lengths{};
|
||||
std::array<ck::index_t, NDimSpatial + 3> e_g_n_k_wos_strides{};
|
||||
std::array<ck::index_t, NDimSpatial> conv_filter_strides{};
|
||||
std::array<ck::index_t, NDimSpatial> conv_filter_dilations{};
|
||||
std::array<ck::index_t, NDimSpatial> input_left_pads{};
|
||||
std::array<ck::index_t, NDimSpatial> input_right_pads{};
|
||||
|
||||
auto copy = [](const auto& x, auto& y) { ck::ranges::copy(x, y.begin()); };
|
||||
|
||||
copy(in_g_n_c_wis_desc.GetLengths(), a_g_n_c_wis_lengths);
|
||||
copy(in_g_n_c_wis_desc.GetStrides(), a_g_n_c_wis_strides);
|
||||
copy(wei_g_k_c_xs_desc.GetLengths(), b_g_k_c_xs_lengths);
|
||||
copy(wei_g_k_c_xs_desc.GetStrides(), b_g_k_c_xs_strides);
|
||||
copy(out_g_n_k_wos_desc.GetLengths(), e_g_n_k_wos_lengths);
|
||||
copy(out_g_n_k_wos_desc.GetStrides(), e_g_n_k_wos_strides);
|
||||
copy(conv_param.conv_filter_strides_, conv_filter_strides);
|
||||
copy(conv_param.conv_filter_dilations_, conv_filter_dilations);
|
||||
copy(conv_param.input_left_pads_, input_left_pads);
|
||||
copy(conv_param.input_right_pads_, input_right_pads);
|
||||
|
||||
const std::array<const void*, NumDs> ds = {z_buf.GetDeviceBuffer(), bias_buf.GetDeviceBuffer()};
|
||||
|
||||
auto conv = DeviceConvNDFwdInstance{};
|
||||
auto invoker = conv.MakeInvoker();
|
||||
auto argument = conv.MakeArgument(in_device_buf.GetDeviceBuffer(),
|
||||
wei_device_buf.GetDeviceBuffer(),
|
||||
ds,
|
||||
out_device_buf.GetDeviceBuffer(),
|
||||
a_g_n_c_wis_lengths,
|
||||
a_g_n_c_wis_strides,
|
||||
b_g_k_c_xs_lengths,
|
||||
b_g_k_c_xs_strides,
|
||||
std::array<std::array<ck::index_t, NDimSpatial + 3>, NumDs>{
|
||||
e_g_n_k_wos_lengths, bias_g_k_lengths},
|
||||
std::array<std::array<ck::index_t, NDimSpatial + 3>, NumDs>{
|
||||
e_g_n_k_wos_strides, bias_g_k_strides},
|
||||
e_g_n_k_wos_lengths,
|
||||
e_g_n_k_wos_strides,
|
||||
conv_filter_strides,
|
||||
conv_filter_dilations,
|
||||
input_left_pads,
|
||||
input_right_pads,
|
||||
in_element_op,
|
||||
wei_element_op,
|
||||
out_element_op);
|
||||
|
||||
if(!conv.IsSupportedArgument(argument))
|
||||
{
|
||||
throw std::runtime_error("The device op with the specified compilation parameters does "
|
||||
"not support this convolution problem.");
|
||||
}
|
||||
|
||||
float avg_time = invoker.Run(argument, StreamConfig{nullptr, time_kernel});
|
||||
|
||||
std::size_t flop = conv_param.GetFlops() + G * K +
|
||||
conv_param.GetOutputByte<OutDataType>() / sizeof(OutDataType);
|
||||
std::size_t num_btype = conv_param.GetByte<InDataType, WeiDataType, OutDataType>() +
|
||||
G * K * sizeof(OutDataType) + conv_param.GetOutputByte<OutDataType>();
|
||||
|
||||
float tflops = static_cast<float>(flop) / 1.E9 / avg_time;
|
||||
float gb_per_sec = num_btype / 1.E6 / avg_time;
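// Sanity check of the units (illustrative numbers): avg_time is in ms, so e.g.
// flop = 1.0e11 with avg_time = 0.5 gives 1.0e11 / 1.e9 / 0.5 = 200 TFlops, and
// num_btype = 5.0e8 bytes gives 5.0e8 / 1.e6 / 0.5 = 1000 GB/s.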
|
||||
std::cout << "Perf: " << avg_time << " ms, " << tflops << " TFlops, " << gb_per_sec << " GB/s, "
|
||||
<< conv.GetTypeString() << std::endl;
|
||||
|
||||
if(do_verification)
|
||||
{
|
||||
auto ref_conv =
|
||||
ck::tensor_operation::host::ReferenceConvFwd<NDimSpatial,
|
||||
InDataType,
|
||||
WeiDataType,
|
||||
OutDataType,
|
||||
InElementOp,
|
||||
WeiElementOp,
|
||||
OutElementOp,
|
||||
0, /*Num A Elementwise Tensors*/
|
||||
0, /*Num B Elementwise Tensors*/
|
||||
NumDs>();
|
||||
|
||||
auto ref_invoker = ref_conv.MakeInvoker();
|
||||
auto ref_argument = ref_conv.MakeArgument(in,
|
||||
wei,
|
||||
out_host,
|
||||
conv_param.conv_filter_strides_,
|
||||
conv_param.conv_filter_dilations_,
|
||||
conv_param.input_left_pads_,
|
||||
conv_param.input_right_pads_,
|
||||
in_element_op,
|
||||
wei_element_op,
|
||||
out_element_op,
|
||||
{},
|
||||
{},
|
||||
d_tensors);
|
||||
|
||||
ref_invoker.Run(ref_argument);
|
||||
|
||||
out_device_buf.FromDevice(out_device.mData.data());
|
||||
|
||||
return ck::utils::check_err(out_device, out_host, "Error: incorrect results!");
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
#include "run_convnd_fwd_activ_example.inc"
|
||||
|
||||
int main(int argc, char* argv[]) { return !run_convnd_fwd_example(argc, argv); }
|
||||
@@ -24,7 +24,7 @@ bool run_convnd_fwd_example(int argc, char* argv[])
|
||||
// Following shapes are selected to avoid overflow. Expect inf in case of
|
||||
// size increase for some elementwise ops.
|
||||
ck::utils::conv::ConvParam conv_param{
|
||||
3, 1, 16, 128, 8, {3, 3, 3}, {17, 17, 17}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}};
|
||||
3, 2, 16, 128, 8, {3, 3, 3}, {17, 17, 17}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}};
|
||||
|
||||
if(argc == 1)
|
||||
{
|
||||
|
||||
@@ -44,16 +44,30 @@
|
||||
#define CK_USE_WAVES_PER_EU 0
|
||||
#endif
|
||||
|
||||
// define general macros for various architectures
|
||||
#if defined(__gfx940__) || defined(__gfx941__) || defined(__gfx942__)
|
||||
#define __gfx94__
|
||||
#endif
|
||||
#if defined(__gfx1010__) || defined(__gfx1011__) || defined(__gfx1012__)
|
||||
#define __gfx101__
|
||||
#endif
|
||||
#if defined(__gfx1030__) || defined(__gfx1031__) || defined(__gfx1032__) || \
|
||||
defined(__gfx1034__) || defined(__gfx1035__) || defined(__gfx1036__)
|
||||
#define __gfx103__
|
||||
#endif
|
||||
#if defined(__gfx1100__) || defined(__gfx1101__) || defined(__gfx1102__) || defined(__gfx1103__)
|
||||
#define __gfx11__
|
||||
#endif
|
||||
|
||||
// buffer resource
|
||||
#ifndef __HIP_DEVICE_COMPILE__ // for host code
|
||||
#define CK_BUFFER_RESOURCE_3RD_DWORD -1
|
||||
#elif defined(__gfx803__) || defined(__gfx900__) || defined(__gfx906__) || defined(__gfx908__) || \
|
||||
defined(__gfx90a__) || defined(__gfx940__) || defined(__gfx941__) || \
|
||||
defined(__gfx942__) // for GPU code
|
||||
defined(__gfx90a__) || defined(__gfx94__)
|
||||
#define CK_BUFFER_RESOURCE_3RD_DWORD 0x00020000
|
||||
#elif defined(__gfx1030__) // for GPU code
|
||||
#elif defined(__gfx103__)
|
||||
#define CK_BUFFER_RESOURCE_3RD_DWORD 0x31014000
|
||||
#elif defined(__gfx1100__) || defined(__gfx1101__) || defined(__gfx1102__) // for GPU code
|
||||
#elif defined(__gfx11__)
|
||||
#define CK_BUFFER_RESOURCE_3RD_DWORD 0x31004000
|
||||
#endif
|
||||
|
||||
@@ -61,12 +75,12 @@
|
||||
#ifndef __HIP_DEVICE_COMPILE__ // for host code, define nothing
|
||||
#elif defined(__gfx803__) || defined(__gfx900__) // for GPU code
|
||||
#define CK_USE_AMD_V_MAC_F32
|
||||
#elif defined(__gfx906__) || defined(__gfx908__) || defined(__gfx90a__) || defined(__gfx1030__) || \
|
||||
defined(__gfx940__) || defined(__gfx941__) || defined(__gfx942__) // for GPU code
|
||||
#elif defined(__gfx906__) || defined(__gfx908__) || defined(__gfx90a__) || defined(__gfx103__) || \
|
||||
defined(__gfx94__) // for GPU code
|
||||
#define CK_USE_AMD_V_FMAC_F32
|
||||
#define CK_USE_AMD_V_DOT2_F32_F16
|
||||
#define CK_USE_AMD_V_DOT4_I32_I8
|
||||
#elif defined(__gfx1100__) || defined(__gfx1101__) || defined(__gfx1102__)
|
||||
#elif defined(__gfx11__)
|
||||
#define CK_USE_AMD_V_FMAC_F32
|
||||
#define CK_USE_AMD_V_DOT2_F32_F16
|
||||
#define CK_USE_AMD_V_DOT4_I32_I8_GFX11
|
||||
@@ -75,23 +89,22 @@
|
||||
// MFMA instruction
|
||||
#ifndef __HIP_DEVICE_COMPILE__ // for host code
|
||||
#define CK_USE_AMD_MFMA
|
||||
#elif defined(__gfx908__) || defined(__gfx90a__) || defined(__gfx940__) || defined(__gfx941__) || \
|
||||
defined(__gfx942__) // for GPU code
|
||||
#elif defined(__gfx908__) || defined(__gfx90a__) || defined(__gfx94__) // for GPU code
|
||||
#define CK_USE_AMD_MFMA
|
||||
#endif
|
||||
|
||||
#if(defined(__gfx90a__) || defined(__gfx940__) || defined(__gfx941__) || defined(__gfx942__))
|
||||
#if(defined(__gfx90a__) || defined(__gfx94__))
|
||||
#define CK_USE_AMD_MFMA_BF16_1K_OP
|
||||
#endif
|
||||
|
||||
#if defined(__gfx940__) || defined(__gfx941__) || defined(__gfx942__)
|
||||
#if defined(__gfx94__)
|
||||
#define CK_USE_AMD_MFMA_GFX940
|
||||
#endif
|
||||
|
||||
// WMMA instruction
|
||||
#ifndef __HIP_DEVICE_COMPILE__ // for host code
|
||||
#define CK_USE_AMD_WMMA
|
||||
#elif defined(__gfx1100__) || defined(__gfx1101__) || defined(__gfx1102__) // for GPU code
|
||||
#elif defined(__gfx11__) // for GPU code
|
||||
#define CK_USE_AMD_WMMA
|
||||
#endif
|
||||
|
||||
@@ -107,15 +120,13 @@
|
||||
// buffer atomic add: floating point
|
||||
#ifndef __HIP_DEVICE_COMPILE__ // for host code
|
||||
#define CK_USE_AMD_BUFFER_ATOMIC_ADD_FLOAT 1
|
||||
#elif defined(__gfx908__) || defined(__gfx90a__) || defined(__gfx940__) || defined(__gfx941__) || \
|
||||
defined(__gfx942__) // for GPU code
|
||||
#elif defined(__gfx908__) || defined(__gfx90a__) || defined(__gfx94__) // for GPU code
|
||||
#define CK_USE_AMD_BUFFER_ATOMIC_ADD_FLOAT 1
|
||||
#else // for GPU code
|
||||
#define CK_USE_AMD_BUFFER_ATOMIC_ADD_FLOAT 0
|
||||
#endif
|
||||
|
||||
#if(defined(__gfx90a__) || defined(__gfx940__) || defined(__gfx941__) || \
|
||||
defined(__gfx942__)) // for GPU code
|
||||
#if(defined(__gfx90a__) || defined(__gfx94__)) // for GPU code
|
||||
#define CK_USE_AMD_BUFFER_ATOMIC_MAX_FLOAT64 1
|
||||
#else
|
||||
#define CK_USE_AMD_BUFFER_ATOMIC_MAX_FLOAT64 0
|
||||
@@ -134,6 +145,9 @@
|
||||
// inner product using V_DOT with DPP8 modifiers
|
||||
#define CK_USE_AMD_V_DOT_DPP8_INLINE_ASM 1
|
||||
|
||||
// LDS direct loads using inline assembly
|
||||
#define CK_USE_AMD_LDS_DIRECT_LOAD_INLINE_ASM 1
|
||||
|
||||
// set stochastic rounding as default for f8 conversions
|
||||
#define CK_USE_SR_F8_CONVERSION 1
|
||||
|
||||
@@ -215,7 +229,7 @@
// denorm test fix, required to work around issue
#ifndef CK_WORKAROUND_DENORM_FIX
#define CK_WORKAROUND_DENORM_FIX 0
#elif
#else
// enable only on MI200
#define CK_WORKAROUND_DENORM_FIX = CK_WORKAROUND_DENORM_FIX && defined(__gfx90a__)
#endif // CK_WORKAROUND_DENORM_FIX

@@ -26,7 +26,7 @@ inline std::string get_device_name()
|
||||
}
|
||||
const std::string raw_name(props.gcnArchName);
|
||||
|
||||
// https://github.com/ROCmSoftwarePlatform/MIOpen/blob/8498875aef84878e04c1eabefdf6571514891086/src/target_properties.cpp#L40
|
||||
// https://github.com/ROCm/MIOpen/blob/8498875aef84878e04c1eabefdf6571514891086/src/target_properties.cpp#L40
|
||||
static std::map<std::string, std::string> device_name_map = {
|
||||
{"Ellesmere", "gfx803"},
|
||||
{"Baffin", "gfx803"},
|
||||
@@ -65,4 +65,23 @@ inline bool is_lds_direct_load_supported()
|
||||
ck::get_device_name() == "gfx941" || ck::get_device_name() == "gfx942";
|
||||
}
|
||||
|
||||
inline bool is_navi1_supported()
|
||||
{
|
||||
return ck::get_device_name() == "gfx1010" || ck::get_device_name() == "gfx1011" ||
|
||||
ck::get_device_name() == "gfx1012";
|
||||
}
|
||||
|
||||
inline bool is_navi2_supported()
|
||||
{
|
||||
return ck::get_device_name() == "gfx1030" || ck::get_device_name() == "gfx1031" ||
|
||||
ck::get_device_name() == "gfx1032" || ck::get_device_name() == "gfx1034" ||
|
||||
ck::get_device_name() == "gfx1035" || ck::get_device_name() == "gfx1036";
|
||||
}
|
||||
|
||||
inline bool is_navi3_supported()
|
||||
{
|
||||
return ck::get_device_name() == "gfx1100" || ck::get_device_name() == "gfx1101" ||
|
||||
ck::get_device_name() == "gfx1102" || ck::get_device_name() == "gfx1103";
|
||||
}
|
||||
|
||||
} // namespace ck
|
||||
|
||||
@@ -12,21 +12,23 @@ inline void hip_check_error(hipError_t x)
if(x != hipSuccess)
{
std::ostringstream ss;
ss << "HIP runtime error: " << hipGetErrorString(x) << ". " << __FILE__ << ": " << __LINE__
<< "in function: " << __func__;
ss << "HIP runtime error: " << hipGetErrorString(x) << ". "
<< "hip_check_error.hpp"
<< ": " << __LINE__ << "in function: " << __func__;
throw std::runtime_error(ss.str());
}
}

#define HIP_CHECK_ERROR(retval_or_funcall) \
do \
{ \
hipError_t _tmpVal = retval_or_funcall; \
if(_tmpVal != hipSuccess) \
{ \
std::ostringstream ostr; \
ostr << "HIP Function Failed (" << __FILE__ << "," << __LINE__ << ") " \
<< hipGetErrorString(_tmpVal); \
throw std::runtime_error(ostr.str()); \
} \
#define HIP_CHECK_ERROR(retval_or_funcall) \
do \
{ \
hipError_t _tmpVal = retval_or_funcall; \
if(_tmpVal != hipSuccess) \
{ \
std::ostringstream ostr; \
ostr << "HIP Function Failed (" \
<< "hip_check_error.hpp" \
<< "," << __LINE__ << ") " << hipGetErrorString(_tmpVal); \
throw std::runtime_error(ostr.str()); \
} \
} while(0)

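// Example use of the two helpers above (illustrative only; allocation size is arbitrary):
//
//   void* workspace = nullptr;
//   HIP_CHECK_ERROR(hipMalloc(&workspace, 4096)); // throws std::runtime_error on failure
//   hip_check_error(hipMemset(workspace, 0, 4096));
//   HIP_CHECK_ERROR(hipFree(workspace));
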
@@ -30,7 +30,7 @@ float launch_and_time_kernel(const StreamConfig& stream_config,
block_dim.y,
block_dim.z);

printf("Warm up 1 time\n");
printf("Warm up %d times\n", stream_config.cold_niters_);
#endif
// warm up
for(int i = 0; i < stream_config.cold_niters_; ++i)
@@ -103,14 +103,17 @@ float launch_and_time_kernel_with_preprocess(const StreamConfig& stream_config,
block_dim.y,
block_dim.z);

printf("Warm up 1 time\n");
printf("Warm up %d times\n", stream_config.cold_niters_);
#endif
// warm up
preprocess();
kernel<<<grid_dim, block_dim, lds_byte, stream_config.stream_id_>>>(args...);
hip_check_error(hipGetLastError());
for(int i = 0; i < stream_config.cold_niters_; ++i)
{
kernel<<<grid_dim, block_dim, lds_byte, stream_config.stream_id_>>>(args...);
hip_check_error(hipGetLastError());
}

const int nrepeat = 10;
const int nrepeat = stream_config.nrepeat_;
#if DEBUG_LOG
printf("Start running %d times...\n", nrepeat);
#endif

@@ -11,6 +11,6 @@ struct StreamConfig
hipStream_t stream_id_ = nullptr;
bool time_kernel_ = false;
int log_level_ = 0;
int cold_niters_ = 1;
int nrepeat_ = 10;
int cold_niters_ = 5;
int nrepeat_ = 50;
};

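// With the defaults above, a timed run now does 5 warm-up launches and averages over
// 50 repetitions (previously 1 and 10). Typical use from an example (invoker and
// argument names are placeholders):
//
//   float avg_ms = invoker.Run(argument, StreamConfig{nullptr, /*time_kernel=*/true});
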
@@ -0,0 +1,999 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck/utility/common_header.hpp"
#include "ck/utility/loop_scheduler.hpp"
#include "ck/tensor_operation/gpu/thread/threadwise_tensor_slice_transfer.hpp"
#include "ck/tensor_operation/gpu/warp/xdlops_gemm.hpp"
#include "ck/tensor_description/tensor_adaptor.hpp"

// Double LDS buffer
// Prefetch 2 stage
// Local prefetch 1 stage

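// Rough shape of the resulting pipeline (illustrative summary of the code below):
//
//   prologue : global load tile0 -> LDS[ping]; local read LDS[ping] -> regs(ping)
//              global load tile1 -> LDS[pong]; issue global load of tile2
//   hot loop : two stages per iteration, each doing
//              MFMA on regs(cur) | DS write next tile to LDS(cur) | DS read LDS(other) -> regs(other)
//              with ping/pong swapped between the two stages
//   tail     : the last 2-3 tiles are drained without further global loads
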
namespace ck {
|
||||
|
||||
template <index_t BlockSize,
|
||||
index_t MPerBlock,
|
||||
index_t NPerBlock,
|
||||
index_t KPerBlock,
|
||||
index_t ABufferLoadWidth,
|
||||
index_t BBufferLoadWidth,
|
||||
index_t ALDSWriteWidth,
|
||||
index_t BLDSWriteWidth,
|
||||
index_t ALDSReadWidth,
|
||||
index_t BLDSReadWidth,
|
||||
index_t MRepeat,
|
||||
index_t NRepeat,
|
||||
index_t MPerXDL,
|
||||
index_t NPerXDL,
|
||||
index_t KPerXDL>
|
||||
struct BlockwiseGemmXdlops_pipeline_hotloop_inst
|
||||
{
|
||||
static constexpr index_t WaveSize = 64;
|
||||
static constexpr index_t WaveNumM = MPerBlock / (MRepeat * MPerXDL);
|
||||
static constexpr index_t WaveNumN = NPerBlock / (NRepeat * NPerXDL);
|
||||
|
||||
static constexpr index_t A_Buffer_Load_Inst_Num =
|
||||
MPerBlock * KPerBlock / (BlockSize * ABufferLoadWidth);
|
||||
static constexpr index_t B_Buffer_Load_Inst_Num =
|
||||
NPerBlock * KPerBlock / (BlockSize * BBufferLoadWidth);
|
||||
|
||||
static constexpr index_t A_LDS_Write_Inst_Num =
|
||||
MPerBlock * KPerBlock / (BlockSize * ALDSWriteWidth);
|
||||
static constexpr index_t B_LDS_Write_Inst_Num =
|
||||
NPerBlock * KPerBlock / (BlockSize * BLDSWriteWidth);
|
||||
|
||||
static constexpr index_t A_LDS_Read_Inst_Num =
|
||||
WaveNumN * MPerBlock * KPerBlock / (BlockSize * ALDSReadWidth);
|
||||
static constexpr index_t B_LDS_Read_Inst_Num =
|
||||
WaveNumM * MPerBlock * KPerBlock / (BlockSize * BLDSReadWidth);
|
||||
|
||||
static constexpr index_t C_MFMA_Inst_Num =
|
||||
MPerBlock * NPerBlock * KPerBlock / (BlockSize / WaveSize) / (MPerXDL * NPerXDL * KPerXDL);
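// For illustration, with hypothetical parameters BlockSize = 256,
// M/N/KPerBlock = 128/256/32, all load/store widths = 8, M/NRepeat = 2/4 and
// M/N/KPerXDL = 32/32/8, the formulas above evaluate to: buffer loads 2 + 4,
// LDS writes 2 + 4, LDS reads 4 + 4, and 32 MFMA instructions per hot-loop iteration.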
|
||||
|
||||
static constexpr auto Print()
|
||||
{
|
||||
printf(" Blk/Wave Size: %d, %d, M/N/K PerBlk: %d, %d, %d, M/N/K PerXdl: %d, %d, %d\n",
|
||||
BlockSize,
|
||||
WaveSize,
|
||||
MPerBlock,
|
||||
NPerBlock,
|
||||
KPerBlock,
|
||||
MPerXDL,
|
||||
NPerXDL,
|
||||
KPerXDL);
|
||||
|
||||
printf(" A/B buffer load inst: %d, %d\n A/B LDS write inst: %d, %d\n A/B LDS read inst: "
|
||||
"%d, %d\n C MFMA inst: %d\n",
|
||||
A_Buffer_Load_Inst_Num,
|
||||
B_Buffer_Load_Inst_Num,
|
||||
A_LDS_Write_Inst_Num,
|
||||
B_LDS_Write_Inst_Num,
|
||||
A_LDS_Read_Inst_Num,
|
||||
B_LDS_Read_Inst_Num,
|
||||
C_MFMA_Inst_Num);
|
||||
}
|
||||
};
|
||||
|
||||
template <
|
||||
index_t BlockSize,
|
||||
typename FloatAB,
|
||||
typename FloatAcc,
|
||||
typename ATileDesc,
|
||||
typename BTileDesc,
|
||||
typename AMmaTileDesc,
|
||||
typename BMmaTileDesc,
|
||||
index_t MPerBlock,
|
||||
index_t NPerBlock,
|
||||
index_t KPerBlock,
|
||||
index_t MPerXDL,
|
||||
index_t NPerXDL,
|
||||
index_t MRepeat,
|
||||
index_t NRepeat,
|
||||
index_t KPack,
|
||||
bool TransposeC = false,
|
||||
index_t AMmaKStride =
|
||||
KPack* XdlopsGemm<FloatAB, MPerXDL, NPerXDL, KPack, FloatAB, TransposeC>{}.K0PerXdlops,
|
||||
index_t BMmaKStride =
|
||||
KPack* XdlopsGemm<FloatAB, MPerXDL, NPerXDL, KPack, FloatAB, TransposeC>{}.K0PerXdlops>
|
||||
struct BlockwiseGemmXdlops_pipeline_v4
|
||||
{
|
||||
static constexpr auto I0 = Number<0>{};
|
||||
static constexpr auto I1 = Number<1>{};
|
||||
static constexpr auto I2 = Number<2>{};
|
||||
static constexpr auto I3 = Number<3>{};
|
||||
|
||||
using ThisThreadBlock = ThisThreadBlock<BlockSize>;
|
||||
|
||||
static constexpr index_t WaveSize = get_warp_size();
|
||||
|
||||
static constexpr index_t A_K0 = ATileDesc{}.GetLength(I0);
|
||||
static constexpr index_t B_K0 = BTileDesc{}.GetLength(I0);
|
||||
static constexpr index_t A_K1 = ATileDesc{}.GetLength(I2);
|
||||
static constexpr index_t B_K1 = BTileDesc{}.GetLength(I2);
|
||||
|
||||
static constexpr auto xdlops_gemm =
|
||||
XdlopsGemm<FloatAB, MPerXDL, NPerXDL, KPack, FloatAB, TransposeC>{};
|
||||
|
||||
static constexpr index_t KPerThread = KPerBlock / xdlops_gemm.K0PerXdlops;
|
||||
static constexpr index_t KRepeat = KPerThread / KPack;
|
||||
|
||||
static constexpr index_t MWaves = MPerBlock / (MRepeat * MPerXDL);
|
||||
static constexpr index_t NWaves = NPerBlock / (NRepeat * NPerXDL);
|
||||
|
||||
using HotLoopInstList = BlockwiseGemmXdlops_pipeline_hotloop_inst<BlockSize,
|
||||
MPerBlock,
|
||||
NPerBlock,
|
||||
KPerBlock,
|
||||
A_K1,
|
||||
B_K1,
|
||||
A_K1,
|
||||
B_K1,
|
||||
KPack,
|
||||
KPack,
|
||||
MRepeat,
|
||||
NRepeat,
|
||||
MPerXDL,
|
||||
NPerXDL,
|
||||
xdlops_gemm.KPerXdlops>;
|
||||
|
||||
static_assert(KPerThread % KPack == 0,
|
||||
"Wrong KPack setting; try increasing KPerThread or decreasing KPack");
|
||||
|
||||
StaticBufferTupleOfVector<AddressSpaceEnum::Vgpr,
|
||||
FloatAcc,
|
||||
MRepeat * NRepeat,
|
||||
xdlops_gemm.GetRegSizePerXdlops(),
|
||||
true>
|
||||
c_thread_buf_;
|
||||
|
||||
__host__ __device__ constexpr auto& GetCThreadBuffer() { return c_thread_buf_; }
|
||||
|
||||
__device__ static auto GetWaveIdx()
|
||||
{
|
||||
const index_t thread_id = ThisThreadBlock::GetThreadId();
|
||||
|
||||
constexpr auto threadid_to_wave_idx_adaptor = make_single_stage_tensor_adaptor(
|
||||
make_tuple(make_merge_transform(make_tuple(MWaves, NWaves, WaveSize))),
|
||||
make_tuple(Sequence<0, 1, 2>{}),
|
||||
make_tuple(Sequence<0>{}));
|
||||
|
||||
return threadid_to_wave_idx_adaptor.CalculateBottomIndex(make_multi_index(thread_id));
|
||||
}
|
||||
|
||||
__device__ static auto CalculateAThreadOriginDataIndex()
|
||||
{
|
||||
const auto wave_idx = GetWaveIdx();
|
||||
|
||||
const auto waveId_m = wave_idx[I0];
|
||||
|
||||
const auto xdlops_a_idx = xdlops_gemm.CalculateAThreadOriginDataIndex();
|
||||
|
||||
return make_tuple(0, waveId_m, xdlops_a_idx[I1], KPack * xdlops_a_idx[I0]);
|
||||
}
|
||||
|
||||
__device__ static auto CalculateBThreadOriginDataIndex()
|
||||
{
|
||||
const auto wave_idx = GetWaveIdx();
|
||||
|
||||
const auto waveId_n = wave_idx[I1];
|
||||
|
||||
const auto xdlops_b_idx = xdlops_gemm.CalculateBThreadOriginDataIndex();
|
||||
|
||||
return make_tuple(0, waveId_n, xdlops_b_idx[I1], KPack * xdlops_b_idx[I0]);
|
||||
}
|
||||
|
||||
template <index_t m0, index_t n0, index_t xdlops_i, index_t blk_i>
|
||||
__device__ static auto
|
||||
CalculateCThreadOriginDataIndex(Number<m0>, Number<n0>, Number<xdlops_i>, Number<blk_i>)
|
||||
{
|
||||
const auto wave_idx = GetWaveIdx();
|
||||
|
||||
const auto waveId_m = wave_idx[I0];
|
||||
const auto waveId_n = wave_idx[I1];
|
||||
|
||||
const auto blk_idx = xdlops_gemm.GetBeginOfThreadBlk(xdlops_i, blk_i);
|
||||
|
||||
constexpr auto mrepeat_mwave_mperxdl_to_m_adaptor = make_single_stage_tensor_adaptor(
|
||||
make_tuple(make_unmerge_transform(make_tuple(MRepeat, MWaves, MPerXDL))),
|
||||
make_tuple(Sequence<0>{}),
|
||||
make_tuple(Sequence<0, 1, 2>{}));
|
||||
|
||||
constexpr auto nrepeat_nwave_nperxdl_to_n_adaptor = make_single_stage_tensor_adaptor(
|
||||
make_tuple(make_unmerge_transform(make_tuple(NRepeat, NWaves, NPerXDL))),
|
||||
make_tuple(Sequence<0>{}),
|
||||
make_tuple(Sequence<0, 1, 2>{}));
|
||||
|
||||
const index_t c_thread_m = mrepeat_mwave_mperxdl_to_m_adaptor.CalculateBottomIndex(
|
||||
make_tuple(m0, waveId_m, blk_idx[I0]))[I0];
|
||||
const index_t c_thread_n = nrepeat_nwave_nperxdl_to_n_adaptor.CalculateBottomIndex(
|
||||
make_tuple(n0, waveId_n, blk_idx[I1]))[I0];
|
||||
|
||||
return make_tuple(c_thread_m, c_thread_n);
|
||||
}
|
||||
|
||||
template <index_t m0, index_t n0, index_t xdlops_i, index_t blk_i>
|
||||
__device__ static auto
|
||||
CalculateCThreadOriginDataIndex8D(Number<m0>, Number<n0>, Number<xdlops_i>, Number<blk_i>)
|
||||
{
|
||||
const auto wave_idx = GetWaveIdx();
|
||||
|
||||
const auto waveId_m = wave_idx[I0];
|
||||
const auto waveId_n = wave_idx[I1];
|
||||
|
||||
const auto blk_idx = xdlops_gemm.GetBeginOfThreadBlk4D(xdlops_i, blk_i);
|
||||
|
||||
return make_tuple(
|
||||
m0, n0, waveId_m, waveId_n, blk_idx[I0], blk_idx[I1], blk_idx[I2], blk_idx[I3]);
|
||||
}
|
||||
|
||||
using Tuple4 = decltype(CalculateAThreadOriginDataIndex());
|
||||
|
||||
__host__ __device__
|
||||
BlockwiseGemmXdlops_pipeline_v4(Tuple4 a_origin = CalculateAThreadOriginDataIndex(),
|
||||
Tuple4 b_origin = CalculateBThreadOriginDataIndex())
|
||||
: a_thread_copy_(a_origin), b_thread_copy_(b_origin)
|
||||
{
|
||||
static_assert(AMmaTileDesc::IsKnownAtCompileTime() && BMmaTileDesc::IsKnownAtCompileTime(),
|
||||
"wrong! Desc should be known at compile-time");
|
||||
|
||||
static_assert(ThisThreadBlock::GetNumOfThread() == MWaves * NWaves * WaveSize,
|
||||
"ThisThreadBlock::GetNumOfThread() != MWaves * NWaves * WaveSize\n");
|
||||
|
||||
static_assert(MPerBlock % (MPerXDL * MRepeat) == 0 && NPerBlock % (NPerXDL * NRepeat) == 0,
|
||||
"wrong!");
|
||||
|
||||
// HotLoopInstList::Print();
|
||||
}
|
||||
|
||||
// transposed XDL output supporting C_xdl' = B_xdl' * A_xdl'
|
||||
__host__ __device__ static constexpr auto GetCThreadDescriptor_M0_N0_M1_N1_M2_N2_N3_N4()
|
||||
{
|
||||
constexpr auto c_m0_m1_m2_n_tblk_lens = xdlops_gemm.GetCM0M1M2NThreadBlkLengths();
|
||||
|
||||
constexpr auto M0 = c_m0_m1_m2_n_tblk_lens[I0];
|
||||
constexpr auto M1 = c_m0_m1_m2_n_tblk_lens[I1];
|
||||
constexpr auto M2 = c_m0_m1_m2_n_tblk_lens[I2];
|
||||
constexpr auto N = c_m0_m1_m2_n_tblk_lens[I3];
|
||||
|
||||
return make_naive_tensor_descriptor_packed(
|
||||
make_tuple(Number<MRepeat>{}, Number<NRepeat>{}, I1, I1, N, M0, M1, M2));
|
||||
}
|
||||
|
||||
// XDL output supporting C_xdl = A_xdl * B_xdl
|
||||
__host__ __device__ static constexpr auto GetCThreadDescriptor_M0_N0_M1_N1_M2_M3_M4_N2()
|
||||
{
|
||||
constexpr auto c_m0_m1_m2_n_tblk_lens = xdlops_gemm.GetCM0M1M2NThreadBlkLengths();
|
||||
|
||||
constexpr auto M0 = c_m0_m1_m2_n_tblk_lens[I0];
|
||||
constexpr auto M1 = c_m0_m1_m2_n_tblk_lens[I1];
|
||||
constexpr auto M2 = c_m0_m1_m2_n_tblk_lens[I2];
|
||||
constexpr auto N = c_m0_m1_m2_n_tblk_lens[I3];
|
||||
|
||||
return make_naive_tensor_descriptor_packed(
|
||||
make_tuple(Number<MRepeat>{}, Number<NRepeat>{}, I1, I1, M0, M1, M2, N));
|
||||
}
|
||||
|
||||
__host__ __device__ static constexpr auto GetCThreadDescriptor_G_M0_N0_M1_N1_M2_M3_M4_N2()
|
||||
{
|
||||
constexpr auto c_m0_m1_m2_n_tblk_lens = xdlops_gemm.GetCM0M1M2NThreadBlkLengths();
|
||||
|
||||
constexpr auto M0 = c_m0_m1_m2_n_tblk_lens[I0];
|
||||
constexpr auto M1 = c_m0_m1_m2_n_tblk_lens[I1];
|
||||
constexpr auto M2 = c_m0_m1_m2_n_tblk_lens[I2];
|
||||
constexpr auto N = c_m0_m1_m2_n_tblk_lens[I3];
|
||||
|
||||
return make_naive_tensor_descriptor_packed(
|
||||
make_tuple(I1, Number<MRepeat>{}, Number<NRepeat>{}, I1, I1, M0, M1, M2, N));
|
||||
}
|
||||
|
||||
// transposed XDL output supporting C_xdl' = B_xdl' * A_xdl'
|
||||
__host__ __device__ static constexpr auto GetCBlockDescriptor_M0_N0_M1_N1_M2_N2_N3_N4()
|
||||
{
|
||||
constexpr auto c_block_desc_m0_n0_m1_n1_m2_n2 =
|
||||
make_naive_tensor_descriptor_packed(make_tuple(Number<MRepeat>{},
|
||||
Number<NRepeat>{},
|
||||
Number<MWaves>{},
|
||||
Number<NWaves>{},
|
||||
Number<MPerXDL>{},
|
||||
Number<NPerXDL>{}));
|
||||
|
||||
return xdlops_gemm.MakeCDescriptor_M0_N0_M1_N1_M2_N2_N3_N4(c_block_desc_m0_n0_m1_n1_m2_n2);
|
||||
}
|
||||
|
||||
// XDL output supporting C_xdl = A_xdl * B_xdl
|
||||
__host__ __device__ static constexpr auto GetCBlockDescriptor_M0_N0_M1_N1_M2_M3_M4_N2()
|
||||
{
|
||||
constexpr auto c_block_desc_m0_n0_m1_n1_m2_n2 =
|
||||
make_naive_tensor_descriptor_packed(make_tuple(Number<MRepeat>{},
|
||||
Number<NRepeat>{},
|
||||
Number<MWaves>{},
|
||||
Number<NWaves>{},
|
||||
Number<MPerXDL>{},
|
||||
Number<NPerXDL>{}));
|
||||
|
||||
return xdlops_gemm.MakeCDescriptor_M0_N0_M1_N1_M2_M3_M4_N2(c_block_desc_m0_n0_m1_n1_m2_n2);
|
||||
}
|
||||
|
||||
__host__ __device__ static constexpr auto GetCBlockDescriptor_G_M0_N0_M1_N1_M2_M3_M4_N2()
|
||||
{
|
||||
constexpr auto c_block_desc_g_m0_n0_m1_n1_m2_n2 =
|
||||
make_naive_tensor_descriptor_packed(make_tuple(I1,
|
||||
Number<MRepeat>{},
|
||||
Number<NRepeat>{},
|
||||
Number<MWaves>{},
|
||||
Number<NWaves>{},
|
||||
Number<MPerXDL>{},
|
||||
Number<NPerXDL>{}));
|
||||
|
||||
return xdlops_gemm.MakeCDescriptor_G_M0_N0_M1_N1_M2_M3_M4_N2(
|
||||
c_block_desc_g_m0_n0_m1_n1_m2_n2);
|
||||
}
|
||||
|
||||
template <typename CGridDesc_M_N>
|
||||
__host__ __device__ static constexpr auto
|
||||
MakeCGridDescriptor_M0_N0_M1_N1_M2_M3_M4_N2(const CGridDesc_M_N& c_grid_desc_m_n)
|
||||
{
|
||||
const auto M = c_grid_desc_m_n.GetLength(I0);
|
||||
const auto N = c_grid_desc_m_n.GetLength(I1);
|
||||
|
||||
const auto c_grid_desc_m0_n0_m1_n1_m2_n2 = transform_tensor_descriptor(
|
||||
c_grid_desc_m_n,
|
||||
make_tuple(make_unmerge_transform(make_tuple(M / (MWaves * MPerXDL), MWaves, MPerXDL)),
|
||||
make_unmerge_transform(make_tuple(N / (NWaves * NPerXDL), NWaves, NPerXDL))),
|
||||
make_tuple(Sequence<0>{}, Sequence<1>{}),
|
||||
make_tuple(Sequence<0, 2, 4>{}, Sequence<1, 3, 5>{}));
|
||||
|
||||
return xdlops_gemm.MakeCDescriptor_M0_N0_M1_N1_M2_M3_M4_N2(c_grid_desc_m0_n0_m1_n1_m2_n2);
|
||||
}
|
||||
|
||||
template <typename CGridDesc_G_M_N>
|
||||
__host__ __device__ static constexpr auto
|
||||
MakeCGridDescriptor_G_M0_N0_M1_N1_M2_M3_M4_N2(const CGridDesc_G_M_N& c_grid_desc_g_m_n)
|
||||
{
|
||||
const auto G = c_grid_desc_g_m_n.GetLength(I0);
|
||||
const auto M = c_grid_desc_g_m_n.GetLength(I1);
|
||||
const auto N = c_grid_desc_g_m_n.GetLength(I2);
|
||||
|
||||
const auto c_grid_desc_g_m0_n0_m1_n1_m2_n2 = transform_tensor_descriptor(
|
||||
c_grid_desc_g_m_n,
|
||||
make_tuple(make_pass_through_transform(G),
|
||||
make_unmerge_transform(make_tuple(M / (MWaves * MPerXDL), MWaves, MPerXDL)),
|
||||
make_unmerge_transform(make_tuple(N / (NWaves * NPerXDL), NWaves, NPerXDL))),
|
||||
make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2>{}),
|
||||
make_tuple(Sequence<0>{}, Sequence<1, 3, 5>{}, Sequence<2, 4, 6>{}));
|
||||
|
||||
return xdlops_gemm.MakeCDescriptor_G_M0_N0_M1_N1_M2_M3_M4_N2(
|
||||
c_grid_desc_g_m0_n0_m1_n1_m2_n2);
|
||||
}
|
||||
|
||||
__device__ static constexpr auto HotLoopScheduler()
|
||||
{
|
||||
// schedule
|
||||
constexpr auto num_ds_read_inst =
|
||||
HotLoopInstList::A_LDS_Read_Inst_Num + HotLoopInstList::B_LDS_Read_Inst_Num;
|
||||
constexpr auto num_ds_write_inst =
|
||||
HotLoopInstList::A_LDS_Write_Inst_Num + HotLoopInstList::B_LDS_Write_Inst_Num;
|
||||
;
|
||||
constexpr auto num_buffer_load_inst =
|
||||
HotLoopInstList::A_Buffer_Load_Inst_Num + HotLoopInstList::B_Buffer_Load_Inst_Num;
|
||||
;
|
||||
constexpr auto num_mfma_inst = HotLoopInstList::C_MFMA_Inst_Num;
|
||||
|
||||
constexpr auto num_issue = num_buffer_load_inst;
|
||||
|
||||
static_for<0, num_issue, 1>{}([&](auto i) {
|
||||
ignore = i;
|
||||
__builtin_amdgcn_sched_group_barrier(0x008, 1, 0); // MFMA
|
||||
__builtin_amdgcn_sched_group_barrier(
|
||||
0x100, num_ds_read_inst / num_buffer_load_inst, 0); // DS read
|
||||
__builtin_amdgcn_sched_group_barrier(0x008, 1, 0); // MFMA
|
||||
__builtin_amdgcn_sched_group_barrier(
|
||||
0x200, num_ds_write_inst / num_buffer_load_inst, 0); // DS write
|
||||
__builtin_amdgcn_sched_group_barrier(0x008, 1, 0); // MFMA
|
||||
__builtin_amdgcn_sched_group_barrier(0x020, 1, 0); // VMEM read
|
||||
__builtin_amdgcn_sched_group_barrier(
|
||||
0x008, num_mfma_inst / num_buffer_load_inst - 3, 0); // MFMA
|
||||
});
|
||||
}
|
||||
|
||||
template <index_t stage>
|
||||
__device__ static constexpr auto TailScheduler()
|
||||
{
|
||||
}
|
||||
|
||||
template <>
|
||||
__device__ static constexpr auto TailScheduler<1>()
|
||||
{
|
||||
// schedule
|
||||
constexpr auto num_ds_read_inst =
|
||||
HotLoopInstList::A_LDS_Read_Inst_Num + HotLoopInstList::B_LDS_Read_Inst_Num;
|
||||
constexpr auto num_ds_write_inst =
|
||||
HotLoopInstList::A_LDS_Write_Inst_Num + HotLoopInstList::B_LDS_Write_Inst_Num;
|
||||
;
|
||||
constexpr auto num_mfma_inst = HotLoopInstList::C_MFMA_Inst_Num;
|
||||
|
||||
constexpr auto num_issue = num_ds_write_inst;
|
||||
|
||||
static_for<0, num_issue, 1>{}([&](auto i) {
|
||||
ignore = i;
|
||||
__builtin_amdgcn_sched_group_barrier(0x008, 1, 0); // MFMA
|
||||
__builtin_amdgcn_sched_group_barrier(0x200, 1, 0); // DS write
|
||||
__builtin_amdgcn_sched_group_barrier(0x008, 1, 0); // MFMA
|
||||
__builtin_amdgcn_sched_group_barrier(0x100, 1, 0); // DS read
|
||||
__builtin_amdgcn_sched_group_barrier(0x008, 1, 0); // MFMA
|
||||
__builtin_amdgcn_sched_group_barrier(
|
||||
0x100, num_ds_read_inst / num_ds_write_inst - 1, 0); // DS read
|
||||
__builtin_amdgcn_sched_group_barrier(
|
||||
0x008, num_mfma_inst / num_ds_write_inst - 3, 0); // MFMA
|
||||
});
|
||||
}
|
||||
|
||||
template <>
|
||||
__device__ static constexpr auto TailScheduler<2>()
|
||||
{
|
||||
// schedule
|
||||
constexpr auto num_ds_read_inst =
|
||||
HotLoopInstList::A_LDS_Read_Inst_Num + HotLoopInstList::B_LDS_Read_Inst_Num;
|
||||
constexpr auto num_mfma_inst = HotLoopInstList::C_MFMA_Inst_Num;
|
||||
|
||||
constexpr auto num_issue = num_ds_read_inst;
|
||||
|
||||
static_for<0, num_issue, 1>{}([&](auto i) {
|
||||
ignore = i;
|
||||
__builtin_amdgcn_sched_group_barrier(0x100, 1, 0); // DS read
|
||||
__builtin_amdgcn_sched_group_barrier(
|
||||
0x008, num_mfma_inst / num_ds_read_inst, 0); // MFMA
|
||||
});
|
||||
}
|
||||
|
||||
static constexpr AMmaTileDesc a_block_desc_m0_m1_m2_k;
|
||||
static constexpr BMmaTileDesc b_block_desc_n0_n1_n2_k;
|
||||
|
||||
template <bool HasMainLoop,
|
||||
index_t TailNum,
|
||||
typename AGridDesc,
|
||||
typename ABlockDesc,
|
||||
typename ABlockTransfer,
|
||||
typename AGridBuffer,
|
||||
typename ABlockBuffer,
|
||||
typename ABlockTransferStep,
|
||||
typename BGridDesc,
|
||||
typename BBlockDesc,
|
||||
typename BBlockTransfer,
|
||||
typename BGridBuffer,
|
||||
typename BBlockBuffer,
|
||||
typename BBlockTransferStep,
|
||||
typename CThreadBuffer>
|
||||
__device__ void Run(const AGridDesc& a_grid_desc,
|
||||
const ABlockDesc& a_block_desc,
|
||||
ABlockTransfer& a_blockwise_copy,
|
||||
const AGridBuffer& a_grid_buf,
|
||||
ABlockBuffer& a_block_buf,
|
||||
const ABlockTransferStep& a_block_copy_step,
|
||||
const BGridDesc& b_grid_desc,
|
||||
const BBlockDesc& b_block_desc,
|
||||
BBlockTransfer& b_blockwise_copy,
|
||||
const BGridBuffer& b_grid_buf,
|
||||
BBlockBuffer& b_block_buf,
|
||||
const BBlockTransferStep& b_block_copy_step,
|
||||
CThreadBuffer& c_thread_buf,
|
||||
index_t num_loop) const
|
||||
{
|
||||
__builtin_amdgcn_sched_barrier(0);
|
||||
auto a_thread_buf = make_static_buffer<AddressSpaceEnum::Vgpr, FloatAB>(
|
||||
a_thread_desc_.GetElementSpaceSize());
|
||||
auto b_thread_buf = make_static_buffer<AddressSpaceEnum::Vgpr, FloatAB>(
|
||||
b_thread_desc_.GetElementSpaceSize());
|
||||
|
||||
StaticallyIndexedArray<decltype(a_thread_buf), Number<2>{}> a_thread_bufs;
|
||||
StaticallyIndexedArray<decltype(b_thread_buf), Number<2>{}> b_thread_bufs;
|
||||
// Inst List:
|
||||
// ds_read_b128: 16
|
||||
// ds_write_b128: 8
|
||||
// buffer_load_dwordx4: 16
|
||||
// v_mfma: 0
|
||||
// -------------------------------------------------------------------------------------------
|
||||
|
||||
// Global prefetch 1st, Fill Ping LDS
|
||||
a_blockwise_copy.RunRead(a_grid_desc, a_grid_buf);
|
||||
b_blockwise_copy.RunRead(b_grid_desc, b_grid_buf);
|
||||
|
||||
a_blockwise_copy.MoveSrcSliceWindow(a_grid_desc, a_block_copy_step);
|
||||
b_blockwise_copy.MoveSrcSliceWindow(b_grid_desc, b_block_copy_step);
|
||||
|
||||
a_blockwise_copy.RunWrite(a_block_desc, a_block_buf.At(I0));
|
||||
b_blockwise_copy.RunWrite(b_block_desc, b_block_buf.At(I0));
|
||||
|
||||
// Local prefetch 1th, Fill Ping Reg
|
||||
block_sync_lds();
|
||||
static_for<0, KRepeat, 1>{}([&](auto k) {
|
||||
static_for<0, MRepeat, 1>{}([&](auto m0) {
|
||||
a_thread_copy_.Run(a_block_desc_m0_m1_m2_k,
|
||||
make_tuple(m0, I0, I0, Number<k * AMmaKStride>{}),
|
||||
a_block_buf.At(I0),
|
||||
a_thread_desc_,
|
||||
make_tuple(m0, I0, k, I0),
|
||||
a_thread_bufs(I0));
|
||||
static_for<0, NRepeat, 1>{}([&](auto n0) {
|
||||
b_thread_copy_.Run(b_block_desc_n0_n1_n2_k,
|
||||
make_tuple(n0, I0, I0, Number<k * BMmaKStride>{}),
|
||||
b_block_buf.At(I0),
|
||||
b_thread_desc_,
|
||||
make_tuple(n0, I0, k, I0),
|
||||
b_thread_bufs(I0));
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
// Global prefetch 2nd, Fill Pong LDS
|
||||
a_blockwise_copy.RunRead(a_grid_desc, a_grid_buf);
|
||||
b_blockwise_copy.RunRead(b_grid_desc, b_grid_buf);
|
||||
|
||||
a_blockwise_copy.MoveSrcSliceWindow(a_grid_desc, a_block_copy_step);
|
||||
b_blockwise_copy.MoveSrcSliceWindow(b_grid_desc, b_block_copy_step);
|
||||
|
||||
a_blockwise_copy.RunWrite(a_block_desc, a_block_buf.At(I1));
|
||||
b_blockwise_copy.RunWrite(b_block_desc, b_block_buf.At(I1));
|
||||
|
||||
// Global prefetch 3rd
|
||||
a_blockwise_copy.RunRead(a_grid_desc, a_grid_buf);
|
||||
b_blockwise_copy.RunRead(b_grid_desc, b_grid_buf);
|
||||
|
||||
a_blockwise_copy.MoveSrcSliceWindow(a_grid_desc, a_block_copy_step);
|
||||
b_blockwise_copy.MoveSrcSliceWindow(b_grid_desc, b_block_copy_step);
|
||||
|
||||
// Initialize C
|
||||
c_thread_buf.Clear();
|
||||
|
||||
// main body
|
||||
if constexpr(HasMainLoop)
|
||||
{
|
||||
index_t i = 0;
|
||||
// This hot loop is unrolled twice to implement the double local buffer (ping/pong) strategy
|
||||
do
|
||||
{
|
||||
// -------------------------------------------------------------------------------------------
|
||||
using PingP1 = Number<0>;
|
||||
using PongP1 = Number<1>;
|
||||
// MFMA: Ping Reg
|
||||
// DS_WRITE: To Ping LDS
|
||||
// DS_READ: Pong LDS to Pong Reg
|
||||
block_sync_lds();
|
||||
|
||||
static_for<0, KRepeat, 1>{}([&](auto k) {
|
||||
static_for<0, MRepeat, 1>{}([&](auto m0) {
|
||||
a_thread_copy_.Run(a_block_desc_m0_m1_m2_k,
|
||||
make_tuple(m0, I0, I0, Number<k * AMmaKStride>{}),
|
||||
a_block_buf.At(PongP1{}),
|
||||
a_thread_desc_,
|
||||
make_tuple(m0, I0, k, I0),
|
||||
a_thread_bufs(PongP1{}));
|
||||
static_for<0, NRepeat, 1>{}([&](auto n0) {
|
||||
b_thread_copy_.Run(b_block_desc_n0_n1_n2_k,
|
||||
make_tuple(n0, I0, I0, Number<k * BMmaKStride>{}),
|
||||
b_block_buf.At(PongP1{}),
|
||||
b_thread_desc_,
|
||||
make_tuple(n0, I0, k, I0),
|
||||
b_thread_bufs(PongP1{}));
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
a_blockwise_copy.RunWrite(a_block_desc, a_block_buf.At(PingP1{}));
|
||||
b_blockwise_copy.RunWrite(b_block_desc, b_block_buf.At(PingP1{}));
|
||||
|
||||
a_blockwise_copy.RunRead(a_grid_desc, a_grid_buf);
|
||||
b_blockwise_copy.RunRead(b_grid_desc, b_grid_buf);
|
||||
|
||||
a_blockwise_copy.MoveSrcSliceWindow(a_grid_desc, a_block_copy_step);
|
||||
b_blockwise_copy.MoveSrcSliceWindow(b_grid_desc, b_block_copy_step);
|
||||
|
||||
static_for<0, KRepeat, 1>{}([&](auto k0) {
|
||||
static_for<0, MRepeat, 1>{}([&](auto m0) {
|
||||
static_for<0, NRepeat, 1>{}([&](auto n0) {
|
||||
vector_type<FloatAB, KPack> a_thread_vec;
|
||||
vector_type<FloatAB, KPack> b_thread_vec;
|
||||
|
||||
static_for<0, KPack, 1>{}([&](auto ik) {
|
||||
a_thread_vec.template AsType<FloatAB>()(ik) =
|
||||
a_thread_bufs[PingP1{}][Number<a_thread_desc_.CalculateOffset(
|
||||
make_tuple(m0, I0, k0, ik))>{}];
|
||||
b_thread_vec.template AsType<FloatAB>()(ik) =
|
||||
b_thread_bufs[PingP1{}][Number<b_thread_desc_.CalculateOffset(
|
||||
make_tuple(n0, I0, k0, ik))>{}];
|
||||
});
|
||||
|
||||
using mfma_input_type =
|
||||
typename vector_type<FloatAB, xdlops_gemm.K1PerXdlops>::type;
|
||||
|
||||
constexpr index_t c_offset =
|
||||
c_thread_desc_.CalculateOffset(make_tuple(m0, n0, 0));
|
||||
|
||||
xdlops_gemm.template Run(
|
||||
a_thread_vec.template AsType<mfma_input_type>(),
|
||||
b_thread_vec.template AsType<mfma_input_type>(),
|
||||
c_thread_buf.GetVectorTypeReference(Number<c_offset>{}));
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
HotLoopScheduler();
|
||||
__builtin_amdgcn_sched_barrier(0);
|
||||
|
||||
// -------------------------------------------------------------------------------------------
|
||||
using PingP2 = Number<1>;
|
||||
using PongP2 = Number<0>;
|
||||
// MFMA: Pong Reg
|
||||
// DS_WRITE: To Pong LDS
|
||||
// DS_READ: Ping LDS to Ping Reg
|
||||
block_sync_lds();
|
||||
|
||||
static_for<0, KRepeat, 1>{}([&](auto k) {
|
||||
static_for<0, MRepeat, 1>{}([&](auto m0) {
|
||||
a_thread_copy_.Run(a_block_desc_m0_m1_m2_k,
|
||||
make_tuple(m0, I0, I0, Number<k * AMmaKStride>{}),
|
||||
a_block_buf.At(PongP2{}),
|
||||
a_thread_desc_,
|
||||
make_tuple(m0, I0, k, I0),
|
||||
a_thread_bufs(PongP2{}));
|
||||
static_for<0, NRepeat, 1>{}([&](auto n0) {
|
||||
b_thread_copy_.Run(b_block_desc_n0_n1_n2_k,
|
||||
make_tuple(n0, I0, I0, Number<k * BMmaKStride>{}),
|
||||
b_block_buf.At(PongP2{}),
|
||||
b_thread_desc_,
|
||||
make_tuple(n0, I0, k, I0),
|
||||
b_thread_bufs(PongP2{}));
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
a_blockwise_copy.RunWrite(a_block_desc, a_block_buf.At(PingP2{}));
|
||||
b_blockwise_copy.RunWrite(b_block_desc, b_block_buf.At(PingP2{}));
|
||||
|
||||
a_blockwise_copy.RunRead(a_grid_desc, a_grid_buf);
|
||||
b_blockwise_copy.RunRead(b_grid_desc, b_grid_buf);
|
||||
|
||||
a_blockwise_copy.MoveSrcSliceWindow(a_grid_desc, a_block_copy_step);
|
||||
b_blockwise_copy.MoveSrcSliceWindow(b_grid_desc, b_block_copy_step);
|
||||
|
||||
                static_for<0, KRepeat, 1>{}([&](auto k0) {
                    static_for<0, MRepeat, 1>{}([&](auto m0) {
                        static_for<0, NRepeat, 1>{}([&](auto n0) {
                            vector_type<FloatAB, KPack> a_thread_vec;
                            vector_type<FloatAB, KPack> b_thread_vec;

                            static_for<0, KPack, 1>{}([&](auto ik) {
                                a_thread_vec.template AsType<FloatAB>()(ik) =
                                    a_thread_bufs[PingP2{}][Number<a_thread_desc_.CalculateOffset(
                                        make_tuple(m0, I0, k0, ik))>{}];
                                b_thread_vec.template AsType<FloatAB>()(ik) =
                                    b_thread_bufs[PingP2{}][Number<b_thread_desc_.CalculateOffset(
                                        make_tuple(n0, I0, k0, ik))>{}];
                            });

                            using mfma_input_type =
                                typename vector_type<FloatAB, xdlops_gemm.K1PerXdlops>::type;

                            constexpr index_t c_offset =
                                c_thread_desc_.CalculateOffset(make_tuple(m0, n0, 0));

                            xdlops_gemm.template Run(
                                a_thread_vec.template AsType<mfma_input_type>(),
                                b_thread_vec.template AsType<mfma_input_type>(),
                                c_thread_buf.GetVectorTypeReference(Number<c_offset>{}));
                        });
                    });
                });

                HotLoopScheduler();
                __builtin_amdgcn_sched_barrier(0);

                i += 2;
            } while(i < (num_loop - 3));
        }

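        // Pipeline drain: the unrolled tail below finishes the iterations that have no
        // matching global prefetch left; TailNum selects how many MFMA passes remain.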
        // tail
        if constexpr(TailNum == 3)
        {
            using PingP1 = Number<0>;
            using PongP1 = Number<1>;
            // MFMA: Ping Reg
            // DS_WRITE: To Ping LDS
            // DS_READ: Pong LDS to Pong Reg
            block_sync_lds();

            static_for<0, KRepeat, 1>{}([&](auto k) {
                static_for<0, MRepeat, 1>{}([&](auto m0) {
                    a_thread_copy_.Run(a_block_desc_m0_m1_m2_k,
                        make_tuple(m0, I0, I0, Number<k * AMmaKStride>{}),
                        a_block_buf.At(PongP1{}),
                        a_thread_desc_,
                        make_tuple(m0, I0, k, I0),
                        a_thread_bufs(PongP1{}));
                    static_for<0, NRepeat, 1>{}([&](auto n0) {
                        b_thread_copy_.Run(b_block_desc_n0_n1_n2_k,
                            make_tuple(n0, I0, I0, Number<k * BMmaKStride>{}),
                            b_block_buf.At(PongP1{}),
                            b_thread_desc_,
                            make_tuple(n0, I0, k, I0),
                            b_thread_bufs(PongP1{}));
                    });
                });
            });

            a_blockwise_copy.RunWrite(a_block_desc, a_block_buf.At(PingP1{}));
            b_blockwise_copy.RunWrite(b_block_desc, b_block_buf.At(PingP1{}));

            static_for<0, KRepeat, 1>{}([&](auto k0) {
                static_for<0, MRepeat, 1>{}([&](auto m0) {
                    static_for<0, NRepeat, 1>{}([&](auto n0) {
                        vector_type<FloatAB, KPack> a_thread_vec;
                        vector_type<FloatAB, KPack> b_thread_vec;

                        static_for<0, KPack, 1>{}([&](auto ik) {
                            a_thread_vec.template AsType<FloatAB>()(ik) =
                                a_thread_bufs[PingP1{}][Number<a_thread_desc_.CalculateOffset(
                                    make_tuple(m0, I0, k0, ik))>{}];
                            b_thread_vec.template AsType<FloatAB>()(ik) =
                                b_thread_bufs[PingP1{}][Number<b_thread_desc_.CalculateOffset(
                                    make_tuple(n0, I0, k0, ik))>{}];
                        });

                        using mfma_input_type =
                            typename vector_type<FloatAB, xdlops_gemm.K1PerXdlops>::type;

                        constexpr index_t c_offset =
                            c_thread_desc_.CalculateOffset(make_tuple(m0, n0, 0));

                        xdlops_gemm.template Run(
                            a_thread_vec.template AsType<mfma_input_type>(),
                            b_thread_vec.template AsType<mfma_input_type>(),
                            c_thread_buf.GetVectorTypeReference(Number<c_offset>{}));
                    });
                });
            });

            TailScheduler<1>();
            __builtin_amdgcn_sched_barrier(0);

            // -------------------------------------------------------------------------------------------
            using PingP2 = Number<1>;
            using PongP2 = Number<0>;
            // MFMA: Pong Reg
            // DS_WRITE: To Pong LDS
            // DS_READ: Ping LDS to Ping Reg
            block_sync_lds();

            static_for<0, KRepeat, 1>{}([&](auto k) {
                static_for<0, MRepeat, 1>{}([&](auto m0) {
                    a_thread_copy_.Run(a_block_desc_m0_m1_m2_k,
                        make_tuple(m0, I0, I0, Number<k * AMmaKStride>{}),
                        a_block_buf.At(PongP2{}),
                        a_thread_desc_,
                        make_tuple(m0, I0, k, I0),
                        a_thread_bufs(PongP2{}));
                    static_for<0, NRepeat, 1>{}([&](auto n0) {
                        b_thread_copy_.Run(b_block_desc_n0_n1_n2_k,
                            make_tuple(n0, I0, I0, Number<k * BMmaKStride>{}),
                            b_block_buf.At(PongP2{}),
                            b_thread_desc_,
                            make_tuple(n0, I0, k, I0),
                            b_thread_bufs(PongP2{}));
                    });
                });
            });

            static_for<0, KRepeat, 1>{}([&](auto k0) {
                static_for<0, MRepeat, 1>{}([&](auto m0) {
                    static_for<0, NRepeat, 1>{}([&](auto n0) {
                        vector_type<FloatAB, KPack> a_thread_vec;
                        vector_type<FloatAB, KPack> b_thread_vec;

                        static_for<0, KPack, 1>{}([&](auto ik) {
                            a_thread_vec.template AsType<FloatAB>()(ik) =
                                a_thread_bufs[PingP2{}][Number<a_thread_desc_.CalculateOffset(
                                    make_tuple(m0, I0, k0, ik))>{}];
                            b_thread_vec.template AsType<FloatAB>()(ik) =
                                b_thread_bufs[PingP2{}][Number<b_thread_desc_.CalculateOffset(
                                    make_tuple(n0, I0, k0, ik))>{}];
                        });

                        using mfma_input_type =
                            typename vector_type<FloatAB, xdlops_gemm.K1PerXdlops>::type;

                        constexpr index_t c_offset =
                            c_thread_desc_.CalculateOffset(make_tuple(m0, n0, 0));

                        xdlops_gemm.template Run(
                            a_thread_vec.template AsType<mfma_input_type>(),
                            b_thread_vec.template AsType<mfma_input_type>(),
                            c_thread_buf.GetVectorTypeReference(Number<c_offset>{}));
                    });
                });
            });

            TailScheduler<2>();
            __builtin_amdgcn_sched_barrier(0);

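            // Final MFMA pass of the TailNum == 3 drain: it consumes the register buffers
            // filled by the DS reads above; no further LDS or global traffic is issued.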
            static_for<0, KRepeat, 1>{}([&](auto k) {
                static_for<0, MRepeat, 1>{}([&](auto m0) {
                    static_for<0, NRepeat, 1>{}([&](auto n0) {
                        vector_type<FloatAB, KPack> a_thread_vec;
                        vector_type<FloatAB, KPack> b_thread_vec;

                        static_for<0, KPack, 1>{}([&](auto ik) {
                            a_thread_vec.template AsType<FloatAB>()(ik) =
                                a_thread_bufs[PongP2{}][Number<a_thread_desc_.CalculateOffset(
                                    make_tuple(m0, I0, k, ik))>{}];
                            b_thread_vec.template AsType<FloatAB>()(ik) =
                                b_thread_bufs[PongP2{}][Number<b_thread_desc_.CalculateOffset(
                                    make_tuple(n0, I0, k, ik))>{}];
                        });

                        using mfma_input_type =
                            typename vector_type<FloatAB, xdlops_gemm.K1PerXdlops>::type;

                        constexpr index_t c_offset =
                            c_thread_desc_.CalculateOffset(make_tuple(m0, n0, 0));

                        xdlops_gemm.template Run(
                            a_thread_vec.template AsType<mfma_input_type>(),
                            b_thread_vec.template AsType<mfma_input_type>(),
                            c_thread_buf.GetVectorTypeReference(Number<c_offset>{}));
                    });
                });
            });

            // 64 v_mfma
            __builtin_amdgcn_sched_group_barrier(0x008, 64, 0); // MFMA
            __builtin_amdgcn_sched_barrier(0);
        }
        else if constexpr(TailNum == 2)
        {
            using PingP1 = Number<0>;
            using PongP1 = Number<1>;
            // MFMA: Ping Reg
            // DS_WRITE: To Ping LDS
            // DS_READ: Pong LDS to Pong Reg
            block_sync_lds();

            static_for<0, KRepeat, 1>{}([&](auto k) {
                static_for<0, MRepeat, 1>{}([&](auto m0) {
                    a_thread_copy_.Run(a_block_desc_m0_m1_m2_k,
                        make_tuple(m0, I0, I0, Number<k * AMmaKStride>{}),
                        a_block_buf.At(PongP1{}),
                        a_thread_desc_,
                        make_tuple(m0, I0, k, I0),
                        a_thread_bufs(PongP1{}));
                    static_for<0, NRepeat, 1>{}([&](auto n0) {
                        b_thread_copy_.Run(b_block_desc_n0_n1_n2_k,
                            make_tuple(n0, I0, I0, Number<k * BMmaKStride>{}),
                            b_block_buf.At(PongP1{}),
                            b_thread_desc_,
                            make_tuple(n0, I0, k, I0),
                            b_thread_bufs(PongP1{}));
                    });
                });
            });

            static_for<0, KRepeat, 1>{}([&](auto k0) {
                static_for<0, MRepeat, 1>{}([&](auto m0) {
                    static_for<0, NRepeat, 1>{}([&](auto n0) {
                        vector_type<FloatAB, KPack> a_thread_vec;
                        vector_type<FloatAB, KPack> b_thread_vec;

                        static_for<0, KPack, 1>{}([&](auto ik) {
                            a_thread_vec.template AsType<FloatAB>()(ik) =
                                a_thread_bufs[PingP1{}][Number<a_thread_desc_.CalculateOffset(
                                    make_tuple(m0, I0, k0, ik))>{}];
                            b_thread_vec.template AsType<FloatAB>()(ik) =
                                b_thread_bufs[PingP1{}][Number<b_thread_desc_.CalculateOffset(
                                    make_tuple(n0, I0, k0, ik))>{}];
                        });

                        using mfma_input_type =
                            typename vector_type<FloatAB, xdlops_gemm.K1PerXdlops>::type;

                        constexpr index_t c_offset =
                            c_thread_desc_.CalculateOffset(make_tuple(m0, n0, 0));

                        xdlops_gemm.template Run(
                            a_thread_vec.template AsType<mfma_input_type>(),
                            b_thread_vec.template AsType<mfma_input_type>(),
                            c_thread_buf.GetVectorTypeReference(Number<c_offset>{}));
                    });
                });
            });

            TailScheduler<2>();
            __builtin_amdgcn_sched_barrier(0);

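            // With TailNum == 2 the final pass below reads register buffer 1 directly;
            // it was already filled by the DS reads at the top of this branch.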
            // -------------------------------------------------------------------------------------------
            using PingP2 = Number<1>;
            // MFMA: Pong Reg
            // DS_WRITE: To Pong LDS
            // DS_READ: Ping LDS to Ping Reg

            static_for<0, KRepeat, 1>{}([&](auto k0) {
                static_for<0, MRepeat, 1>{}([&](auto m0) {
                    static_for<0, NRepeat, 1>{}([&](auto n0) {
                        vector_type<FloatAB, KPack> a_thread_vec;
                        vector_type<FloatAB, KPack> b_thread_vec;

                        static_for<0, KPack, 1>{}([&](auto ik) {
                            a_thread_vec.template AsType<FloatAB>()(ik) =
                                a_thread_bufs[PingP2{}][Number<a_thread_desc_.CalculateOffset(
                                    make_tuple(m0, I0, k0, ik))>{}];
                            b_thread_vec.template AsType<FloatAB>()(ik) =
                                b_thread_bufs[PingP2{}][Number<b_thread_desc_.CalculateOffset(
                                    make_tuple(n0, I0, k0, ik))>{}];
                        });

                        using mfma_input_type =
                            typename vector_type<FloatAB, xdlops_gemm.K1PerXdlops>::type;

                        constexpr index_t c_offset =
                            c_thread_desc_.CalculateOffset(make_tuple(m0, n0, 0));

                        xdlops_gemm.template Run(
                            a_thread_vec.template AsType<mfma_input_type>(),
                            b_thread_vec.template AsType<mfma_input_type>(),
                            c_thread_buf.GetVectorTypeReference(Number<c_offset>{}));
                    });
                });
            });

            // 64 v_mfma
            __builtin_amdgcn_sched_group_barrier(0x008, 64, 0); // MFMA
            __builtin_amdgcn_sched_barrier(0);
        }
    }

    protected:
    // M1, N1 as double buffer index
    // Read buffer + Compute buffer
    // A[M0, M1, M2, KPack]
    static constexpr auto a_thread_desc_ = make_naive_tensor_descriptor(
        make_tuple(Number<MRepeat>{}, I1, Number<KRepeat>{}, Number<KPack>{}),
        make_tuple(
            Number<KPack>{}, Number<KPack * MRepeat * KPack>{}, Number<MRepeat * KPack>{}, I1));

    // B[N0, N1, N2, KPack]
    static constexpr auto b_thread_desc_ = make_naive_tensor_descriptor(
        make_tuple(Number<NRepeat>{}, I1, Number<KRepeat>{}, Number<KPack>{}),
        make_tuple(
            Number<KPack>{}, Number<KPack * MRepeat * KPack>{}, Number<MRepeat * KPack>{}, I1));

    // C[M, N, NumRegXdlops]
    static constexpr auto c_thread_desc_ = make_naive_tensor_descriptor_packed(
        make_tuple(Number<MRepeat>{}, Number<NRepeat>{}, xdlops_gemm.GetRegSizePerXdlops()));

    using AThreadCopy = ThreadwiseTensorSliceTransfer_v4<FloatAB,
        FloatAB,
        decltype(a_block_desc_m0_m1_m2_k),
        decltype(a_thread_desc_),
        Sequence<1, 1, 1, KPack>,
        Sequence<0, 1, 2, 3>,
        3,
        A_K1,
        A_K1>;

    using BThreadCopy = ThreadwiseTensorSliceTransfer_v4<FloatAB,
        FloatAB,
        decltype(b_block_desc_n0_n1_n2_k),
        decltype(b_thread_desc_),
        Sequence<1, 1, 1, KPack>,
        Sequence<0, 1, 2, 3>,
        3,
        B_K1,
        B_K1>;

    AThreadCopy a_thread_copy_;
    BThreadCopy b_thread_copy_;
};

} // namespace ck

@@ -59,7 +59,9 @@ struct BaseOperator
 
     virtual size_t GetWorkSpaceSize(const BaseArgument*) const { return 0; }
 
-    virtual void SetWorkSpacePointer(BaseArgument* p_arg, void* p_workspace) const
+    virtual void SetWorkSpacePointer(BaseArgument* p_arg,
+                                     void* p_workspace,
+                                     const StreamConfig& = StreamConfig{}) const
     {
         assert(p_arg);
         p_arg->p_workspace_ = p_workspace;

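The hunk above widens the virtual SetWorkSpacePointer hook with a defaulted StreamConfig parameter. As a minimal sketch of how a derived operator might adopt the new signature (the derived class and its extra initialisation step are hypothetical; only BaseOperator, BaseArgument, and StreamConfig come from the interface shown above):

// Hypothetical derived operator, shown only to illustrate the widened hook.
struct MyOperatorWithWorkspace : public BaseOperator
{
    void SetWorkSpacePointer(BaseArgument* p_arg,
                             void* p_workspace,
                             const StreamConfig& stream_config = StreamConfig{}) const override
    {
        // Keep the base behaviour: attach the raw workspace pointer to the argument.
        BaseOperator::SetWorkSpacePointer(p_arg, p_workspace, stream_config);

        // A derived operator could additionally prepare the workspace on the stream
        // described by stream_config (hypothetical extra step, not part of this diff).
    }
};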