Merge pull request #288 from shwina/wheel-build-and-publish-infra

Initial wheel build and publishing infrastructure
This commit is contained in:
Ashwin Srinath
2025-12-04 04:37:07 -05:00
committed by GitHub
15 changed files with 683 additions and 2 deletions

View File

@@ -0,0 +1,110 @@
# Reusable workflow: builds pynvbench wheels for every supported
# CUDA x Python combination, then smoke-tests each wheel on a GPU runner,
# and exposes a single gate job for branch protection.
name: Build and Test Python Wheels

on:
  workflow_call:
  workflow_dispatch:

defaults:
  run:
    # Strict bash for every `run:` step (no profile files, fail fast).
    shell: bash --noprofile --norc -euo pipefail {0}

jobs:
  # Build wheels for all CUDA/Python combinations
  build-wheels:
    name: Build wheel (CUDA ${{ matrix.cuda }}, Python ${{ matrix.python }})
    runs-on: ubuntu-latest
    permissions:
      id-token: write
      contents: read
    strategy:
      # Let the other matrix entries finish even if one combination fails.
      fail-fast: false
      matrix:
        cuda: ['12', '13']
        python: ['3.10', '3.11', '3.12', '3.13']
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          # Full history so setuptools_scm can derive the wheel version from tags.
          fetch-depth: 0
          persist-credentials: false
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Build wheel
        run: |
          bash ci/build_pynvbench_wheel.sh -py-version ${{ matrix.python }} -cuda-version ${{ matrix.cuda }}
      - name: Upload wheel artifact
        uses: actions/upload-artifact@v4
        with:
          # Artifact name encodes the matrix cell so test-wheels can fetch its match.
          name: wheel-pynvbench-cu${{ matrix.cuda }}-py${{ matrix.python }}
          path: wheelhouse/*.whl
          retention-days: 7
          if-no-files-found: error
  # Test wheels for all CUDA/Python combinations
  test-wheels:
    name: Test wheel (CUDA ${{ matrix.cuda }}, Python ${{ matrix.python }})
    needs: build-wheels
    # GPU runner: the inner test script imports the wheel and runs CUDA tests.
    runs-on: linux-amd64-gpu-l4-latest-1
    permissions:
      id-token: write
      contents: read
    strategy:
      fail-fast: false
      matrix:
        cuda: ['12', '13']
        python: ['3.10', '3.11', '3.12', '3.13']
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          persist-credentials: false
      - name: Download wheel artifact
        uses: actions/download-artifact@v4
        with:
          name: wheel-pynvbench-cu${{ matrix.cuda }}-py${{ matrix.python }}
          path: wheelhouse
      - name: Test wheel
        run: |
          # Use the same rapidsai/ci-wheel Docker image as build
          if [[ "${{ matrix.cuda }}" == "12" ]]; then
            cuda_full_version="12.9.1"
          else
            cuda_full_version="13.0.1"
          fi
          docker run --rm \
            --workdir /workspace \
            --gpus all \
            --mount type=bind,source=$(pwd),target=/workspace/ \
            --env py_version=${{ matrix.python }} \
            --env cuda_version=${{ matrix.cuda }} \
            rapidsai/ci-wheel:25.12-cuda${cuda_full_version}-rockylinux8-py${{ matrix.python }} \
            /workspace/ci/test_pynvbench_inner.sh
  # Single required check that fails if any build or test matrix cell failed.
  verify-workflow:
    name: Verify all builds and tests succeeded
    # always() so this runs (and reports failure) even when precursors fail.
    if: ${{ always() }}
    needs:
      - build-wheels
      - test-wheels
    runs-on: ubuntu-latest
    steps:
      - name: Check build results
        run: |
          if [[ "${{ needs.build-wheels.result }}" != "success" ]]; then
            echo "Wheel builds failed!"
            exit 1
          fi
          if [[ "${{ needs.test-wheels.result }}" != "success" ]]; then
            echo "Wheel tests failed!"
            exit 1
          fi
          echo "All wheels built and tested successfully!"

58
.github/workflows/build-wheels.yml vendored Normal file
View File

@@ -0,0 +1,58 @@
# Manually triggered (or reusable) workflow that builds pynvbench wheels for
# every supported CUDA x Python combination, without the GPU test stage.
name: Build Python Wheels (Manual)

on:
  workflow_dispatch:
  workflow_call:

defaults:
  run:
    # Strict bash for every `run:` step (no profile files, fail fast).
    shell: bash --noprofile --norc -euo pipefail {0}

jobs:
  build-wheels:
    name: Build wheel (CUDA ${{ matrix.cuda }}, Python ${{ matrix.python }})
    runs-on: ubuntu-latest
    permissions:
      id-token: write
      contents: read
    strategy:
      # Let the other matrix entries finish even if one combination fails.
      fail-fast: false
      matrix:
        cuda: ['12', '13']
        python: ['3.10', '3.11', '3.12', '3.13']
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          # Full history so setuptools_scm can derive the wheel version from tags.
          fetch-depth: 0
          persist-credentials: false
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Build wheel
        run: |
          bash ci/build_pynvbench_wheel.sh -py-version ${{ matrix.python }} -cuda-version ${{ matrix.cuda }}
      - name: Upload wheel artifact
        uses: actions/upload-artifact@v4
        with:
          name: wheel-pynvbench-cu${{ matrix.cuda }}-py${{ matrix.python }}
          path: wheelhouse/*.whl
          retention-days: 7
          if-no-files-found: error
  # Single required check that fails if any build matrix cell failed.
  verify-wheels:
    name: Verify all wheels built successfully
    # always() so this runs (and reports failure) even when builds fail.
    if: ${{ always() }}
    needs: build-wheels
    runs-on: ubuntu-latest
    steps:
      - name: Check build results
        run: |
          if [[ "${{ needs.build-wheels.result }}" != "success" ]]; then
            echo "Wheel builds failed!"
            exit 1
          fi
          echo "All wheels built successfully!"

View File

@@ -76,6 +76,13 @@ jobs:
per_cuda_compiler_matrix: ${{ toJSON(fromJSON(needs.compute-matrix.outputs.PER_CUDA_COMPILER_MATRIX)[ matrix.cuda_host_combination ]) }}
devcontainer_version: ${{ needs.compute-matrix.outputs.DEVCONTAINER_VERSION }}
python-wheels:
name: Python Wheels
permissions:
id-token: write
contents: read
uses: ./.github/workflows/build-and-test-python-wheels.yml
verify-devcontainers:
name: Verify Dev Containers
if: ${{ !contains(github.event.head_commit.message, '[skip-vdc]') }}
@@ -96,6 +103,7 @@ jobs:
if: ${{ always() }} # need to use always() instead of !cancelled() because skipped jobs count as success
needs:
- nvbench
- python-wheels
- verify-devcontainers
steps:
- name: Check status of all precursor jobs

52
.github/workflows/publish-wheels.yml vendored Normal file
View File

@@ -0,0 +1,52 @@
# Manually dispatched workflow that downloads previously validated wheel
# artifacts from a given workflow run and publishes them to PyPI or TestPyPI
# using OIDC trusted publishing (no stored API tokens).
name: Publish Wheels to PyPI

on:
  workflow_dispatch:
    inputs:
      run-id:
        description: "The GitHub Actions run ID that generated validated artifacts"
        required: true
        type: string
      wheel-dst:
        description: "Which wheel index to publish to?"
        required: true
        type: choice
        options:
          - testpypi
          - pypi

defaults:
  run:
    # Strict bash with command tracing (-x) for auditable publish logs.
    shell: bash --noprofile --norc -xeuo pipefail {0}

jobs:
  publish-wheels:
    name: Publish wheels to ${{ inputs.wheel-dst }}
    runs-on: ubuntu-latest
    # Deployment environment gates the publish step and scopes the OIDC identity.
    environment:
      name: ${{ inputs.wheel-dst }}
      url: https://${{ (inputs.wheel-dst == 'testpypi' && 'test.') || '' }}pypi.org/p/pynvbench/
    permissions:
      # id-token is required for PyPI trusted publishing (OIDC).
      id-token: write
      contents: read
    steps:
      - name: Download all wheel artifacts
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          RUN_ID: ${{ inputs.run-id }}
        run: |
          # Collect every wheel-* artifact from the given run into dist/,
          # the default directory scanned by the pypi-publish action.
          gh run download ${RUN_ID} -D dl -R ${{ github.repository }} -p 'wheel-*'
          mkdir -p dist
          find dl -name '*.whl' -exec mv {} dist/ \;
          rm -rf dl
          ls -lh dist/
      - name: Publish package distributions to PyPI
        if: ${{ inputs.wheel-dst == 'pypi' }}
        uses: pypa/gh-action-pypi-publish@release/v1
      - name: Publish package distributions to TestPyPI
        if: ${{ inputs.wheel-dst == 'testpypi' }}
        uses: pypa/gh-action-pypi-publish@release/v1
        with:
          repository-url: https://test.pypi.org/legacy/

8
.gitignore vendored
View File

@@ -8,3 +8,11 @@ cmake-build-*
*~
compile_commands.json
CMakeUserPresets.json
# Python wheel builds
wheelhouse/
*.whl
*.egg-info/
__pycache__/
*.pyc
*.pyo

90
ci/build_pynvbench_wheel.sh Executable file
View File

@@ -0,0 +1,90 @@
#!/bin/bash
# Build a manylinux pynvbench wheel for one Python/CUDA combination by
# running ci/build_pynvbench_wheel_inner.sh inside a rapidsai/ci-wheel
# container. Output wheels land in ./wheelhouse.
#
# Usage: build_pynvbench_wheel.sh -py-version <python_version> -cuda-version <cuda_version>
set -euo pipefail

ci_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
usage="Usage: $0 -py-version <python_version> -cuda-version <cuda_version>"

source "$ci_dir/util/python/common_arg_parser.sh"

# Parse arguments including CUDA version (sets and exports py_version).
parse_python_args "$@"

# Now parse CUDA version from the argument list. parse_python_args shifts
# only its own copy of the positional parameters, so "$@" here is untouched.
cuda_version=""
while [[ $# -gt 0 ]]; do
  case $1 in
    -cuda-version=*)
      cuda_version="${1#*=}"
      shift
      ;;
    -cuda-version)
      if [[ $# -lt 2 ]]; then
        echo "Error: -cuda-version requires a value" >&2
        exit 1
      fi
      cuda_version="$2"
      shift 2
      ;;
    *)
      shift
      ;;
  esac
done

# Check if py_version was provided (this script requires it)
require_py_version "$usage" || exit 1

if [[ -z "$cuda_version" ]]; then
  echo "Error: -cuda-version is required"
  echo "$usage"
  exit 1
fi

echo "Docker socket: " $(ls /var/run/docker.sock)

# Map cuda_version to the full toolkit version baked into the CI image.
if [[ "$cuda_version" == "12" ]]; then
  cuda_full_version="12.9.1"
  cuda_short="cu12"  # currently unused; kept for parity with wheel naming
elif [[ "$cuda_version" == "13" ]]; then
  cuda_full_version="13.0.1"
  cuda_short="cu13"  # currently unused; kept for parity with wheel naming
else
  echo "Error: Unsupported CUDA version: $cuda_version"
  exit 1
fi

# pynvbench must be built in a container that can produce manylinux wheels,
# and has the CUDA toolkit installed. We use the rapidsai/ci-wheel image for this.
readonly devcontainer_version=25.12
readonly devcontainer_distro=rockylinux8
if [[ "$(uname -m)" == "aarch64" ]]; then
  readonly cuda_image="rapidsai/ci-wheel:${devcontainer_version}-cuda${cuda_full_version}-${devcontainer_distro}-py${py_version}-arm64"
else
  readonly cuda_image="rapidsai/ci-wheel:${devcontainer_version}-cuda${cuda_full_version}-${devcontainer_distro}-py${py_version}"
fi

mkdir -p wheelhouse

echo "::group::⚒️ Building CUDA ${cuda_version} wheel on ${cuda_image}"
(
  set -x
  docker pull "$cuda_image"
  # Quote the bind-mount source so a checkout path containing spaces
  # doesn't word-split the --mount argument.
  docker run --rm -i \
    --workdir /workspace/python \
    --mount "type=bind,source=$(pwd),target=/workspace/" \
    --env "py_version=${py_version}" \
    --env "cuda_version=${cuda_version}" \
    "$cuda_image" \
    /workspace/ci/build_pynvbench_wheel_inner.sh
  # Prevent GHA runners from exhausting available storage with leftover images:
  if [[ -n "${GITHUB_ACTIONS:-}" ]]; then
    docker rmi -f "$cuda_image"
  fi
)
echo "::endgroup::"

echo "Wheels in wheelhouse:"
ls -la wheelhouse/

View File

@@ -0,0 +1,89 @@
#!/bin/bash
# Target script for `docker run` command in build_pynvbench_wheel.sh.
# Builds the pynvbench wheel inside the rapidsai/ci-wheel container, repairs
# it for manylinux compliance with auditwheel, and renames it to carry a
# cu<major> local-version suffix. The /workspace pathnames are hard-wired here.
#
# Expects env var: py_version (e.g. "3.12").
set -euo pipefail

# Install GCC 13 toolset (needed for the build)
/workspace/ci/util/retry.sh 5 30 dnf -y install gcc-toolset-13-gcc gcc-toolset-13-gcc-c++
echo -e "#!/bin/bash\nsource /opt/rh/gcc-toolset-13/enable" >/etc/profile.d/enable_devtools.sh
source /etc/profile.d/enable_devtools.sh

# Check what's available
which gcc
gcc --version
which nvcc
nvcc --version

# Set up Python environment
source /workspace/ci/pyenv_helper.sh
setup_python_env "${py_version}"
which python
python --version
echo "Done setting up python env"

# Ensure we have full git history for setuptools_scm.
# Compare the command's *output*: the previous form `if $(git ...)`
# executed the printed "true"/"false" as a command, which only worked
# because coreutils happens to provide true/false binaries.
if [[ "$(git rev-parse --is-shallow-repository)" == "true" ]]; then
  git fetch --unshallow
fi

cd /workspace/python

# Determine CUDA version from nvcc
cuda_major=$(nvcc --version | grep -oP 'release \K[0-9]+\.[0-9]+' | cut -d. -f1)
echo "Detected CUDA major version: ${cuda_major}"

# Configure compilers:
export CXX="$(which g++)"
export CUDACXX="$(which nvcc)"
export CUDAHOSTCXX="$(which g++)"

# Build the wheel
python -m pip wheel --no-deps --verbose --wheel-dir dist .

# Install auditwheel for manylinux compliance
python -m pip install auditwheel

# Repair wheel to make it manylinux compliant. The driver libraries are
# supplied by the host at runtime, so exclude them from bundling.
mkdir -p dist_repaired
for wheel in dist/pynvbench-*.whl; do
  if [[ -f "$wheel" ]]; then
    echo "Repairing wheel: $wheel"
    python -m auditwheel repair \
      --exclude 'libcuda.so.1' \
      --exclude 'libnvidia-ml.so.1' \
      "$wheel" \
      --wheel-dir dist_repaired
  fi
done

# Rename wheel to include CUDA version suffix
mkdir -p /workspace/wheelhouse
for wheel in dist_repaired/pynvbench-*.whl; do
  if [[ -f "$wheel" ]]; then
    base_name=$(basename "$wheel" .whl)
    # Append CUDA version to the local version identifier
    # e.g., pynvbench-0.1.0.dev1+gabc123-cp312-cp312-manylinux_2_28_x86_64.whl
    # becomes pynvbench-0.1.0.dev1+gabc123.cu12-cp312-cp312-manylinux_2_28_x86_64.whl
    if [[ "$base_name" =~ ^(.*)-cp([0-9]+)-cp([0-9]+)-(.*) ]]; then
      pkg_version="${BASH_REMATCH[1]}"  # "<name>-<version>" prefix of the filename
      py_tag="cp${BASH_REMATCH[2]}"
      abi_tag="cp${BASH_REMATCH[3]}"
      platform="${BASH_REMATCH[4]}"
      # If version has a local part (contains +), append .cu${cuda_major} to it
      # Otherwise add +cu${cuda_major}
      if [[ "$pkg_version" =~ \+ ]]; then
        new_version="${pkg_version}.cu${cuda_major}"
      else
        new_version="${pkg_version}+cu${cuda_major}"
      fi
      new_name="${new_version}-${py_tag}-${abi_tag}-${platform}.whl"
      mv "$wheel" "/workspace/wheelhouse/${new_name}"
      echo "Renamed wheel to: ${new_name}"
    else
      # Fallback if regex doesn't match
      mv "$wheel" /workspace/wheelhouse/
      echo "Moved wheel: $(basename "$wheel")"
    fi
  fi
done

View File

@@ -68,3 +68,15 @@ pull_request:
- {cuda: *cuda_curr_max, compiler: *llvm18, cpu: 'amd64'}
- {cuda: *cuda_curr_max, compiler: *llvm19, cpu: 'amd64'}
- {cuda: *cuda_curr_max, compiler: *llvm20, cpu: 'amd64'}
# Python wheel builds
python_wheels:
nvcc:
- {cuda: *cuda_prev_max, py_version: '3.10', cpu: 'amd64'}
- {cuda: *cuda_prev_max, py_version: '3.11', cpu: 'amd64'}
- {cuda: *cuda_prev_max, py_version: '3.12', cpu: 'amd64'}
- {cuda: *cuda_prev_max, py_version: '3.13', cpu: 'amd64'}
- {cuda: *cuda_curr_max, py_version: '3.10', cpu: 'amd64'}
- {cuda: *cuda_curr_max, py_version: '3.11', cpu: 'amd64'}
- {cuda: *cuda_curr_max, py_version: '3.12', cpu: 'amd64'}
- {cuda: *cuda_curr_max, py_version: '3.13', cpu: 'amd64'}

0
ci/pretty_printing.sh Normal file → Executable file
View File

47
ci/pyenv_helper.sh Executable file
View File

@@ -0,0 +1,47 @@
#!/bin/bash
# Helper sourced by the CI wheel build/test scripts to provision a specific
# Python interpreter via pyenv inside Ubuntu or Rocky Linux containers.

# setup_python_env <py_version>
# Installs pyenv if missing, installs the distro packages needed to compile
# CPython, then builds and activates the requested version.
# Side effects: exports PYENV_ROOT and PATH, writes a .python-version file
# in the current directory (`pyenv local`), and upgrades pip.
setup_python_env() {
  local py_version=$1
  # check if pyenv is installed
  if ! command -v pyenv &> /dev/null; then
    rm -f /pyenv
    # NOTE(review): pipes a remote installer straight into bash — acceptable
    # only inside disposable CI containers, never on developer machines.
    curl -fsSL https://pyenv.run | bash
  fi
  # Install the build dependencies, check /etc/os-release to see if we are on ubuntu or rocky
  if [ -f /etc/os-release ]; then
    source /etc/os-release
    if [ "$ID" = "ubuntu" ]; then
      # Use the retry helper to mitigate issues with apt network errors:
      script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
      retry() {
        "${script_dir}/util/retry.sh" 5 30 "$@"
      }
      retry sudo apt update
      retry sudo apt install -y make libssl-dev zlib1g-dev \
        libbz2-dev libreadline-dev libsqlite3-dev curl git \
        libncursesw5-dev xz-utils tk-dev libxml2-dev libxmlsec1-dev libffi-dev liblzma-dev
    elif [ "$ID" = "rocky" ]; then
      # we're inside the rockylinux container, sudo not required/available
      dnf install -y make patch zlib-devel bzip2 bzip2-devel readline-devel \
        sqlite sqlite-devel openssl-devel tk-devel libffi-devel xz-devel libuuid-devel \
        gdbm-libs libnsl2
    else
      echo "Unsupported Linux distribution"
      # NOTE(review): `exit` (not `return`) terminates the *sourcing* script.
      exit 1
    fi
  fi
  # Always set up pyenv environment
  export PYENV_ROOT="$HOME/.pyenv"
  [[ -d $PYENV_ROOT/bin ]] && export PATH="$PYENV_ROOT/bin:$PATH"
  eval "$(pyenv init - bash)"
  # Using pyenv, install the Python version
  PYENV_DEBUG=1 pyenv install -v "${py_version}"
  pyenv local "${py_version}"
  pip install --upgrade pip
}

79
ci/test_pynvbench.sh Executable file
View File

@@ -0,0 +1,79 @@
#!/bin/bash
set -euo pipefail
ci_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
usage="Usage: $0 -py-version <python_version> -cuda-version <cuda_version>"
source "$ci_dir/util/python/common_arg_parser.sh"
# Parse arguments including CUDA version
parse_python_args "$@"
cuda_version=""
while [[ $# -gt 0 ]]; do
case $1 in
-cuda-version=*)
cuda_version="${1#*=}"
shift
;;
-cuda-version)
if [[ $# -lt 2 ]]; then
echo "Error: -cuda-version requires a value" >&2
exit 1
fi
cuda_version="$2"
shift 2
;;
*)
shift
;;
esac
done
# Check if py_version was provided (this script requires it)
require_py_version "$usage" || exit 1
if [[ -z "$cuda_version" ]]; then
echo "Error: -cuda-version is required"
echo "$usage"
exit 1
fi
# Map cuda_version to full version
if [[ "$cuda_version" == "12" ]]; then
cuda_full_version="12.9.1"
elif [[ "$cuda_version" == "13" ]]; then
cuda_full_version="13.0.1"
else
echo "Error: Unsupported CUDA version: $cuda_version"
exit 1
fi
# Use the same rapidsai/ci-wheel images as the build
readonly devcontainer_version=25.12
readonly devcontainer_distro=rockylinux8
if [[ "$(uname -m)" == "aarch64" ]]; then
readonly cuda_image=rapidsai/ci-wheel:${devcontainer_version}-cuda${cuda_full_version}-${devcontainer_distro}-py${py_version}-arm64
else
readonly cuda_image=rapidsai/ci-wheel:${devcontainer_version}-cuda${cuda_full_version}-${devcontainer_distro}-py${py_version}
fi
echo "::group::🧪 Testing CUDA ${cuda_version} wheel on ${cuda_image}"
(
set -x
docker pull $cuda_image
docker run --rm -i \
--workdir /workspace \
--mount type=bind,source=$(pwd),target=/workspace/ \
--env py_version=${py_version} \
--env cuda_version=${cuda_version} \
$cuda_image \
/workspace/ci/test_pynvbench_inner.sh
# Prevent GHA runners from exhausting available storage with leftover images:
if [[ -n "${GITHUB_ACTIONS:-}" ]]; then
docker rmi -f $cuda_image
fi
)
echo "::endgroup::"

43
ci/test_pynvbench_inner.sh Executable file
View File

@@ -0,0 +1,43 @@
#!/bin/bash
# Target script for `docker run` command in test_pynvbench.sh.
# Installs the previously built pynvbench wheel from /workspace/wheelhouse
# and runs the Python test suite. The /workspace pathnames are hard-wired here.
#
# Expects env vars: py_version (e.g. "3.12"), cuda_version ("12" or "13").
set -euo pipefail

# Install GCC 13 toolset (needed for builds that might happen during testing)
/workspace/ci/util/retry.sh 5 30 dnf -y install gcc-toolset-13-gcc gcc-toolset-13-gcc-c++
echo -e "#!/bin/bash\nsource /opt/rh/gcc-toolset-13/enable" >/etc/profile.d/enable_devtools.sh
source /etc/profile.d/enable_devtools.sh

# Set up Python environment (only if not already available)
source /workspace/ci/pyenv_helper.sh
if ! command -v "python${py_version}" &> /dev/null; then
  setup_python_env "${py_version}"
fi

# Upgrade pip
python -m pip install --upgrade pip

echo "Python version: $(python --version)"
echo "CUDA version: $(nvcc --version | grep release)"

# Wheel should be in /workspace/wheelhouse (downloaded by workflow or built locally)
WHEELHOUSE_DIR="/workspace/wheelhouse"

# Find and install pynvbench wheel.
# Look for .cu${cuda_version} in the version string (e.g., pynvbench-0.0.1.dev1+g123.cu12-...).
# BUGFIX: `|| true` keeps `set -euo pipefail` from aborting the script when
# `ls` matches nothing (nonzero pipeline status inside the command
# substitution) — previously the diagnostic branch below was unreachable.
PYNVBENCH_WHEEL_PATH="$(ls "${WHEELHOUSE_DIR}"/pynvbench-*.cu"${cuda_version}"-*.whl 2>/dev/null | head -1 || true)"
if [[ -z "$PYNVBENCH_WHEEL_PATH" ]]; then
  echo "Error: No pynvbench wheel found in ${WHEELHOUSE_DIR}"
  echo "Looking for: pynvbench-*.cu${cuda_version}-*.whl"
  echo "Contents of ${WHEELHOUSE_DIR}:"
  ls -la "${WHEELHOUSE_DIR}/" || true
  exit 1
fi

echo "Installing wheel: $PYNVBENCH_WHEEL_PATH"
python -m pip install "${PYNVBENCH_WHEEL_PATH}[test]"

# Run tests
cd "/workspace/python/test/"
python -m pytest -v test_nvbench.py

View File

@@ -0,0 +1,39 @@
#!/bin/bash
# Argument parser for Python CI scripts. Sourced, not executed.

# parse_python_args [args...]
# Scans the argument list for `-py-version <v>` or `-py-version=<v>` and
# exports the result as $py_version (empty string if absent). Unknown
# arguments are ignored so callers can parse additional flags from their
# own, untouched "$@". Returns 1 if `-py-version` is given without a value.
parse_python_args() {
  # Initialize variables
  py_version=""
  while [[ $# -gt 0 ]]; do
    case $1 in
      -py-version=*)
        py_version="${1#*=}"
        shift
        ;;
      -py-version)
        if [[ $# -lt 2 ]]; then
          echo "Error: -py-version requires a value" >&2
          return 1
        fi
        py_version="$2"
        shift 2
        ;;
      *)
        # Unknown argument, ignore
        shift
        ;;
    esac
  done
  # Export for use by calling script
  export py_version
}

# require_py_version [usage]
# Returns 1 with an error message (plus the optional usage string) when
# $py_version is empty; returns 0 otherwise.
require_py_version() {
  # ${1:-} so callers running under `set -u` don't crash when no usage
  # string is passed (bare "$1" is an unbound-variable error there).
  local usage="${1:-}"
  if [[ -z "$py_version" ]]; then
    echo "Error: -py-version is required" >&2
    [[ -n "$usage" ]] && echo "$usage" >&2
    return 1
  fi
}

32
ci/util/retry.sh Executable file
View File

@@ -0,0 +1,32 @@
#!/bin/bash
# Retry a command with a fixed delay between attempts.
#
# Usage: retry.sh num_tries sleep_time command [args...]
# Exits 0 on the first success; exits 1 after num_tries failures.
if [ "$#" -lt 3 ]; then
  echo "Usage: $0 num_tries sleep_time command [args...]"
  echo "  num_tries: Number of attempts to run the command"
  echo "  sleep_time: Time to wait between attempts (in seconds)"
  echo "  command: The command to run"
  echo "  args: Arguments to pass to the command"
  exit 1
fi

num_tries=$1
sleep_time=$2
shift 2
# Keep the arguments as a real array and execute them directly — no eval
# round-trip needed. ${*@Q} is used only for faithful, shell-quoted logging.
command=("$@")
printable="${*@Q}"

# Loop until the command succeeds or we reach the maximum number of attempts:
for ((i=1; i<=num_tries; i++)); do
  echo "Attempt ${i} of ${num_tries}: Running command '${printable}'"
  status=0
  "${command[@]}" || status=$?
  if [ $status -eq 0 ]; then
    echo "Command '${printable}' succeeded on attempt ${i}."
    exit 0
  fi
  echo "Command '${printable}' failed with status ${status}."
  # BUGFIX: don't sleep (or promise a retry) after the final failed attempt.
  if (( i < num_tries )); then
    echo "Retrying in ${sleep_time} seconds..."
    sleep "$sleep_time"
  fi
done

echo "Command '${printable}' failed after ${num_tries} attempts."
exit 1

View File

@@ -6,12 +6,21 @@ build-backend = "scikit_build_core.build"
name = "pynvbench"
description = "CUDA Kernel Benchmarking Package"
authors = [{ name = "NVIDIA Corporation" }]
license = { text = "Apache-2.0 WITH LLVM-exception" }
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
"Environment :: GPU :: NVIDIA CUDA",
"License :: OSI Approved :: Apache Software License",
"Operating System :: POSIX :: Linux",
]
requires-python = ">=3.9"
requires-python = ">=3.10"
dependencies = [
# pathfinder
"cuda-pathfinder",
@@ -32,7 +41,9 @@ readme = { file = "README.md", content-type = "text/markdown" }
test = ["pytest", "cupy-cuda12x", "numba"]
[project.urls]
Homepage = "https://developer.nvidia.com/"
Homepage = "https://github.com/NVIDIA/nvbench"
Repository = "https://github.com/NVIDIA/nvbench"
Issues = "https://github.com/NVIDIA/nvbench/issues"
[tool.scikit-build]
minimum-version = "build-system.requires"
@@ -53,6 +64,9 @@ provider = "scikit_build_core.metadata.setuptools_scm"
[tool.setuptools_scm]
root = ".."
git_describe_command = ["git", "describe", "--tags", "--match", "python-*"]
tag_regex = "^python-(?P<version>.*)$"
fallback_version = "0.0.0"
[tool.scikit-build.wheel.packages]
"cuda" = "cuda"