Initial wheel build and publishing infrastructure

This commit is contained in:
Ashwin Srinath
2025-12-03 10:14:15 -05:00
parent 34f1e2a7ee
commit 29389b5791
12 changed files with 417 additions and 2 deletions

58
.github/workflows/build-wheels.yml vendored Normal file
View File

@@ -0,0 +1,58 @@
# Builds pynvbench manylinux wheels for every supported CUDA x Python
# combination. Manual trigger only; wheels are uploaded as run artifacts
# and published separately by publish-wheels.yml.
name: Build Python Wheels

on:
  workflow_dispatch:

defaults:
  run:
    # Fail fast on errors/unset vars/pipeline failures in every run step.
    shell: bash --noprofile --norc -euo pipefail {0}

jobs:
  build-wheels:
    name: Build wheel (CUDA ${{ matrix.cuda }}, Python ${{ matrix.python }})
    runs-on: ubuntu-latest
    permissions:
      id-token: write
      contents: read
    strategy:
      # Let every matrix leg run to completion so verify-wheels sees the
      # full picture rather than the first failure.
      fail-fast: false
      matrix:
        cuda: ['12', '13']
        python: ['3.10', '3.11', '3.12', '3.13']
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          # Full history is required for setuptools_scm version detection.
          fetch-depth: 0
          persist-credentials: false
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Build wheel
        run: |
          bash ci/build_pynvbench_wheel.sh -py-version ${{ matrix.python }} -cuda-version ${{ matrix.cuda }}
      - name: Upload wheel artifact
        uses: actions/upload-artifact@v4
        with:
          # One artifact per (CUDA, Python) leg; downloaded by glob
          # 'wheel-*' in the publish workflow.
          name: wheel-pynvbench-cu${{ matrix.cuda }}-py${{ matrix.python }}
          path: wheelhouse/*.whl
          retention-days: 7
          if-no-files-found: error
  # Single gate job: gives branch protection one status to require instead
  # of every matrix leg.
  verify-wheels:
    name: Verify all wheels built successfully
    if: ${{ always() }}
    needs: build-wheels
    runs-on: ubuntu-latest
    steps:
      - name: Check build results
        run: |
          if [[ "${{ needs.build-wheels.result }}" != "success" ]]; then
            echo "Wheel builds failed!"
            exit 1
          fi
          echo "All wheels built successfully!"

53
.github/workflows/publish-wheels.yml vendored Normal file
View File

@@ -0,0 +1,53 @@
# Publishes previously built-and-validated wheel artifacts to PyPI or
# TestPyPI via OIDC trusted publishing (no API token needed).
name: Publish Wheels to PyPI

on:
  workflow_dispatch:
    inputs:
      run-id:
        description: "The GitHub Actions run ID that generated validated artifacts"
        required: true
        type: string
      wheel-dst:
        description: "Which wheel index to publish to?"
        required: true
        type: choice
        options:
          - testpypi
          - pypi

defaults:
  run:
    shell: bash --noprofile --norc -xeuo pipefail {0}

jobs:
  publish-wheels:
    name: Publish wheels to ${{ inputs.wheel-dst }}
    runs-on: ubuntu-latest
    # Environment gates the publish behind any configured reviewers and
    # scopes the OIDC trust relationship.
    environment:
      name: ${{ inputs.wheel-dst }}
      url: https://${{ (inputs.wheel-dst == 'testpypi' && 'test.') || '' }}pypi.org/p/pynvbench/
    permissions:
      # OIDC token for PyPI trusted publishing:
      id-token: write
      contents: read
      # Required so `gh run download` can read artifacts from another run.
      # With an explicit permissions block, every unlisted scope defaults to
      # 'none', so omitting this makes the download step fail with 403.
      actions: read
    steps:
      - name: Download all wheel artifacts
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          RUN_ID: ${{ inputs.run-id }}
        run: |
          gh run download "${RUN_ID}" -D dl -R "${{ github.repository }}" -p 'wheel-*'
          mkdir -p dist
          # Flatten the per-artifact directories into dist/ for the publish action.
          find dl -name '*.whl' -exec mv {} dist/ \;
          rm -rf dl
          ls -lh dist/
      - name: Publish package distributions to PyPI
        if: ${{ inputs.wheel-dst == 'pypi' }}
        uses: pypa/gh-action-pypi-publish@release/v1
      - name: Publish package distributions to TestPyPI
        if: ${{ inputs.wheel-dst == 'testpypi' }}
        uses: pypa/gh-action-pypi-publish@release/v1
        with:
          repository-url: https://test.pypi.org/legacy/

8
.gitignore vendored
View File

@@ -8,3 +8,11 @@ cmake-build-*
*~
compile_commands.json
CMakeUserPresets.json
# Python wheel builds
wheelhouse/
*.whl
*.egg-info/
__pycache__/
*.pyc
*.pyo

91
ci/build_pynvbench_wheel.sh Executable file
View File

@@ -0,0 +1,91 @@
#!/bin/bash
# Build a pynvbench manylinux wheel inside a rapidsai/ci-wheel container.
#
# Usage: ci/build_pynvbench_wheel.sh -py-version <python_version> -cuda-version <cuda_version>
#
# The resulting wheel is left in ./wheelhouse/. Requires a working Docker
# daemon on the host.
set -euo pipefail

ci_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
usage="Usage: $0 -py-version <python_version> -cuda-version <cuda_version>"

source "$ci_dir/util/python/common_arg_parser.sh"

# Parse -py-version (exports py_version); the parser ignores unknown args,
# so we can re-scan the original list for -cuda-version below.
parse_python_args "$@"

cuda_version=""
while [[ $# -gt 0 ]]; do
    case $1 in
        -cuda-version=*)
            cuda_version="${1#*=}"
            shift
            ;;
        -cuda-version)
            if [[ $# -lt 2 ]]; then
                echo "Error: -cuda-version requires a value" >&2
                exit 1
            fi
            cuda_version="$2"
            shift 2
            ;;
        *)
            shift
            ;;
    esac
done

# Both arguments are mandatory.
require_py_version "$usage" || exit 1
if [[ -z "$cuda_version" ]]; then
    echo "Error: -cuda-version is required"
    echo "$usage"
    exit 1
fi

# Informational: confirm the Docker socket exists before doing real work.
echo "Docker socket: $(ls /var/run/docker.sock)"

# Map the CUDA major version to the full toolkit version baked into the image.
if [[ "$cuda_version" == "12" ]]; then
    cuda_full_version="12.9.1"
elif [[ "$cuda_version" == "13" ]]; then
    cuda_full_version="13.0.1"
else
    echo "Error: Unsupported CUDA version: $cuda_version"
    exit 1
fi

# pynvbench must be built in a container that can produce manylinux wheels,
# and has the CUDA toolkit installed. We use the rapidsai/ci-wheel image for this.
readonly devcontainer_version=25.12
readonly devcontainer_distro=rockylinux8
if [[ "$(uname -m)" == "aarch64" ]]; then
    readonly cuda_image=rapidsai/ci-wheel:${devcontainer_version}-cuda${cuda_full_version}-${devcontainer_distro}-py${py_version}-arm64
else
    readonly cuda_image=rapidsai/ci-wheel:${devcontainer_version}-cuda${cuda_full_version}-${devcontainer_distro}-py${py_version}
fi

mkdir -p wheelhouse

echo "::group::⚒️ Building CUDA ${cuda_version} wheel on ${cuda_image}"
(
    set -x
    docker pull "$cuda_image"
    # Bind-mount the repo root; everything is quoted so a checkout path
    # containing spaces cannot break word-splitting.
    docker run --rm -i \
        --workdir /workspace/python \
        --mount "type=bind,source=$(pwd),target=/workspace/" \
        --env py_version="${py_version}" \
        --env cuda_version="${cuda_version}" \
        "$cuda_image" \
        /workspace/ci/build_pynvbench_wheel_inner.sh
    # Prevent GHA runners from exhausting available storage with leftover images:
    if [[ -n "${GITHUB_ACTIONS:-}" ]]; then
        docker rmi -f "$cuda_image"
    fi
)
echo "::endgroup::"

echo "Wheels in wheelhouse:"
ls -la wheelhouse/

View File

@@ -0,0 +1,57 @@
#!/bin/bash
# Target script for the `docker run` command in build_pynvbench_wheel.sh.
# The /workspace pathnames are hard-wired here; py_version and cuda_version
# are injected via `docker run --env`.
set -euo pipefail

# Install GCC 13 toolset (needed for the build)
/workspace/ci/util/retry.sh 5 30 dnf -y install gcc-toolset-13-gcc gcc-toolset-13-gcc-c++
echo -e "#!/bin/bash\nsource /opt/rh/gcc-toolset-13/enable" >/etc/profile.d/enable_devtools.sh
source /etc/profile.d/enable_devtools.sh

# Sanity-check the toolchain (fails the build early if anything is missing).
which gcc
gcc --version
which nvcc
nvcc --version

# Set up Python environment
source /workspace/ci/pyenv_helper.sh
setup_python_env "${py_version}"
which python
python --version
echo "Done setting up python env"

# Ensure we have full git history for setuptools_scm. Compare against the
# command's output instead of executing it (`if $(...)` runs the printed
# "true"/"false" as a command — fragile antipattern).
if [[ "$(git rev-parse --is-shallow-repository)" == "true" ]]; then
    git fetch --unshallow
fi

cd /workspace/python

# Determine CUDA version from nvcc
cuda_major=$(nvcc --version | grep -oP 'release \K[0-9]+\.[0-9]+' | cut -d. -f1)
echo "Detected CUDA major version: ${cuda_major}"

# Configure compilers:
export CXX="$(which g++)"
export CUDACXX="$(which nvcc)"
export CUDAHOSTCXX="$(which g++)"

# Build the wheel
python -m pip wheel --no-deps --verbose --wheel-dir dist .

# Rename the wheel so the CUDA major version appears as a PEP 440 local
# version segment immediately after the version component, e.g.
#   pynvbench-0.1.0-cp312-cp312-linux_x86_64.whl
#   -> pynvbench-0.1.0+cu13-cp312-cp312-linux_x86_64.whl
# Appending "+cuNN-..." after the platform tag (as a naive rename would)
# produces a filename installers cannot parse. The Python version is already
# encoded in the cp-tags, so no extra py suffix is needed.
# NOTE(review): renaming the file does not update the version inside the
# wheel's METADATA; strict installers may reject the mismatch — consider
# injecting the local version at build time (setuptools_scm) instead. TODO.
for wheel in dist/pynvbench-*.whl; do
    if [[ -f "$wheel" ]]; then
        base_name=$(basename "$wheel" .whl)
        # Filename layout: <name>-<version>-<pytag>-<abitag>-<platform>
        tail="${base_name#pynvbench-}"     # <version>-<tags...>
        version="${tail%%-*}"
        tags="${tail#*-}"
        new_name="pynvbench-${version}+cu${cuda_major}-${tags}.whl"
        mv "$wheel" "dist/${new_name}"
        echo "Renamed wheel to: ${new_name}"
    fi
done

# Move wheel to output directory
mkdir -p /workspace/wheelhouse
mv dist/pynvbench-*+cu*.whl /workspace/wheelhouse/

View File

@@ -68,3 +68,15 @@ pull_request:
- {cuda: *cuda_curr_max, compiler: *llvm18, cpu: 'amd64'}
- {cuda: *cuda_curr_max, compiler: *llvm19, cpu: 'amd64'}
- {cuda: *cuda_curr_max, compiler: *llvm20, cpu: 'amd64'}
# Python wheel builds: one entry per (CUDA, Python) combination.
# The *cuda_prev_max / *cuda_curr_max aliases refer to anchors defined
# earlier in this file.
python_wheels:
  nvcc:
    - {cuda: *cuda_prev_max, py_version: '3.10', cpu: 'amd64'}
    - {cuda: *cuda_prev_max, py_version: '3.11', cpu: 'amd64'}
    - {cuda: *cuda_prev_max, py_version: '3.12', cpu: 'amd64'}
    - {cuda: *cuda_prev_max, py_version: '3.13', cpu: 'amd64'}
    - {cuda: *cuda_curr_max, py_version: '3.10', cpu: 'amd64'}
    - {cuda: *cuda_curr_max, py_version: '3.11', cpu: 'amd64'}
    - {cuda: *cuda_curr_max, py_version: '3.12', cpu: 'amd64'}
    - {cuda: *cuda_curr_max, py_version: '3.13', cpu: 'amd64'}

0
ci/pretty_printing.sh Normal file → Executable file
View File

48
ci/pyenv_helper.sh Executable file
View File

@@ -0,0 +1,48 @@
#!/bin/bash
# Helper sourced by the wheel-build scripts.
#
# setup_python_env <py_version>
#   Installs pyenv (if not already on PATH) plus the distro packages needed
#   to compile CPython, then installs the requested Python version with
#   pyenv, pins it for the current directory, and upgrades pip.
#   Exits the calling shell if the distro is neither Ubuntu nor Rocky.
setup_python_env() {
    local py_version=$1
    # Install pyenv only when it is not already available.
    if ! command -v pyenv &> /dev/null; then
        # NOTE(review): presumably clears a stale file from a previous
        # attempt; `rm -f` on a directory is a no-op — confirm intent.
        rm -f /pyenv
        curl -fsSL https://pyenv.run | bash
    fi
    # Install the build dependencies, check /etc/os-release to see if we are
    # on ubuntu or rocky. If /etc/os-release is absent this step is silently
    # skipped and the pyenv build below may fail for missing headers.
    if [ -f /etc/os-release ]; then
        source /etc/os-release
        if [ "$ID" = "ubuntu" ]; then
            # Use the retry helper to mitigate issues with apt network errors:
            script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
            retry() {
                "${script_dir}/util/retry.sh" 5 30 "$@"
            }
            retry sudo apt update
            retry sudo apt install -y make libssl-dev zlib1g-dev \
                libbz2-dev libreadline-dev libsqlite3-dev curl git \
                libncursesw5-dev xz-utils tk-dev libxml2-dev libxmlsec1-dev libffi-dev liblzma-dev
        elif [ "$ID" = "rocky" ]; then
            # we're inside the rockylinux container, sudo not required/available
            dnf install -y make patch zlib-devel bzip2 bzip2-devel readline-devel \
                sqlite sqlite-devel openssl-devel tk-devel libffi-devel xz-devel libuuid-devel \
                gdbm-libs libnsl2
        else
            echo "Unsupported Linux distribution"
            exit 1
        fi
    fi
    # Always set up pyenv environment (covers both fresh and pre-existing installs).
    export PYENV_ROOT="$HOME/.pyenv"
    [[ -d $PYENV_ROOT/bin ]] && export PATH="$PYENV_ROOT/bin:$PATH"
    eval "$(pyenv init - bash)"
    # Using pyenv, install the Python version. PYENV_DEBUG=1 keeps verbose
    # build output in the CI log for easier debugging.
    PYENV_DEBUG=1 pyenv install -v "${py_version}"
    # Pin this directory to the freshly installed version, then refresh pip.
    pyenv local "${py_version}"
    pip install --upgrade pip
}

View File

@@ -0,0 +1,40 @@
#!/bin/bash
# Argument parser for Python CI scripts.
# Scan the argument list for -py-version (either "-py-version X" or
# "-py-version=X"), store the result in the exported variable py_version,
# and silently ignore everything else. Returns 1 when -py-version is given
# without a value.
parse_python_args() {
    py_version=""
    while (( $# > 0 )); do
        local arg="$1"
        case "$arg" in
            -py-version=*)
                py_version="${arg#-py-version=}"
                ;;
            -py-version)
                shift
                if (( $# == 0 )); then
                    echo "Error: -py-version requires a value" >&2
                    return 1
                fi
                py_version="$1"
                ;;
            *)
                # Unknown argument — not ours to handle.
                ;;
        esac
        shift
    done
    # Export for use by calling script
    export py_version
}
# Verify that parse_python_args found a -py-version argument.
# $1 (optional): usage string to echo to stderr on failure.
# Returns 1 (with an error message) when py_version is empty or unset.
# Uses ${1:-}/${py_version:-} so the check cannot itself crash under the
# callers' `set -u` when no usage string is supplied.
require_py_version() {
    if [[ -z "${py_version:-}" ]]; then
        echo "Error: -py-version is required" >&2
        [[ -n "${1:-}" ]] && echo "$1" >&2
        return 1
    fi
    return 0
}

33
ci/util/retry.sh Executable file
View File

@@ -0,0 +1,33 @@
#!/bin/bash
# retry.sh — run a command up to num_tries times, sleeping sleep_time
# seconds between failed attempts. Exits 0 on the first success, 1 if every
# attempt fails.
if [ "$#" -lt 3 ]; then
    echo "Usage: $0 num_tries sleep_time command [args...]"
    echo "  num_tries: Number of attempts to run the command"
    echo "  sleep_time: Time to wait between attempts (in seconds)"
    echo "  command: The command to run"
    echo "  args: Arguments to pass to the command"
    exit 1
fi

num_tries=$1
sleep_time=$2
shift 2
# ${*@Q} re-quotes each remaining argument (bash 4.4+) so the command can be
# safely echoed and eval'd even when args contain spaces or shell metachars.
command="${*@Q}"

# Loop until the command succeeds or we reach the maximum number of attempts:
for ((i=1; i<=num_tries; i++)); do
    echo "Attempt ${i} of ${num_tries}: Running command '${command}'"
    status=0
    eval "${command}" || status=$?
    if [ $status -eq 0 ]; then
        echo "Command '${command}' succeeded on attempt ${i}."
        exit 0
    fi
    echo "Command '${command}' failed with status ${status}."
    # Only sleep when another attempt will follow — sleeping after the final
    # failure just wastes CI time.
    if (( i < num_tries )); then
        echo "Retrying in ${sleep_time} seconds..."
        sleep "$sleep_time"
    fi
done
echo "Command '${command}' failed after ${num_tries} attempts."
exit 1

1
python/.python-version Normal file
View File

@@ -0,0 +1 @@
3.12

View File

@@ -6,12 +6,21 @@ build-backend = "scikit_build_core.build"
name = "pynvbench"
description = "CUDA Kernel Benchmarking Package"
authors = [{ name = "NVIDIA Corporation" }]
license = { text = "Apache-2.0 WITH LLVM-exception" }
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
"Environment :: GPU :: NVIDIA CUDA",
"License :: OSI Approved :: Apache Software License",
"Operating System :: POSIX :: Linux",
]
requires-python = ">=3.9"
requires-python = ">=3.10"
dependencies = [
# pathfinder
"cuda-pathfinder",
@@ -32,7 +41,9 @@ readme = { file = "README.md", content-type = "text/markdown" }
test = ["pytest", "cupy-cuda12x", "numba"]
[project.urls]
Homepage = "https://developer.nvidia.com/"
Homepage = "https://github.com/NVIDIA/nvbench"
Repository = "https://github.com/NVIDIA/nvbench"
Issues = "https://github.com/NVIDIA/nvbench/issues"
[tool.scikit-build]
minimum-version = "build-system.requires"
@@ -53,6 +64,9 @@ provider = "scikit_build_core.metadata.setuptools_scm"
[tool.setuptools_scm]
root = ".."
git_describe_command = ["git", "describe", "--tags", "--match", "python-*"]
tag_regex = "^python-(?P<version>.*)$"
fallback_version = "0.0.0"
[tool.scikit-build.wheel.packages]
"cuda" = "cuda"