Initial wheel build and publishing infrastructure

This commit is contained in:
Ashwin Srinath
2025-12-03 10:14:15 -05:00
parent 34f1e2a7ee
commit 29389b5791
12 changed files with 417 additions and 2 deletions

91
ci/build_pynvbench_wheel.sh Executable file
View File

@@ -0,0 +1,91 @@
#!/bin/bash
# Build a pynvbench wheel for the requested Python and CUDA versions inside a
# rapidsai/ci-wheel container. Wheels land in ./wheelhouse.
#
# Usage: build_pynvbench_wheel.sh -py-version <python_version> -cuda-version <cuda_version>
set -euo pipefail

ci_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
usage="Usage: $0 -py-version <python_version> -cuda-version <cuda_version>"

source "$ci_dir/util/python/common_arg_parser.sh"

# Parse arguments including CUDA version. parse_python_args exports py_version;
# it shifts only its own copy of the args, so "$@" is still intact below.
parse_python_args "$@"

# Now parse CUDA version from remaining arguments
cuda_version=""
while [[ $# -gt 0 ]]; do
  case $1 in
    -cuda-version=*)
      cuda_version="${1#*=}"
      shift
      ;;
    -cuda-version)
      if [[ $# -lt 2 ]]; then
        echo "Error: -cuda-version requires a value" >&2
        exit 1
      fi
      cuda_version="$2"
      shift 2
      ;;
    *)
      shift
      ;;
  esac
done

# Check if py_version was provided (this script requires it)
require_py_version "$usage" || exit 1

if [[ -z "$cuda_version" ]]; then
  echo "Error: -cuda-version is required" >&2
  echo "$usage" >&2
  exit 1
fi

# Diagnostic: confirm the Docker socket exists before invoking docker.
echo "Docker socket: $(ls /var/run/docker.sock)"

# Map cuda_version to full version
case "$cuda_version" in
  12)
    cuda_full_version="12.9.1"
    # shellcheck disable=SC2034  # currently unused; kept for wheel tagging
    cuda_short="cu12"
    ;;
  13)
    cuda_full_version="13.0.1"
    # shellcheck disable=SC2034  # currently unused; kept for wheel tagging
    cuda_short="cu13"
    ;;
  *)
    echo "Error: Unsupported CUDA version: $cuda_version" >&2
    exit 1
    ;;
esac

# pynvbench must be built in a container that can produce manylinux wheels,
# and has the CUDA toolkit installed. We use the rapidsai/ci-wheel image for this.
readonly devcontainer_version=25.12
readonly devcontainer_distro=rockylinux8
if [[ "$(uname -m)" == "aarch64" ]]; then
  readonly cuda_image="rapidsai/ci-wheel:${devcontainer_version}-cuda${cuda_full_version}-${devcontainer_distro}-py${py_version}-arm64"
else
  readonly cuda_image="rapidsai/ci-wheel:${devcontainer_version}-cuda${cuda_full_version}-${devcontainer_distro}-py${py_version}"
fi

mkdir -p wheelhouse
echo "::group::⚒️ Building CUDA ${cuda_version} wheel on ${cuda_image}"
(
  set -x
  docker pull "$cuda_image"
  docker run --rm -i \
    --workdir /workspace/python \
    --mount "type=bind,source=$(pwd),target=/workspace/" \
    --env "py_version=${py_version}" \
    --env "cuda_version=${cuda_version}" \
    "$cuda_image" \
    /workspace/ci/build_pynvbench_wheel_inner.sh
  # Prevent GHA runners from exhausting available storage with leftover images:
  if [[ -n "${GITHUB_ACTIONS:-}" ]]; then
    docker rmi -f "$cuda_image"
  fi
)
echo "::endgroup::"

echo "Wheels in wheelhouse:"
ls -la wheelhouse/

View File

@@ -0,0 +1,57 @@
#!/bin/bash
set -euo pipefail
# Target script for `docker run` command in build_pynvbench_wheel.sh.
# Runs inside the rapidsai/ci-wheel container; the /workspace pathnames are
# hard-wired here. Expects py_version in the environment.

# Install GCC 13 toolset (needed for the build)
/workspace/ci/util/retry.sh 5 30 dnf -y install gcc-toolset-13-gcc gcc-toolset-13-gcc-c++
echo -e "#!/bin/bash\nsource /opt/rh/gcc-toolset-13/enable" >/etc/profile.d/enable_devtools.sh
source /etc/profile.d/enable_devtools.sh

# Sanity-check the toolchain the build will pick up.
command -v gcc
gcc --version
command -v nvcc
nvcc --version

# Set up Python environment
source /workspace/ci/pyenv_helper.sh
setup_python_env "${py_version}"
command -v python
python --version
echo "Done setting up python env"

# Ensure we have full git history for setuptools_scm.
# rev-parse prints the string "true"/"false"; compare it instead of
# executing the output as a command (the original `if $(...)` form only
# worked by accident because `true`/`false` happen to be commands).
if [[ "$(git rev-parse --is-shallow-repository)" == "true" ]]; then
  git fetch --unshallow
fi

cd /workspace/python

# Determine CUDA version from nvcc
cuda_major=$(nvcc --version | grep -oP 'release \K[0-9]+\.[0-9]+' | cut -d. -f1)
echo "Detected CUDA major version: ${cuda_major}"

# Configure compilers:
CXX="$(command -v g++)"
CUDACXX="$(command -v nvcc)"
CUDAHOSTCXX="$(command -v g++)"
export CXX CUDACXX CUDAHOSTCXX

# Build the wheel
python -m pip wheel --no-deps --verbose --wheel-dir dist .

# Rename wheel to include CUDA version suffix.
# NOTE(review): appending "+cuNN-pyXY-linux_ARCH" after the existing tags does
# not follow the PEP 427 wheel filename convention (a local version identifier
# belongs inside the version component) -- confirm downstream consumers of
# wheelhouse/ expect this naming before changing it.
for wheel in dist/pynvbench-*.whl; do
  if [[ -f "$wheel" ]]; then
    base_name=$(basename "$wheel" .whl)
    new_name="${base_name}+cu${cuda_major}-py${py_version//.}-linux_$(uname -m).whl"
    mv "$wheel" "dist/${new_name}"
    echo "Renamed wheel to: ${new_name}"
  fi
done

# Move wheel to output directory
mkdir -p /workspace/wheelhouse
mv dist/pynvbench-*+cu*.whl /workspace/wheelhouse/

View File

@@ -68,3 +68,15 @@ pull_request:
- {cuda: *cuda_curr_max, compiler: *llvm18, cpu: 'amd64'}
- {cuda: *cuda_curr_max, compiler: *llvm19, cpu: 'amd64'}
- {cuda: *cuda_curr_max, compiler: *llvm20, cpu: 'amd64'}
# Python wheel builds
python_wheels:
nvcc:
- {cuda: *cuda_prev_max, py_version: '3.10', cpu: 'amd64'}
- {cuda: *cuda_prev_max, py_version: '3.11', cpu: 'amd64'}
- {cuda: *cuda_prev_max, py_version: '3.12', cpu: 'amd64'}
- {cuda: *cuda_prev_max, py_version: '3.13', cpu: 'amd64'}
- {cuda: *cuda_curr_max, py_version: '3.10', cpu: 'amd64'}
- {cuda: *cuda_curr_max, py_version: '3.11', cpu: 'amd64'}
- {cuda: *cuda_curr_max, py_version: '3.12', cpu: 'amd64'}
- {cuda: *cuda_curr_max, py_version: '3.13', cpu: 'amd64'}

0
ci/pretty_printing.sh Normal file → Executable file
View File

48
ci/pyenv_helper.sh Executable file
View File

@@ -0,0 +1,48 @@
#!/bin/bash
# Helper sourced by CI scripts to provision a Python interpreter via pyenv.

# setup_python_env <py_version>
#   Installs pyenv (if missing) and the distro build dependencies, then
#   installs and activates the requested Python version and upgrades pip.
#   Supports Ubuntu (apt, via sudo) and Rocky Linux (dnf, run as root);
#   exits non-zero on any other distribution.
setup_python_env() {
  local py_version=$1

  # check if pyenv is installed
  if ! command -v pyenv &> /dev/null; then
    rm -f /pyenv
    curl -fsSL https://pyenv.run | bash
  fi

  # Install the build dependencies, check /etc/os-release to see if we are on ubuntu or rocky
  if [ -f /etc/os-release ]; then
    source /etc/os-release
    if [ "$ID" = "ubuntu" ]; then
      # Use the retry helper to mitigate issues with apt network errors:
      local script_dir
      script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
      retry() {
        "${script_dir}/util/retry.sh" 5 30 "$@"
      }
      retry sudo apt update
      retry sudo apt install -y make libssl-dev zlib1g-dev \
        libbz2-dev libreadline-dev libsqlite3-dev curl git \
        libncursesw5-dev xz-utils tk-dev libxml2-dev libxmlsec1-dev libffi-dev liblzma-dev
    elif [ "$ID" = "rocky" ]; then
      # we're inside the rockylinux container, sudo not required/available
      dnf install -y make patch zlib-devel bzip2 bzip2-devel readline-devel \
        sqlite sqlite-devel openssl-devel tk-devel libffi-devel xz-devel libuuid-devel \
        gdbm-libs libnsl2
    else
      echo "Unsupported Linux distribution" >&2
      exit 1
    fi
  fi

  # Always set up pyenv environment
  export PYENV_ROOT="$HOME/.pyenv"
  [[ -d $PYENV_ROOT/bin ]] && export PATH="$PYENV_ROOT/bin:$PATH"
  eval "$(pyenv init - bash)"

  # Install the requested Python version. -s skips the install (instead of
  # failing) when the version already exists, making re-runs idempotent.
  # PYENV_DEBUG=1 (previously set here) was leftover debug tracing.
  pyenv install -s -v "${py_version}"
  pyenv local "${py_version}"
  # Invoke pip through the interpreter just selected by `pyenv local` so we
  # upgrade the right environment.
  python -m pip install --upgrade pip
}

View File

@@ -0,0 +1,40 @@
#!/bin/bash
# Argument parser for Python CI scripts.
# Extract -py-version from the given argument list and export it as
# py_version. Both "-py-version X" and "-py-version=X" forms are accepted;
# any other argument is silently skipped so callers may mix in their own
# flags. Returns 1 if -py-version is given without a value.
parse_python_args() {
  py_version=""
  while (( $# > 0 )); do
    local current=$1
    if [[ "$current" == -py-version=* ]]; then
      # Inline "-py-version=VALUE" form.
      py_version=${current#*=}
      shift
    elif [[ "$current" == -py-version ]]; then
      # Two-token "-py-version VALUE" form: a value must follow.
      if (( $# < 2 )); then
        echo "Error: -py-version requires a value" >&2
        return 1
      fi
      py_version=$2
      shift 2
    else
      # Unknown argument, ignore
      shift
    fi
  done
  # Export for use by calling script
  export py_version
}
# Fail (return 1) with an error message on stderr when py_version is empty;
# an optional usage string ($1) is echoed to stderr after the error.
# Succeeds silently when py_version is set.
require_py_version() {
  if [[ -n "$py_version" ]]; then
    return 0
  fi
  echo "Error: -py-version is required" >&2
  [[ -n "$1" ]] && echo "$1" >&2
  return 1
}

33
ci/util/retry.sh Executable file
View File

@@ -0,0 +1,33 @@
#!/bin/bash
# Retry a command up to num_tries times, sleeping sleep_time seconds between
# attempts. Exits 0 on the first success, 1 if every attempt fails.
if [ "$#" -lt 3 ]; then
  echo "Usage: $0 num_tries sleep_time command [args...]" >&2
  echo "  num_tries: Number of attempts to run the command" >&2
  echo "  sleep_time: Time to wait between attempts (in seconds)" >&2
  echo "  command: The command to run" >&2
  echo "  args: Arguments to pass to the command" >&2
  exit 1
fi

num_tries=$1
sleep_time=$2
shift 2

# Execute the remaining args directly as "$@" -- no @Q/eval round-trip, so
# arguments containing spaces or shell metacharacters pass through verbatim.
# Loop until the command succeeds or we reach the maximum number of attempts:
for ((i = 1; i <= num_tries; i++)); do
  echo "Attempt ${i} of ${num_tries}: Running command '$*'"
  status=0
  "$@" || status=$?
  if [ "$status" -eq 0 ]; then
    echo "Command '$*' succeeded on attempt ${i}."
    exit 0
  fi
  # Only sleep when another attempt follows (the original slept pointlessly
  # after the final failure too).
  if (( i < num_tries )); then
    echo "Command '$*' failed with status ${status}. Retrying in ${sleep_time} seconds..."
    sleep "$sleep_time"
  else
    echo "Command '$*' failed with status ${status}."
  fi
done

echo "Command '$*' failed after ${num_tries} attempts."
exit 1