Split the pynvbench wheel test into an outer (host) script and an inner (container) script

This commit is contained in:
Ashwin Srinath
2025-12-03 13:21:53 -05:00
parent 9746aa14df
commit a7f92b7436
3 changed files with 85 additions and 45 deletions

View File

@@ -86,7 +86,7 @@ jobs:
--env py_version=${{ matrix.python }} \
--env cuda_version=${{ matrix.cuda }} \
rapidsai/ci-wheel:25.12-cuda${cuda_full_version}-rockylinux8-py${{ matrix.python }} \
/workspace/ci/test_pynvbench.sh -py-version ${{ matrix.python }} -cuda-version ${{ matrix.cuda }}
/workspace/ci/test_pynvbench_inner.sh
verify-workflow:
name: Verify all builds and tests succeeded

View File

@@ -1,18 +1,15 @@
#!/bin/bash
set -euo pipefail
# Enable verbose output for debugging
set -x
ci_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$ci_dir/pyenv_helper.sh"
usage="Usage: $0 -py-version <python_version> -cuda-version <cuda_version>"
# Parse common arguments
source "$ci_dir/util/python/common_arg_parser.sh"
# Parse arguments including CUDA version
parse_python_args "$@"
# Parse CUDA version
cuda_version=""
while [[ $# -gt 0 ]]; do
case $1 in
@@ -34,51 +31,49 @@ while [[ $# -gt 0 ]]; do
esac
done
# Check if py_version was provided (this script requires it)
require_py_version "$usage" || exit 1
if [[ -z "$cuda_version" ]]; then
echo "Error: -cuda-version is required"
echo "$usage"
exit 1
fi
# Determine CUDA major version from environment
cuda_major_version=$(nvcc --version | grep release | awk '{print $6}' | tr -d ',' | cut -d '.' -f 1 | cut -d 'V' -f 2)
# Setup Python environment (skip if we're already in ci-wheel container with correct Python)
echo "Checking for Python ${py_version}..."
if command -v python &> /dev/null; then
actual_py_version=$(python --version 2>&1 | awk '{print $2}' | cut -d. -f1,2)
echo "Found Python version: ${actual_py_version}"
if [[ "${actual_py_version}" == "${py_version}" ]]; then
echo "Python ${py_version} already available, skipping pyenv setup"
python -m pip install --upgrade pip
else
echo "Python version mismatch (found ${actual_py_version}, need ${py_version})"
echo "Setting up Python ${py_version} with pyenv"
setup_python_env "${py_version}"
fi
# Map cuda_version to full version
if [[ "$cuda_version" == "12" ]]; then
cuda_full_version="12.9.1"
elif [[ "$cuda_version" == "13" ]]; then
cuda_full_version="13.0.1"
else
echo "Python not found, setting up with pyenv"
setup_python_env "${py_version}"
fi
echo "Python setup complete, version: $(python --version)"
# Wheel should be in /workspace/wheelhouse (downloaded by workflow or built locally)
WHEELHOUSE_DIR="/workspace/wheelhouse"
# Find and install pynvbench wheel
# Look for .cu${cuda_version} in the version string (e.g., pynvbench-0.0.1.dev1+g123.cu12-...)
PYNVBENCH_WHEEL_PATH="$(ls ${WHEELHOUSE_DIR}/pynvbench-*.cu${cuda_version}-*.whl 2>/dev/null | head -1)"
if [[ -z "$PYNVBENCH_WHEEL_PATH" ]]; then
echo "Error: No pynvbench wheel found in ${WHEELHOUSE_DIR}"
echo "Looking for: pynvbench-*.cu${cuda_version}-*.whl"
echo "Contents of ${WHEELHOUSE_DIR}:"
ls -la ${WHEELHOUSE_DIR}/ || true
echo "Error: Unsupported CUDA version: $cuda_version"
exit 1
fi
echo "Installing wheel: $PYNVBENCH_WHEEL_PATH"
python -m pip install "${PYNVBENCH_WHEEL_PATH}[test]"
# Use the same rapidsai/ci-wheel images as the build
readonly devcontainer_version=25.12
readonly devcontainer_distro=rockylinux8
# Run tests
cd "/workspace/python/test/"
python -m pytest -v test_nvbench.py
if [[ "$(uname -m)" == "aarch64" ]]; then
readonly cuda_image=rapidsai/ci-wheel:${devcontainer_version}-cuda${cuda_full_version}-${devcontainer_distro}-py${py_version}-arm64
else
readonly cuda_image=rapidsai/ci-wheel:${devcontainer_version}-cuda${cuda_full_version}-${devcontainer_distro}-py${py_version}
fi
echo "::group::🧪 Testing CUDA ${cuda_version} wheel on ${cuda_image}"
(
set -x
docker pull $cuda_image
docker run --rm -i \
--workdir /workspace \
--mount type=bind,source=$(pwd),target=/workspace/ \
--env py_version=${py_version} \
--env cuda_version=${cuda_version} \
$cuda_image \
/workspace/ci/test_pynvbench_inner.sh
# Prevent GHA runners from exhausting available storage with leftover images:
if [[ -n "${GITHUB_ACTIONS:-}" ]]; then
docker rmi -f $cuda_image
fi
)
echo "::endgroup::"

45
ci/test_pynvbench_inner.sh Executable file
View File

@@ -0,0 +1,45 @@
#!/bin/bash
set -euo pipefail
# Target script for `docker run` command in test_pynvbench.sh
# The /workspace pathnames are hard-wired here.
#
# Required environment variables (passed by the outer script via --env):
#   py_version   - Python major.minor to test against (e.g. "3.12")
#   cuda_version - CUDA major version the wheel was built for (e.g. "12")
# Fail immediately with a clear message instead of an opaque unbound-variable
# error partway through the run.
: "${py_version:?py_version must be set (e.g. 3.12)}"
: "${cuda_version:?cuda_version must be set (e.g. 12)}"

# Install GCC 13 toolset (needed for builds that might happen during testing)
/workspace/ci/util/retry.sh 5 30 dnf -y install gcc-toolset-13-gcc gcc-toolset-13-gcc-c++
# printf instead of `echo -e`: portable, no backslash-escape ambiguity.
printf '%s\n' '#!/bin/bash' 'source /opt/rh/gcc-toolset-13/enable' >/etc/profile.d/enable_devtools.sh
source /etc/profile.d/enable_devtools.sh

# Set up Python environment (only if not already available)
source /workspace/ci/pyenv_helper.sh
if ! command -v "python${py_version}" &>/dev/null; then
  setup_python_env "${py_version}"
fi

# Upgrade pip
python -m pip install --upgrade pip
echo "Python version: $(python --version)"
echo "CUDA version: $(nvcc --version | grep release)"

# Wheel should be in /workspace/wheelhouse (downloaded by workflow or built locally)
WHEELHOUSE_DIR="/workspace/wheelhouse"

# Find and install pynvbench wheel
# Look for .cu${cuda_version} in the version string (e.g., pynvbench-0.0.1.dev1+g123.cu12-...)
# Use a shell glob rather than parsing `ls` output so paths with spaces or
# glob characters are handled safely (ShellCheck SC2012/SC2086).
shopt -s nullglob
wheels=( "${WHEELHOUSE_DIR}"/pynvbench-*.cu"${cuda_version}"-*.whl )
shopt -u nullglob
PYNVBENCH_WHEEL_PATH="${wheels[0]:-}"
if [[ -z "$PYNVBENCH_WHEEL_PATH" ]]; then
  # Diagnostics go to stderr so CI logs separate them from normal output.
  echo "Error: No pynvbench wheel found in ${WHEELHOUSE_DIR}" >&2
  echo "Looking for: pynvbench-*.cu${cuda_version}-*.whl" >&2
  echo "Contents of ${WHEELHOUSE_DIR}:" >&2
  ls -la "${WHEELHOUSE_DIR}/" >&2 || true
  exit 1
fi
echo "Installing wheel: $PYNVBENCH_WHEEL_PATH"
python -m pip install "${PYNVBENCH_WHEEL_PATH}[test]"

# Run tests
# Disable pyenv to prevent .python-version file from interfering
export PYENV_VERSION=system
cd "/workspace/python/test/"
python -m pytest -v test_nvbench.py