mirror of
https://github.com/NVIDIA/nvbench.git
synced 2026-05-14 10:07:25 +00:00
Implements `cuda.bench.results.BenchmarkResult` class to represent data from JSON output of benchmark execution.
The module implements two class methods: `BenchmarkResult.from_json(filename : str | os.PathLike, *, metadata : Any = None)`, which expects the name of a well-formed JSON file, and `BenchmarkResult.empty(*, metadata : Any = None)`, intended to represent a failed result whose reasons can be recorded in metadata at the user's discretion.
The `BenchmarkResult` implements mapping interface, supporting `.keys()`, `.values()`, `.items()` methods, `__len__`, `__contains__`, `__getitem__` and `__iter__` special methods.
Values in `BenchmarkResult` have type `cuda.bench.results.SubBenchmarkResult`, which implements a list-like interface, i.e. the `__len__`, `__getitem__`, and `__iter__` special methods. Values in this list-like structure correspond to measurements of individual states of a particular benchmark (the key in `BenchmarkResult`).
Elements of `SubBenchmarkResult` structure have type `SubBenchmarkState` that supports mapping protocol with axis_values as a key and represent data corresponding to measurements for a particular state (combination of settings for each axis).
The state provides `.samples` and `.frequencies` attributes storing raw execution duration values and estimates for average GPU frequencies.
Example usage:
```
import array, numpy as np, cuda.bench.results
r = cuda.bench.results.BenchmarkResult("perf_data/axes_run1.json")
r["copy_sweep_grid_shape"].centers_with_frequencies(
lambda t, f: np.median(np.asarray(t)*np.asarray(f)))
```
```
In [1]: import array, numpy as np, cuda.bench.results
In [2]: r = cuda.bench.results.BenchmarkResult("temp_data/axes_run1.json")
In [3]: list(r)
Out[3]:
['simple',
'single_float64_axis',
'copy_sweep_grid_shape',
'copy_type_sweep',
'copy_type_conversion_sweep',
'copy_type_and_block_size_sweep']
In [4]: r["simple"].centers(lambda t: np.percentile(t, [25,75]))
Out[4]: {'Device=0': array([0.00100966, 0.00101299])}
In [5]: r.centers(lambda t: np.percentile(t, [25,75]))["simple"]
Out[5]: {'Device=0': array([0.00100966, 0.00101299])}
In [6]: len(r)
Out[6]: 6
In [7]: "fake" in r
Out[7]: False
```
Each `SubBenchmarkState` implements a
`.summaries` attribute — a rich object that retains tag/name/hint/hide/description metadata.
* Add nvbench-json-summary to render NVBench JSON output as an NVBench-style
markdown summary table, including axis formatting, device sections, hidden
summary filtering, and summary hint formatting.
Update packaging, type stubs, and tests for the new namespace, renamed
classes, Python 3.10-compatible annotations, and summary-table generation.
* Split tests in test_benchmark_result into smaller tests
* Fix break due to file name change
* Add python/examples/benchmark_result_autotune.py
This example demonstrates using cuda.bench and cuda.bench.results
to implement simple auto-tuning, demonstrated by selecting the
tile-shape hyperparameter for a naive stencil kernel implemented
in numba-cuda.
* Resolve ruff PLE0604
* Fix for format_axis_value in json format script to handle None value
Add tests to cover such input.
* Address code rabbit review feedback
* Fix license header, add validation
* Addressed both issues raised in review
Malformed values are now represented in result as None.
Skipped benchmarks are no longer dropped, i.e., they are present
in BenchmarkResult data, but they are not reflected in summary
table in line with what NVBench-instrumented benchmarks do.
131 lines
3.5 KiB
Python
131 lines
3.5 KiB
Python
# Copyright 2025 NVIDIA Corporation
|
|
#
|
|
# Licensed under the Apache License, Version 2.0 with the LLVM exception
|
|
# (the "License"); you may not use this file except in compliance with
|
|
# the License.
|
|
#
|
|
# You may obtain a copy of the License at
|
|
#
|
|
# http://llvm.org/foundation/relicensing/LICENSE.txt
|
|
#
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
# See the License for the specific language governing permissions and
|
|
# limitations under the License.
|
|
|
|
import importlib
|
|
import importlib.metadata
|
|
import warnings
|
|
|
|
# Resolve the package version from installed distribution metadata.
# A broad `except Exception` is deliberate: any metadata failure (package not
# installed, corrupted metadata, etc.) degrades to a sentinel dev version with
# a warning rather than preventing import of the package.
try:
    __version__ = importlib.metadata.version("cuda-bench")
except Exception as e:
    # Fall-back sentinel; the warning records the original failure for users.
    __version__ = "0.0.0dev"
    warnings.warn(
        "Could not retrieve version of cuda-bench package dynamically from its metadata. "
        f"Exception {e} was raised. "
        f"Version is set to fall-back value '{__version__}' instead."
    )
|
|
|
|
|
|
# Public API names re-exported from the compiled `_nvbench` extension once it
# is loaded (see `_bind_nvbench_module` and the lazy `__getattr__` below).
_NVBENCH_EXPORTS = (
    "Benchmark",
    "CudaStream",
    "Launch",
    "NVBenchRuntimeError",
    "State",
    "register",
    "run_all_benchmarks",
)

# Private helpers exposed from the extension solely for the test suite;
# intentionally excluded from `__all__`.
_NVBENCH_TEST_EXPORTS = (
    "_test_cpp_exception",
    "_test_py_exception",
)

__all__ = list(_NVBENCH_EXPORTS)

# Cache for the lazily imported extension module; populated once by
# `_load_nvbench_module` so repeated attribute access is cheap.
_nvbench_module = None
|
|
|
|
|
|
# Detect CUDA runtime version and load appropriate extension
|
|
def _get_cuda_major_version():
|
|
"""Detect the CUDA runtime major version."""
|
|
try:
|
|
import cuda.bindings
|
|
|
|
# Get CUDA version from cuda-bindings package version
|
|
# cuda-bindings version is in format like "12.9.1" or "13.0.0"
|
|
version_str = cuda.bindings.__version__
|
|
major = int(version_str.split(".")[0])
|
|
return major
|
|
except ImportError:
|
|
raise ImportError(
|
|
"cuda-bindings is required for runtime CUDA version detection. "
|
|
"Install with: pip install cuda-bench[cu12] or pip install cuda-bench[cu13]"
|
|
)
|
|
|
|
|
|
def _bind_nvbench_module(module):
    """Copy the extension module's public and test symbols into this namespace."""
    ns = globals()

    for export in _NVBENCH_EXPORTS:
        ns[export] = getattr(module, export)
        # Re-home exported objects so they appear to live in this package.
        ns[export].__module__ = __name__

    # Test-only helpers are re-exported as-is (their __module__ is untouched).
    for export in _NVBENCH_TEST_EXPORTS:
        ns[export] = getattr(module, export)

    # Keep the raw extension reachable as `_nvbench` for backward
    # compatibility (e.g., for tests).
    ns["_nvbench"] = module
|
|
|
|
|
|
def _load_nvbench_module():
    """Import, bind, and cache the CUDA-version-specific extension module.

    Subsequent calls return the cached module without re-importing.
    """
    global _nvbench_module

    # Fast path: the extension was already loaded and bound.
    if _nvbench_module is not None:
        return _nvbench_module

    cuda_major = _get_cuda_major_version()
    extra_name = f"cu{cuda_major}"
    module_fullname = f"cuda.bench.{extra_name}._nvbench"

    try:
        module = importlib.import_module(module_fullname)
    except ImportError as e:
        raise ImportError(
            f"No cuda-bench extension found for CUDA {cuda_major}.x. "
            f"This wheel may not include support for your CUDA version. "
            f"Supported CUDA versions: 12, 13. "
            f"Original error: {e}"
        ) from e

    # Expose the extension's symbols in this namespace, then remember the
    # module so later calls skip the import machinery entirely.
    _bind_nvbench_module(module)
    _nvbench_module = module
    return module
|
|
|
|
|
|
def __getattr__(name):
    """Lazily load the native extension on first access to one of its names (PEP 562)."""
    if name == "_nvbench":
        return _load_nvbench_module()

    # Any exported symbol triggers the load, which binds all exports into
    # globals() as a side effect — so the lookup below then succeeds.
    if name in _NVBENCH_EXPORTS or name in _NVBENCH_TEST_EXPORTS:
        _load_nvbench_module()
        return globals()[name]

    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
|
|
|
|
|
|
def __dir__():
    """Advertise lazily-bound extension names alongside the current globals."""
    names = set(globals())
    names.update(_NVBENCH_EXPORTS)
    names.update(_NVBENCH_TEST_EXPORTS)
    names.add("_nvbench")
    return sorted(names)
|
|
|
|
|
|
__doc__ = """
|
|
CUDA Kernel Benchmarking Library Python API
|
|
"""
|