Add decorators for registering benchmarks and adding axes

cuda.bench.register(fn) continues to return a Benchmark, preserving
the legacy call-style use.

New zero-argument signature added:
   cuda.bench.register():
      Returns a decorator that registers the decorated benchmark function

```
@bench.register()
@bench.axis.float64("Duration (s)", [7e-5, 1e-4, 5e-4])
@bench.option.min_samples(120)
def single_float64_axis(state: bench.State):
    ...
```
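
For comparison, a minimal sketch of the equivalent legacy call-style
registration, which remains supported; it follows the pattern this commit
removes from the examples' __main__ blocks:

```
import sys

import cuda.bench as bench


def single_float64_axis(state: bench.State):
    # options were previously set on the State inside the benchmark body
    state.set_min_samples(120)
    ...


if __name__ == "__main__":
    # bench.register(fn) returns a Benchmark; axes are added on it
    b = bench.register(single_float64_axis)
    b.add_float64_axis("Duration (s)", [7e-5, 1e-4, 5e-4])
    bench.run_all_benchmarks(sys.argv)
```

The new decorators map onto the former calls: bench.axis.int64, float64,
string, and power_of_two replace Benchmark.add_int64_axis, add_float64_axis,
add_string_axis, and add_int64_power_of_two_axis, while bench.option
min_samples, is_cpu_only, and throttle_threshold replace the
state.set_min_samples, Benchmark.set_is_cpu_only, and
state.set_throttle_threshold calls seen in the diffs below.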
Author: Oleksandr Pavlyk
Date:   2026-05-04 08:21:41 -05:00
parent 9ea77bccaa
commit e07f87910a
15 changed files with 585 additions and 79 deletions


@@ -1,4 +1,4 @@
-# Copyright 2025 NVIDIA Corporation
+# Copyright 2025-2026 NVIDIA Corporation
 #
 # Licensed under the Apache License, Version 2.0 with the LLVM exception
 # (the "License"); you may not use this file except in compliance with
@@ -39,6 +39,9 @@ def make_throughput_kernel(items_per_thread: int) -> cuda.dispatcher.CUDADispatc
     return kernel
+@bench.register()
+@bench.axis.int64("Stride", [1, 2, 4])
+@bench.axis.int64("ItemsPerThread", [1, 2, 3, 4])
 def throughput_bench(state: bench.State) -> None:
     stride = state.get_int64("Stride")
     ipt = state.get_int64("ItemsPerThread")
@@ -67,8 +70,4 @@ def throughput_bench(state: bench.State) -> None:
 if __name__ == "__main__":
-    b = bench.register(throughput_bench)
-    b.add_int64_axis("Stride", [1, 2, 4])
-    b.add_int64_axis("ItemsPerThread", [1, 2, 3, 4])
     bench.run_all_benchmarks(sys.argv)


@@ -1,4 +1,4 @@
-# Copyright 2025 NVIDIA Corporation
+# Copyright 2025-2026 NVIDIA Corporation
 #
 # Licensed under the Apache License, Version 2.0 with the LLVM exception
 # (the "License"); you may not use this file except in compliance with
@@ -58,8 +58,9 @@ __global__ void sleep_kernel(double seconds) {
     return mod.get_kernel("sleep_kernel")
+@bench.register()
+@bench.option.min_samples(1000)
 def simple(state: bench.State):
-    state.set_min_samples(1000)
     sleep_dur = 1e-3
     krn = make_sleep_kernel()
     launch_config = core.LaunchConfig(grid=1, block=1, shmem_size=0)
@@ -71,6 +72,8 @@ def simple(state: bench.State):
     state.exec(launcher)
+@bench.register()
+@bench.axis.float64("Duration (s)", [7e-5, 1e-4, 5e-4])
 def single_float64_axis(state: bench.State):
     # get axis value, or default
     default_sleep_dur = 3.14e-4
@@ -85,6 +88,7 @@ def single_float64_axis(state: bench.State):
     state.exec(launcher)
+@bench.register()
 def default_value(state: bench.State):
     single_float64_axis(state)
@@ -120,6 +124,9 @@ __global__ void copy_kernel(const T *in, U *out, ::cuda::std::size_t n)
     return mod.get_kernel(instance_name)
+@bench.register()
+@bench.axis.int64("BlockSize", [2**x for x in range(6, 10, 2)])
+@bench.axis.int64("NumBlocks", [2**x for x in range(6, 10, 2)])
 def copy_sweep_grid_shape(state: bench.State):
     block_size = state.get_int64("BlockSize")
     num_blocks = state.get_int64("NumBlocks")
@@ -147,6 +154,8 @@ def copy_sweep_grid_shape(state: bench.State):
     state.exec(launcher)
+@bench.register()
+@bench.axis.int64("TypeID", range(0, 6))
 def copy_type_sweep(state: bench.State):
     type_id = state.get_int64("TypeID")
@@ -186,21 +195,4 @@ def copy_type_sweep(state: bench.State):
 if __name__ == "__main__":
-    # Benchmark without axes
-    bench.register(simple)
-    # benchmark with no axes, that uses default value
-    bench.register(default_value)
-    # specify axis
-    bench.register(single_float64_axis).add_float64_axis(
-        "Duration (s)", [7e-5, 1e-4, 5e-4]
-    )
-    copy1_bench = bench.register(copy_sweep_grid_shape)
-    copy1_bench.add_int64_axis("BlockSize", [2**x for x in range(6, 10, 2)])
-    copy1_bench.add_int64_axis("NumBlocks", [2**x for x in range(6, 10, 2)])
-    copy2_bench = bench.register(copy_type_sweep)
-    copy2_bench.add_int64_axis("TypeID", range(0, 6))
     bench.run_all_benchmarks(sys.argv)


@@ -1,4 +1,4 @@
-# Copyright 2025 NVIDIA Corporation
+# Copyright 2025-2026 NVIDIA Corporation
 #
 # Licensed under the Apache License, Version 2.0 with the LLVM exception
 # (the "License"); you may not use this file except in compliance with
@@ -24,6 +24,8 @@ import cuda.core as core
 host_sleep_duration = 0.1
+@bench.register()
+@bench.option.is_cpu_only()
 def cpu_only_sleep_bench(state: bench.State) -> None:
     def launcher(launch: bench.Launch):
         time.sleep(host_sleep_duration)
@@ -66,6 +68,8 @@ __global__ void sleep_kernel(double seconds) {
     return mod.get_kernel("sleep_kernel")
+@bench.register()
+@bench.axis.string("Sync", ["Do not sync", "Do sync"])
 def mixed_sleep_bench(state: bench.State) -> None:
     sync = state.get_string("Sync")
     sync_flag = sync == "Do sync"
@@ -85,13 +89,4 @@ def mixed_sleep_bench(state: bench.State) -> None:
 if __name__ == "__main__":
-    # time function only doing work (sleeping) on the host
-    # using CPU timer only
-    b = bench.register(cpu_only_sleep_bench)
-    b.set_is_cpu_only(True)
-    # time the function that does work on both GPU and CPU
-    b2 = bench.register(mixed_sleep_bench)
-    b2.add_string_axis("Sync", ["Do not sync", "Do sync"])
     bench.run_all_benchmarks(sys.argv)


@@ -1,4 +1,4 @@
-# Copyright 2025 NVIDIA Corporation
+# Copyright 2025-2026 NVIDIA Corporation
 #
 # Licensed under the Apache License, Version 2.0 with the LLVM exception
 # (the "License"); you may not use this file except in compliance with
@@ -33,6 +33,9 @@ def as_cp_ExternalStream(cs: bench.CudaStream) -> cp.cuda.ExternalStream:
     return cp.cuda.Stream.from_external(cs)
+@bench.register()
+@bench.axis.int64("numElems", [2**20, 2**22, 2**24])
+@bench.axis.int64("numCols", [1024, 2048, 4096, 8192])
 def segmented_reduce(state: bench.State):
     "Benchmark segmented_reduce example"
     n_elems = state.get_int64("numElems")
@@ -111,8 +114,4 @@ def segmented_reduce(state: bench.State):
 if __name__ == "__main__":
-    b = bench.register(segmented_reduce)
-    b.add_int64_axis("numElems", [2**20, 2**22, 2**24])
-    b.add_int64_axis("numCols", [1024, 2048, 4096, 8192])
     bench.run_all_benchmarks(sys.argv)


@@ -1,4 +1,4 @@
-# Copyright 2025 NVIDIA Corporation
+# Copyright 2025-2026 NVIDIA Corporation
 #
 # Licensed under the Apache License, Version 2.0 with the LLVM exception
 # (the "License"); you may not use this file except in compliance with
@@ -48,6 +48,9 @@ def as_cuda_Stream(cs: bench.CudaStream) -> cuda.cudadrv.driver.Stream:
     return cuda.external_stream(cs.addressof())
+@bench.register()
+@bench.axis.int64("ThreadsPerBlock", [64, 128, 192, 256])
+@bench.axis.power_of_two("NumBlocks", [10, 11, 12, 14, 16])
 def multi_block_bench(state: bench.State):
     threads_per_block = state.get_int64("ThreadsPerBlock")
     num_blocks = state.get_int64("NumBlocks")
@@ -91,8 +94,4 @@ def multi_block_bench(state: bench.State):
 if __name__ == "__main__":
-    b = bench.register(multi_block_bench)
-    b.add_int64_axis("ThreadsPerBlock", [64, 128, 192, 256])
-    b.add_int64_power_of_two_axis("NumBlocks", [10, 11, 12, 14, 16])
     bench.run_all_benchmarks(sys.argv)


@@ -1,4 +1,4 @@
-# Copyright 2025 NVIDIA Corporation
+# Copyright 2025-2026 NVIDIA Corporation
 #
 # Licensed under the Apache License, Version 2.0 with the LLVM exception
 # (the "License"); you may not use this file except in compliance with
@@ -24,6 +24,8 @@ def as_cp_ExternalStream(cs: bench.CudaStream):
     return cp.cuda.Stream.from_external(cs)
+@bench.register()
+@bench.axis.int64("numCols", [1024, 2048, 4096, 2 * 4096])
+@bench.axis.int64("numRows", [1024, 2048, 4096, 2 * 4096])
 def cupy_extract_by_mask(state: bench.State):
     n_cols = state.get_int64("numCols")
     n_rows = state.get_int64("numRows")
@@ -51,8 +54,4 @@ def cupy_extract_by_mask(state: bench.State):
 if __name__ == "__main__":
-    b = bench.register(cupy_extract_by_mask)
-    b.add_int64_axis("numCols", [1024, 2048, 4096, 2 * 4096])
-    b.add_int64_axis("numRows", [1024, 2048, 4096, 2 * 4096])
     bench.run_all_benchmarks(sys.argv)


@@ -1,4 +1,4 @@
-# Copyright 2025 NVIDIA Corporation
+# Copyright 2025-2026 NVIDIA Corporation
 #
 # Licensed under the Apache License, Version 2.0 with the LLVM exception
 # (the "License"); you may not use this file except in compliance with
@@ -602,6 +602,9 @@ class SGemm:
         return
+@bench.register()
+@bench.axis.int64("R", [16, 64, 256])
+@bench.axis.int64("N", [256, 512, 1024, 2048])
 def cutlass_gemm(state: bench.State) -> None:
     n = state.get_int64("N")
     r = state.get_int64("R")
@@ -660,8 +663,4 @@ if __name__ == "__main__":
     # see https://github.com/NVIDIA/cutlass/issues/3142
     patch_cute_dsl()
-    gemm_b = bench.register(cutlass_gemm)
-    gemm_b.add_int64_axis("R", [16, 64, 256])
-    gemm_b.add_int64_axis("N", [256, 512, 1024, 2048])
     bench.run_all_benchmarks(sys.argv)


@@ -1,4 +1,4 @@
-# Copyright 2025 NVIDIA Corporation
+# Copyright 2025-2026 NVIDIA Corporation
 #
 # Licensed under the Apache License, Version 2.0 with the LLVM exception
 # (the "License"); you may not use this file except in compliance with
@@ -57,6 +57,7 @@ __global__ void fill_kernel(T *buf, T v, ::cuda::std::size_t n)
     return mod.get_kernel(instance_name)
+@bench.register()
 def synchronizing_bench(state: bench.State):
     n_values = 64 * 1024 * 1024
     n_bytes = n_values * ctypes.sizeof(ctypes.c_int32(0))
@@ -81,5 +82,4 @@ def synchronizing_bench(state: bench.State):
 if __name__ == "__main__":
-    bench.register(synchronizing_bench)
     bench.run_all_benchmarks(sys.argv)


@@ -1,3 +1,19 @@
+# Copyright 2026 NVIDIA Corporation
+#
+# Licensed under the Apache License, Version 2.0 with the LLVM exception
+# (the "License"); you may not use this file except in compliance with
+# the License.
+#
+# You may obtain a copy of the License at
+#
+#     http://llvm.org/foundation/relicensing/LICENSE.txt
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 import sys
 import cuda.bench as bench
@@ -12,9 +28,9 @@ def as_torch_cuda_Stream(
 )
+@bench.register()
+@bench.option.throttle_threshold(0.25)
 def torch_bench(state: bench.State) -> None:
-    state.set_throttle_threshold(0.25)
     dev_id = state.get_device()
     tc_s = as_torch_cuda_Stream(state.get_stream(), dev_id)
@@ -53,6 +69,4 @@ def torch_bench(state: bench.State) -> None:
 if __name__ == "__main__":
-    bench.register(torch_bench)
     bench.run_all_benchmarks(sys.argv)


@@ -1,4 +1,4 @@
-# Copyright 2025 NVIDIA Corporation
+# Copyright 2025-2026 NVIDIA Corporation
 #
 # Licensed under the Apache License, Version 2.0 with the LLVM exception
 # (the "License"); you may not use this file except in compliance with
@@ -57,6 +57,9 @@ __global__ void sleep_kernel(double seconds) {
     return mod.get_kernel("sleep_kernel")
+@bench.register()
+@bench.axis.float64("Duration", [1e-4 + k * 0.25e-3 for k in range(5)])
+@bench.axis.string("Kramble", ["Foo", "Bar", "Baz"])
 def runtime_skip(state: bench.State):
     duration = state.get_float64("Duration")
     kramble = state.get_string("Kramble")
@@ -82,8 +85,4 @@ def runtime_skip(state: bench.State):
 if __name__ == "__main__":
-    b = bench.register(runtime_skip)
-    b.add_float64_axis("Duration", [1e-4 + k * 0.25e-3 for k in range(5)])
-    b.add_string_axis("Kramble", ["Foo", "Bar", "Baz"])
     bench.run_all_benchmarks(sys.argv)


@@ -1,4 +1,4 @@
-# Copyright 2025 NVIDIA Corporation
+# Copyright 2025-2026 NVIDIA Corporation
 #
 # Licensed under the Apache License, Version 2.0 with the LLVM exception
 # (the "License"); you may not use this file except in compliance with
@@ -39,6 +39,9 @@ def make_throughput_kernel(items_per_thread: int) -> cuda.dispatcher.CUDADispatc
     return kernel
+@bench.register()
+@bench.axis.int64("Stride", [1, 2, 4])
+@bench.axis.int64("ItemsPerThread", [1, 2, 3, 4])
 def throughput_bench(state: bench.State) -> None:
     stride = state.get_int64("Stride")
     ipt = state.get_int64("ItemsPerThread")
@@ -69,8 +72,4 @@ def throughput_bench(state: bench.State) -> None:
 if __name__ == "__main__":
-    b = bench.register(throughput_bench)
-    b.add_int64_axis("Stride", [1, 2, 4])
-    b.add_int64_axis("ItemsPerThread", [1, 2, 3, 4])
     bench.run_all_benchmarks(sys.argv)