cuda.nvbench -> cuda.bench

Per PR review suggestion:
   - `cuda.parallel`    - device-wide algorithms/Thrust
   - `cuda.cooperative` - Cooperative algorithms/CUB
   - `cuda.bench`       - Benchmarking/NVBench
This commit is contained in:
Oleksandr Pavlyk
2025-08-04 13:42:43 -05:00
parent c2a2acc9b6
commit b5e4b4ba31
19 changed files with 136 additions and 140 deletions

View File

@@ -16,12 +16,12 @@
import sys
import cuda.nvbench as nvbench
import cuda.bench as bench
import numpy as np
from numba import cuda
def as_cuda_stream(cs: nvbench.CudaStream) -> cuda.cudadrv.driver.Stream:
def as_cuda_stream(cs: bench.CudaStream) -> cuda.cudadrv.driver.Stream:
return cuda.external_stream(cs.addressof())
@@ -39,7 +39,7 @@ def make_throughput_kernel(items_per_thread: int) -> cuda.dispatcher.CUDADispatc
return kernel
def throughput_bench(state: nvbench.State) -> None:
def throughput_bench(state: bench.State) -> None:
stride = state.get_int64("Stride")
ipt = state.get_int64("ItemsPerThread")
@@ -58,7 +58,7 @@ def throughput_bench(state: nvbench.State) -> None:
krn = make_throughput_kernel(ipt)
def launcher(launch: nvbench.Launch):
def launcher(launch: bench.Launch):
exec_stream = as_cuda_stream(launch.get_stream())
krn[blocks_in_grid, threads_per_block, exec_stream, 0](
stride, elements, inp_arr, out_arr
@@ -68,8 +68,8 @@ def throughput_bench(state: nvbench.State) -> None:
if __name__ == "__main__":
b = nvbench.register(throughput_bench)
b = bench.register(throughput_bench)
b.add_int64_axis("Stride", [1, 2, 4])
b.add_int64_axis("ItemsPerThread", [1, 2, 3, 4])
nvbench.run_all_benchmarks(sys.argv)
bench.run_all_benchmarks(sys.argv)

View File

@@ -18,12 +18,12 @@ import ctypes
import sys
from typing import Dict, Optional, Tuple
import cuda.bench as bench
import cuda.cccl.headers as headers
import cuda.core.experimental as core
import cuda.nvbench as nvbench
def as_core_Stream(cs: nvbench.CudaStream) -> core.Stream:
def as_core_Stream(cs: bench.CudaStream) -> core.Stream:
return core.Stream.from_handle(cs.addressof())
@@ -58,34 +58,34 @@ __global__ void sleep_kernel(double seconds) {
return mod.get_kernel("sleep_kernel")
def simple(state: nvbench.State):
def simple(state: bench.State):
state.set_min_samples(1000)
sleep_dur = 1e-3
krn = make_sleep_kernel()
launch_config = core.LaunchConfig(grid=1, block=1, shmem_size=0)
def launcher(launch: nvbench.Launch):
def launcher(launch: bench.Launch):
s = as_core_Stream(launch.get_stream())
core.launch(s, launch_config, krn, sleep_dur)
state.exec(launcher)
def single_float64_axis(state: nvbench.State):
def single_float64_axis(state: bench.State):
# get axis value, or default
default_sleep_dur = 3.14e-4
sleep_dur = state.get_float64_or_default("Duration", default_sleep_dur)
krn = make_sleep_kernel()
launch_config = core.LaunchConfig(grid=1, block=1, shmem_size=0)
def launcher(launch: nvbench.Launch):
def launcher(launch: bench.Launch):
s = as_core_Stream(launch.get_stream())
core.launch(s, launch_config, krn, sleep_dur)
state.exec(launcher)
def default_value(state: nvbench.State):
def default_value(state: bench.State):
single_float64_axis(state)
@@ -120,7 +120,7 @@ __global__ void copy_kernel(const T *in, U *out, ::cuda::std::size_t n)
return mod.get_kernel(instance_name)
def copy_sweep_grid_shape(state: nvbench.State):
def copy_sweep_grid_shape(state: bench.State):
block_size = state.get_int64("BlockSize")
num_blocks = state.get_int64("NumBlocks")
@@ -140,14 +140,14 @@ def copy_sweep_grid_shape(state: nvbench.State):
krn = make_copy_kernel()
launch_config = core.LaunchConfig(grid=num_blocks, block=block_size, shmem_size=0)
def launcher(launch: nvbench.Launch):
def launcher(launch: bench.Launch):
s = as_core_Stream(launch.get_stream())
core.launch(s, launch_config, krn, input_buf, output_buf, num_values)
state.exec(launcher)
def copy_type_sweep(state: nvbench.State):
def copy_type_sweep(state: bench.State):
type_id = state.get_int64("TypeID")
types_map: Dict[int, Tuple[type, str]] = {
@@ -178,7 +178,7 @@ def copy_type_sweep(state: nvbench.State):
krn = make_copy_kernel(value_cuda_t, value_cuda_t)
launch_config = core.LaunchConfig(grid=256, block=256, shmem_size=0)
def launcher(launch: nvbench.Launch):
def launcher(launch: bench.Launch):
s = as_core_Stream(launch.get_stream())
core.launch(s, launch_config, krn, input_buf, output_buf, num_values)
@@ -187,20 +187,20 @@ def copy_type_sweep(state: nvbench.State):
if __name__ == "__main__":
# Benchmark without axes
nvbench.register(simple)
bench.register(simple)
# benchmark with no axes, that uses default value
nvbench.register(default_value)
bench.register(default_value)
# specify axis
nvbench.register(single_float64_axis).add_float64_axis(
bench.register(single_float64_axis).add_float64_axis(
"Duration (s)", [7e-5, 1e-4, 5e-4]
)
copy1_bench = nvbench.register(copy_sweep_grid_shape)
copy1_bench = bench.register(copy_sweep_grid_shape)
copy1_bench.add_int64_axis("BlockSize", [2**x for x in range(6, 10, 2)])
copy1_bench.add_int64_axis("NumBlocks", [2**x for x in range(6, 10, 2)])
copy2_bench = nvbench.register(copy_type_sweep)
copy2_bench = bench.register(copy_type_sweep)
copy2_bench.add_int64_axis("TypeID", range(0, 6))
nvbench.run_all_benchmarks(sys.argv)
bench.run_all_benchmarks(sys.argv)

View File

@@ -16,8 +16,8 @@
import sys
import cuda.bench as bench
import cuda.cccl.cooperative.experimental as coop
import cuda.nvbench as nvbench
import numba
import numpy as np
from numba import cuda
@@ -45,11 +45,11 @@ class BitsetRing:
return op1 & op2
def as_cuda_Stream(cs: nvbench.CudaStream) -> cuda.cudadrv.driver.Stream:
def as_cuda_Stream(cs: bench.CudaStream) -> cuda.cudadrv.driver.Stream:
return cuda.external_stream(cs.addressof())
def multi_block_bench(state: nvbench.State):
def multi_block_bench(state: bench.State):
threads_per_block = state.get_int64("ThreadsPerBlock")
num_blocks = state.get_int64("NumBlocks")
total_elements = threads_per_block * num_blocks
@@ -78,15 +78,11 @@ def multi_block_bench(state: nvbench.State):
d_inp = cuda.to_device(h_inp)
d_out = cuda.device_array(num_blocks, dtype=ring.dt)
cuda_s = as_cuda_Stream(state.get_stream())
# warmup
kernel[num_blocks, threads_per_block, cuda_s, 0](d_inp, d_out)
state.add_element_count(total_elements)
state.add_global_memory_reads(total_elements * h_inp.itemsize)
state.add_global_memory_writes(num_blocks * h_inp.itemsize)
def launcher(launch: nvbench.Launch):
def launcher(launch: bench.Launch):
cuda_s = as_cuda_Stream(launch.get_stream())
kernel[num_blocks, threads_per_block, cuda_s, 0](d_inp, d_out)
@@ -96,8 +92,8 @@ def multi_block_bench(state: nvbench.State):
if __name__ == "__main__":
patch.patch_numba_linker(lto=True)
b = nvbench.register(multi_block_bench)
b = bench.register(multi_block_bench)
b.add_int64_axis("ThreadsPerBlock", [64, 128, 192, 256])
b.add_int64_power_of_two_axis("NumBlocks", [10, 11, 12, 14, 16])
nvbench.run_all_benchmarks(sys.argv)
bench.run_all_benchmarks(sys.argv)

View File

@@ -16,10 +16,10 @@
import sys
import cuda.bench as bench
import cuda.cccl.parallel.experimental.algorithms as algorithms
import cuda.cccl.parallel.experimental.iterators as iterators
import cuda.core.experimental as core
import cuda.nvbench as nvbench
import cupy as cp
import numpy as np
@@ -34,22 +34,22 @@ class CCCLStream:
return (0, self._ptr)
def as_core_Stream(cs: nvbench.CudaStream) -> core.Stream:
def as_core_Stream(cs: bench.CudaStream) -> core.Stream:
return core.Stream.from_handle(cs.addressof())
def as_cccl_Stream(cs: nvbench.CudaStream) -> CCCLStream:
def as_cccl_Stream(cs: bench.CudaStream) -> CCCLStream:
return CCCLStream(cs.addressof())
def as_cp_ExternalStream(
cs: nvbench.CudaStream, dev_id: int | None = -1
cs: bench.CudaStream, dev_id: int | None = -1
) -> cp.cuda.ExternalStream:
h = cs.addressof()
return cp.cuda.ExternalStream(h, dev_id)
def segmented_reduce(state: nvbench.State):
def segmented_reduce(state: bench.State):
"Benchmark segmented_reduce example"
n_elems = state.get_int64("numElems")
n_cols = state.get_int64("numCols")
@@ -100,7 +100,7 @@ def segmented_reduce(state: nvbench.State):
with cp_stream:
temp_storage = cp.empty(temp_nbytes, dtype=cp.uint8)
def launcher(launch: nvbench.Launch):
def launcher(launch: bench.Launch):
s = as_cccl_Stream(launch.get_stream())
alg(
temp_storage,
@@ -117,8 +117,8 @@ def segmented_reduce(state: nvbench.State):
if __name__ == "__main__":
b = nvbench.register(segmented_reduce)
b = bench.register(segmented_reduce)
b.add_int64_axis("numElems", [2**20, 2**22, 2**24])
b.add_int64_axis("numCols", [1024, 2048, 4096, 8192])
nvbench.run_all_benchmarks(sys.argv)
bench.run_all_benchmarks(sys.argv)

View File

@@ -17,21 +17,21 @@
import sys
import time
import cuda.bench as bench
import cuda.cccl.headers as headers
import cuda.core.experimental as core
import cuda.nvbench as nvbench
host_sleep_duration = 0.1
def cpu_only_sleep_bench(state: nvbench.State) -> None:
def launcher(launch: nvbench.Launch):
def cpu_only_sleep_bench(state: bench.State) -> None:
def launcher(launch: bench.Launch):
time.sleep(host_sleep_duration)
state.exec(launcher)
def as_core_Stream(cs: nvbench.CudaStream) -> core.Stream:
def as_core_Stream(cs: bench.CudaStream) -> core.Stream:
return core.Stream.from_handle(cs.addressof())
@@ -66,7 +66,7 @@ __global__ void sleep_kernel(double seconds) {
return mod.get_kernel("sleep_kernel")
def mixed_sleep_bench(state: nvbench.State) -> None:
def mixed_sleep_bench(state: bench.State) -> None:
sync = state.get_string("Sync")
sync_flag = sync == "Do sync"
@@ -74,7 +74,7 @@ def mixed_sleep_bench(state: nvbench.State) -> None:
krn = make_sleep_kernel()
launch_config = core.LaunchConfig(grid=1, block=1, shmem_size=0)
def launcher(launch: nvbench.Launch):
def launcher(launch: bench.Launch):
# host overhead
time.sleep(host_sleep_duration)
# GPU computation
@@ -87,11 +87,11 @@ def mixed_sleep_bench(state: nvbench.State) -> None:
if __name__ == "__main__":
# time function only doing work (sleeping) on the host
# using CPU timer only
b = nvbench.register(cpu_only_sleep_bench)
b = bench.register(cpu_only_sleep_bench)
b.set_is_cpu_only(True)
# time the function that does work on both GPU and CPU
b2 = nvbench.register(mixed_sleep_bench)
b2 = bench.register(mixed_sleep_bench)
b2.add_string_axis("Sync", ["Do not sync", "Do sync"])
nvbench.run_all_benchmarks(sys.argv)
bench.run_all_benchmarks(sys.argv)

View File

@@ -16,18 +16,18 @@
import sys
import cuda.nvbench as nvbench
import cuda.bench as bench
import cupy as cp
def as_cp_ExternalStream(
cs: nvbench.CudaStream, dev_id: int | None = -1
cs: bench.CudaStream, dev_id: int | None = -1
) -> cp.cuda.ExternalStream:
h = cs.addressof()
return cp.cuda.ExternalStream(h, dev_id)
def cupy_extract_by_mask(state: nvbench.State):
def cupy_extract_by_mask(state: bench.State):
n_cols = state.get_int64("numCols")
n_rows = state.get_int64("numRows")
@@ -48,7 +48,7 @@ def cupy_extract_by_mask(state: nvbench.State):
mask = cp.ones((n_cols, n_rows), dtype=bool_dt)
_ = X[mask]
def launcher(launch: nvbench.Launch):
def launcher(launch: bench.Launch):
with as_cp_ExternalStream(launch.get_stream(), dev_id):
_ = X[mask]
@@ -56,8 +56,8 @@ def cupy_extract_by_mask(state: nvbench.State):
if __name__ == "__main__":
b = nvbench.register(cupy_extract_by_mask)
b = bench.register(cupy_extract_by_mask)
b.add_int64_axis("numCols", [1024, 2048, 4096, 2 * 4096])
b.add_int64_axis("numRows", [1024, 2048, 4096, 2 * 4096])
nvbench.run_all_benchmarks(sys.argv)
bench.run_all_benchmarks(sys.argv)

View File

@@ -17,19 +17,19 @@
import sys
import cuda.bench as bench
import cuda.bindings.driver as driver
import cuda.core.experimental as core
import cuda.nvbench as nvbench
import cupy as cp
import cutlass
import numpy as np
def as_bindings_Stream(cs: nvbench.CudaStream) -> driver.CUstream:
def as_bindings_Stream(cs: bench.CudaStream) -> driver.CUstream:
return driver.CUstream(cs.addressof())
def as_core_Stream(cs: nvbench.CudaStream) -> core.Stream:
def as_core_Stream(cs: bench.CudaStream) -> core.Stream:
return core.Stream.from_handle(cs.addressof())
@@ -47,7 +47,7 @@ def make_cp_array(
)
def cutlass_gemm(state: nvbench.State) -> None:
def cutlass_gemm(state: bench.State) -> None:
n = state.get_int64("N")
r = state.get_int64("R")
@@ -96,7 +96,7 @@ def cutlass_gemm(state: nvbench.State) -> None:
# warm-up to ensure compilation is not timed
plan.run(stream=s)
def launcher(launch: nvbench.Launch) -> None:
def launcher(launch: bench.Launch) -> None:
s = as_bindings_Stream(launch.get_stream())
plan.run(stream=s, sync=False)
@@ -104,10 +104,10 @@ def cutlass_gemm(state: nvbench.State) -> None:
if __name__ == "__main__":
gemm_b = nvbench.register(cutlass_gemm)
gemm_b = bench.register(cutlass_gemm)
gemm_b.add_int64_axis("R", [16, 64, 256])
gemm_b.add_int64_axis("N", [256, 512, 1024, 2048])
gemm_b.add_float64_axis("alpha", [1e-2])
nvbench.run_all_benchmarks(sys.argv)
bench.run_all_benchmarks(sys.argv)

View File

@@ -18,12 +18,12 @@ import ctypes
import sys
from typing import Optional
import cuda.bench as bench
import cuda.cccl.headers as headers
import cuda.core.experimental as core
import cuda.nvbench as nvbench
def as_core_Stream(cs: nvbench.CudaStream) -> core.Stream:
def as_core_Stream(cs: bench.CudaStream) -> core.Stream:
"Create view of native stream used by NVBench"
return core.Stream.from_handle(cs.addressof())
@@ -57,7 +57,7 @@ __global__ void fill_kernel(T *buf, T v, ::cuda::std::size_t n)
return mod.get_kernel(instance_name)
def synchronizing_bench(state: nvbench.State):
def synchronizing_bench(state: bench.State):
n_values = 64 * 1024 * 1024
n_bytes = n_values * ctypes.sizeof(ctypes.c_int32(0))
@@ -70,7 +70,7 @@ def synchronizing_bench(state: nvbench.State):
krn = make_fill_kernel()
launch_config = core.LaunchConfig(grid=256, block=256, shmem_size=0)
def launcher(launch: nvbench.Launch):
def launcher(launch: bench.Launch):
s = as_core_Stream(launch.get_stream())
core.launch(s, launch_config, krn, buffer, 0, n_values)
s.sync()
@@ -81,5 +81,5 @@ def synchronizing_bench(state: nvbench.State):
if __name__ == "__main__":
nvbench.register(synchronizing_bench)
nvbench.run_all_benchmarks(sys.argv)
bench.register(synchronizing_bench)
bench.run_all_benchmarks(sys.argv)

View File

@@ -1,18 +1,18 @@
import sys
import cuda.nvbench as nvbench
import cuda.bench as bench
import torch
def as_torch_cuda_Stream(
cs: nvbench.CudaStream, dev: int | None
cs: bench.CudaStream, dev: int | None
) -> torch.cuda.ExternalStream:
return torch.cuda.ExternalStream(
stream_ptr=cs.addressof(), device=torch.cuda.device(dev)
)
def torch_bench(state: nvbench.State) -> None:
def torch_bench(state: bench.State) -> None:
state.set_throttle_threshold(0.25)
dev_id = state.get_device()
@@ -31,7 +31,7 @@ def torch_bench(state: nvbench.State) -> None:
learning_rate = 1e-4
def launcher(launch: nvbench.Launch) -> None:
def launcher(launch: bench.Launch) -> None:
tc_s = as_torch_cuda_Stream(launch.get_stream(), dev_id)
with torch.cuda.stream(tc_s):
x2 = torch.square(x)
@@ -53,6 +53,6 @@ def torch_bench(state: nvbench.State) -> None:
if __name__ == "__main__":
nvbench.register(torch_bench)
bench.register(torch_bench)
nvbench.run_all_benchmarks(sys.argv)
bench.run_all_benchmarks(sys.argv)

View File

@@ -16,12 +16,12 @@
import sys
import cuda.bench as bench
import cuda.cccl.headers as headers
import cuda.core.experimental as core
import cuda.nvbench as nvbench
def as_core_Stream(cs: nvbench.CudaStream) -> core.Stream:
def as_core_Stream(cs: bench.CudaStream) -> core.Stream:
"Create view into native stream provided by NVBench"
return core.Stream.from_handle(cs.addressof())
@@ -57,7 +57,7 @@ __global__ void sleep_kernel(double seconds) {
return mod.get_kernel("sleep_kernel")
def runtime_skip(state: nvbench.State):
def runtime_skip(state: bench.State):
duration = state.get_float64("Duration")
kramble = state.get_string("Kramble")
@@ -74,7 +74,7 @@ def runtime_skip(state: nvbench.State):
krn = make_sleep_kernel()
launch_cfg = core.LaunchConfig(grid=1, block=1, shmem_size=0)
def launcher(launch: nvbench.Launch):
def launcher(launch: bench.Launch):
s = as_core_Stream(launch.get_stream())
core.launch(s, launch_cfg, krn, duration)
@@ -82,8 +82,8 @@ def runtime_skip(state: nvbench.State):
if __name__ == "__main__":
b = nvbench.register(runtime_skip)
b = bench.register(runtime_skip)
b.add_float64_axis("Duration", [1e-4 + k * 0.25e-3 for k in range(5)])
b.add_string_axis("Kramble", ["Foo", "Bar", "Baz"])
nvbench.run_all_benchmarks(sys.argv)
bench.run_all_benchmarks(sys.argv)

View File

@@ -16,12 +16,12 @@
import sys
import cuda.nvbench as nvbench
import cuda.bench as bench
import numpy as np
from numba import cuda
def as_cuda_stream(cs: nvbench.CudaStream) -> cuda.cudadrv.driver.Stream:
def as_cuda_stream(cs: bench.CudaStream) -> cuda.cudadrv.driver.Stream:
return cuda.external_stream(cs.addressof())
@@ -39,7 +39,7 @@ def make_throughput_kernel(items_per_thread: int) -> cuda.dispatcher.CUDADispatc
return kernel
def throughput_bench(state: nvbench.State) -> None:
def throughput_bench(state: bench.State) -> None:
stride = state.get_int64("Stride")
ipt = state.get_int64("ItemsPerThread")
@@ -59,7 +59,7 @@ def throughput_bench(state: nvbench.State) -> None:
krn = make_throughput_kernel(ipt)
def launcher(launch: nvbench.Launch):
def launcher(launch: bench.Launch):
exec_stream = as_cuda_stream(launch.get_stream())
krn[blocks_in_grid, threads_per_block, exec_stream, 0](
stride, elements, inp_arr, out_arr
@@ -69,8 +69,8 @@ def throughput_bench(state: nvbench.State) -> None:
if __name__ == "__main__":
b = nvbench.register(throughput_bench)
b = bench.register(throughput_bench)
b.add_int64_axis("Stride", [1, 2, 4])
b.add_int64_axis("ItemsPerThread", [1, 2, 3, 4])
nvbench.run_all_benchmarks(sys.argv)
bench.run_all_benchmarks(sys.argv)