Files
nvbench/python/examples/cccl_parallel_segmented_reduce.py
Oleksandr Pavlyk 5bdb30f4b6 Update to cccl_parallel_segmented_reduce example per changes in API
Update namespace changes. Use make_segmented_reduce factory function,
and update call signatures.
2026-04-01 08:18:15 -05:00

114 lines
3.0 KiB
Python

# Copyright 2025 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 with the LLVM exception
# (the "License"); you may not use this file except in compliance with
# the License.
#
# You may obtain a copy of the License at
#
# http://llvm.org/foundation/relicensing/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import cuda.bench as bench
import cuda.compute.algorithms as algorithms
import cuda.compute.iterators as iterators
import cuda.core as core
import cupy as cp
import numpy as np
from cuda.compute import OpKind
def as_core_Stream(cs: bench.CudaStream) -> core.Stream:
    """Wrap an nvbench CUDA stream in a ``cuda.core`` Stream object.

    The nvbench stream exposes its raw CUDA stream pointer via
    ``addressof()``; ``cuda.core`` can adopt that handle without
    taking ownership of the underlying stream.
    """
    raw_handle = cs.addressof()
    return core.Stream.from_handle(raw_handle)
def as_cp_ExternalStream(cs: bench.CudaStream) -> cp.cuda.ExternalStream:
    """View an nvbench CUDA stream as a CuPy stream object.

    NOTE(review): the annotation promises ``cp.cuda.ExternalStream`` but the
    body calls ``cp.cuda.Stream.from_external`` — confirm that this factory
    exists in the targeted CuPy version and that its return type matches the
    annotation.
    """
    cupy_stream = cp.cuda.Stream.from_external(cs)
    return cupy_stream
def segmented_reduce(state: bench.State):
    """Benchmark a cuda.compute segmented (per-row) sum reduction.

    Treats the input as an ``(n_rows, n_cols)`` int32 matrix and reduces
    each row (segment) with addition into a length-``n_rows`` output
    vector.  Only the algorithm invocation inside ``launcher`` is timed.

    Parameters
    ----------
    state : bench.State
        nvbench state carrying the ``numElems``/``numCols`` axis values,
        the benchmark stream, and the timing loop (``state.exec``).
    """
    n_elems = state.get_int64("numElems")
    n_cols = state.get_int64("numCols")
    # Number of whole rows; any remainder of numElems is dropped.
    n_rows = n_elems // n_cols
    state.add_summary("numRows", n_rows)

    cp_stream = as_cp_ExternalStream(state.get_stream())

    def make_scaler(step):
        # Build a unary op mapping row_id -> row_id * step, with `step`
        # captured by value so it can be compiled for the device.
        def scale(row_id):
            return row_id * step

        return scale

    zero = np.int32(0)
    row_offset = make_scaler(np.int32(n_cols))
    # Segment i spans [start_offsets[i], end_offsets[i]):
    #   start_offsets[i] = i * n_cols, end_offsets[i] = (i + 1) * n_cols.
    start_offsets = iterators.TransformIterator(
        iterators.CountingIterator(zero), row_offset
    )
    end_offsets = start_offsets + 1

    # 0-d host array holding the initial value of every segment's reduction.
    # (The original assigned this identical value twice; once is enough.)
    h_init = np.zeros(tuple(), dtype=np.int32)

    with cp_stream:
        rng = cp.random.default_rng()
        mat = rng.integers(low=-31, high=32, dtype=np.int32, size=(n_rows, n_cols))
        d_input = mat
        d_output = cp.empty(n_rows, dtype=d_input.dtype)

    add_op = OpKind.PLUS
    alg = algorithms.make_segmented_reduce(
        d_input, d_output, start_offsets, end_offsets, add_op, h_init
    )
    cccl_stream = state.get_stream()

    # Phase 1: with temp_storage=None the algorithm only reports how many
    # bytes of scratch space it needs; nothing is reduced yet.
    temp_nbytes = alg(
        None,
        d_input,
        d_output,
        add_op,
        n_rows,
        start_offsets,
        end_offsets,
        h_init,
        cccl_stream,
    )
    with cp_stream:
        temp_storage = cp.empty(temp_nbytes, dtype=cp.uint8)

    def launcher(launch: bench.Launch):
        # Phase 2: run the reduction on the stream nvbench hands us for
        # this timed launch.
        s = launch.get_stream()
        alg(
            temp_storage,
            d_input,
            d_output,
            add_op,
            n_rows,
            start_offsets,
            end_offsets,
            h_init,
            s,
        )

    state.exec(launcher)
if __name__ == "__main__":
b = bench.register(segmented_reduce)
b.add_int64_axis("numElems", [2**20, 2**22, 2**24])
b.add_int64_axis("numCols", [1024, 2048, 4096, 8192])
bench.run_all_benchmarks(sys.argv)