[rocm-libraries] ROCm/rocm-libraries#5168 (commit 8b5afcb)

[CK] [CK_Tile] Add GroupConv to Kernel Dispatcher

## Motivation

This PR adds CK Tile grouped convolution (forward, backward-data, and
backward-weight) support to the kernel dispatcher, matching the existing
dispatcher GEMM infrastructure in both architecture and usability. The
dispatcher provides a unified kernel dispatch system with C++ and Python
frontends and, until now, supported only GEMM operations. Framework
integrators can now use the same declarative kernel workflow for
convolutions as for GEMM: declare kernels, JIT-compile them into a
registry, select a kernel from the registry at runtime, and dispatch to
the GPU. Future PRs will add runtime kernel-selection heuristics that
autotune kernel parameters for a given (problem, hardware architecture)
pair.
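
For concreteness, a minimal sketch of that workflow using the `grouped_conv_utils` helpers from the Python examples added in this PR (call signatures follow those examples; the minimal config relies on the auto-filled defaults demonstrated in Example 01):

```python
# Minimal workflow sketch; all names come from the grouped_conv_utils module
# that ships with the Python examples in this PR.
import numpy as np
from grouped_conv_utils import (
    GroupedConvKernelConfig,
    GroupedConvProblem,
    GroupedConvRegistry,
    detect_gpu_arch,
)

# Declare a kernel (remaining config fields are auto-filled) and register it.
reg = GroupedConvRegistry("demo")
reg.add(GroupedConvKernelConfig(variant="forward", ndim_spatial=2,
                                arch=detect_gpu_arch(), dtype="fp16"))

# JIT-compile the registry in parallel; runners are keyed by (variant, ndim).
runners = reg.build(verbose=False, max_workers=4)
runner = runners[("forward", 2)]

# Define a problem and dispatch to the GPU.
prob = GroupedConvProblem(N=1, C=64, K=128, Hi=16, Wi=16, Y=3, X=3,
                          pad_h=1, pad_w=1, direction="forward")
inp = np.random.uniform(-0.5, 0.5, prob.input_shape()).astype(np.float16)
wei = np.random.uniform(-0.5, 0.5, prob.weight_shape()).astype(np.float16)
res = runner.run(inp, wei, prob)
print(res.tflops, res.output.shape)
runner.cleanup()
```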

## Technical Details

Grouped convolution support has been added to the CK Tile dispatcher:

- `generated_conv_backend.hpp` enables `dispatcher.run(in, wei, out, problem)`
  for all 6 conv variants (fwd/bwd-data/bwd-weight x 2D/3D), runtime heuristic
  kernel selection, and a `GroupedConvKernelKey` that carries the full set of
  `ConvConfigBase` fields.
- The Python side adds parallel JIT compilation via `registry.build(max_workers)`
  and heuristic selection via `registry.select()`; a selection sketch follows
  the table below.
- 7 C++ and 6 Python examples cover all directions, with CPU reference
  validation.
- Shared infrastructure improvements: a `BaseRegistry` CRTP base class and
  structured exceptions.

As a sanity check, JIT compile time for a single kernel is unchanged, and
multiple kernels now compile with useful parallelism:
| Kernels | 1 worker | 8 workers |
|--------:|---------:|----------:|
| 1 | 7.7 s | 7.7 s |
| 2 | 15.9 s | 8.2 s |
| 4 | 33.4 s | 9.7 s |
| 6 | 52.3 s | 10.2 s |
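
A short sketch of the heuristic selection path, mirroring Example 06 below. The selection semantics are as exercised in that example, not a formal API contract, and passing only `tile_n`/`tile_k` assumes the remaining config fields are auto-filled as in Example 01's minimal pattern:

```python
from grouped_conv_utils import (
    GroupedConvKernelConfig,
    GroupedConvProblem,
    GroupedConvRegistry,
    detect_gpu_arch,
)

def conv_heuristic(problem):
    # Prefer larger tiles for larger output spatial extents (Example 06's policy).
    spatial = problem.Ho * problem.Wo
    return ["256", "128", "64"] if spatial > 400 else ["64", "128", "256"]

# Registry with one forward kernel per candidate tile size.
reg = GroupedConvRegistry("tiles")
for tile in (256, 128, 64):
    reg.add(GroupedConvKernelConfig(variant="forward", ndim_spatial=2,
                                    arch=detect_gpu_arch(), dtype="fp16",
                                    tile_n=tile, tile_k=tile))

prob = GroupedConvProblem(N=1, C=64, K=128, Hi=56, Wi=56, Y=3, X=3,
                          pad_h=1, pad_w=1, direction="forward")
selected = reg.select(prob, heuristic=conv_heuristic)
print(selected.name if selected else "none")
```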

## Test Plan

145 ephemeral unit tests have been added covering basic functionality.
All 30 examples/integration tests run end-to-end on gfx950 (MI350): 7
C++ conv, 7 C++ GEMM, 6 Python conv, and 10 Python GEMM. CPU reference
validation for forward, backward-data, and backward-weight (2D) passes
in both the C++ and Python examples.

## Test Result

30 examples pass. Peak performance: 132 TFLOPS (Batch-32 forward 56x56);
pointwise 1x1 reaches 53 TFLOPS. CPU reference accuracy: max_abs_diff <
0.002 for all directions (fp16 vs fp32 reference).

## Submission Checklist

- [x] Look over the contributing guidelines at
https://github.com/ROCm/ROCm/blob/develop/CONTRIBUTING.md#pull-requests.
Vidyasagar Ananthan
2026-04-09 17:39:35 +00:00
committed by assistant-librarian[bot]
parent 4c0e73ab12
commit 920acd2c12
86 changed files with 15538 additions and 1500 deletions

View File: 01_basic_grouped_conv.py

@@ -0,0 +1,271 @@
#!/usr/bin/env python3
# Copyright (c) Advanced Micro Devices, Inc., or its affiliates.
# SPDX-License-Identifier: MIT
"""
Example 01: Basic Grouped Convolution
Demonstrates:
1. Three kernel configuration patterns (minimal, explicit, full ConvConfigBase)
2. Adding kernels to a registry
3. Validation and auto-correction
4. JIT compilation via registry.build()
5. GPU execution with CPU reference verification
Usage:
python3 01_basic_grouped_conv.py
python3 01_basic_grouped_conv.py --variant bwd_data
python3 01_basic_grouped_conv.py --arch gfx942
"""
import sys
import argparse
import time
import numpy as np
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent / "python"))
from grouped_conv_utils import (
GroupedConvKernelConfig,
GroupedConvProblem,
GroupedConvRegistry,
validate_grouped_conv_config,
auto_correct_grouped_conv_config,
detect_gpu_arch,
)
def cpu_conv2d_fwd(inp, wei, prob):
"""Naive CPU reference: 2D forward, NHWGC layout."""
N, Hi, Wi, G, Cpg = inp.shape
_, Kpg, Y, X, _ = wei.shape
Ho, Wo = prob.Ho, prob.Wo
out = np.zeros((N, Ho, Wo, G, Kpg), dtype=np.float32)
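    # out[n, ho, wo, g, k] = sum over (y, x, c) of
    #   inp[n, ho*stride_h - pad_h + y*dilation_h,
    #          wo*stride_w - pad_w + x*dilation_w, g, c] * wei[g, k, y, x, c]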
for n in range(N):
for g in range(G):
for ho in range(Ho):
for wo in range(Wo):
for k in range(Kpg):
s = 0.0
for y in range(Y):
for x in range(X):
hi = (
ho * prob.stride_h
- prob.pad_h
+ y * prob.dilation_h
)
wi = (
wo * prob.stride_w
- prob.pad_w
+ x * prob.dilation_w
)
if 0 <= hi < Hi and 0 <= wi < Wi:
for c in range(Cpg):
s += float(inp[n, hi, wi, g, c]) * float(
wei[g, k, y, x, c]
)
out[n, ho, wo, g, k] = s
return out
def main():
parser = argparse.ArgumentParser(description="Basic Grouped Conv Example")
parser.add_argument("--dtype", default="fp16", choices=["fp16", "bf16"])
parser.add_argument(
"--variant", default="forward", choices=["forward", "bwd_data", "bwd_weight"]
)
parser.add_argument("--ndim", type=int, default=2, choices=[2, 3])
parser.add_argument("--arch", default=detect_gpu_arch())
parser.add_argument(
"--workers", type=int, default=0, help="Max JIT workers (0=auto)"
)
args = parser.parse_args()
print("=" * 70)
print("Example 01: Basic Grouped Convolution")
print("=" * 70)
# =========================================================================
# Step 1: Three kernel configuration patterns
# =========================================================================
print("\n--- Step 1: Kernel Configuration Patterns ---")
# Pattern 1: MINIMAL -- only variant/dtype/arch, everything else auto-filled
config_minimal = GroupedConvKernelConfig(
variant=args.variant,
ndim_spatial=args.ndim,
arch=args.arch,
dtype=args.dtype,
)
print("\n Pattern 1: MINIMAL (defaults auto-filled)")
config_minimal.print_config(indent=" ")
# Pattern 2: EXPLICIT tile/wave/warp -- user controls tiling strategy
config_explicit = GroupedConvKernelConfig(
variant=args.variant,
ndim_spatial=args.ndim,
arch=args.arch,
dtype=args.dtype,
tile_m=1,
tile_n=64,
tile_k=64,
wave_m=1,
wave_n=4,
wave_k=1,
warp_tile_m=16,
warp_tile_n=16,
warp_tile_k=32,
pipeline="compv3",
scheduler="intrawave",
epilogue="cshuffle",
)
print("\n Pattern 2: EXPLICIT tile/wave/warp")
config_explicit.print_config(indent=" ")
# Pattern 3: FULL ConvConfigBase -- every parameter specified
config_full = GroupedConvKernelConfig(
variant=args.variant,
ndim_spatial=args.ndim,
arch=args.arch,
dtype=args.dtype,
tile_m=1,
tile_n=128,
tile_k=128,
wave_m=2,
wave_n=2,
wave_k=1,
warp_tile_m=32,
warp_tile_n=32,
warp_tile_k=16,
pipeline="compv3",
scheduler="intrawave",
epilogue="cshuffle",
vector_size_a=4,
vector_size_b=8,
vector_size_c=8,
block_per_cu=1,
num_wave_groups=1,
num_groups_to_merge=1,
)
print("\n Pattern 3: FULL (all ConvConfigBase fields)")
config_full.print_config(indent=" ")
# =========================================================================
# Step 2: Build a registry with multiple configs
# =========================================================================
print("\n--- Step 2: Build Registry ---")
registry = GroupedConvRegistry("basic_conv")
registry.add(config_minimal)
registry.add(config_explicit)
registry.add(config_full)
registry.print_registry()
# =========================================================================
# Step 3: Validate and auto-correct
# =========================================================================
print("\n--- Step 3: Validate & Auto-Correct ---")
for i, cfg in enumerate(registry.kernels):
result = validate_grouped_conv_config(cfg.to_dict())
if result.is_valid:
print(f" Config [{i}] {cfg.tile_str}: VALID")
else:
print(f" Config [{i}] {cfg.tile_str}: needs correction")
corrected, result = auto_correct_grouped_conv_config(cfg.to_dict())
print(f" After correction: valid={result.is_valid}")
# =========================================================================
# Step 4: JIT compile via registry.build()
# =========================================================================
print("\n--- Step 4: JIT Build (via registry.build()) ---")
# Use only the first config for the actual GPU run
jit_reg = GroupedConvRegistry("jit")
jit_reg.add(config_minimal)
workers = args.workers if args.workers > 0 else None
t0 = time.perf_counter()
runners = jit_reg.build(verbose=False, max_workers=workers)
jit_build_s = time.perf_counter() - t0
key = (args.variant, args.ndim)
if key not in runners:
print(" JIT build failed")
return 1
runner = runners[key]
print(f" JIT build: {jit_build_s:.3f} s")
print(f" Library: {runner.library_path}")
print(f" Kernels: {runner.lib.kernel_names()}")
# =========================================================================
# Step 5: Define problem + GPU execution
# =========================================================================
print("\n--- Step 5: GPU Execution ---")
prob = GroupedConvProblem(
N=1,
C=64,
K=128,
Hi=16,
Wi=16,
Y=3,
X=3,
stride_h=1,
stride_w=1,
pad_h=1,
pad_w=1,
direction=args.variant,
)
prob.print_problem()
inp = np.random.uniform(-0.5, 0.5, prob.input_shape()).astype(np.float16)
wei = np.random.uniform(-0.5, 0.5, prob.weight_shape()).astype(np.float16)
res = runner.run(inp, wei, prob)
if not res.success:
print(f" GPU execution failed: {res.error}")
runner.cleanup()
return 1
print(f" Time: {res.time_ms:.4f} ms")
print(f" TFLOPS: {res.tflops:.2f}")
print(
f" Output: shape={res.output.shape}, range=[{res.output.min():.3f}, {res.output.max():.3f}]"
)
# =========================================================================
# Step 6: CPU reference (forward 2D only)
# =========================================================================
verified = False
if args.variant == "forward" and args.ndim == 2:
print("\n--- Step 6: CPU Reference Verification ---")
ref = cpu_conv2d_fwd(inp, wei, prob)
gpu_f32 = res.output.astype(np.float32)
diff = np.abs(gpu_f32 - ref)
max_abs = diff.max()
max_rel = (diff / (np.abs(ref) + 1e-6)).max()
match = np.allclose(gpu_f32, ref, atol=0.05, rtol=0.05)
print(f" max_abs_diff: {max_abs:.6f}")
print(f" max_rel_diff: {max_rel:.6f}")
print(f" Match: {match}")
verified = match
runner.cleanup()
# Summary
print("\n" + "=" * 70)
    # CPU verification only runs for forward 2D; other variants/dims pass on
    # successful GPU execution alone.
    cpu_checked = args.variant == "forward" and args.ndim == 2
    status = "PASS" if res.success and (verified or not cpu_checked) else "FAIL"
print(f" Status: {status}")
print(
f" {config_minimal.name} | {prob.gflops:.2f} GFLOPs | {res.tflops:.2f} TFLOPS"
)
print(f" JIT build time: {jit_build_s:.3f} s")
print(f" Registry: {len(registry)} configs (3 patterns demonstrated)")
print("=" * 70)
return 0 if status == "PASS" else 1
if __name__ == "__main__":
sys.exit(main())

View File: 02_forward.py

@@ -0,0 +1,222 @@
#!/usr/bin/env python3
# Copyright (c) Advanced Micro Devices, Inc., or its affiliates.
# SPDX-License-Identifier: MIT
"""
Example 02: Forward Convolution (2D + 3D)
Declares forward kernels with explicit tile/wave/warp/pipeline parameters,
builds a registry, JIT compiles, runs on GPU, and validates against CPU reference.
Usage:
python3 02_forward.py
python3 02_forward.py --arch gfx942
"""
import sys
import argparse
import time
import numpy as np
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent / "python"))
from grouped_conv_utils import (
GroupedConvKernelConfig,
GroupedConvProblem,
GroupedConvRegistry,
detect_gpu_arch,
)
def cpu_conv2d_fwd(inp, wei, prob):
"""Naive CPU reference: 2D forward, NHWGC layout."""
N, Hi, Wi, G, C = inp.shape
_, Kpg, Y, X, _ = wei.shape
Ho, Wo = prob.Ho, prob.Wo
out = np.zeros((N, Ho, Wo, G, Kpg), dtype=np.float32)
for n in range(N):
for g in range(G):
for ho in range(Ho):
for wo in range(Wo):
for k in range(Kpg):
s = 0.0
for y in range(Y):
for x in range(X):
hi = ho * prob.stride_h - prob.pad_h + y
wi = wo * prob.stride_w - prob.pad_w + x
if 0 <= hi < Hi and 0 <= wi < Wi:
for c in range(C):
s += float(inp[n, hi, wi, g, c]) * float(
wei[g, k, y, x, c]
)
out[n, ho, wo, g, k] = s
return out
def main():
parser = argparse.ArgumentParser(description="Forward Convolution (2D + 3D)")
parser.add_argument("--arch", default=detect_gpu_arch())
parser.add_argument("--dtype", default="fp16", choices=["fp16", "bf16"])
parser.add_argument(
"--workers", type=int, default=0, help="Max JIT workers (0=auto)"
)
args = parser.parse_args()
arch = args.arch
print("=" * 70)
print("Example 02: Forward Convolution (2D + 3D)")
print("=" * 70)
print(f"\n Arch: {arch}, Dtype: {args.dtype}")
# =========================================================================
# Step 1: Declare forward kernels with explicit parameters
# =========================================================================
print("\n--- Step 1: Declare Forward Kernels ---")
reg = GroupedConvRegistry("forward_conv")
# Forward 2D: compv4, 128x128 tile, wave 2x2x1, warp 32x32x16
reg.add(
GroupedConvKernelConfig(
variant="forward",
ndim_spatial=2,
arch=arch,
dtype=args.dtype,
tile_m=1,
tile_n=128,
tile_k=128,
wave_m=2,
wave_n=2,
wave_k=1,
warp_tile_m=32,
warp_tile_n=32,
warp_tile_k=16,
pipeline="compv4",
scheduler="intrawave",
epilogue="cshuffle",
vector_size_a=4,
vector_size_b=8,
vector_size_c=8,
block_per_cu=1,
)
)
# Forward 3D: compv3, 64x64 tile, wave 1x4x1, warp 16x16x32
reg.add(
GroupedConvKernelConfig(
variant="forward",
ndim_spatial=3,
arch=arch,
dtype=args.dtype,
tile_m=1,
tile_n=64,
tile_k=64,
wave_m=1,
wave_n=4,
wave_k=1,
warp_tile_m=16,
warp_tile_n=16,
warp_tile_k=32,
pipeline="compv3",
scheduler="intrawave",
epilogue="cshuffle",
vector_size_a=4,
vector_size_b=8,
vector_size_c=8,
block_per_cu=1,
)
)
reg.print_registry()
# =========================================================================
# Step 2: JIT build via registry
# =========================================================================
print("\n--- Step 2: JIT Build ---")
workers = args.workers if args.workers > 0 else None
t0 = time.perf_counter()
runners = reg.build(verbose=False, max_workers=workers)
jit_s = time.perf_counter() - t0
print(f" Built {len(runners)} runners in {jit_s:.1f}s")
for key in [("forward", 2), ("forward", 3)]:
tag = "OK" if key in runners else "FAILED"
print(f" {key[0]} {key[1]}D: {tag}")
if ("forward", 2) not in runners:
print(" ERROR: forward 2D JIT failed")
return 1
np_dtype = np.float16 if args.dtype in ["fp16", "bf16"] else np.float32
# =========================================================================
# Step 3: Forward 2D -- GPU + CPU reference
# =========================================================================
print("\n--- Step 3: Forward 2D ---")
prob_2d = GroupedConvProblem(
N=1, C=64, K=64, Hi=8, Wi=8, Y=3, X=3, pad_h=1, pad_w=1, direction="forward"
)
prob_2d.print_problem()
x = np.random.uniform(-0.5, 0.5, prob_2d.input_shape()).astype(np_dtype)
w = np.random.uniform(-0.5, 0.5, prob_2d.weight_shape()).astype(np_dtype)
res = runners[("forward", 2)].run(x, w, prob_2d)
print(f" Time: {res.time_ms:.4f} ms")
print(f" TFLOPS: {res.tflops:.2f}")
print(
f" Output: shape={res.output.shape}, nonzero={np.count_nonzero(res.output)}/{res.output.size}"
)
ref = cpu_conv2d_fwd(x, w, prob_2d)
diff = np.abs(res.output.astype(np.float32) - ref)
match_2d = np.allclose(res.output.astype(np.float32), ref, atol=0.05)
print(f" CPU ref: max_abs={diff.max():.6f}, match={match_2d}")
# =========================================================================
# Step 4: Forward 3D -- GPU + non-zero check
# =========================================================================
ok_3d = True
if ("forward", 3) in runners:
print("\n--- Step 4: Forward 3D ---")
prob_3d = GroupedConvProblem(
N=1,
C=64,
K=64,
Di=8,
Hi=8,
Wi=8,
Z=3,
Y=3,
X=3,
pad_d=1,
pad_h=1,
pad_w=1,
direction="forward",
)
prob_3d.print_problem()
x3 = np.random.uniform(-0.5, 0.5, prob_3d.input_shape()).astype(np_dtype)
w3 = np.random.uniform(-0.5, 0.5, prob_3d.weight_shape()).astype(np_dtype)
res3 = runners[("forward", 3)].run(x3, w3, prob_3d)
nz = np.count_nonzero(res3.output)
ok_3d = res3.success and nz > 0
print(f" Time: {res3.time_ms:.4f} ms")
print(f" TFLOPS: {res3.tflops:.2f}")
print(f" NonZero: {nz}/{res3.output.size}")
for r in runners.values():
r.cleanup()
passed = res.success and match_2d and ok_3d
print("\n" + "=" * 70)
print(f" Forward 2D: {'PASS' if match_2d else 'FAIL'} (CPU validated)")
print(f" Forward 3D: {'PASS' if ok_3d else 'FAIL'} (non-zero check)")
print(f" JIT build: {jit_s:.1f}s")
print(f" Status: {'PASS' if passed else 'FAIL'}")
print("=" * 70)
return 0 if passed else 1
if __name__ == "__main__":
sys.exit(main())

View File: 03_bwd_data.py

@@ -0,0 +1,214 @@
#!/usr/bin/env python3
# Copyright (c) Advanced Micro Devices, Inc., or its affiliates.
# SPDX-License-Identifier: MIT
"""
Example 03: Backward Data Convolution (2D + 3D)
dX = ConvBwdData(dY, W)
Declares backward-data kernels with explicit parameters,
builds a registry, JIT compiles, runs on GPU, and validates
against a CPU reference.
Usage:
python3 03_bwd_data.py
"""
import sys
import argparse
import time
import numpy as np
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent / "python"))
from grouped_conv_utils import (
GroupedConvKernelConfig,
GroupedConvProblem,
GroupedConvRegistry,
detect_gpu_arch,
)
def cpu_conv2d_bwd_data(dy, wei, prob):
"""CPU ref: compute dX from dY and W."""
N, Ho, Wo, G, Kpg = dy.shape
_, _, Y, X, C = wei.shape
Hi, Wi = prob.Hi, prob.Wi
dx = np.zeros((N, Hi, Wi, G, C), dtype=np.float32)
for n in range(N):
for g in range(G):
for hi in range(Hi):
for wi in range(Wi):
for c in range(C):
s = 0.0
for y in range(Y):
for x in range(X):
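                                # Invert the forward map hi = ho*stride_h - pad_h + y:
                                # dY[.., ho, wo, ..] contributes to dX[.., hi, wi, ..]
                                # only when (hi + pad - y) is a non-negative
                                # multiple of the stride.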
ho = hi + prob.pad_h - y
wo = wi + prob.pad_w - x
if ho % prob.stride_h == 0 and wo % prob.stride_w == 0:
ho //= prob.stride_h
wo //= prob.stride_w
if 0 <= ho < Ho and 0 <= wo < Wo:
for k in range(Kpg):
s += float(dy[n, ho, wo, g, k]) * float(
wei[g, k, y, x, c]
)
dx[n, hi, wi, g, c] = s
return dx
def main():
parser = argparse.ArgumentParser(description="Backward Data (2D + 3D)")
parser.add_argument("--arch", default=detect_gpu_arch())
parser.add_argument("--dtype", default="fp16", choices=["fp16", "bf16"])
parser.add_argument("--workers", type=int, default=0)
args = parser.parse_args()
arch = args.arch
print("=" * 70)
print("Example 03: Backward Data Convolution (2D + 3D)")
print("=" * 70)
print(f"\n Arch: {arch}, Dtype: {args.dtype}")
print(" dX = ConvBwdData(dY, W)")
# =========================================================================
# Step 1: Declare bwd_data kernels
# =========================================================================
print("\n--- Step 1: Declare BwdData Kernels ---")
reg = GroupedConvRegistry("bwd_data_conv")
# BwdData 2D: compv3, 128x128 tile
reg.add(
GroupedConvKernelConfig(
variant="bwd_data",
ndim_spatial=2,
arch=arch,
dtype=args.dtype,
tile_m=1,
tile_n=128,
tile_k=128,
wave_m=2,
wave_n=2,
wave_k=1,
warp_tile_m=32,
warp_tile_n=32,
warp_tile_k=16,
pipeline="compv3",
scheduler="intrawave",
epilogue="cshuffle",
vector_size_a=4,
vector_size_b=8,
vector_size_c=8,
block_per_cu=1,
)
)
# BwdData 3D: compv3, 64x64 tile
reg.add(
GroupedConvKernelConfig(
variant="bwd_data",
ndim_spatial=3,
arch=arch,
dtype=args.dtype,
tile_m=1,
tile_n=64,
tile_k=64,
wave_m=1,
wave_n=4,
wave_k=1,
warp_tile_m=16,
warp_tile_n=16,
warp_tile_k=32,
pipeline="compv3",
scheduler="intrawave",
epilogue="cshuffle",
vector_size_a=4,
vector_size_b=8,
vector_size_c=8,
block_per_cu=1,
)
)
reg.print_registry()
# =========================================================================
# Step 2: JIT build
# =========================================================================
print("\n--- Step 2: JIT Build ---")
workers = args.workers if args.workers > 0 else None
t0 = time.perf_counter()
runners = reg.build(verbose=False, max_workers=workers)
jit_s = time.perf_counter() - t0
print(f" Built {len(runners)} runners in {jit_s:.1f}s")
if ("bwd_data", 2) not in runners:
print(" ERROR: bwd_data 2D JIT failed")
return 1
np_dtype = np.float16 if args.dtype in ["fp16", "bf16"] else np.float32
# =========================================================================
# Step 3: BwdData 2D -- GPU + CPU reference
# =========================================================================
print("\n--- Step 3: Backward Data 2D ---")
prob = GroupedConvProblem(
N=1, C=32, K=32, Hi=8, Wi=8, Y=3, X=3, pad_h=1, pad_w=1, direction="bwd_data"
)
prob.print_problem()
dy = np.random.uniform(-0.5, 0.5, prob.output_shape()).astype(np_dtype)
w = np.random.uniform(-0.5, 0.5, prob.weight_shape()).astype(np_dtype)
res = runners[("bwd_data", 2)].run(dy, w, prob)
print(f" Time: {res.time_ms:.4f} ms")
print(f" TFLOPS: {res.tflops:.2f}")
print(f" NonZero: {np.count_nonzero(res.output)}/{res.output.size}")
ref = cpu_conv2d_bwd_data(dy, w, prob)
diff = np.abs(res.output.astype(np.float32) - ref)
match_2d = np.allclose(res.output.astype(np.float32), ref, atol=0.1)
print(f" CPU ref: max_abs={diff.max():.6f}, match={match_2d}")
# =========================================================================
# Step 4: BwdData 3D -- GPU + non-zero check
# =========================================================================
ok_3d = True
if ("bwd_data", 3) in runners:
print("\n--- Step 4: Backward Data 3D ---")
prob3 = GroupedConvProblem(
N=1,
C=32,
K=32,
Di=6,
Hi=6,
Wi=6,
Z=3,
Y=3,
X=3,
pad_d=1,
pad_h=1,
pad_w=1,
direction="bwd_data",
)
dy3 = np.random.uniform(-0.5, 0.5, prob3.output_shape()).astype(np_dtype)
w3 = np.random.uniform(-0.5, 0.5, prob3.weight_shape()).astype(np_dtype)
res3 = runners[("bwd_data", 3)].run(dy3, w3, prob3)
nz = np.count_nonzero(res3.output)
ok_3d = res3.success and nz > 0
print(f" Time: {res3.time_ms:.4f} ms, NonZero: {nz}/{res3.output.size}")
for r in runners.values():
r.cleanup()
passed = res.success and match_2d and ok_3d
print("\n" + "=" * 70)
print(f" BwdData 2D: {'PASS' if match_2d else 'FAIL'} (CPU validated)")
print(f" BwdData 3D: {'PASS' if ok_3d else 'FAIL'}")
print(f" Status: {'PASS' if passed else 'FAIL'}")
print("=" * 70)
return 0 if passed else 1
if __name__ == "__main__":
sys.exit(main())

View File: 04_bwd_weight.py

@@ -0,0 +1,224 @@
#!/usr/bin/env python3
# Copyright (c) Advanced Micro Devices, Inc., or its affiliates.
# SPDX-License-Identifier: MIT
"""
Example 04: Backward Weight Convolution (2D + 3D)
dW = ConvBwdWeight(X, dY)
Declares backward-weight kernels with explicit parameters,
builds a registry, JIT compiles, runs on GPU, and validates
against a CPU reference.
Usage:
python3 04_bwd_weight.py
"""
import sys
import argparse
import time
import numpy as np
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent / "python"))
from grouped_conv_utils import (
GroupedConvKernelConfig,
GroupedConvProblem,
GroupedConvRegistry,
detect_gpu_arch,
)
def cpu_conv2d_bwd_weight(x, dy, prob):
"""CPU ref: compute dW from X and dY."""
N, Hi, Wi, G, C = x.shape
_, Ho, Wo, _, Kpg = dy.shape
Y, X_ = prob.Y, prob.X
dw = np.zeros((G, Kpg, Y, X_, C), dtype=np.float32)
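    # dW[g, k, y, x, c] = sum over (n, ho, wo) of
    #   X[n, ho*stride_h - pad_h + y, wo*stride_w - pad_w + x, g, c]
    #   * dY[n, ho, wo, g, k]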
for g in range(G):
for k in range(Kpg):
for y in range(Y):
for xf in range(X_):
for c in range(C):
s = 0.0
for n in range(N):
for ho in range(Ho):
for wo in range(Wo):
hi = ho * prob.stride_h - prob.pad_h + y
wi = wo * prob.stride_w - prob.pad_w + xf
if 0 <= hi < Hi and 0 <= wi < Wi:
s += float(x[n, hi, wi, g, c]) * float(
dy[n, ho, wo, g, k]
)
dw[g, k, y, xf, c] = s
return dw
def main():
parser = argparse.ArgumentParser(description="Backward Weight (2D + 3D)")
parser.add_argument("--arch", default=detect_gpu_arch())
parser.add_argument("--dtype", default="fp16", choices=["fp16", "bf16"])
parser.add_argument("--workers", type=int, default=0)
parser.add_argument(
"--split-k", type=int, default=1, help="Split-K factor for bwd_weight (k_batch)"
)
args = parser.parse_args()
arch = args.arch
print("=" * 70)
print("Example 04: Backward Weight Convolution (2D + 3D)")
print("=" * 70)
print(f"\n Arch: {arch}, Dtype: {args.dtype}")
print(" dW = ConvBwdWeight(X, dY)")
# =========================================================================
# Step 1: Declare bwd_weight kernels
# =========================================================================
print("\n--- Step 1: Declare BwdWeight Kernels ---")
reg = GroupedConvRegistry("bwd_weight_conv")
# BwdWeight 2D: compv3, 128x128 tile
reg.add(
GroupedConvKernelConfig(
variant="bwd_weight",
ndim_spatial=2,
arch=arch,
dtype=args.dtype,
tile_m=1,
tile_n=128,
tile_k=128,
wave_m=2,
wave_n=2,
wave_k=1,
warp_tile_m=32,
warp_tile_n=32,
warp_tile_k=16,
pipeline="compv3",
scheduler="intrawave",
epilogue="cshuffle",
vector_size_a=4,
vector_size_b=8,
vector_size_c=8,
block_per_cu=1,
)
)
# BwdWeight 3D: compv3, 64x64 tile
reg.add(
GroupedConvKernelConfig(
variant="bwd_weight",
ndim_spatial=3,
arch=arch,
dtype=args.dtype,
tile_m=1,
tile_n=64,
tile_k=64,
wave_m=1,
wave_n=4,
wave_k=1,
warp_tile_m=16,
warp_tile_n=16,
warp_tile_k=32,
pipeline="compv3",
scheduler="intrawave",
epilogue="cshuffle",
vector_size_a=4,
vector_size_b=8,
vector_size_c=8,
block_per_cu=1,
)
)
reg.print_registry()
# =========================================================================
# Step 2: JIT build
# =========================================================================
print("\n--- Step 2: JIT Build ---")
workers = args.workers if args.workers > 0 else None
t0 = time.perf_counter()
runners = reg.build(verbose=False, max_workers=workers)
jit_s = time.perf_counter() - t0
print(f" Built {len(runners)} runners in {jit_s:.1f}s")
if ("bwd_weight", 2) not in runners:
print(" ERROR: bwd_weight 2D JIT failed")
return 1
np_dtype = np.float16 if args.dtype in ["fp16", "bf16"] else np.float32
# =========================================================================
# Step 3: BwdWeight 2D -- GPU + CPU reference
# =========================================================================
print("\n--- Step 3: Backward Weight 2D ---")
prob = GroupedConvProblem(
N=1,
C=32,
K=32,
Hi=8,
Wi=8,
Y=3,
X=3,
pad_h=1,
pad_w=1,
direction="bwd_weight",
split_k=args.split_k,
)
prob.print_problem()
x = np.random.uniform(-0.5, 0.5, prob.input_shape()).astype(np_dtype)
dy = np.random.uniform(-0.5, 0.5, prob.output_shape()).astype(np_dtype)
res = runners[("bwd_weight", 2)].run(x, dy, prob)
print(f" Time: {res.time_ms:.4f} ms")
print(f" TFLOPS: {res.tflops:.2f}")
print(f" NonZero: {np.count_nonzero(res.output)}/{res.output.size}")
ref = cpu_conv2d_bwd_weight(x, dy, prob)
diff = np.abs(res.output.astype(np.float32) - ref)
match_2d = np.allclose(res.output.astype(np.float32), ref, atol=0.5)
print(f" CPU ref: max_abs={diff.max():.6f}, match={match_2d}")
# =========================================================================
# Step 4: BwdWeight 3D -- GPU + non-zero check
# =========================================================================
ok_3d = True
if ("bwd_weight", 3) in runners:
print("\n--- Step 4: Backward Weight 3D ---")
prob3 = GroupedConvProblem(
N=1,
C=32,
K=32,
Di=6,
Hi=6,
Wi=6,
Z=3,
Y=3,
X=3,
pad_d=1,
pad_h=1,
pad_w=1,
direction="bwd_weight",
)
x3 = np.random.uniform(-0.5, 0.5, prob3.input_shape()).astype(np_dtype)
dy3 = np.random.uniform(-0.5, 0.5, prob3.output_shape()).astype(np_dtype)
res3 = runners[("bwd_weight", 3)].run(x3, dy3, prob3)
nz = np.count_nonzero(res3.output)
ok_3d = res3.success and nz > 0
print(f" Time: {res3.time_ms:.4f} ms, NonZero: {nz}/{res3.output.size}")
for r in runners.values():
r.cleanup()
passed = res.success and match_2d and ok_3d
print("\n" + "=" * 70)
print(f" BwdWeight 2D: {'PASS' if match_2d else 'FAIL'} (CPU validated)")
print(f" BwdWeight 3D: {'PASS' if ok_3d else 'FAIL'}")
print(f" Status: {'PASS' if passed else 'FAIL'}")
print("=" * 70)
return 0 if passed else 1
if __name__ == "__main__":
sys.exit(main())

View File: 05_benchmark.py

@@ -0,0 +1,318 @@
#!/usr/bin/env python3
# Copyright (c) Advanced Micro Devices, Inc., or its affiliates.
# SPDX-License-Identifier: MIT
"""
Example 05: Multi-Problem GPU Benchmark
Declares kernels with explicit tile/wave/warp/pipeline parameters for
all directions, builds registries, JIT compiles, and benchmarks across
ResNet-like problem sizes with configurable warmup/repeat.
Usage:
python3 05_benchmark.py
python3 05_benchmark.py --warmup 3 --repeat 10
python3 05_benchmark.py --workers 4
"""
import sys
import argparse
import time
import numpy as np
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent / "python"))
from grouped_conv_utils import (
GroupedConvKernelConfig,
GroupedConvProblem,
GroupedConvRegistry,
detect_gpu_arch,
)
def compute_bytes(prob, dtype_bytes=2):
    """Total bytes moved: input + weight + output tensors."""
    in_elems = int(np.prod(prob.input_shape()))
    wei_elems = int(np.prod(prob.weight_shape()))
    out_elems = int(np.prod(prob.output_shape()))
    return (in_elems + wei_elems + out_elems) * dtype_bytes
def main():
parser = argparse.ArgumentParser(description="Multi-Problem GPU Benchmark")
parser.add_argument("--arch", default=detect_gpu_arch())
parser.add_argument("--dtype", default="fp16", choices=["fp16", "bf16"])
parser.add_argument("--warmup", type=int, default=3, help="Warmup iterations")
parser.add_argument("--repeat", type=int, default=5, help="Benchmark iterations")
parser.add_argument(
"--workers", type=int, default=0, help="Max JIT workers (0=auto)"
)
args = parser.parse_args()
print("=" * 70)
print("Example 05: Multi-Problem GPU Benchmark")
print("=" * 70)
print(f"\n Arch: {args.arch}, Dtype: {args.dtype}")
print(f" Warmup: {args.warmup}, Repeat: {args.repeat}")
# =========================================================================
# Step 1: Declare all kernels with explicit parameters
# =========================================================================
print("\n--- Step 1: Declare Kernels ---")
reg = GroupedConvRegistry("benchmark")
# Forward 2D: compv4, 128x128 tile
reg.add(
GroupedConvKernelConfig(
variant="forward",
ndim_spatial=2,
arch=args.arch,
dtype=args.dtype,
tile_m=1,
tile_n=128,
tile_k=128,
wave_m=2,
wave_n=2,
wave_k=1,
warp_tile_m=32,
warp_tile_n=32,
warp_tile_k=16,
pipeline="compv4",
scheduler="intrawave",
epilogue="cshuffle",
vector_size_a=4,
vector_size_b=8,
vector_size_c=8,
block_per_cu=1,
)
)
# Forward 3D: compv3, 64x64 tile
reg.add(
GroupedConvKernelConfig(
variant="forward",
ndim_spatial=3,
arch=args.arch,
dtype=args.dtype,
tile_m=1,
tile_n=64,
tile_k=64,
wave_m=1,
wave_n=4,
wave_k=1,
warp_tile_m=16,
warp_tile_n=16,
warp_tile_k=32,
pipeline="compv3",
scheduler="intrawave",
epilogue="cshuffle",
vector_size_a=4,
vector_size_b=8,
vector_size_c=8,
block_per_cu=1,
)
)
# BwdData 2D: compv3, 128x128 tile
reg.add(
GroupedConvKernelConfig(
variant="bwd_data",
ndim_spatial=2,
arch=args.arch,
dtype=args.dtype,
tile_m=1,
tile_n=128,
tile_k=128,
wave_m=2,
wave_n=2,
wave_k=1,
warp_tile_m=32,
warp_tile_n=32,
warp_tile_k=16,
pipeline="compv3",
scheduler="intrawave",
epilogue="cshuffle",
vector_size_a=4,
vector_size_b=8,
vector_size_c=8,
block_per_cu=1,
)
)
# BwdWeight 2D: compv3, 128x128 tile
reg.add(
GroupedConvKernelConfig(
variant="bwd_weight",
ndim_spatial=2,
arch=args.arch,
dtype=args.dtype,
tile_m=1,
tile_n=128,
tile_k=128,
wave_m=2,
wave_n=2,
wave_k=1,
warp_tile_m=32,
warp_tile_n=32,
warp_tile_k=16,
pipeline="compv3",
scheduler="intrawave",
epilogue="cshuffle",
vector_size_a=4,
vector_size_b=8,
vector_size_c=8,
block_per_cu=1,
)
)
reg.print_registry()
# =========================================================================
# Step 2: JIT build
# =========================================================================
print("\n--- Step 2: JIT Build ---")
workers = args.workers if args.workers > 0 else None
t0 = time.perf_counter()
runner_by_key = reg.build(verbose=False, max_workers=workers)
jit_s = time.perf_counter() - t0
for key in [("forward", 2), ("forward", 3), ("bwd_data", 2), ("bwd_weight", 2)]:
tag = "OK" if key in runner_by_key else "FAILED"
print(f" {key[0]:12s} {key[1]}D: {tag}")
print(f" JIT build time: {jit_s:.3f} s")
missing = [
k
for k in [("forward", 2), ("forward", 3), ("bwd_data", 2), ("bwd_weight", 2)]
if k not in runner_by_key
]
if missing:
print(f"\n ERROR: missing {missing}")
return 1
np_dtype = np.float16 if args.dtype in ["fp16", "bf16"] else np.float32
def bench_run(runner, inp, wei, prob):
for _ in range(args.warmup):
runner.run(inp, wei, prob)
times = []
for _ in range(args.repeat):
r = runner.run(inp, wei, prob)
if r.success:
times.append(r.time_ms)
if not times:
return 0.0, 0.0
return min(times), sum(times) / len(times)
# =========================================================================
# Step 3: 2D Forward benchmark
# =========================================================================
print("\n--- Step 3: Forward 2D Benchmark ---")
print(
f"{'Problem':<18} {'N':>3} {'C':>4} {'K':>4} {'H':>3} {'W':>3} "
f"{'F':>3} {'Min(ms)':>9} {'Avg(ms)':>9} {'TFLOPS':>8} {'GB/s':>8}"
)
print("-" * 85)
all_ok = True
for label, n, c, k, h, w, y, x, s, p in [
("ResNet-stage2", 1, 64, 64, 56, 56, 3, 3, 1, 1),
("ResNet-stage3", 1, 128, 128, 28, 28, 3, 3, 1, 1),
("ResNet-stage4", 1, 256, 256, 14, 14, 3, 3, 1, 1),
("ResNet-stage5", 1, 512, 512, 7, 7, 3, 3, 1, 1),
("Pointwise-1x1", 1, 256, 256, 56, 56, 1, 1, 1, 0),
("Batch-8", 8, 64, 128, 56, 56, 3, 3, 1, 1),
("Batch-32", 32, 64, 128, 56, 56, 3, 3, 1, 1),
]:
prob = GroupedConvProblem(
N=n,
C=c,
K=k,
Hi=h,
Wi=w,
Y=y,
X=x,
stride_h=s,
stride_w=s,
pad_h=p,
pad_w=p,
direction="forward",
)
inp = np.random.uniform(-0.3, 0.3, prob.input_shape()).astype(np_dtype)
wei = np.random.uniform(-0.3, 0.3, prob.weight_shape()).astype(np_dtype)
min_ms, avg_ms = bench_run(runner_by_key[("forward", 2)], inp, wei, prob)
if avg_ms > 0:
tflops = prob.flops / (avg_ms * 1e9)
bw = compute_bytes(prob) / (avg_ms * 1e6)
print(
f"{label:<18} {n:>3} {c:>4} {k:>4} {h:>3} {w:>3} "
f"{y}x{x} {min_ms:>9.4f} {avg_ms:>9.4f} {tflops:>8.2f} {bw:>8.1f}"
)
else:
all_ok = False
# =========================================================================
# Step 4: 3D Forward
# =========================================================================
print("\n--- Step 4: Forward 3D ---")
for label, n, c, k, d, h, w, z, y, x in [
("3D-small", 1, 64, 64, 8, 16, 16, 3, 3, 3),
("3D-medium", 1, 64, 128, 16, 32, 32, 3, 3, 3),
]:
prob = GroupedConvProblem(
N=n, C=c, K=k, Di=d, Hi=h, Wi=w, Z=z, Y=y, X=x, direction="forward"
)
inp = np.random.uniform(-0.3, 0.3, prob.input_shape()).astype(np_dtype)
wei = np.random.uniform(-0.3, 0.3, prob.weight_shape()).astype(np_dtype)
min_ms, avg_ms = bench_run(runner_by_key[("forward", 3)], inp, wei, prob)
if avg_ms > 0:
tflops = prob.flops / (avg_ms * 1e9)
print(f" {label:<14} {min_ms:.4f} / {avg_ms:.4f} ms {tflops:.2f} TFLOPS")
# =========================================================================
# Step 5: Backward directions
# =========================================================================
print("\n--- Step 5: Backward Directions ---")
for label, direction in [
("bwd_data ResNet-s3", "bwd_data"),
("bwd_weight ResNet-s3", "bwd_weight"),
]:
prob = GroupedConvProblem(
N=1,
C=128,
K=128,
Hi=28,
Wi=28,
Y=3,
X=3,
stride_h=1,
stride_w=1,
pad_h=1,
pad_w=1,
direction=direction,
)
inp = np.random.uniform(-0.3, 0.3, prob.input_shape()).astype(np_dtype)
wei = np.random.uniform(-0.3, 0.3, prob.weight_shape()).astype(np_dtype)
min_ms, avg_ms = bench_run(runner_by_key[(direction, 2)], inp, wei, prob)
if avg_ms > 0:
tflops = prob.flops / (avg_ms * 1e9)
print(
f" {label:<14} {direction:>12} {min_ms:.4f} / {avg_ms:.4f} ms {tflops:.2f} TFLOPS"
)
for runner in runner_by_key.values():
runner.cleanup()
print("\n" + "=" * 70)
print(f" JIT build: {jit_s:.3f} s")
print(f" Warmup: {args.warmup}, Repeat: {args.repeat}")
print(f" Status: {'PASS' if all_ok else 'FAIL'}")
print("=" * 70)
return 0 if all_ok else 1
if __name__ == "__main__":
sys.exit(main())

View File: 06_registry_json.py

@@ -0,0 +1,274 @@
#!/usr/bin/env python3
# Copyright (c) Advanced Micro Devices, Inc., or its affiliates.
# SPDX-License-Identifier: MIT
"""
Example 06: Registry, Heuristic Selection & JSON Export
Declares multiple kernel configurations with different tile sizes,
builds a registry, demonstrates heuristic runtime kernel selection,
JSON round-trip, and GPU execution.
Usage:
python3 06_registry_json.py
python3 06_registry_json.py --workers 4
"""
import sys
import time
import argparse
import numpy as np
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent / "python"))
from grouped_conv_utils import (
GroupedConvKernelConfig,
GroupedConvProblem,
GroupedConvRegistry,
detect_gpu_arch,
)
def conv_heuristic(problem):
    """Tile-size preference order: larger tiles for larger output spatial extents."""
    spatial = problem.Ho * problem.Wo
    if spatial > 400:
        return ["256", "128", "64"]
    return ["64", "128", "256"]
def main():
parser = argparse.ArgumentParser(description="Registry, Heuristic & JSON")
parser.add_argument("--arch", default=detect_gpu_arch())
parser.add_argument("--dtype", default="fp16", choices=["fp16", "bf16"])
parser.add_argument("--workers", type=int, default=0)
args = parser.parse_args()
arch = args.arch
print("=" * 70)
print("Example 06: Registry, Heuristic Selection & JSON Export")
print("=" * 70)
print(f"\n Arch: {arch}, Dtype: {args.dtype}")
# Step 1: Declare kernels with full explicit parameters
print("\n--- Step 1: Declare Kernels + Build Registry ---")
reg = GroupedConvRegistry("conv_tiles")
reg.add(
GroupedConvKernelConfig(
variant="forward",
ndim_spatial=2,
arch=arch,
dtype=args.dtype,
tile_m=1,
tile_n=256,
tile_k=256,
wave_m=2,
wave_n=2,
wave_k=1,
warp_tile_m=32,
warp_tile_n=32,
warp_tile_k=16,
pipeline="compv3",
scheduler="intrawave",
epilogue="cshuffle",
vector_size_a=4,
vector_size_b=8,
vector_size_c=8,
block_per_cu=1,
num_wave_groups=1,
num_groups_to_merge=1,
)
)
reg.add(
GroupedConvKernelConfig(
variant="forward",
ndim_spatial=2,
arch=arch,
dtype=args.dtype,
tile_m=1,
tile_n=128,
tile_k=128,
wave_m=2,
wave_n=2,
wave_k=1,
warp_tile_m=32,
warp_tile_n=32,
warp_tile_k=16,
pipeline="compv4",
scheduler="intrawave",
epilogue="cshuffle",
vector_size_a=4,
vector_size_b=8,
vector_size_c=8,
block_per_cu=1,
num_wave_groups=1,
num_groups_to_merge=1,
)
)
reg.add(
GroupedConvKernelConfig(
variant="forward",
ndim_spatial=2,
arch=arch,
dtype=args.dtype,
tile_m=1,
tile_n=64,
tile_k=64,
wave_m=1,
wave_n=4,
wave_k=1,
warp_tile_m=16,
warp_tile_n=16,
warp_tile_k=32,
pipeline="compv3",
scheduler="intrawave",
epilogue="cshuffle",
vector_size_a=4,
vector_size_b=8,
vector_size_c=8,
block_per_cu=1,
num_wave_groups=1,
num_groups_to_merge=1,
)
)
reg.print_registry()
# Step 2: Heuristic kernel selection
print("\n--- Step 2: Heuristic Kernel Selection ---")
problems = [
(
"small_7x7",
GroupedConvProblem(
N=1,
C=512,
K=512,
Hi=7,
Wi=7,
Y=3,
X=3,
pad_h=1,
pad_w=1,
direction="forward",
),
),
(
"medium_14x14",
GroupedConvProblem(
N=1,
C=256,
K=256,
Hi=14,
Wi=14,
Y=3,
X=3,
pad_h=1,
pad_w=1,
direction="forward",
),
),
(
"large_56x56",
GroupedConvProblem(
N=1,
C=64,
K=128,
Hi=56,
Wi=56,
Y=3,
X=3,
pad_h=1,
pad_w=1,
direction="forward",
),
),
]
print(f" {'Problem':<16} {'Spatial':>8} {'Selected Kernel':<50}")
print(f" {'-' * 74}")
for label, prob in problems:
selected = reg.select(prob, heuristic=conv_heuristic)
spatial = prob.Ho * prob.Wo
sel_name = selected.name if selected else "none"
print(f" {label:<16} {spatial:>8} {sel_name:<50}")
# Step 3: JSON round-trip
print("\n--- Step 3: JSON Round-Trip ---")
json_str = reg.to_json()
print(f" Exported: {len(json_str)} bytes, {len(reg)} kernels")
imported = GroupedConvRegistry.from_json(json_str)
print(f" Imported: {len(imported)} kernels")
orig = reg.kernels[0]
imp = imported.kernels[0]
rt_ok = (
orig.vector_size_a == imp.vector_size_a
and orig.block_per_cu == imp.block_per_cu
and orig.tile_n == imp.tile_n
)
print(f" Full fields round-trip: {'OK' if rt_ok else 'FAIL'}")
# Step 4: JIT build + GPU execution
print("\n--- Step 4: JIT Build + GPU Execution ---")
workers = args.workers if args.workers > 0 else None
jit_reg = GroupedConvRegistry("jit_conv")
jit_reg.add(
GroupedConvKernelConfig(
variant="forward",
ndim_spatial=2,
arch=arch,
dtype=args.dtype,
tile_m=1,
tile_n=128,
tile_k=128,
wave_m=2,
wave_n=2,
wave_k=1,
warp_tile_m=32,
warp_tile_n=32,
warp_tile_k=16,
pipeline="compv4",
scheduler="intrawave",
epilogue="cshuffle",
vector_size_a=4,
vector_size_b=8,
vector_size_c=8,
)
)
t0 = time.perf_counter()
runners = jit_reg.build(verbose=False, max_workers=workers)
jit_s = time.perf_counter() - t0
if ("forward", 2) not in runners:
print(" JIT build failed")
return 1
runner = runners[("forward", 2)]
print(f" JIT build: {jit_s:.3f} s")
print(f" Library: {runner.library_path}")
prob = GroupedConvProblem(
N=1, C=128, K=128, Hi=16, Wi=16, Y=3, X=3, pad_h=1, pad_w=1, direction="forward"
)
np_dtype = np.float16 if args.dtype in ["fp16", "bf16"] else np.float32
inp = np.random.uniform(-0.3, 0.3, prob.input_shape()).astype(np_dtype)
wei = np.random.uniform(-0.3, 0.3, prob.weight_shape()).astype(np_dtype)
res = runner.run(inp, wei, prob)
runner.cleanup()
    if res.success:
        print(f" Time: {res.time_ms:.4f} ms")
        print(f" TFLOPS: {res.tflops:.2f}")
        print(f" NonZero: {np.count_nonzero(res.output)}/{res.output.size}")
    else:
        print(f" GPU execution failed: {res.error}")
gpu_ok = res.success
print("\n" + "=" * 70)
print(f" Registry: {len(reg)} kernels (3 tile configs)")
print(" Heuristic: spatial-based selection demonstrated")
print(f" JSON: round-trip {'OK' if rt_ok else 'FAIL'}")
print(f" GPU: {'OK' if gpu_ok else 'FAIL'}")
print(f" Status: {'PASS' if gpu_ok and rt_ok else 'FAIL'}")
print("=" * 70)
return 0 if gpu_ok and rt_ok else 1
if __name__ == "__main__":
sys.exit(main())