Files
composable_kernel/dispatcher/examples/gemm/python/05_numpy_integration.py
Vidyasagar Ananthan 920acd2c12 [rocm-libraries] ROCm/rocm-libraries#5168 (commit 8b5afcb)
[CK] [CK_Tile] Add GroupConv to Kernel Dispatcher

## Motivation

This PR adds CK Tile grouped convolution (forward, backward-data, and
backward-weight) support to the kernel dispatcher, matching the existing
dispatcher GEMM infrastructure in both architecture and usability. The
dispatcher provides a unified kernel dispatch system with C++ and Python
frontends, and until now it supported only GEMM operations. With this PR,
framework integrators can use the same declarative kernel workflow for
convolutions as they do for GEMM: declare kernels, JIT-build a registry,
select kernels from the registry at runtime, and dispatch to the GPU (a
condensed sketch of that workflow follows below). Future PRs will add
runtime kernel-selection heuristics that autotune kernel parameters for a
given (problem, hardware arch) pair.
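
For orientation, here is a condensed sketch of that declare/build/dispatch
workflow, lifted from the GEMM Python example shown further down in this page
(`KernelConfig`, `setup_gemm_dispatcher`, `dispatcher.run`, `detect_gpu_arch`,
and `cleanup_gemm` are taken from that example; shapes and the registry name
are arbitrary):

```python
import numpy as np
from ctypes_utils import (
    KernelConfig, setup_gemm_dispatcher, cleanup_gemm, detect_gpu_arch,
)

# Declare a kernel configuration and JIT-build a registry around it.
config = KernelConfig(
    dtype_a="fp16", dtype_b="fp16", dtype_c="fp16",
    tile_m=128, tile_n=128, tile_k=32,
    gfx_arch=detect_gpu_arch(),
)
setup = setup_gemm_dispatcher(config, registry_name="demo", verbose=False)
if not setup.success:
    raise RuntimeError(setup.error)

# Select within the registry and dispatch to the GPU at runtime.
A = np.random.randn(1024, 512).astype(np.float16)
B = np.random.randn(512, 256).astype(np.float16)
result = setup.dispatcher.run(A, B, 1024, 256, 512)  # run(A, B, M, N, K)
print(result.output.shape, result.time_ms)

cleanup_gemm()
```

The conv frontend added in this PR follows the same shape; only the config,
problem descriptor, and run signature differ.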

## Technical Details

Grouped convolution support has been added to the CK Tile dispatcher:

- `generated_conv_backend.hpp` enables `dispatcher.run(in, wei, out, problem)`
  for all six conv variants (fwd/bwd-data/bwd-weight x 2D/3D), with runtime
  heuristic kernel selection and a `GroupedConvKernelKey` carrying the full
  set of `ConvConfigBase` fields.
- The Python side adds parallel JIT compilation via `registry.build(max_workers)`
  and heuristic selection via `registry.select()`; a sketch of this flow
  follows the table below.
- 7 C++ and 6 Python examples cover all directions, with CPU reference
  validation.
- Shared infrastructure improvements: a `BaseRegistry` CRTP base and
  structured exceptions.

As a sanity check, the JIT compile time for a single kernel is unchanged, and
building multiple kernels now parallelizes across workers:

| Kernels | 1 worker | 8 workers |
|--------:|---------:|----------:|
| 1       | 7.7 s    | 7.7 s     |
| 2       | 15.9 s   | 8.2 s     |
| 4       | 33.4 s   | 9.7 s     |
| 6       | 52.3 s   | 10.2 s    |
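
To make the Python-side flow concrete, here is a rough sketch of how the
pieces above might be driven. Only `build(max_workers)`, `select()`, and
`run(in, wei, out, problem)` are named in this PR; the module, class, and
field names below (`ck_tile_dispatcher`, `GroupedConvConfig`,
`GroupedConvProblem`, `GroupedConvRegistry`, the layout) are placeholders for
illustration, not the actual API.

```python
import numpy as np

# Hypothetical imports: names are illustrative only, not the real frontend.
from ck_tile_dispatcher import (
    GroupedConvConfig, GroupedConvProblem, GroupedConvRegistry,
)

# Declare a 2D forward grouped-conv kernel and JIT-build the registry.
config = GroupedConvConfig(direction="fwd", spatial_dims=2, dtype="fp16")
registry = GroupedConvRegistry([config])
registry.build(max_workers=8)  # parallel JIT path measured in the table above

# Describe the problem and let the heuristic pick a kernel from the registry.
problem = GroupedConvProblem(n=32, groups=1, c=64, k=128,
                             hw=(56, 56), filter_hw=(1, 1))
dispatcher = registry.select(problem)

# Dispatch: input, weights, output, and the problem descriptor.
# NHWGC/GKYXC-style layouts are assumed here purely for concreteness.
inp = np.random.randn(32, 56, 56, 1, 64).astype(np.float16)
wei = np.random.randn(1, 128, 1, 1, 64).astype(np.float16)
out = np.empty((32, 56, 56, 1, 128), dtype=np.float16)
dispatcher.run(inp, wei, out, problem)
```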

## Test Plan

145 ephemeral unit tests have been added to test basic functionality.
All 30 examples/integration tests run end-to-end on gfx950 (MI350): 7
C++ conv, 7 C++ GEMM, 6 Python conv, 10 Python GEMM. CPU reference
validation for forward, backward-data, and backward-weight (2D) passes in
both the C++ and Python examples.

## Test Result

All 30 examples pass. Peak performance: 132 TFLOPS (batch-32 forward conv at
56x56), 53 TFLOPS (pointwise 1x1). CPU reference accuracy: max_abs_diff < 0.002
for all directions (fp16 results vs. fp32 reference).
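
As an aside, a minimal sketch of the kind of accuracy check this number refers
to (matmul-shaped here for brevity; the 2e-3 tolerance mirrors the figure
above, and the helper name is ours, not from the repo):

```python
import numpy as np

def max_abs_diff_vs_fp32(gpu_out: np.ndarray, a: np.ndarray, b: np.ndarray) -> float:
    """Max absolute difference of an fp16 GPU result against an fp32 CPU reference."""
    ref = np.matmul(a.astype(np.float32), b.astype(np.float32))
    return float(np.abs(gpu_out.astype(np.float32) - ref).max())

# Usage: assert max_abs_diff_vs_fp32(C_gpu, A, B) < 2e-3
```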

## Submission Checklist

- [x] Look over the contributing guidelines at
https://github.com/ROCm/ROCm/blob/develop/CONTRIBUTING.md#pull-requests.
2026-04-09 17:39:35 +00:00


#!/usr/bin/env python3
# Copyright (c) Advanced Micro Devices, Inc., or its affiliates.
# SPDX-License-Identifier: MIT
"""
Example 05: NumPy Integration

Shows how to create a GPU-accelerated matmul wrapper.

Usage:
    python3 05_numpy_integration.py
    python3 05_numpy_integration.py --help
    python3 05_numpy_integration.py --dtype bf16
"""
import sys
import argparse
from pathlib import Path

sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent / "python"))

import numpy as np

from ctypes_utils import (
    KernelConfig,
    Dispatcher,
    setup_gemm_dispatcher,
    cleanup_gemm,
    reset_for_example,
    detect_gpu_arch,
)


class GPUMatmul:
    """GPU-accelerated matrix multiplication wrapper."""

    def __init__(self, dispatcher: Dispatcher):
        self.dispatcher = dispatcher

    def __call__(self, A: np.ndarray, B: np.ndarray) -> np.ndarray:
        """Compute C = A @ B on GPU with CPU fallback."""
        M, K = A.shape
        K2, N = B.shape
        if K != K2:
            raise ValueError(f"Dimension mismatch: {A.shape} @ {B.shape}")
        if not self.dispatcher.is_supported(M, N, K):
            return np.matmul(A, B)
        result = self.dispatcher.run(A, B, M, N, K)
        return result.output if result.success else np.matmul(A, B)


def main():
    parser = argparse.ArgumentParser(
        description="NumPy Integration Example - GPU-accelerated matmul wrapper",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  python3 05_numpy_integration.py               # Default FP16
  python3 05_numpy_integration.py --dtype bf16  # BF16 mode
""",
    )
    parser.add_argument(
        "--dtype",
        default="fp16",
        choices=["fp16", "bf16", "fp32"],
        help="Data type (default: fp16)",
    )
    parser.add_argument(
        "--arch",
        default=detect_gpu_arch(),
        help="Target architecture (auto-detected from rocminfo)",
    )
    args = parser.parse_args()

    reset_for_example()

    print("=" * 60)
    print("Example 05: NumPy Integration")
    print("=" * 60)

    # =========================================================================
    # Step 1: Setup dispatcher
    # =========================================================================
    print("\nStep 1: Setup Dispatcher")
    config = KernelConfig(
        dtype_a=args.dtype,
        dtype_b=args.dtype,
        dtype_c=args.dtype,
        tile_m=128,
        tile_n=128,
        tile_k=32,
        gfx_arch=args.arch,
    )
    setup = setup_gemm_dispatcher(config, registry_name="numpy", verbose=True)
    if not setup.success:
        print(f" ERROR: {setup.error}")
        return 1
    dispatcher = setup.dispatcher
    np_dtype = np.float16 if args.dtype in ["fp16", "bf16"] else np.float32

    # =========================================================================
    # Step 2: Create GPU matmul wrapper
    # =========================================================================
    print("\nStep 2: Create GPUMatmul")
    gpu_matmul = GPUMatmul(dispatcher=dispatcher)
    print(" gpu_matmul ready")

    # =========================================================================
    # Step 3: Demo - Simple multiplication using gpu_matmul
    # =========================================================================
    print("\nStep 3: Demo - Simple Multiplication")
    A = np.random.randn(1024, 512).astype(np_dtype) * 0.1
    B = np.random.randn(512, 256).astype(np_dtype) * 0.1

    # Use the gpu_matmul wrapper
    C = gpu_matmul(A, B)
    print(f" gpu_matmul result: {C.shape}, sum={C.sum():.4f}")

    M, K = A.shape
    _, N = B.shape
    result = dispatcher.run(A, B, M, N, K)
    print(f" A: {A.shape}, B: {B.shape} -> C: {result.output.shape}")
    print(f" GPU: {result.time_ms:.4f} ms, {result.tflops:.2f} TFLOPS")

    # =========================================================================
    # Step 4: Demo - FFN block
    # =========================================================================
    print("\nStep 4: Demo - FFN Block")
    batch, hidden, ffn = 128, 768, 3072
    X = np.random.randn(batch, hidden).astype(np_dtype) * 0.02
    W1 = np.random.randn(hidden, ffn).astype(np_dtype) * 0.02
    W2 = np.random.randn(ffn, hidden).astype(np_dtype) * 0.02
    result1 = dispatcher.run(X, W1, batch, ffn, hidden)
    H = result1.output
    result2 = dispatcher.run(H, W2, batch, hidden, ffn)
    print(f" X: {X.shape} -> H: {H.shape} -> Y: {result2.output.shape}")
    print(f" Total: {result1.time_ms + result2.time_ms:.4f} ms")

    # Cleanup
    cleanup_gemm()

    # Summary
    print("\n" + "=" * 60)
    print("NumPy Integration Pattern:")
    print("=" * 60)
    print(" 1. setup_gemm_dispatcher(config)")
    print(" 2. GPUMatmul(dispatcher)")
    print(" 3. C = gpu_matmul(A, B)")
    print("=" * 60)
    return 0


if __name__ == "__main__":
    sys.exit(main())