mirror of
https://github.com/ROCm/composable_kernel.git
synced 2026-05-12 01:10:17 +00:00
[rocm-libraries] ROCm/rocm-libraries#5168 (commit 8b5afcb)
[CK] [CK_Tile] Add GroupConv to Kernel Dispatcher ## Motivation This PR adds CK Tile group convolution (forward, backward-data, backward-weight) support to the kernel dispatcher, matching and unifying with the existing dispatcher GEMM infrastructure in architecture and usability. The dispatcher provides a unified kernel dispatch system with both C++ and Python frontends, and until now only supported GEMM operations. This PR enables framework integrators to use the same declarative kernel workflow for convolutions as they do for GEMM: declare kernels, build a registry JIT, select kernels within the registry at runtime, and dispatch to GPU. Future PRs will include runtime kernel selection heuristics for autotuning of kernel parameters based on (problem, hardware arch). ## Technical Details Grouped convolution support has been added to the CK Tile Dispatcher with generated_conv_backend.hpp enabling dispatcher.run(in, wei, out, problem) for all 6 conv variants (fwd/bwdd/bwdw x 2D/3D), runtime heuristic kernel selection, and GroupedConvKernelKey with full ConvConfigBase fields. Python side adds parallel JIT via registry.build(max_workers) and heuristic registry.select(). Includes 7 C++ and 6 Python examples covering all directions with CPU reference validation, and shared infrastructure improvements (BaseRegistry CRTP, structured exceptions). As a sanity check, JIT compile times for a single kernel remains the same and for multiple kernels there is better parallelism: Kernels | 1 worker | 8 workers 1 | 7.7 s | 7.7 s 2 | 15.9 s | 8.2 s 4 | 33.4 s | 9.7 s 6 | 52.3 s | 10.2 s ## Test Plan 145 ephemeral unit tests have been added to test basic functionality. All 30 examples/integration tests run end-to-end on gfx950 (MI350): 7 C++ conv, 7 C++ GEMM, 6 Python conv, 10 Python GEMM. CPU reference validation for forward, backward-data, and backward-weight (2D) in both C++ and Python examples pass. ## Test Result 30 examples pass. 
Peak performance: 132 TFLOPS (Batch-32 forward 56x56), 53 TFLOPS (pointwise 1x1). CPU reference accuracy: max_abs_diff < 0.002 for all directions (fp16 vs fp32 reference). ## Submission Checklist - [x] Look over the contributing guidelines at https://github.com/ROCm/ROCm/blob/develop/CONTRIBUTING.md#pull-requests.
This commit is contained in:
committed by
assistant-librarian[bot]
parent
4c0e73ab12
commit
920acd2c12
@@ -3,7 +3,7 @@
|
||||
|
||||
# This directory contains Python utilities for the dispatcher examples.
|
||||
# The main utility file is ctypes_utils.py which is used by GEMM Python examples.
|
||||
# Conv Python examples use their own conv_utils.py in the examples directory.
|
||||
# Grouped conv Python examples use grouped_conv_utils.py in this directory.
|
||||
|
||||
# No build targets needed - these are pure Python utilities.
|
||||
message(STATUS "Python utilities directory configured (no build targets)")
|
||||
|
||||
@@ -4,6 +4,19 @@ This directory contains Python utilities used by the dispatcher examples.
|
||||
|
||||
## Contents
|
||||
|
||||
### Shared Utilities (used by both GEMM and Grouped Conv)
|
||||
|
||||
- `dispatcher_common.py` - Shared dispatcher infrastructure
|
||||
- Path helpers (`get_dispatcher_root`, `get_build_dir`, etc.)
|
||||
- `ValidationResultBase` - Structured validation feedback
|
||||
- `validate_wave_config`, `validate_warp_tile_config`, `validate_trait_combo`
|
||||
- `auto_correct_wave`, `auto_correct_trait` - Auto-correction helpers
|
||||
- `Colors` - Cross-platform ANSI color support
|
||||
- `print_phase`, `print_success`, `print_error`, `print_info` - Phased output
|
||||
- `cleanup_generated_kernels` - Cleanup helper
|
||||
|
||||
### GEMM Utilities
|
||||
|
||||
- `ctypes_utils.py` - Core ctypes utilities for GEMM Python examples
|
||||
- `KernelConfig` - Kernel configuration dataclass
|
||||
- `setup_gemm_dispatcher()` - Setup dispatcher with auto-correction
|
||||
@@ -11,11 +24,15 @@ This directory contains Python utilities used by the dispatcher examples.
|
||||
- `GemmRunner` - GPU execution helper
|
||||
- Auto-correction and validation utilities
|
||||
|
||||
- `conv_utils.py` - Core utilities for Conv Python examples
|
||||
- `ConvSignature`, `ConvAlgorithm` - Convolution configuration
|
||||
- `ConvProblem` - Problem definition
|
||||
- `GpuConvRunner` - GPU execution helper
|
||||
- `EnhancedConvCodegenRunner` - Kernel codegen utilities
|
||||
### Grouped Convolution Utilities
|
||||
|
||||
- `grouped_conv_utils.py` - Utilities for grouped convolution
|
||||
- `GroupedConvValidationResult` - Validation result (extends `ValidationResultBase`)
|
||||
- `validate_grouped_conv_config` - Validate a grouped conv config
|
||||
- `auto_correct_grouped_conv_config` - Auto-correct invalid configs
|
||||
- `get_grouped_conv_default_config` - Get default config for a variant
|
||||
- `GroupedConvDataType` - Data type enum (FP16, BF16, FP32, FP8, BF8, INT8)
|
||||
- `format_grouped_conv_summary` - Human-readable config summary
|
||||
|
||||
## Usage
|
||||
|
||||
@@ -36,21 +53,26 @@ from ctypes_utils import (
|
||||
)
|
||||
```
|
||||
|
||||
### Conv Examples
|
||||
|
||||
The Conv Python examples in `dispatcher/examples/conv/python/` import:
|
||||
### Grouped Conv Usage
|
||||
|
||||
```python
|
||||
import sys
|
||||
from pathlib import Path
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent / "python"))
|
||||
|
||||
from conv_utils import (
|
||||
ConvSignature,
|
||||
ConvAlgorithm,
|
||||
ConvProblem,
|
||||
GpuConvRunner,
|
||||
from grouped_conv_utils import (
|
||||
validate_grouped_conv_config,
|
||||
auto_correct_grouped_conv_config,
|
||||
get_grouped_conv_default_config,
|
||||
GroupedConvDataType,
|
||||
)
|
||||
|
||||
# Get a default config
|
||||
config = get_grouped_conv_default_config(variant="forward", arch="gfx942")
|
||||
|
||||
# Validate
|
||||
result = validate_grouped_conv_config(config)
|
||||
print(f"Valid: {result.is_valid}")
|
||||
```
|
||||
|
||||
## Requirements
|
||||
|
||||
@@ -37,6 +37,43 @@ import multiprocessing
|
||||
import time
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# GPU Architecture Auto-Detection
|
||||
# =============================================================================
|
||||
|
||||
_detected_arch: Optional[str] = None
|
||||
|
||||
|
||||
def detect_gpu_arch(fallback: str = "gfx942") -> str:
|
||||
"""
|
||||
Auto-detect the GPU architecture by querying rocminfo.
|
||||
|
||||
Caches the result after the first call. Falls back to `fallback` if
|
||||
detection fails (e.g. no GPU, rocminfo not installed).
|
||||
"""
|
||||
global _detected_arch
|
||||
if _detected_arch is not None:
|
||||
return _detected_arch
|
||||
|
||||
try:
|
||||
result = subprocess.run(
|
||||
["/opt/rocm/bin/rocminfo"], capture_output=True, text=True, timeout=10
|
||||
)
|
||||
for line in result.stdout.splitlines():
|
||||
stripped = line.strip()
|
||||
if stripped.startswith("Name:") and "gfx" in stripped:
|
||||
# Extract e.g. "gfx950" from "Name: gfx950"
|
||||
name = stripped.split(":", 1)[1].strip()
|
||||
if name.startswith("gfx") and name[3:].isdigit():
|
||||
_detected_arch = name
|
||||
return _detected_arch
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
_detected_arch = fallback
|
||||
return _detected_arch
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Path Configuration
|
||||
# =============================================================================
|
||||
@@ -159,9 +196,9 @@ class ValidationResult:
|
||||
def print_result(self, indent: str = " "):
|
||||
"""Print validation result."""
|
||||
if self.is_valid:
|
||||
print(f"{indent}✓ Configuration valid")
|
||||
print(f"{indent}OK Configuration valid")
|
||||
else:
|
||||
print(f"{indent}⚠ Configuration has issues:")
|
||||
print(f"{indent}WARNING Configuration has issues:")
|
||||
for err in self.errors:
|
||||
print(f"{indent} - {err}")
|
||||
|
||||
@@ -300,7 +337,7 @@ def auto_correct_kernel_config(
|
||||
# Check each fix and describe what changed
|
||||
if "scheduler" in fixes and fixes["scheduler"] != config.scheduler:
|
||||
corrections.append(
|
||||
f"Scheduler: {config.scheduler} → {fixes['scheduler']} "
|
||||
f"Scheduler: {config.scheduler} -> {fixes['scheduler']} "
|
||||
f"('{config.scheduler}' not supported with pipeline={config.pipeline}, epilogue={config.epilogue})"
|
||||
)
|
||||
|
||||
@@ -309,7 +346,7 @@ def auto_correct_kernel_config(
|
||||
new_wave = f"[{fixes.get('wave_m', config.wave_m)}, {fixes.get('wave_n', config.wave_n)}, {fixes.get('wave_k', config.wave_k)}]"
|
||||
if old_wave != new_wave:
|
||||
corrections.append(
|
||||
f"Wave config: {old_wave} → {new_wave} "
|
||||
f"Wave config: {old_wave} -> {new_wave} "
|
||||
f"(original not supported on {config.gfx_arch})"
|
||||
)
|
||||
|
||||
@@ -318,7 +355,7 @@ def auto_correct_kernel_config(
|
||||
new_warp = f"[{fixes.get('warp_m', config.warp_m)}, {fixes.get('warp_n', config.warp_n)}, {fixes.get('warp_k', config.warp_k)}]"
|
||||
if old_warp != new_warp:
|
||||
corrections.append(
|
||||
f"Warp tile: {old_warp} → {new_warp} "
|
||||
f"Warp tile: {old_warp} -> {new_warp} "
|
||||
f"(original not supported for {config.dtype_a} on {config.gfx_arch})"
|
||||
)
|
||||
|
||||
@@ -386,13 +423,13 @@ def print_auto_correction(
|
||||
indent: Indentation for output
|
||||
"""
|
||||
if not corrections:
|
||||
print(f"{indent}✓ Configuration valid - no corrections needed")
|
||||
print(f"{indent}OK Configuration valid - no corrections needed")
|
||||
return
|
||||
|
||||
print(f"\n{indent}⚠ AUTO-CORRECTION APPLIED:")
|
||||
print(f"\n{indent}WARNING AUTO-CORRECTION APPLIED:")
|
||||
print(f"{indent}" + "-" * 50)
|
||||
for correction in corrections:
|
||||
print(f"{indent} • {correction}")
|
||||
print(f"{indent} - {correction}")
|
||||
print(f"{indent}" + "-" * 50)
|
||||
print()
|
||||
|
||||
@@ -976,6 +1013,226 @@ def _run_codegen_subprocess(args: Dict[str, Any]) -> CodegenResult:
|
||||
)
|
||||
|
||||
|
||||
def _run_hipcc_subprocess(args: dict) -> Tuple[bool, Optional[Path], str]:
|
||||
"""Module-level function to run hipcc compilation in parallel."""
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
|
||||
compile_cmd = args["compile_cmd"]
|
||||
link_cmd = args["link_cmd"]
|
||||
lib_path = Path(args["lib_path"])
|
||||
|
||||
try:
|
||||
res_c = subprocess.run(compile_cmd, capture_output=True, text=True, timeout=300)
|
||||
if res_c.returncode != 0:
|
||||
return False, None, f"Compile failed: {res_c.stderr[:200]}"
|
||||
|
||||
res_l = subprocess.run(link_cmd, capture_output=True, text=True, timeout=300)
|
||||
if res_l.returncode != 0:
|
||||
return False, None, f"Link failed: {res_l.stderr[:200]}"
|
||||
|
||||
return True, lib_path, ""
|
||||
except subprocess.TimeoutExpired:
|
||||
return False, None, "Timeout"
|
||||
except Exception as e:
|
||||
return False, None, str(e)
|
||||
|
||||
|
||||
def _generate_single_kernel_subprocess(args: dict) -> Tuple[bool, Optional[str], str]:
|
||||
"""Module-level function: generate ONE kernel .hpp via --config JSON file.
|
||||
|
||||
Used by setup_multiple_gemm_dispatchers for per-config parallel codegen.
|
||||
Returns (success, header_path_or_None, error_msg).
|
||||
"""
|
||||
import subprocess
|
||||
import json
|
||||
import tempfile
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
try:
|
||||
out_dir = Path(args["output_dir"])
|
||||
out_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Write the single-config JSON to a temp file
|
||||
with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f:
|
||||
json.dump(args["tile_config_json"], f)
|
||||
config_file = f.name
|
||||
|
||||
cmd = [
|
||||
args["python"],
|
||||
str(args["codegen_script"]),
|
||||
"--output-dir",
|
||||
str(out_dir),
|
||||
"--datatype",
|
||||
args["dtype"],
|
||||
"--layout",
|
||||
args["layout"],
|
||||
"--gpu-target",
|
||||
args["gpu_target"],
|
||||
"--config",
|
||||
config_file,
|
||||
"--variants",
|
||||
"standard",
|
||||
]
|
||||
|
||||
res = subprocess.run(cmd, capture_output=True, text=True, timeout=300)
|
||||
os.unlink(config_file)
|
||||
|
||||
if res.returncode != 0:
|
||||
return False, None, f"Codegen failed: {res.stderr[:200]}"
|
||||
|
||||
# Find the generated .hpp using the expected name pattern
|
||||
pattern = args["hpp_glob_pattern"]
|
||||
matches = sorted(out_dir.glob(pattern))
|
||||
if matches:
|
||||
return True, str(matches[0]), ""
|
||||
else:
|
||||
return False, None, f"No .hpp matching {pattern} after codegen"
|
||||
|
||||
except Exception as e:
|
||||
return False, None, str(e)
|
||||
|
||||
|
||||
def _parse_triplet(text: str) -> Optional[Tuple[int, int, int]]:
|
||||
parts = text.split("x")
|
||||
if len(parts) != 3:
|
||||
return None
|
||||
try:
|
||||
return (int(parts[0]), int(parts[1]), int(parts[2]))
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
|
||||
def _parse_gemm_header_metadata(header: Path) -> Optional[Dict[str, Any]]:
|
||||
"""
|
||||
Parse GEMM header name into configuration metadata.
|
||||
|
||||
Expected stem format:
|
||||
gemm_{dtype}_{layout}_{pipeline}_{epilogue}_{scheduler}
|
||||
_{pad_m}_{pad_n}_{pad_k}_{persistent}
|
||||
_{tile_m}x{tile_n}x{tile_k}_{wave_m}x{wave_n}x{wave_k}_{warp_m}x{warp_n}x{warp_k}
|
||||
"""
|
||||
parts = header.stem.split("_")
|
||||
if len(parts) < 13 or parts[0] != "gemm":
|
||||
return None
|
||||
|
||||
tile = _parse_triplet(parts[10])
|
||||
wave = _parse_triplet(parts[11])
|
||||
warp = _parse_triplet(parts[12])
|
||||
if tile is None or wave is None or warp is None:
|
||||
return None
|
||||
|
||||
def _as_bool(v: str) -> bool:
|
||||
return v.lower() == "true"
|
||||
|
||||
return {
|
||||
"dtype": parts[1],
|
||||
"layout": parts[2],
|
||||
"pipeline": parts[3],
|
||||
"epilogue": parts[4],
|
||||
"scheduler": parts[5],
|
||||
"pad_m": _as_bool(parts[6]),
|
||||
"pad_n": _as_bool(parts[7]),
|
||||
"pad_k": _as_bool(parts[8]),
|
||||
"persistent": _as_bool(parts[9]),
|
||||
"tile": tile,
|
||||
"wave": wave,
|
||||
"warp": warp,
|
||||
}
|
||||
|
||||
|
||||
def _generate_arch_valid_gemm_headers(
|
||||
python_exe: str,
|
||||
codegen_script: Path,
|
||||
output_dir: Path,
|
||||
dtype: str,
|
||||
layout: str,
|
||||
gpu_target: str,
|
||||
variant: str = "standard",
|
||||
) -> Tuple[bool, List[Path], str]:
|
||||
"""Generate (or reuse) an arch-filtered kernel catalog for fallback selection."""
|
||||
output_dir.mkdir(parents=True, exist_ok=True)
|
||||
pattern = f"gemm_{dtype}_{layout}_*.hpp"
|
||||
existing = sorted(output_dir.glob(pattern))
|
||||
if existing:
|
||||
return True, existing, ""
|
||||
|
||||
cmd = [
|
||||
python_exe,
|
||||
str(codegen_script),
|
||||
"--output-dir",
|
||||
str(output_dir),
|
||||
"--datatype",
|
||||
dtype,
|
||||
"--layout",
|
||||
layout,
|
||||
"--gpu-target",
|
||||
gpu_target,
|
||||
"--variants",
|
||||
variant,
|
||||
]
|
||||
res = subprocess.run(cmd, capture_output=True, text=True, timeout=600)
|
||||
if res.returncode != 0:
|
||||
err = (res.stderr or res.stdout or "").strip()[:500]
|
||||
return False, [], f"Catalog codegen failed: {err}"
|
||||
|
||||
generated = sorted(output_dir.glob(pattern))
|
||||
if not generated:
|
||||
return False, [], "Catalog codegen produced no GEMM headers"
|
||||
return True, generated, ""
|
||||
|
||||
|
||||
def _select_best_arch_valid_gemm_header(
|
||||
config: "KernelConfig",
|
||||
headers: List[Path],
|
||||
) -> Tuple[Optional[Path], Optional[Dict[str, Any]]]:
|
||||
"""Choose nearest arch-valid header for a requested GEMM config."""
|
||||
best: Optional[Path] = None
|
||||
best_meta: Optional[Dict[str, Any]] = None
|
||||
best_score: Optional[Tuple[int, int, int, int, int, int]] = None
|
||||
|
||||
for h in headers:
|
||||
meta = _parse_gemm_header_metadata(h)
|
||||
if meta is None:
|
||||
continue
|
||||
if meta["dtype"] != config.dtype_a or meta["layout"] != config.layout:
|
||||
continue
|
||||
|
||||
tile = meta["tile"]
|
||||
wave = meta["wave"]
|
||||
warp = meta["warp"]
|
||||
tile_delta = (
|
||||
abs(tile[0] - config.tile_m)
|
||||
+ abs(tile[1] - config.tile_n)
|
||||
+ abs(tile[2] - config.tile_k)
|
||||
)
|
||||
wave_delta = (
|
||||
abs(wave[0] - config.wave_m)
|
||||
+ abs(wave[1] - config.wave_n)
|
||||
+ abs(wave[2] - config.wave_k)
|
||||
)
|
||||
warp_delta = (
|
||||
abs(warp[0] - config.warp_m)
|
||||
+ abs(warp[1] - config.warp_n)
|
||||
+ abs(warp[2] - config.warp_k)
|
||||
)
|
||||
score = (
|
||||
0 if meta["pipeline"] == config.pipeline else 1,
|
||||
0 if meta["scheduler"] == config.scheduler else 1,
|
||||
0 if meta["epilogue"] == config.epilogue else 1,
|
||||
tile_delta,
|
||||
wave_delta,
|
||||
warp_delta,
|
||||
)
|
||||
if best_score is None or score < best_score:
|
||||
best_score = score
|
||||
best = h
|
||||
best_meta = meta
|
||||
|
||||
return best, best_meta
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Preshuffle Utilities
|
||||
# =============================================================================
|
||||
@@ -1319,7 +1576,7 @@ class CodegenRunner:
|
||||
result = future.result()
|
||||
results.append(result)
|
||||
if verbose:
|
||||
status = "✓" if result.success else "✗"
|
||||
status = "OK" if result.success else "FAIL"
|
||||
print(
|
||||
f" {status} {variant}: {result.kernel_count} kernels in {result.elapsed_seconds:.2f}s"
|
||||
)
|
||||
@@ -1337,7 +1594,7 @@ class CodegenRunner:
|
||||
)
|
||||
)
|
||||
if verbose:
|
||||
print(f" ✗ {variant}: FAILED - {e}")
|
||||
print(f" FAIL {variant}: FAILED - {e}")
|
||||
|
||||
total_time = time.time() - start_total
|
||||
if verbose:
|
||||
@@ -1399,7 +1656,7 @@ class CodegenRunner:
|
||||
result = future.result()
|
||||
results.append(result)
|
||||
if verbose:
|
||||
status = "✓" if result.success else "✗"
|
||||
status = "OK" if result.success else "FAIL"
|
||||
print(
|
||||
f" {status} {tile_str}: {result.kernel_count} kernels in {result.elapsed_seconds:.2f}s"
|
||||
)
|
||||
@@ -1417,7 +1674,7 @@ class CodegenRunner:
|
||||
)
|
||||
)
|
||||
if verbose:
|
||||
print(f" ✗ {tile_str}: FAILED - {e}")
|
||||
print(f" FAIL {tile_str}: FAILED - {e}")
|
||||
|
||||
total_time = time.time() - start_total
|
||||
if verbose:
|
||||
@@ -1481,7 +1738,7 @@ class CodegenRunner:
|
||||
result = future.result()
|
||||
results.append(result)
|
||||
if verbose:
|
||||
status = "✓" if result.success else "✗"
|
||||
status = "OK" if result.success else "FAIL"
|
||||
print(
|
||||
f" {status} {variant}: {result.kernel_count} kernels in {result.elapsed_seconds:.2f}s"
|
||||
)
|
||||
@@ -1499,7 +1756,7 @@ class CodegenRunner:
|
||||
)
|
||||
)
|
||||
if verbose:
|
||||
print(f" ✗ {variant}: FAILED - {e}")
|
||||
print(f" FAIL {variant}: FAILED - {e}")
|
||||
|
||||
total_time = time.time() - start_total
|
||||
if verbose:
|
||||
@@ -1767,7 +2024,7 @@ class CodegenRunner:
|
||||
link_cmd, capture_output=True, text=True, timeout=300
|
||||
)
|
||||
if result.returncode == 0:
|
||||
print(f" ✓ Library rebuilt: {lib_path.name}")
|
||||
print(f" OK Library rebuilt: {lib_path.name}")
|
||||
# Clean up object file
|
||||
obj_file.unlink(missing_ok=True)
|
||||
return lib_path
|
||||
@@ -1781,6 +2038,105 @@ class CodegenRunner:
|
||||
print(f" Build error: {e}")
|
||||
return None
|
||||
|
||||
def build_libraries_parallel(
|
||||
self, configs_and_headers: List[Tuple[KernelConfig, Path]], verbose: bool = True
|
||||
) -> List[Optional[Path]]:
|
||||
"""
|
||||
Build multiple libraries in parallel using ProcessPoolExecutor.
|
||||
Returns a list of library paths (or None if a build failed) in the same order.
|
||||
"""
|
||||
import time
|
||||
from concurrent.futures import ProcessPoolExecutor, as_completed
|
||||
|
||||
start_time = time.time()
|
||||
build_dir = get_build_dir()
|
||||
root = get_dispatcher_root()
|
||||
ck_root = root.parent
|
||||
ctypes_source = root / "bindings/ctypes/gemm_ctypes_lib.cpp"
|
||||
static_lib = build_dir / "libck_tile_dispatcher.a"
|
||||
|
||||
if not ctypes_source.exists() or not static_lib.exists():
|
||||
if verbose:
|
||||
print(" Required source or static library missing for parallel build.")
|
||||
return [None] * len(configs_and_headers)
|
||||
|
||||
args_list = []
|
||||
for config, kernel_header in configs_and_headers:
|
||||
lib_name = f"libdispatcher_gemm_{config.dtype_a}_{config.layout}_{config.tile_str}_{config.pipeline}.so"
|
||||
lib_path = build_dir / "examples" / lib_name
|
||||
obj_file = lib_path.with_suffix(".o")
|
||||
|
||||
compile_cmd = [
|
||||
"/opt/rocm/bin/hipcc",
|
||||
"-c",
|
||||
"-fPIC",
|
||||
"-O3",
|
||||
f"-I{root / 'include'}",
|
||||
f"-I{ck_root / 'include'}",
|
||||
f"-I{ck_root}",
|
||||
f"-I{root / 'build/generated_kernels'}",
|
||||
"-DCK_TILE_SINGLE_KERNEL_INCLUDE",
|
||||
f"-include{kernel_header}",
|
||||
"-D__HIP_PLATFORM_AMD__",
|
||||
f"--offload-arch={config.gfx_arch}",
|
||||
f'-DGFX_ARCH="{config.gfx_arch}"',
|
||||
"-mllvm",
|
||||
"-enable-noalias-to-md-conversion=0",
|
||||
"-Wno-undefined-func-template",
|
||||
"-Wno-float-equal",
|
||||
str(ctypes_source),
|
||||
"-o",
|
||||
str(obj_file),
|
||||
]
|
||||
|
||||
link_cmd = [
|
||||
"/opt/rocm/bin/hipcc",
|
||||
"-shared",
|
||||
"-fPIC",
|
||||
f"--offload-arch={config.gfx_arch}",
|
||||
"--hip-link",
|
||||
str(obj_file),
|
||||
str(static_lib),
|
||||
"-o",
|
||||
str(lib_path),
|
||||
]
|
||||
|
||||
args_list.append(
|
||||
{
|
||||
"compile_cmd": compile_cmd,
|
||||
"link_cmd": link_cmd,
|
||||
"lib_path": str(lib_path),
|
||||
"config_name": f"{config.dtype_a}_{config.layout}_{config.tile_str}",
|
||||
}
|
||||
)
|
||||
|
||||
if verbose:
|
||||
print(
|
||||
f"Building {len(args_list)} libraries in parallel (workers={self.max_workers})..."
|
||||
)
|
||||
|
||||
results_map = {}
|
||||
with ProcessPoolExecutor(max_workers=self.max_workers) as executor:
|
||||
futures = {
|
||||
executor.submit(_run_hipcc_subprocess, args): i
|
||||
for i, args in enumerate(args_list)
|
||||
}
|
||||
for future in as_completed(futures):
|
||||
idx = futures[future]
|
||||
success, lib_path, err = future.result()
|
||||
results_map[idx] = Path(lib_path) if success else None
|
||||
if verbose:
|
||||
status = "OK" if success else f"FAIL ({err})"
|
||||
print(
|
||||
f" {status} {Path(lib_path).name if success else args_list[idx]['config_name']}"
|
||||
)
|
||||
|
||||
if verbose:
|
||||
elapsed = time.time() - start_time
|
||||
print(f"Parallel build finished in {elapsed:.2f}s")
|
||||
|
||||
return [results_map[i] for i in range(len(configs_and_headers))]
|
||||
|
||||
def generate_preselected(
|
||||
self, preset: str = "fp16_rcr_essential", output_dir: Optional[Path] = None
|
||||
) -> CodegenResult:
|
||||
@@ -1933,6 +2289,28 @@ class Registry:
|
||||
"""Bind to a loaded dispatcher library."""
|
||||
self._lib = lib
|
||||
|
||||
def build(
|
||||
self,
|
||||
verbose: bool = False,
|
||||
max_workers: Optional[int] = None,
|
||||
) -> List["GemmSetupResult"]:
|
||||
"""Parallel JIT compile all kernels in this registry.
|
||||
|
||||
Args:
|
||||
verbose: Print progress during build.
|
||||
max_workers: Max parallel codegen/compile processes (default: cpu_count capped at 8).
|
||||
|
||||
Returns a GemmSetupResult per registered kernel (same order as get_kernels()).
|
||||
"""
|
||||
if not self._kernels:
|
||||
return []
|
||||
return setup_multiple_gemm_dispatchers(
|
||||
self._kernels,
|
||||
registry_name=self._name,
|
||||
verbose=verbose,
|
||||
max_workers=max_workers,
|
||||
)
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"Registry(name='{self._name}', kernels={self.kernel_count})"
|
||||
|
||||
@@ -2109,7 +2487,7 @@ def setup_gemm_dispatcher(
|
||||
log(" Validating config...")
|
||||
validation = validate_kernel_config(config)
|
||||
if not validation.is_valid:
|
||||
log(" ⚠ Auto-correcting configuration...")
|
||||
log(" WARNING Auto-correcting configuration...")
|
||||
config, was_modified, corrections = auto_correct_kernel_config(
|
||||
config, verbose=verbose
|
||||
)
|
||||
@@ -2128,13 +2506,13 @@ def setup_gemm_dispatcher(
|
||||
|
||||
codegen_result = codegen.generate_from_config(config)
|
||||
if not codegen_result.success:
|
||||
log(" ⚠ Kernel generation: using existing")
|
||||
log(" WARNING Kernel generation: using existing")
|
||||
|
||||
# Step 3: Find matching kernel header
|
||||
kernel_header = find_matching_kernel_header(config)
|
||||
result.kernel_header = kernel_header
|
||||
if not kernel_header:
|
||||
log(" ⚠ No matching kernel header found")
|
||||
log(" WARNING No matching kernel header found")
|
||||
|
||||
# Step 4: Load library
|
||||
log(" Loading library...")
|
||||
@@ -2188,11 +2566,11 @@ def setup_gemm_dispatcher(
|
||||
result.error = "Failed to load rebuilt library"
|
||||
return result
|
||||
result.lib = lib
|
||||
log(f" ✓ Rebuilt library: {lib.get_kernel_name()}")
|
||||
log(f" OK Rebuilt library: {lib.get_kernel_name()}")
|
||||
else:
|
||||
log(" ⚠ Rebuild failed, using existing library")
|
||||
log(" WARNING Rebuild failed, using existing library")
|
||||
else:
|
||||
log(" ⚠ No kernel header found for config, using existing library")
|
||||
log(" WARNING No kernel header found for config, using existing library")
|
||||
|
||||
# Step 5: Create registry and dispatcher
|
||||
log(" Creating registry and dispatcher...")
|
||||
@@ -2203,12 +2581,305 @@ def setup_gemm_dispatcher(
|
||||
dispatcher = Dispatcher(registry=registry, lib=lib)
|
||||
result.dispatcher = dispatcher
|
||||
|
||||
log(f" ✓ Ready: {lib.get_kernel_name()}")
|
||||
log(f" OK Ready: {lib.get_kernel_name()}")
|
||||
|
||||
result.success = True
|
||||
return result
|
||||
|
||||
|
||||
def setup_multiple_gemm_dispatchers(
|
||||
configs: List[KernelConfig],
|
||||
registry_name: str = "gemm_registry",
|
||||
verbose: bool = True,
|
||||
max_workers: Optional[int] = None,
|
||||
) -> List[GemmSetupResult]:
|
||||
"""
|
||||
Setup multiple GEMM dispatchers in parallel.
|
||||
|
||||
Pipeline:
|
||||
1. Validate + auto-correct each config
|
||||
2. Parallel codegen: generate .hpp for each config via --config JSON
|
||||
3. Parallel hipcc: compile each .hpp -> .so
|
||||
4. Load + wire up each .so into a GemmSetupResult
|
||||
|
||||
Each config gets its own .so, so different tile sizes can coexist.
|
||||
|
||||
Args:
|
||||
max_workers: Max parallel processes for codegen/compile (default: cpu_count capped at 8).
|
||||
"""
|
||||
import sys
|
||||
|
||||
results = [GemmSetupResult(success=False, config=c) for c in configs]
|
||||
max_workers = max_workers or min(multiprocessing.cpu_count(), 8)
|
||||
|
||||
# -- Step 1: Validate & correct ---------------------------------------
|
||||
valid_configs = []
|
||||
for i, c in enumerate(configs):
|
||||
val = validate_kernel_config(c)
|
||||
if not val.is_valid:
|
||||
c, modified, corrections = auto_correct_kernel_config(c, verbose=False)
|
||||
results[i].config = c
|
||||
results[i].corrections = corrections
|
||||
valid_configs.append(c)
|
||||
|
||||
# -- Step 2: Parallel codegen (one --config JSON per config) ----------
|
||||
codegen_script = get_codegen_path()
|
||||
output_dir = get_generated_kernels_dir()
|
||||
|
||||
codegen_args = []
|
||||
for c in valid_configs:
|
||||
tile_str = c.tile_str
|
||||
wave_str = f"{c.wave_m}x{c.wave_n}x{c.wave_k}"
|
||||
warp_str = f"{c.warp_m}x{c.warp_n}x{c.warp_k}"
|
||||
|
||||
tile_config_json = {
|
||||
"tile_config": {
|
||||
"tile_m": [c.tile_m],
|
||||
"tile_n": [c.tile_n],
|
||||
"tile_k": [c.tile_k],
|
||||
"warp_m": [c.wave_m],
|
||||
"warp_n": [c.wave_n],
|
||||
"warp_k": [c.wave_k],
|
||||
"warp_tile_m": [c.warp_m],
|
||||
"warp_tile_n": [c.warp_n],
|
||||
"warp_tile_k": [c.warp_k],
|
||||
},
|
||||
"trait_config": {
|
||||
"pipeline": [c.pipeline],
|
||||
"epilogue": [c.epilogue],
|
||||
"scheduler": [c.scheduler],
|
||||
"pad_m": [c.pad_m],
|
||||
"pad_n": [c.pad_n],
|
||||
"pad_k": [c.pad_k],
|
||||
"persistent": [False],
|
||||
},
|
||||
}
|
||||
|
||||
hpp_pattern = (
|
||||
f"gemm_{c.dtype_a}_{c.layout}_{c.pipeline}_{c.epilogue}_{c.scheduler}"
|
||||
f"_*_{tile_str}_{wave_str}_{warp_str}.hpp"
|
||||
)
|
||||
|
||||
codegen_args.append(
|
||||
{
|
||||
"python": sys.executable,
|
||||
"codegen_script": str(codegen_script),
|
||||
"output_dir": str(output_dir),
|
||||
"dtype": c.dtype_a,
|
||||
"layout": c.layout,
|
||||
"gpu_target": c.gfx_arch,
|
||||
"tile_config_json": tile_config_json,
|
||||
"hpp_glob_pattern": hpp_pattern,
|
||||
}
|
||||
)
|
||||
|
||||
if verbose:
|
||||
print(
|
||||
f"Generating {len(codegen_args)} kernel headers in parallel (workers={max_workers})..."
|
||||
)
|
||||
|
||||
headers: List[Optional[Path]] = [None] * len(valid_configs)
|
||||
with ProcessPoolExecutor(max_workers=max_workers) as executor:
|
||||
futures = {
|
||||
executor.submit(_generate_single_kernel_subprocess, a): i
|
||||
for i, a in enumerate(codegen_args)
|
||||
}
|
||||
for future in as_completed(futures):
|
||||
idx = futures[future]
|
||||
ok, hdr_str, err = future.result()
|
||||
if ok and hdr_str:
|
||||
headers[idx] = Path(hdr_str)
|
||||
results[idx].kernel_header = Path(hdr_str)
|
||||
if verbose:
|
||||
print(
|
||||
f" OK [{idx}] {valid_configs[idx].tile_str}: {Path(hdr_str).name}"
|
||||
)
|
||||
else:
|
||||
results[idx].error = f"Codegen: {err}"
|
||||
if verbose:
|
||||
print(f" FAIL [{idx}] {valid_configs[idx].tile_str}: {err}")
|
||||
|
||||
# For configs rejected by arch filter, map to nearest arch-valid header.
|
||||
fallback_needed = [i for i, h in enumerate(headers) if h is None]
|
||||
if fallback_needed:
|
||||
if verbose:
|
||||
print(
|
||||
f"Resolving {len(fallback_needed)} configs via arch-valid GEMM catalog..."
|
||||
)
|
||||
|
||||
catalog_cache: Dict[Tuple[str, str, str, str], List[Path]] = {}
|
||||
for i in fallback_needed:
|
||||
c = valid_configs[i]
|
||||
key = (c.gfx_arch, c.dtype_a, c.layout, c.variant)
|
||||
if key not in catalog_cache:
|
||||
catalog_dir = (
|
||||
output_dir
|
||||
/ "_arch_valid_catalog"
|
||||
/ (f"{c.gfx_arch}_{c.dtype_a}_{c.layout}_{c.variant}")
|
||||
)
|
||||
ok, catalog_headers, err = _generate_arch_valid_gemm_headers(
|
||||
python_exe=sys.executable,
|
||||
codegen_script=codegen_script,
|
||||
output_dir=catalog_dir,
|
||||
dtype=c.dtype_a,
|
||||
layout=c.layout,
|
||||
gpu_target=c.gfx_arch,
|
||||
variant=c.variant,
|
||||
)
|
||||
if not ok:
|
||||
catalog_headers = []
|
||||
if verbose:
|
||||
print(f" FAIL [{i}] catalog generation: {err}")
|
||||
catalog_cache[key] = catalog_headers
|
||||
|
||||
chosen, meta = _select_best_arch_valid_gemm_header(c, catalog_cache[key])
|
||||
if chosen is None or meta is None:
|
||||
continue
|
||||
|
||||
headers[i] = chosen
|
||||
results[i].kernel_header = chosen
|
||||
results[i].error = ""
|
||||
|
||||
# Keep Python-side config aligned with the selected kernel header.
|
||||
valid_configs[i].pipeline = str(meta["pipeline"])
|
||||
valid_configs[i].epilogue = str(meta["epilogue"])
|
||||
valid_configs[i].scheduler = str(meta["scheduler"])
|
||||
valid_configs[i].pad_m = bool(meta["pad_m"])
|
||||
valid_configs[i].pad_n = bool(meta["pad_n"])
|
||||
valid_configs[i].pad_k = bool(meta["pad_k"])
|
||||
valid_configs[i].tile_m = int(meta["tile"][0])
|
||||
valid_configs[i].tile_n = int(meta["tile"][1])
|
||||
valid_configs[i].tile_k = int(meta["tile"][2])
|
||||
valid_configs[i].wave_m = int(meta["wave"][0])
|
||||
valid_configs[i].wave_n = int(meta["wave"][1])
|
||||
valid_configs[i].wave_k = int(meta["wave"][2])
|
||||
valid_configs[i].warp_m = int(meta["warp"][0])
|
||||
valid_configs[i].warp_n = int(meta["warp"][1])
|
||||
valid_configs[i].warp_k = int(meta["warp"][2])
|
||||
results[i].config = valid_configs[i]
|
||||
|
||||
if verbose:
|
||||
print(f" INFO [{i}] mapped to arch-valid header: {chosen.name}")
|
||||
|
||||
# -- Step 3: Parallel hipcc compilation -------------------------------
|
||||
root = get_dispatcher_root()
|
||||
ck_root = root.parent
|
||||
build_dir = get_build_dir()
|
||||
ctypes_source = root / "bindings" / "ctypes" / "gemm_ctypes_lib.cpp"
|
||||
static_lib = build_dir / "libck_tile_dispatcher.a"
|
||||
|
||||
if not ctypes_source.exists() or not static_lib.exists():
|
||||
for i in range(len(valid_configs)):
|
||||
if results[i].error == "":
|
||||
results[
|
||||
i
|
||||
].error = "Missing ctypes source or static library for compilation"
|
||||
return results
|
||||
|
||||
compile_jobs = []
|
||||
compile_index_map = {}
|
||||
for i, c in enumerate(valid_configs):
|
||||
hdr = headers[i]
|
||||
if hdr is None:
|
||||
continue
|
||||
|
||||
lib_name = (
|
||||
f"libdispatcher_gemm_{c.dtype_a}_{c.layout}_{c.tile_str}_{c.pipeline}.so"
|
||||
)
|
||||
lib_path = build_dir / "examples" / lib_name
|
||||
obj_file = lib_path.with_suffix(".o")
|
||||
|
||||
compile_cmd = [
|
||||
"/opt/rocm/bin/hipcc",
|
||||
"-c",
|
||||
"-fPIC",
|
||||
"-O3",
|
||||
f"-I{root / 'include'}",
|
||||
f"-I{ck_root / 'include'}",
|
||||
f"-I{ck_root}",
|
||||
f"-I{str(output_dir)}",
|
||||
"-DCK_TILE_SINGLE_KERNEL_INCLUDE",
|
||||
f"-include{hdr}",
|
||||
"-D__HIP_PLATFORM_AMD__",
|
||||
f"--offload-arch={c.gfx_arch}",
|
||||
f'-DGFX_ARCH="{c.gfx_arch}"',
|
||||
"-mllvm",
|
||||
"-enable-noalias-to-md-conversion=0",
|
||||
"-Wno-undefined-func-template",
|
||||
"-Wno-float-equal",
|
||||
str(ctypes_source),
|
||||
"-o",
|
||||
str(obj_file),
|
||||
]
|
||||
link_cmd = [
|
||||
"/opt/rocm/bin/hipcc",
|
||||
"-shared",
|
||||
"-fPIC",
|
||||
f"--offload-arch={c.gfx_arch}",
|
||||
"--hip-link",
|
||||
str(obj_file),
|
||||
str(static_lib),
|
||||
"-o",
|
||||
str(lib_path),
|
||||
]
|
||||
|
||||
compile_index_map[len(compile_jobs)] = i
|
||||
compile_jobs.append(
|
||||
{
|
||||
"compile_cmd": compile_cmd,
|
||||
"link_cmd": link_cmd,
|
||||
"lib_path": str(lib_path),
|
||||
}
|
||||
)
|
||||
|
||||
if verbose and compile_jobs:
|
||||
print(
|
||||
f"Compiling {len(compile_jobs)} libraries in parallel (workers={max_workers})..."
|
||||
)
|
||||
|
||||
lib_paths: Dict[int, Optional[Path]] = {}
|
||||
with ProcessPoolExecutor(max_workers=max_workers) as executor:
|
||||
futures = {
|
||||
executor.submit(_run_hipcc_subprocess, job): j
|
||||
for j, job in enumerate(compile_jobs)
|
||||
}
|
||||
for future in as_completed(futures):
|
||||
j = futures[future]
|
||||
i = compile_index_map[j]
|
||||
ok, lp, err = future.result()
|
||||
if ok and lp:
|
||||
lib_paths[i] = Path(lp)
|
||||
if verbose:
|
||||
print(f" OK [{i}] {valid_configs[i].tile_str}: {Path(lp).name}")
|
||||
else:
|
||||
results[i].error = f"Compile: {err}"
|
||||
if verbose:
|
||||
print(f" FAIL [{i}] {valid_configs[i].tile_str}: {err}")
|
||||
|
||||
# -- Step 4: Load libraries and create dispatchers --------------------
|
||||
for i, c in enumerate(valid_configs):
|
||||
lp = lib_paths.get(i)
|
||||
if lp is None:
|
||||
continue
|
||||
|
||||
lib = DispatcherLib.load(lp)
|
||||
if lib is not None and lib.initialize():
|
||||
results[i].lib = lib
|
||||
reg = Registry(name=f"{registry_name}_{i}", lib=lib)
|
||||
reg.register_kernel(c)
|
||||
results[i].registry = reg
|
||||
results[i].dispatcher = Dispatcher(registry=reg, lib=lib)
|
||||
results[i].success = True
|
||||
else:
|
||||
results[i].error = "Failed to load compiled library"
|
||||
|
||||
if verbose:
|
||||
ok_count = sum(1 for r in results if r.success)
|
||||
print(f"Setup complete: {ok_count}/{len(results)} dispatchers ready")
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def cleanup_gemm():
|
||||
"""
|
||||
Cleanup function to call after running GEMM examples.
|
||||
|
||||
372
dispatcher/python/dispatcher_common.py
Normal file
372
dispatcher/python/dispatcher_common.py
Normal file
@@ -0,0 +1,372 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# Copyright (c) Advanced Micro Devices, Inc., or its affiliates.
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
"""
|
||||
Shared Python dispatcher utilities for GEMM and grouped convolution.
|
||||
|
||||
Extracted from ctypes_utils.py (GEMM) + compile_grouped_conv_examples.py (grouped conv).
|
||||
Both ctypes_utils.py and grouped_conv_utils.py import from here to
|
||||
eliminate duplication.
|
||||
|
||||
Best-of-both:
|
||||
- Validation and auto-correction return typed objects (GEMM pattern)
|
||||
- Colors class with cross-platform ANSI handling (conv pattern)
|
||||
- Phased output helpers (conv pattern)
|
||||
- logging module instead of bare print() (shared improvement)
|
||||
"""
|
||||
|
||||
import logging
|
||||
import shutil
|
||||
import sys
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Path Configuration
|
||||
# ============================================================================
|
||||
|
||||
|
||||
def get_dispatcher_root() -> Path:
    """Get the dispatcher root directory (parent of python/)."""
    # This module lives in <dispatcher>/python/, so two parent hops
    # land on the dispatcher root.
    this_file = Path(__file__)
    return this_file.parent.parent
|
||||
|
||||
|
||||
def get_ck_root() -> Path:
    """Get the CK root directory (parent of dispatcher/)."""
    dispatcher_root = get_dispatcher_root()
    return dispatcher_root.parent
|
||||
|
||||
|
||||
def get_build_dir() -> Path:
    """Get the build directory."""
    # Build artifacts live directly under the dispatcher root.
    return get_dispatcher_root().joinpath("build")
|
||||
|
||||
|
||||
def get_generated_kernels_dir() -> Path:
    """Get the generated kernels directory."""
    # JIT-generated kernel sources are placed under the build tree.
    return get_build_dir().joinpath("generated_kernels")
|
||||
|
||||
|
||||
def get_codegen_dir() -> Path:
    """Get the codegen scripts directory."""
    return get_dispatcher_root().joinpath("codegen")
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Architecture Filter Data
|
||||
# ============================================================================
|
||||
|
||||
_arch_data_cache: Optional[Dict[str, Any]] = None
|
||||
|
||||
|
||||
def detect_gpu_arch(fallback: str = "gfx942") -> str:
    """Detect the GPU architecture from rocminfo. Falls back to the given default.

    Scans rocminfo output for an agent "Name:" line that mentions a
    gfx target and returns its last whitespace-separated token.
    Any failure (rocminfo missing, non-zero exit, ...) yields *fallback*.
    """
    import subprocess

    try:
        rocminfo_text = subprocess.check_output(
            ["rocminfo"], text=True, stderr=subprocess.DEVNULL
        )
    except Exception:
        # Best-effort probe: no ROCm install, no GPU, or any other
        # failure simply means "use the fallback arch".
        return fallback
    for line in rocminfo_text.splitlines():
        if "Name:" in line and "gfx" in line:
            return line.split()[-1].strip()
    return fallback
|
||||
|
||||
|
||||
def get_arch_filter_data() -> Dict[str, Any]:
    """Load arch filter data from arch_specs_generated if available.

    Returns dict with keys: trait_unsupported, warp_combos,
    warp_tile_combos, supported_archs.

    The generated module is produced by codegen; when it cannot be
    imported (codegen not run yet) a small, conservative fallback table
    is used instead. The result is cached in the module-level
    ``_arch_data_cache`` so the import and sys.path manipulation happen
    at most once per process.
    """
    global _arch_data_cache
    if _arch_data_cache is not None:
        return _arch_data_cache

    # Consistency: use the shared helper instead of rebuilding the
    # codegen path by hand (was: get_dispatcher_root() / "codegen").
    codegen_dir = get_codegen_dir()
    # Robustness: only insert into sys.path once; repeated cache misses
    # (e.g. after a test resets the cache) must not keep prepending the
    # same entry.
    if str(codegen_dir) not in sys.path:
        sys.path.insert(0, str(codegen_dir))

    try:
        from arch_specs_generated import (
            TRAIT_UNSUPPORTED_COMBINATIONS,
            WARP_SUPPORTED_COMBINATIONS,
            WARP_TILE_SUPPORTED_COMBINATIONS,
            get_supported_archs,
        )

        _arch_data_cache = {
            "trait_unsupported": TRAIT_UNSUPPORTED_COMBINATIONS,
            "warp_combos": WARP_SUPPORTED_COMBINATIONS,
            "warp_tile_combos": WARP_TILE_SUPPORTED_COMBINATIONS,
            "supported_archs": get_supported_archs(),
        }
    except ImportError:
        # Minimal known-good subset used when the generated specs are
        # unavailable; keep in sync with codegen output.
        _arch_data_cache = {
            "trait_unsupported": {
                ("compv3", "cshuffle", "interwave"),
                ("compv3", "default", "interwave"),
                ("compv4", "cshuffle", "interwave"),
                ("compv4", "default", "interwave"),
            },
            "warp_combos": {
                "gfx942": [[1, 4, 1], [2, 2, 1], [4, 1, 1]],
                "gfx90a": [[1, 4, 1], [2, 2, 1], [4, 1, 1]],
            },
            "warp_tile_combos": {
                "gfx942": {"fp16_fp16_fp32": [[16, 16, 16], [32, 32, 16]]},
                "gfx90a": {"fp16_fp16_fp32": [[16, 16, 16], [32, 32, 16]]},
            },
            "supported_archs": ["gfx90a", "gfx942", "gfx950"],
        }

    return _arch_data_cache
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Validation Result
|
||||
# ============================================================================
|
||||
|
||||
|
||||
@dataclass
class ValidationResultBase:
    """Result of kernel config validation (shared base for GEMM and conv).

    Attributes:
        is_valid: True when the config passed all checks.
        errors: Human-readable reasons the config is invalid.
        warnings: Non-fatal issues worth surfacing.
        suggested_fixes: Field name -> suggested replacement value.
    """

    is_valid: bool
    errors: List[str] = field(default_factory=list)
    warnings: List[str] = field(default_factory=list)
    suggested_fixes: Dict[str, Any] = field(default_factory=dict)

    def print_result(self, indent: str = " "):
        """Pretty-print the validation outcome to stdout."""
        if self.is_valid:
            print(f"{indent}OK Configuration valid")
        else:
            print(f"{indent}WARNING Configuration has issues:")
            for err in self.errors:
                print(f"{indent} - {err}")
        # Warnings and suggested fixes are shown regardless of validity.
        for warn in self.warnings:
            print(f"{indent} Warning: {warn}")
        if self.suggested_fixes:
            print(f"{indent} Suggested fixes:")
            for key, val in self.suggested_fixes.items():
                print(f"{indent} {key}: {val}")
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Validation Helpers
|
||||
# ============================================================================
|
||||
|
||||
|
||||
def validate_wave_config(wave_cfg: List[int], arch: str) -> Tuple[bool, str]:
    """Validate a [wave_m, wave_n, wave_k] config for *arch*.

    Returns (is_valid, error_message). Empty string on success.
    """
    valid_waves = get_arch_filter_data()["warp_combos"].get(arch, [[2, 2, 1]])
    if wave_cfg in valid_waves:
        return True, ""
    valid_str = ", ".join(f"[{c[0]},{c[1]},{c[2]}]" for c in valid_waves)
    message = (
        f"Unsupported wave configuration {wave_cfg} for {arch}. "
        f"Valid wave configs: {valid_str}"
    )
    return False, message
|
||||
|
||||
|
||||
def validate_warp_tile_config(
    warp_cfg: List[int], arch: str, dtype: str
) -> Tuple[bool, str]:
    """Validate a [warp_m, warp_n, warp_k] config for *arch*/*dtype*.

    Returns (is_valid, error_message). Empty string on success.
    """
    # int8 accumulates into int32; everything else into fp32.
    acc = "int32" if dtype == "int8" else "fp32"
    valid_tiles = (
        get_arch_filter_data()["warp_tile_combos"]
        .get(arch, {})
        .get(f"{dtype}_{dtype}_{acc}", [[32, 32, 16], [16, 16, 16]])
    )
    if warp_cfg in valid_tiles:
        return True, ""
    # Only show the first few valid tiles to keep the message short.
    shown = ", ".join(f"[{c[0]},{c[1]},{c[2]}]" for c in valid_tiles[:5])
    message = (
        f"Unsupported warp tile {warp_cfg} for {arch}/{dtype}. "
        f"Valid warp tiles: {shown}"
    )
    return False, message
|
||||
|
||||
|
||||
def validate_trait_combo(
    pipeline: str, epilogue: str, scheduler: str
) -> Tuple[bool, str]:
    """Validate a (pipeline, epilogue, scheduler) combination.

    Returns (is_valid, error_message). Empty string on success.
    """
    unsupported = get_arch_filter_data()["trait_unsupported"]
    if (pipeline, epilogue, scheduler) not in unsupported:
        return True, ""
    message = (
        f"Unsupported trait combination: pipeline={pipeline}, "
        f"epilogue={epilogue}, scheduler={scheduler}"
    )
    return False, message
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Auto-Correction Helpers
|
||||
# ============================================================================
|
||||
|
||||
|
||||
def auto_correct_wave(wave_cfg: List[int], arch: str) -> List[int]:
    """Return the first valid wave config for *arch*.

    If *wave_cfg* is already valid, returns it unchanged.
    """
    valid_waves = get_arch_filter_data()["warp_combos"].get(arch, [[2, 2, 1]])
    if wave_cfg not in valid_waves:
        # Fall back to the arch's first valid config (or a safe default).
        return valid_waves[0] if valid_waves else [2, 2, 1]
    return wave_cfg
|
||||
|
||||
|
||||
def auto_correct_trait(pipeline: str, scheduler: str) -> Tuple[str, str]:
    """Return a corrected (pipeline, scheduler) pair.

    If the compute pipeline doesn't support interwave, switch to intrawave.
    """
    unsupported = get_arch_filter_data()["trait_unsupported"]
    # If the scheduler is unsupported with either epilogue, force intrawave.
    if any(
        (pipeline, epilogue, scheduler) in unsupported
        for epilogue in ("cshuffle", "default")
    ):
        return pipeline, "intrawave"
    return pipeline, scheduler
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Colors (adopted from compile_grouped_conv_examples.py -- cross-platform)
|
||||
# ============================================================================
|
||||
|
||||
|
||||
class Colors:
    """Cross-platform ANSI color support.

    Respects sys.platform (no ANSI on Windows) and isatty() check so
    piped/redirected output stays clean.

    Each public method returns *text* wrapped in the matching ANSI
    escape when color output is enabled, or the text unchanged
    otherwise.
    """

    _GREEN = "\033[0;32m"
    _YELLOW = "\033[1;33m"
    _RED = "\033[0;31m"
    _CYAN = "\033[0;36m"
    _BOLD = "\033[1m"
    _NC = "\033[0m"

    @classmethod
    def _use_color(cls) -> bool:
        """Return True when stdout is an ANSI-capable interactive terminal."""
        return (
            sys.platform != "win32"
            and hasattr(sys.stdout, "isatty")
            and sys.stdout.isatty()
        )

    @classmethod
    def _paint(cls, code: str, text: str) -> str:
        """Wrap *text* in *code* + reset when coloring is enabled.

        Shared helper so each color method is a one-liner instead of
        repeating the enable-check/format logic five times.
        """
        if cls._use_color():
            return f"{code}{text}{cls._NC}"
        return text

    @classmethod
    def green(cls, text: str) -> str:
        return cls._paint(cls._GREEN, text)

    @classmethod
    def red(cls, text: str) -> str:
        return cls._paint(cls._RED, text)

    @classmethod
    def yellow(cls, text: str) -> str:
        return cls._paint(cls._YELLOW, text)

    @classmethod
    def cyan(cls, text: str) -> str:
        return cls._paint(cls._CYAN, text)

    @classmethod
    def bold(cls, text: str) -> str:
        return cls._paint(cls._BOLD, text)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Phased Output Helpers
|
||||
# ============================================================================
|
||||
|
||||
|
||||
def print_phase(number: int, description: str) -> None:
    """Print a phase header (e.g. 'Phase 1: Codegen')."""
    bar = "=" * 60
    print(f"\n{bar}")
    print(f" Phase {number}: {description}")
    print(bar)
|
||||
|
||||
|
||||
def print_success(message: str) -> None:
    """Print a success message."""
    print(" OK " + Colors.green(message))
|
||||
|
||||
|
||||
def print_error(message: str) -> None:
    """Print an error message."""
    print(" FAIL " + Colors.red(message))
|
||||
|
||||
|
||||
def print_info(message: str) -> None:
    """Print an info message."""
    print(" " + Colors.cyan(message))
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Cleanup Helpers
|
||||
# ============================================================================
|
||||
|
||||
|
||||
def cleanup_generated_kernels(gen_dir: Optional[Path] = None) -> None:
    """Remove generated kernel directory if it exists."""
    target = get_generated_kernels_dir() if gen_dir is None else gen_dir
    if not target.exists():
        return
    # Best-effort removal; partial failures are ignored.
    shutil.rmtree(target, ignore_errors=True)
    log.info("Cleaned up generated kernels at %s", target)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Tool Helpers
|
||||
# ============================================================================
|
||||
|
||||
|
||||
def find_hipcc() -> Optional[str]:
    """Find the hipcc compiler.

    Checks, in order: the HIPCC environment variable, the standard ROCm
    install location, then PATH. Returns None when no candidate exists.
    """
    import os

    for candidate in (
        os.environ.get("HIPCC"),
        "/opt/rocm/bin/hipcc",
        shutil.which("hipcc"),
    ):
        if candidate and os.path.isfile(candidate):
            return candidate
    return None
|
||||
1806
dispatcher/python/grouped_conv_utils.py
Normal file
1806
dispatcher/python/grouped_conv_utils.py
Normal file
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user