fix pypi cuda install (#1763)

This commit is contained in:
Jianwei Dong
2025-12-29 11:19:43 +08:00
committed by GitHub
parent 63796374c1
commit 559a3ad4ac
4 changed files with 243 additions and 19 deletions

View File

@@ -16,6 +16,7 @@ option(LLAMA_AVX512_FANCY_SIMD "llama: enable AVX512-VL, AVX512-BW, AVX512-DQ, A
# GPU backend selection — the build expects at most one of these ON
# (setup.py forwards them as mutually exclusive toggles).
option(KTRANSFORMERS_USE_CUDA "ktransformers: use CUDA" OFF)
option(KTRANSFORMERS_USE_MUSA "ktransformers: use MUSA" OFF)
option(KTRANSFORMERS_USE_ROCM "ktransformers: use ROCM" OFF)
# Default ON: static cudart lets published wheels run without a locally
# installed CUDA toolkit (per the PyPI install docs in this change).
option(KTRANSFORMERS_CUDA_STATIC_RUNTIME "ktransformers: statically link CUDA runtime" ON)
# CPU ISA / math-library feature toggles.
option(KTRANSFORMERS_CPU_USE_KML "ktransformers: CPU use KML" OFF)
option(KTRANSFORMERS_CPU_USE_AMX_AVX512 "ktransformers: CPU use AMX or AVX512" OFF)
option(KTRANSFORMERS_CPU_USE_AMX "ktransformers: CPU use AMX" OFF)
@@ -415,6 +416,25 @@ if(KTRANSFORMERS_USE_CUDA)
message(STATUS "enabling CUDA")
enable_language(CUDA)
add_compile_definitions(KTRANSFORMERS_USE_CUDA=1)

# Default CUDA architectures when the user supplied none.
# Target: SM 80/86 (Ampere), 89 (Ada), 90 (Hopper).
# Note: no FORCE — inside the NOT DEFINED guard there is nothing to
# override, and FORCE would stomp user configuration on re-runs.
if(NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
    set(CMAKE_CUDA_ARCHITECTURES "80;86;89;90" CACHE STRING "CUDA architectures")
    message(STATUS "CUDA architectures (default): ${CMAKE_CUDA_ARCHITECTURES}")
else()
    message(STATUS "CUDA architectures (user): ${CMAKE_CUDA_ARCHITECTURES}")
endif()

# Optimization / language flags. string(APPEND) instead of re-set()ing
# the whole flags string.
string(APPEND CMAKE_CUDA_FLAGS " -O3 --use_fast_math")
string(APPEND CMAKE_CUDA_FLAGS " --expt-relaxed-constexpr")
set(CMAKE_CUDA_STANDARD 17)
set(CMAKE_CUDA_STANDARD_REQUIRED ON)

message(STATUS "CUDA compiler: ${CMAKE_CUDA_COMPILER}")
message(STATUS "CUDA toolkit: ${CUDAToolkit_VERSION}")
message(STATUS "CUDA flags: ${CMAKE_CUDA_FLAGS}")
elseif(KTRANSFORMERS_USE_ROCM)
find_package(HIP REQUIRED)
if(HIP_FOUND)
@@ -629,7 +649,32 @@ endif()
if(KTRANSFORMERS_USE_CUDA)
    # Link the CUDA runtime exactly once, static or dynamic.
    # The previous unconditional link against the absolute path
    # "${CUDAToolkit_LIBRARY_DIR}/libcudart.so" is removed: it double-linked
    # the runtime alongside the selection below and broke the static-runtime
    # wheels (the whole point of KTRANSFORMERS_CUDA_STATIC_RUNTIME).
    if(KTRANSFORMERS_CUDA_STATIC_RUNTIME AND TARGET CUDA::cudart_static)
        # CUDA::cudart_static is the imported target from FindCUDAToolkit;
        # it already carries the extra system libs (rt/pthread/dl) that a
        # static cudart needs on Linux, on every platform layout.
        target_link_libraries(${PROJECT_NAME} PRIVATE CUDA::cudart_static)
        message(STATUS "CUDA runtime: static (CUDA::cudart_static)")
    else()
        if(KTRANSFORMERS_CUDA_STATIC_RUNTIME)
            message(WARNING "Static CUDA runtime not found, using dynamic")
        else()
            message(STATUS "CUDA runtime: dynamic")
        endif()
        target_link_libraries(${PROJECT_NAME} PRIVATE CUDA::cudart)
    endif()
endif()
if(KTRANSFORMERS_USE_ROCM)
add_compile_definitions(USE_HIP=1)

View File

@@ -43,16 +43,18 @@ High-performance kernel operations for KTransformers, featuring CPU-optimized Mo
### Option 1: Install from PyPI (Recommended for Most Users)
Install the latest stable version:
#### CPU-Only Installation
Install the latest CPU-only version:
```bash
pip install kt-kernel
pip install "kt-kernel==0.5.0+cpu"
```
Or install a specific version:
Or let pip auto-select the latest CPU version:
```bash
pip install kt-kernel==0.4.3
pip install kt-kernel # Defaults to CPU version
```
> **Note**: Check the [latest version on PyPI](https://pypi.org/project/kt-kernel/#history)
@@ -68,6 +70,43 @@ pip install kt-kernel==0.4.3
- Linux x86-64 (manylinux_2_17 compatible)
- CPU with AVX2 support (Intel Haswell 2013+, AMD Zen+)
#### CUDA Installation (GPU Acceleration)
For NVIDIA GPU-accelerated inference:
```bash
pip install "kt-kernel==0.5.0+cuda118"
```
**Features:**
- **Multi-architecture support**: Single wheel supports SM 80/86/89/90 (Ampere, Ada, Hopper)
- **Static CUDA runtime**: No CUDA toolkit installation required
- **Broad compatibility**: Works with CUDA 11.8+ and 12.x drivers
- **PyTorch compatible**: Works with any PyTorch CUDA variant (cu118, cu121, cu124)
**Requirements:**
- Python 3.10, 3.11, or 3.12
- Linux x86-64 (manylinux_2_17 compatible)
- NVIDIA GPU with compute capability 8.0+ (Ampere or newer)
- ✅ Supported: A100, RTX 3000/4000 series, H100
- ❌ Not supported: V100, P100, GTX 10 series, RTX 20 series (compute capability below 8.0)
- NVIDIA driver with CUDA 11.8+ or 12.x support (no CUDA toolkit needed)
**GPU Compatibility Matrix:**
| GPU Architecture | Compute Capability | Supported | Example GPUs |
|-----------------|-------------------|-----------|-------------|
| Hopper | 9.0 | ✅ | H100, H200 |
| Ada Lovelace | 8.9 | ✅ | RTX 4090, 4080, 4070 |
| Ampere | 8.6 | ✅ | RTX 3090, 3080, 3070, 3060 |
| Ampere | 8.0 | ✅ | A100, A30 |
| Turing | 7.5 | ❌ | RTX 2080, T4 |
| Volta | 7.0 | ❌ | V100 |
**CUDA Driver Compatibility:**
- CUDA 11.8, 12.0-12.6+: Full support (note: 11.8 was the final 11.x release)
- CUDA 11.0-11.7: Not supported (use CPU version or upgrade driver)
**CPU Variants Included:**
The wheel includes 3 optimized variants that are **automatically selected at runtime** based on your CPU:

View File

@@ -610,6 +610,9 @@ class CMakeBuild(build_ext):
_forward_str_env(cmake_args, "CPUINFER_LTO_JOBS", "CPUINFER_LTO_JOBS")
_forward_str_env(cmake_args, "CPUINFER_LTO_MODE", "CPUINFER_LTO_MODE")
# CUDA static runtime toggle
_forward_bool_env(cmake_args, "CPUINFER_CUDA_STATIC_RUNTIME", "KTRANSFORMERS_CUDA_STATIC_RUNTIME")
# GPU backends (mutually exclusive expected)
if _env_get_bool("CPUINFER_USE_CUDA", False):
cmake_args.append("-DKTRANSFORMERS_USE_CUDA=ON")
@@ -632,11 +635,11 @@ class CMakeBuild(build_ext):
hostcxx = os.environ["CUDAHOSTCXX"]
cmake_args.append(f"-DCMAKE_CUDA_HOST_COMPILER={hostcxx}")
print(f"-- Using CUDA host compiler from CUDAHOSTCXX: {hostcxx}")
# Respect user-provided architectures only (no default auto-detection).
archs_env = os.environ.get("CPUINFER_CUDA_ARCHS", "").strip()
# Set CUDA architectures (default: Ampere/Ada/Hopper)
archs_env = os.environ.get("CPUINFER_CUDA_ARCHS", "80;86;89;90").strip()
if archs_env and not any("CMAKE_CUDA_ARCHITECTURES" in a for a in cmake_args):
cmake_args.append(f"-DCMAKE_CUDA_ARCHITECTURES={archs_env}")
print(f"-- Set CUDA architectures from CPUINFER_CUDA_ARCHS: {archs_env}")
print(f"-- Set CUDA architectures: {archs_env}")
if _env_get_bool("CPUINFER_USE_ROCM", False):
cmake_args.append("-DKTRANSFORMERS_USE_ROCM=ON")
if _env_get_bool("CPUINFER_USE_MUSA", False):
@@ -685,15 +688,34 @@ class CMakeBuild(build_ext):
################################################################################
# Version detection
#
# Reads the base version from the shared version.py at the project root, then
# appends a PEP 440 local-version suffix (+cpu or +cuda118) based on the build
# type, unless CPUINFER_VERSION explicitly overrides the whole string.
# (Removed the stale duplicate VERSION assignments left over from the old
# 0.4.x flow — they were dead code, unconditionally overwritten below.)
################################################################################
_version_file = Path(__file__).resolve().parent.parent / "version.py"
if _version_file.exists():
    _version_ns = {}
    with open(_version_file, "r", encoding="utf-8") as f:
        exec(f.read(), _version_ns)
    _base_version = _version_ns.get("__version__", "0.5.0")
else:
    # version.py missing (e.g. sdist layout change): fall back to a literal.
    _base_version = "0.5.0"

if "CPUINFER_VERSION" in os.environ:
    # User explicitly set the full version (e.g., for testing).
    VERSION = os.environ["CPUINFER_VERSION"]
    print(f"-- Explicit version: {VERSION}")
elif _env_get_bool("CPUINFER_USE_CUDA", False):
    # CUDA build: +cuda118 suffix. CUDA 11.8 is the build toolkit version,
    # chosen for driver compatibility with 11.8+ and 12.x.
    VERSION = f"{_base_version}+cuda118"
    print(f"-- CUDA wheel version: {VERSION}")
else:
    # CPU-only build: +cpu suffix.
    VERSION = f"{_base_version}+cpu"
    print(f"-- CPU wheel version: {VERSION}")
################################################################################
# Setup