# KT-Kernel: High-performance kernel operations for KTransformers
# SPDX-License-Identifier: Apache-2.0

"""
KT-Kernel provides high-performance kernel operations for KTransformers,
including CPU-optimized MoE inference with AMX, AVX, and KML support.

Example usage:
    >>> from kt_kernel import KTMoEWrapper
    >>> wrapper = KTMoEWrapper(
    ...     layer_idx=0,
    ...     num_experts=8,
    ...     num_experts_per_tok=2,
    ...     hidden_size=4096,
    ...     moe_intermediate_size=14336,
    ...     num_gpu_experts=2,
    ...     cpuinfer_threads=32,
    ...     threadpool_count=2,
    ...     weight_path="/path/to/weights",
    ...     chunked_prefill_size=512,
    ...     method="AMXINT4"
    ... )
"""

from __future__ import annotations

from .experts import KTMoEWrapper

__version__ = "0.1.0"
__all__ = ["KTMoEWrapper"]
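
A minimal consumer-side sketch of the package surface defined above, assuming the kt_kernel package is installed. It relies only on what this __init__.py exports (the re-exported KTMoEWrapper class, __version__, and __all__) and does not assume anything about KTMoEWrapper's runtime methods beyond the documented constructor.

# Sketch: inspect the public API exported by kt_kernel/__init__.py.
# Assumes kt_kernel is installed; uses only names defined in the file above.
import kt_kernel

print(kt_kernel.__version__)   # "0.1.0"
print(kt_kernel.__all__)       # ["KTMoEWrapper"]

# KTMoEWrapper is re-exported from kt_kernel.experts, so both import paths
# resolve to the same class.
from kt_kernel import KTMoEWrapper
from kt_kernel.experts import KTMoEWrapper as _KTMoEWrapperDirect

assert KTMoEWrapper is _KTMoEWrapperDirect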