mirror of
https://github.com/kvcache-ai/ktransformers.git
synced 2026-04-28 10:11:33 +00:00
* refactor: move legacy code to archive/ directory - Moved ktransformers, csrc, third_party, merge_tensors to archive/ - Moved build scripts and configurations to archive/ - Kept kt-kernel, KT-SFT, doc, and README files in root - Preserved complete git history for all moved files * refactor: restructure repository to focus on kt-kernel and KT-SFT modules * fix README * fix README * fix README * fix README * docs: add performance benchmarks to kt-kernel section Add comprehensive performance data for kt-kernel to match KT-SFT's presentation: - AMX kernel optimization: 21.3 TFLOPS (3.9× faster than PyTorch) - Prefill phase: up to 20× speedup vs baseline - Decode phase: up to 4× speedup - NUMA optimization: up to 63% throughput improvement - Multi-GPU (8×L20): 227.85 tokens/s total throughput with DeepSeek-R1 FP8 Source: https://lmsys.org/blog/2025-10-22-KTransformers/ This provides users with concrete performance metrics for both core modules, making it easier to understand the capabilities of each component. * refactor: improve kt-kernel performance data with specific hardware and models Replace generic performance descriptions with concrete benchmarks: - Specify exact hardware: 8×L20 GPU + Xeon Gold 6454S, Single/Dual-socket Xeon + AMX - Include specific models: DeepSeek-R1-0528 (FP8), DeepSeek-V3 (671B) - Show detailed metrics: total throughput, output throughput, concurrency details - Match KT-SFT presentation style for consistency This provides users with actionable performance data they can use to evaluate hardware requirements and expected performance for their use cases. 
* fix README * docs: clean up performance table and improve formatting * add pic for README * refactor: simplify .gitmodules and backup legacy submodules - Remove 7 legacy submodules from root .gitmodules (archive/third_party/*) - Keep only 2 active submodules for kt-kernel (llama.cpp, pybind11) - Backup complete .gitmodules to archive/.gitmodules - Add documentation in archive/README.md for researchers who need legacy submodules This reduces initial clone size by ~500MB and avoids downloading unused dependencies. * refactor: move doc/ back to root directory Keep documentation in root for easier access and maintenance. * refactor: consolidate all images to doc/assets/ - Move kt-kernel/assets/heterogeneous_computing.png to doc/assets/ - Remove KT-SFT/assets/ (images already in doc/assets/) - Update KT-SFT/README.md image references to ../doc/assets/ - Eliminates ~7.9MB image duplication - Centralizes all documentation assets in one location * fix pic path for README
62 lines
1.8 KiB
Python
62 lines
1.8 KiB
Python
import sys
|
|
sys.path.append('./build')
|
|
sys.path.append('./src')
|
|
import torch
|
|
import kvc2_ext
|
|
from kvc2_utils import get_tensor_from_data_ptr
|
|
|
|
# --- kvc2 smoke-test setup ---------------------------------------------------

# Create a kvc2 instance backed by on-disk storage at `path`.
path = "/mnt/data/kvc2"
kvc2_instance = kvc2_ext.create_kvc2(path,int(10e9)) # 10 G memory pool
# Load whatever cache state already exists on disk at `path`.
kvc2_ext.load(kvc2_instance)

# Start IO thread
# (background thread that services disk reads/writes for the cache)
print("Start IO thread")
kvc2_ext.start_io_thread(kvc2_instance)
print("IO thread started")

# Create CacheInfoInput describing which cache is being queried:
# DeepseekV2 model, key-cache, stored as float32.
test_info = kvc2_ext.CacheInfoInput()
test_info.model_type = kvc2_ext.ModelType.MT_DeepseekV2
test_info.cache_type = kvc2_ext.CacheType.CT_KeyCache
test_info.quant_type = kvc2_ext.QuantType.QT_F32

# element_size: bytes per cached element for the chosen quant type.
print("Element size: ", test_info.element_size())

# Generate random test IDs (length = 2560)
# Seeded so repeated runs query the same token-ID sequence.
torch.manual_seed(123)
length = 2560
# NOTE(review): dtype=torch.uint16 requires a fairly recent PyTorch — confirm
# the minimum supported version for this test.
test_id = torch.randint(0, 65536, (length,), dtype=torch.uint16).contiguous()
# 256 IDs per cache block, rounded up.  NOTE(review): currently unused below.
block_count = (length+255) // 256

# print("Test ID: ", test_id)

# Generate test data based on element size and hidden layer count
# (module-level values read by read_cmp_and_release further down)
element_size = test_info.element_size()
hidden_layer_count = test_info.hidden_layer_count()
|
def read_cmp_and_release(kvc2_instance,cache_info,ids,length):
    """Look up the first `length` IDs in the cache, touch the matched data,
    and release the lookup handle.

    Terminates the whole script via `exit()` when the lookup yields a null
    handle.  Reads the module-level `element_size` when materializing tensors
    from the returned raw pointers.
    """
    lookup_handle = kvc2_ext.lookup(kvc2_instance, cache_info, ids, length)

    # Guard: a null handle means the lookup itself failed — abort the test.
    if kvc2_ext.is_nullptr(lookup_handle):
        print("Handle is nullptr.")
        exit()

    n_matched = kvc2_ext.matched_length(lookup_handle)
    layer_ptrs = kvc2_ext.handle_data(lookup_handle)
    print('Matched length: ', n_matched)
    if n_matched > 0:
        print(f'First layer address {[hex(x) for x in layer_ptrs[0]]}')

    # Wrap the raw data pointers as tensors; the call doubles as a
    # "can we actually read this memory" check.
    _read_back = get_tensor_from_data_ptr(layer_ptrs, element_size)
    print("Just read check ok.")

    kvc2_ext.release(lookup_handle)
|
# Exercise partial-prefix lookups: probe prefixes of increasing length
# (128, 256, ..., length) against the cache, then tear the instance down.
#
# Fix: the original manual counter loop (`l = 128; while l <= length: ...
# l += 128`) is replaced with the idiomatic range(); `l` was also a PEP 8
# E741 ambiguous single-letter name.
for prefix_len in range(128, length + 1, 128):
    read_cmp_and_release(kvc2_instance, test_info, test_id.data_ptr(), prefix_len)

# Release the memory pool and on-disk resources held by the instance.
kvc2_ext.destroy_kvc2(kvc2_instance)

print("Test completed successfully.")