Mirror of https://github.com/ikawrakow/ik_llama.cpp.git (synced 2026-01-26 17:20:01 +00:00)
Better GLM-4.7-Flash long context TG performance
@@ -106,6 +106,26 @@ void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst
     // Hence, we use it only for DeepSeek with MLA enabled, where head sizes are 576, 512,
     // so no other implementation works.
     //
+    if (new_mma_available(cc) && K->ne[0] == 576 && V->ne[0] == 512 && Q->ne[1] == 1 &&
+        Q->ne[2]/K->ne[2] == 20 && K->ne[1] > 8192) {
+        // GLM-4.7-Flash TG hack: split 20 heads into 16+4 heads
+        auto local_Q = *Q;
+        auto local_dst = *dst;
+
+        local_Q.ne[2] = 16;
+        local_dst.ne[1] = 16;
+        local_dst.src[0] = &local_Q;
+        ggml_cuda_flash_attn_ext_mma_new(ctx, &local_dst);
+
+        local_Q.ne[2] = 4;
+        local_Q.data = (char *)local_Q.data + local_Q.nb[2]*16;
+        local_dst.ne[1] = 4;
+        local_dst.data = (char *)local_dst.data + local_dst.nb[1]*16;
+        ggml_cuda_flash_attn_ext_mma_new(ctx, &local_dst);
+
+        return;
+    }
+
     if (new_mma_available(cc) && ((K->ne[0] == 576 && V->ne[0] == 512) || (K->ne[0] == 192 && V->ne[0] == 128 && mma_better_than_turing(cc)))) {
         //printf("Using ggml_cuda_flash_attn_ext_mma_new\n");
         ggml_cuda_flash_attn_ext_mma_new(ctx, dst);
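The idea behind the hack: for a single generated token (Q->ne[1] == 1) with 20 query heads per KV head, the new MMA kernel is launched twice over shallow copies of Q and dst, first for heads 0..15 and then for heads 16..19, with the second view's data pointer advanced by 16 head strides (nb[2] for Q, nb[1] for dst). Below is a minimal standalone sketch of that view arithmetic; toy_tensor and split_heads are hypothetical stand-ins for illustration only, not ggml types or API.

// Minimal standalone sketch (hypothetical types, not the real ggml headers)
// of the 16+4 head split: the second view shares the same buffer, offset by
// 16 head-slices along dimension 2.
#include <cstdio>
#include <cstdint>

struct toy_tensor {        // stand-in for ggml_tensor (assumption)
    int64_t ne[4];         // number of elements per dimension
    size_t  nb[4];         // stride in bytes per dimension
    void  * data;
};

// Split a [head_dim, 1, n_head, 1] tensor into two views along dimension 2.
static void split_heads(const toy_tensor & Q, int64_t first,
                        toy_tensor & q0, toy_tensor & q1) {
    q0 = Q;                // shallow copy: same buffer, same strides
    q0.ne[2] = first;      // first chunk: heads [0, first)
    q1 = Q;
    q1.ne[2] = Q.ne[2] - first;                 // remaining heads
    q1.data  = (char *)Q.data + Q.nb[2]*first;  // skip past the first chunk
}

int main() {
    // 20 heads of dimension 576, one token (mirrors the Q shape in the hack)
    const int64_t head_dim = 576, n_head = 20;
    float buf[head_dim*n_head];
    toy_tensor Q = {{head_dim, 1, n_head, 1},
                    {sizeof(float), head_dim*sizeof(float),
                     head_dim*sizeof(float), head_dim*n_head*sizeof(float)},
                    buf};
    toy_tensor q0, q1;
    split_heads(Q, 16, q0, q1);
    printf("first launch : %lld heads at offset %zu bytes\n",
           (long long)q0.ne[2], (size_t)((char *)q0.data - (char *)buf));
    printf("second launch: %lld heads at offset %zu bytes\n",
           (long long)q1.ne[2], (size_t)((char *)q1.data - (char *)buf));
    return 0;
}

Running the sketch prints the head count and byte offset of each view; in the commit above the same arithmetic is applied to both local_Q (along ne[2]/nb[2]) and local_dst (along ne[1]/nb[1]) before each kernel launch.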