Better GLM-4.7-Flash long context TG performance

Kawrakow
2026-01-23 06:00:31 +00:00
parent 2a7cc09149
commit d774dca828

@@ -106,6 +106,26 @@ void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst
    // Hence, we use it only for DeepSeek with MLA enabled, where head sizes are 576, 512,
    // so no other implementation works.
    //
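    // The following branch covers GLM-4.7-Flash token generation: a single Q row,
    // 20 Q heads per KV head, and a context longer than 8192 tokens.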
    if (new_mma_available(cc) && K->ne[0] == 576 && V->ne[0] == 512 && Q->ne[1] == 1 &&
        Q->ne[2]/K->ne[2] == 20 && K->ne[1] > 8192) {
        // GLM-4.7-Flash TG hack: split 20 heads into 16+4 heads
        auto local_Q = *Q;
        auto local_dst = *dst;
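        // First call: shrink the Q and dst views to the first 16 heads.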
        local_Q.ne[2] = 16;
        local_dst.ne[1] = 16;
        local_dst.src[0] = &local_Q;
        ggml_cuda_flash_attn_ext_mma_new(ctx, &local_dst);
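        // Second call: the remaining 4 heads; advance the Q and dst data pointers
        // past the first 16 heads.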
        local_Q.ne[2] = 4;
        local_Q.data = (char *)local_Q.data + local_Q.nb[2]*16;
        local_dst.ne[1] = 4;
        local_dst.data = (char *)local_dst.data + local_dst.nb[1]*16;
        ggml_cuda_flash_attn_ext_mma_new(ctx, &local_dst);
        return;
    }
    if (new_mma_available(cc) && ((K->ne[0] == 576 && V->ne[0] == 512) || (K->ne[0] == 192 && V->ne[0] == 128 && mma_better_than_turing(cc)))) {
        //printf("Using ggml_cuda_flash_attn_ext_mma_new\n");
        ggml_cuda_flash_attn_ext_mma_new(ctx, dst);