From 0a6542b503af91058c2f1a676ca38ace17e33395 Mon Sep 17 00:00:00 2001 From: Iwan Kawrakow Date: Mon, 3 Mar 2025 18:52:34 +0200 Subject: [PATCH] CUDA FA WIP - It actually works! No token generation (TG) yet, but for prompt processing (PP) I can run flash attention (FA) with an fp16 cache and it gets the same answer. --- ggml/src/ggml-cuda/fattn.cu | 1 + 1 file changed, 1 insertion(+) diff --git a/ggml/src/ggml-cuda/fattn.cu b/ggml/src/ggml-cuda/fattn.cu index bbfc1ccb..cdfd5161 100644 --- a/ggml/src/ggml-cuda/fattn.cu +++ b/ggml/src/ggml-cuda/fattn.cu @@ -343,6 +343,7 @@ static void ggml_cuda_flash_attn_ext_vec_f32(ggml_backend_cuda_context & ctx, gg void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * KQV = dst; const ggml_tensor * Q = dst->src[0]; + const ggml_tensor * V = dst->src[2]; ggml_cuda_set_device(ctx.device); const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc;