From bf1d0561254fb8a45be2d3e9897a8dd917301839 Mon Sep 17 00:00:00 2001
From: Iwan Kawrakow
Date: Sun, 9 Feb 2025 09:24:52 +0200
Subject: [PATCH] Make sure we do have wk_b and wv_b before enabling MLA

---
 src/llama.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/llama.cpp b/src/llama.cpp
index 1d7ecbde..17d25733 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -3175,7 +3175,7 @@ static bool llama_kv_cache_init(
         struct ggml_context * ctx = offload ? ctx_map.at(model.buft_layer[i].buft) : cache.ctxs.front();
         ggml_tensor * k;
         ggml_tensor * v;
-        if (cparams.mla_attn) {
+        if (cparams.mla_attn && model.layers[i].wk_b && model.layers[i].wv_b) {
             k = ggml_new_tensor_1d(ctx, type_k, 1);
             v = ggml_new_tensor_1d(ctx, type_v, 1);
         }
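
Note on the change: the MLA-specific KV cache layout is only safe when the per-layer
tensors wk_b and wv_b were actually loaded, presumably because model files converted
before these tensors were introduced do not contain them; with the unguarded check,
requesting mla_attn on such a model would take the MLA path with null tensor pointers.
The following is a minimal standalone C++ sketch of the same guard pattern, assuming
hypothetical stand-in types (Tensor, Layer, Params) rather than the real ggml/llama.cpp
structures; it is an illustration, not the project's code.

// Guard pattern from the patch: a user-requested feature is honored only
// when the optional weights it depends on actually exist. All names here
// are hypothetical stand-ins for the llama.cpp originals.
#include <cstdio>
#include <vector>

struct Tensor {};                 // stand-in for ggml_tensor

struct Layer {
    Tensor * wk_b = nullptr;      // optional; may be absent in older model files
    Tensor * wv_b = nullptr;
};

struct Params {
    bool mla_attn = false;        // user asked for the MLA KV cache
};

// Same shape as the patched condition: the flag alone is not enough,
// the tensors the MLA path relies on must be present too.
static bool use_mla(const Params & p, const Layer & l) {
    return p.mla_attn && l.wk_b && l.wv_b;
}

int main() {
    Params params{true};
    std::vector<Layer> layers(2);
    Tensor t;
    layers[0].wk_b = &t;          // layer 0 has the optional tensors,
    layers[0].wv_b = &t;          // layer 1 deliberately does not

    for (size_t i = 0; i < layers.size(); ++i) {
        std::printf("layer %zu: %s\n", i,
                    use_mla(params, layers[i]) ? "MLA cache" : "standard cache");
    }
    return 0;
}

Checking the tensors per layer, rather than once globally, mirrors the patch: the
decision is made inside the per-layer loop of llama_kv_cache_init, so each layer
falls back independently if its optional tensors are missing.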