From e5622a2e91c70f6cbd663dac43a5f07ee2d03e60 Mon Sep 17 00:00:00 2001
From: usrlocalben
Date: Wed, 4 Feb 2026 04:57:50 -0500
Subject: [PATCH] Fix Phi-3, Phi-4 (#1226)

* fix phi3 tensor setup

* avoid SWA for Phi-4
---
 src/llama-build-context.cpp | 11 +++++++++--
 src/llama-load-tensors.cpp  |  6 ++++++
 2 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/src/llama-build-context.cpp b/src/llama-build-context.cpp
index f44b4c2c..f5509070 100644
--- a/src/llama-build-context.cpp
+++ b/src/llama-build-context.cpp
@@ -4539,7 +4539,14 @@ ggml_cgraph * llm_build_context::build_phi3() {
     struct ggml_tensor * inp_pos = build_inp_pos();
 
     // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
-    struct ggml_tensor * KQ_mask_swa = build_inp_KQ_mask_swa();
+    struct ggml_tensor * KQ_mask;
+    if (hparams.n_swa == 0) {
+        // Phi-4 does not use SWA
+        KQ_mask = build_inp_KQ_mask();
+    }
+    else {
+        KQ_mask = build_inp_KQ_mask_swa();
+    }
 
     for (int il = 0; il < n_layer; ++il) {
         auto residual = inpL;
@@ -4593,7 +4600,7 @@
 
             cur = llm_build_kv(ctx0, lctx, kv_self, gf,
                     model.layers[il].wo, model.layers[il].bo,
-                    Kcur, Vcur, Qcur, KQ_mask_swa, n_tokens, kv_head, n_kv, 1.0f, cb, il);
+                    Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il);
         }
 
         if (il == n_layer - 1) {
diff --git a/src/llama-load-tensors.cpp b/src/llama-load-tensors.cpp
index d66333ea..fd4b5162 100644
--- a/src/llama-load-tensors.cpp
+++ b/src/llama-load-tensors.cpp
@@ -1316,6 +1316,12 @@ bool create_tensors_helper::create_phi3_tensors(const LLM_TN & tn) {
 
     model.tok_embd = create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
 
+    // output
+    {
+        model.output_norm = create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
+        model.output = create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
+    }
+
     for (int i = 0; i < n_layer; ++i) {
         ggml_context * ctx_layer = ctx_for_layer(i);
         ggml_context * ctx_split = ctx_for_layer_split(i);