Fix Phi-3, Phi-4 (#1226)

* fix phi3 tensor setup

* avoid SWA for Phi-4

Author: usrlocalben
Date:   2026-02-04 04:57:50 -05:00
Committed by: GitHub
Parent: f8acfc2bf0
Commit: e5622a2e91

2 changed files with 15 additions and 2 deletions

@@ -4539,7 +4539,14 @@ ggml_cgraph * llm_build_context::build_phi3() {
     struct ggml_tensor * inp_pos = build_inp_pos();
 
     // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
-    struct ggml_tensor * KQ_mask_swa = build_inp_KQ_mask_swa();
+    struct ggml_tensor * KQ_mask;
+    if (hparams.n_swa == 0) {
+        // Phi-4 does not use SWA
+        KQ_mask = build_inp_KQ_mask();
+    }
+    else {
+        KQ_mask = build_inp_KQ_mask_swa();
+    }
 
     for (int il = 0; il < n_layer; ++il) {
         auto residual = inpL;
@@ -4593,7 +4600,7 @@ ggml_cgraph * llm_build_context::build_phi3() {
 
             cur = llm_build_kv(ctx0, lctx, kv_self, gf,
                     model.layers[il].wo, model.layers[il].bo,
-                    Kcur, Vcur, Qcur, KQ_mask_swa, n_tokens, kv_head, n_kv, 1.0f, cb, il);
+                    Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il);
         }
 
         if (il == n_layer - 1) {

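A note on the mask change above: Phi-3 models use sliding-window attention (SWA), so build_phi3() previously fed the windowed KQ mask unconditionally; Phi-4 defines no sliding window, which surfaces as hparams.n_swa == 0, so it needs the ordinary causal mask instead. The sketch below is a self-contained illustration of that distinction only; make_kq_mask(), the 0/-INFINITY mask layout, and the window rule are assumptions made for the demo, not the internals of build_inp_KQ_mask() or build_inp_KQ_mask_swa().

// Standalone sketch (hypothetical, for illustration): how a full causal mask
// differs from a sliding-window (SWA) mask, and why a window size of 0 should
// select the full mask rather than the windowed one.
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <vector>

// Mask entry convention assumed here: 0.0f = may attend, -INFINITY = masked out.
static std::vector<float> make_kq_mask(int n_tokens, uint32_t n_swa) {
    std::vector<float> mask(n_tokens * n_tokens, -INFINITY);
    for (int q = 0; q < n_tokens; ++q) {
        for (int k = 0; k <= q; ++k) {                 // causal: keys up to the query position
            const bool in_window = (n_swa == 0)        // no window defined (the Phi-4 case)
                                || (q - k < (int) n_swa);
            if (in_window) {
                mask[q * n_tokens + k] = 0.0f;
            }
        }
    }
    return mask;
}

int main() {
    const int n_tokens = 6;
    for (uint32_t n_swa : {0u, 3u}) {                  // 0: full causal mask, 3: window of 3 tokens
        std::printf("n_swa = %u\n", (unsigned) n_swa);
        const std::vector<float> mask = make_kq_mask(n_tokens, n_swa);
        for (int q = 0; q < n_tokens; ++q) {
            for (int k = 0; k < n_tokens; ++k) {
                std::printf("%c", mask[q * n_tokens + k] == 0.0f ? 'x' : '.');
            }
            std::printf("\n");
        }
    }
    return 0;
}

If the windowed rule alone were applied with a window of 0, no key would ever be admitted in this toy version; routing hparams.n_swa == 0 to the regular causal mask, as the hunk above does, avoids relying on how the SWA path treats a zero-size window.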

@@ -1316,6 +1316,12 @@ bool create_tensors_helper::create_phi3_tensors(const LLM_TN & tn) {
     model.tok_embd = create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
 
+    // output
+    {
+        model.output_norm = create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
+        model.output = create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
+    }
+
     for (int i = 0; i < n_layer; ++i) {
         ggml_context * ctx_layer = ctx_for_layer(i);
         ggml_context * ctx_split = ctx_for_layer_split(i);
 
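On the "fix phi3 tensor setup" bullet: the hunk above adds creation of the output head to create_phi3_tensors(), output_norm with shape {n_embd} and output with shape {n_embd, n_vocab}, so those tensors are registered alongside the token embedding before the per-layer tensors. The sketch below is a hypothetical, standalone illustration of that kind of required-tensor bookkeeping; the tensor names, the example sizes, and the checking loop are assumptions chosen to mirror the hunk, not the library's loader.

// Standalone sketch (hypothetical): a required-tensor checklist with expected
// shapes, illustrating the failure mode when an output tensor is never created.
#include <cstdint>
#include <cstdio>
#include <map>
#include <string>
#include <vector>

using Shape = std::vector<int64_t>;

int main() {
    const int64_t n_embd  = 3072;    // illustrative Phi-3-mini sizes
    const int64_t n_vocab = 32064;

    // Tensors the output head needs, with the shapes used in the hunk above.
    const std::map<std::string, Shape> required = {
        {"output_norm.weight", {n_embd}},
        {"output.weight",      {n_embd, n_vocab}},
    };

    // Pretend this is what tensor setup actually produced. Remove
    // "output.weight" to reproduce the kind of gap the fix closes.
    const std::map<std::string, Shape> created = {
        {"output_norm.weight", {n_embd}},
        {"output.weight",      {n_embd, n_vocab}},
    };

    bool ok = true;
    for (const auto & [name, shape] : required) {
        const auto it = created.find(name);
        if (it == created.end()) {
            std::fprintf(stderr, "missing tensor: %s\n", name.c_str());
            ok = false;
        } else if (it->second != shape) {
            std::fprintf(stderr, "unexpected shape for tensor: %s\n", name.c_str());
            ok = false;
        }
    }
    std::printf("output-head tensor setup: %s\n", ok ? "complete" : "incomplete");
    return ok ? 0 : 1;
}

With both entries present the check passes; dropping either one reproduces the missing-tensor style of error that a skipped registration causes at load time.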