From 0a18f1fadd4c25faed942d3489b7bbadc2bbf09f Mon Sep 17 00:00:00 2001
From: Iwan Kawrakow
Date: Mon, 12 Jan 2026 16:22:53 +0000
Subject: [PATCH] All the others

---
 src/llama-build-context.cpp | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/llama-build-context.cpp b/src/llama-build-context.cpp
index dd637982..af66ce3e 100644
--- a/src/llama-build-context.cpp
+++ b/src/llama-build-context.cpp
@@ -1941,7 +1941,7 @@ ggml_cgraph * llm_build_context::build_llama() {
                         LLM_FFN_SILU, false,
                         false, 0.0,
                         LLM_EXPERT_GATING_FUNC_SIGMOID,
-                        cb, il, gf, true);
+                        cb, il, gf, true, model.layers[il].ffn_up_gate_exps);
 
                 // Shared experts
                 ggml_tensor * shexp_out = llm_build_ffn(ctx0, lctx, nullptr, ffn_inp_normed,
@@ -2774,7 +2774,7 @@ ggml_cgraph * llm_build_context::build_dbrx() {
                     LLM_FFN_SILU, true,
                     false, 0.0,
                     LLM_EXPERT_GATING_FUNC_SOFTMAX,
-                    cb, il, gf);
+                    cb, il, gf, false, model.layers[il].ffn_up_gate_exps);
             cb(cur, "ffn_moe_out", il);
 
             cur = ggml_add(ctx0, cur, ffn_inp);
@@ -3862,7 +3862,7 @@ ggml_cgraph * llm_build_context::build_qwen2moe() {
                     LLM_FFN_SILU, false,
                     false, 0.0,
                     LLM_EXPERT_GATING_FUNC_SOFTMAX,
-                    cb, il, gf);
+                    cb, il, gf, false, model.layers[il].ffn_up_gate_exps);
             cb(cur, "ffn_moe_out", il);
 
             // FFN shared expert
@@ -6784,7 +6784,7 @@ ggml_cgraph * llm_build_context::build_deepseek2() {
                         LLM_FFN_SILU, hparams.expert_weights_norm,
                         true, hparams.expert_weights_scale,
                         (enum llm_expert_gating_func_type) hparams.expert_gating_func,
-                        cb, il, gf);
+                        cb, il, gf, false, model.layers[il].ffn_up_gate_exps);
                 cb(moe_out, "ffn_moe_out", il);
 
                 // FFN shared expert
@@ -8017,7 +8017,7 @@ ggml_cgraph * llm_build_context::build_dots1() {
                         LLM_FFN_SILU, hparams.expert_weights_norm,
                         true, hparams.expert_weights_scale,
                         (enum llm_expert_gating_func_type) hparams.expert_gating_func,
-                        cb, il, gf);
+                        cb, il, gf, false, model.layers[il].ffn_up_gate_exps);
                 cb(moe_out, "ffn_moe_out", il);
 
                 {
@@ -8287,7 +8287,7 @@ ggml_cgraph * llm_build_context::build_hunyuan_moe() {
                     n_expert, n_expert_used,
                     LLM_FFN_SILU, true, false, 0.0f,
                     LLM_EXPERT_GATING_FUNC_SOFTMAX,
-                    LLM_FFN_SILU, cb, il, gf, true);
+                    LLM_FFN_SILU, cb, il, gf, true, model.layers[il].ffn_up_gate_exps);
 
             cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);
@@ -8360,7 +8360,7 @@ ggml_cgraph * llm_build_context::build_mimo2() {
                     n_expert, n_expert_used,
                     LLM_FFN_SILU, true, false, 0.0f,
                     LLM_EXPERT_GATING_FUNC_SIGMOID,
-                    LLM_FFN_SILU, cb, il, gf, true);
+                    LLM_FFN_SILU, cb, il, gf, true, model.layers[il].ffn_up_gate_exps);
             }
 
             cur = lctx.cvec.apply_to(ctx0, cur, il);
@@ -8537,7 +8537,7 @@ ggml_cgraph * llm_build_context::build_bailingmoe2() {
                         LLM_FFN_SILU, hparams.expert_weights_norm,
                         true, hparams.expert_weights_scale,
                         (llm_expert_gating_func_type) hparams.expert_gating_func,
-                        cb, il, gf);
+                        cb, il, gf, false, model.layers[il].ffn_up_gate_exps);
                 cb(moe_out, "ffn_moe_out", il);
 
                 ggml_tensor * ffn_shexp = llm_build_ffn(ctx0, lctx, nullptr, cur,
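
Editor's note (not part of the patch): every hunk above appends the same trailing argument, model.layers[il].ffn_up_gate_exps, to what appears to be the MoE FFN builder call, with an explicit bool spelled out where it was previously left at its default. The sketch below only illustrates the call-site mechanics of adding a defaulted trailing parameter so untouched callers keep compiling; the stub types and the function name build_moe_ffn_sketch are hypothetical and do not reproduce the actual llm_build_moe_ffn signature in src/llama-build-context.cpp.

// Hypothetical, self-contained sketch; stub types stand in for ggml/llama internals.
#include <cstdio>

struct ggml_tensor { const char * name; };   // stand-in for the real ggml_tensor

// A builder whose new fused up+gate expert tensor is a defaulted trailing
// parameter: old call sites compile unchanged, while updated call sites
// (as in the patch) pass the extra tensor explicitly.
static ggml_tensor * build_moe_ffn_sketch(ggml_tensor * up_exps,
                                          ggml_tensor * gate_exps,
                                          bool          some_flag    = false,
                                          ggml_tensor * up_gate_exps = nullptr) {
    if (up_gate_exps) {
        // fused path: a single weight tensor covers both up and gate projections
        std::printf("fused: %s (flag=%d)\n", up_gate_exps->name, some_flag);
        return up_gate_exps;
    }
    // fallback path: separate up and gate tensors
    std::printf("split: %s + %s (flag=%d)\n", up_exps->name, gate_exps->name, some_flag);
    return up_exps;
}

int main() {
    ggml_tensor up   {"ffn_up_exps"};
    ggml_tensor gate {"ffn_gate_exps"};
    ggml_tensor fused{"ffn_up_gate_exps"};

    build_moe_ffn_sketch(&up, &gate);                // pre-patch style call
    build_moe_ffn_sketch(&up, &gate, true, &fused);  // post-patch style call
    return 0;
}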