Merge ffn_up and ffn_gate experts tensors (part 2) (#1139)

* Add ability to merge up+gate exps to more models

* We of course need to pass the merged tensor to build_ffn

* All the others

* Also Qwen3VL-MoE

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Kawrakow authored on 2026-01-13 08:07:52 +02:00, committed by GitHub
parent 54a1f68d32
commit 978202a754
2 changed files with 50 additions and 62 deletions
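For orientation before the diff: the benefit of a fused up+gate expert tensor is that the two per-expert projections can be computed with a single ggml_mul_mat_id over a tensor of width 2*n_ff and then split, instead of two separate matmuls. The sketch below only illustrates that idea with plain ggml calls; the function name, the variable names, and the fused layout (gate half first, then up half) are assumptions, not the actual ik_llama.cpp code path.

#include "ggml.h"

// Illustrative sketch: consuming a fused [n_embd, 2*n_ff, n_expert] up+gate
// expert tensor in an MoE FFN graph. Layout and names are assumptions.
static ggml_tensor * moe_up_gate_sketch(
        ggml_context * ctx0,
        ggml_tensor  * up_gate_exps,      // fused weights: [n_embd, 2*n_ff, n_expert]
        ggml_tensor  * cur,               // activations:   [n_embd, n_tokens]
        ggml_tensor  * selected_experts,  // expert ids:    [n_expert_used, n_tokens], I32
        int64_t        n_ff) {
    // one row per token in dim 2 so the matmul can broadcast across the selected experts
    cur = ggml_reshape_3d(ctx0, cur, cur->ne[0], 1, cur->ne[1]);

    // one matmul over the fused tensor instead of separate up and gate matmuls
    ggml_tensor * up_gate = ggml_mul_mat_id(ctx0, up_gate_exps, cur, selected_experts);
    // up_gate: [2*n_ff, n_expert_used, n_tokens]

    // split the fused result into its two halves (assumed order: gate, then up)
    ggml_tensor * gate = ggml_view_3d(ctx0, up_gate, n_ff, up_gate->ne[1], up_gate->ne[2],
            up_gate->nb[1], up_gate->nb[2], 0);
    ggml_tensor * up   = ggml_view_3d(ctx0, up_gate, n_ff, up_gate->ne[1], up_gate->ne[2],
            up_gate->nb[1], up_gate->nb[2], n_ff*ggml_element_size(up_gate));

    // SwiGLU: silu(gate) * up; ggml_cont keeps the halves contiguous for backends
    // that expect contiguous operands
    return ggml_mul(ctx0, ggml_silu(ctx0, ggml_cont(ctx0, gate)), ggml_cont(ctx0, up));
}

When the fused tensor is absent, the builders presumably fall back to the separate ffn_up_exps/ffn_gate_exps path, which is why the change below simply appends the new argument at every MoE call site.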


@@ -1940,7 +1940,7 @@ ggml_cgraph * llm_build_context::build_llama() {
LLM_FFN_SILU, false,
false, 0.0,
LLM_EXPERT_GATING_FUNC_SIGMOID,
-cb, il, gf, true);
+cb, il, gf, true, model.layers[il].ffn_up_gate_exps);
// Shared experts
ggml_tensor * shexp_out = llm_build_ffn(ctx0, lctx, nullptr, ffn_inp_normed,
@@ -2773,7 +2773,7 @@ ggml_cgraph * llm_build_context::build_dbrx() {
LLM_FFN_SILU, true,
false, 0.0,
LLM_EXPERT_GATING_FUNC_SOFTMAX,
-cb, il, gf);
+cb, il, gf, false, model.layers[il].ffn_up_gate_exps);
cb(cur, "ffn_moe_out", il);
cur = ggml_add(ctx0, cur, ffn_inp);
@@ -3861,7 +3861,7 @@ ggml_cgraph * llm_build_context::build_qwen2moe() {
LLM_FFN_SILU, false,
false, 0.0,
LLM_EXPERT_GATING_FUNC_SOFTMAX,
-cb, il, gf);
+cb, il, gf, false, model.layers[il].ffn_up_gate_exps);
cb(cur, "ffn_moe_out", il);
// FFN shared expert
@@ -4270,7 +4270,7 @@ ggml_cgraph * llm_build_context::build_qwen3vlmoe() {
LLM_FFN_SILU, true,
false, 0.0,
LLM_EXPERT_GATING_FUNC_SOFTMAX,
-cb, il, gf);
+cb, il, gf, false, model.layers[il].ffn_up_gate_exps);
cb(cur, "ffn_moe_out", il);
cur = ggml_add(ctx0, cur, ffn_inp);
@@ -6783,7 +6783,7 @@ ggml_cgraph * llm_build_context::build_deepseek2() {
LLM_FFN_SILU, hparams.expert_weights_norm,
true, hparams.expert_weights_scale,
(enum llm_expert_gating_func_type) hparams.expert_gating_func,
-cb, il, gf);
+cb, il, gf, false, model.layers[il].ffn_up_gate_exps);
cb(moe_out, "ffn_moe_out", il);
// FFN shared expert
@@ -6935,7 +6935,7 @@ ggml_cgraph * llm_build_context::build_glm4_moe() {
n_expert, n_expert_used,
LLM_FFN_SILU, hparams.expert_weights_norm, true, hparams.expert_weights_scale,
(llm_expert_gating_func_type) hparams.expert_gating_func,
-LLM_FFN_SILU, cb, il, gf, true);
+LLM_FFN_SILU, cb, il, gf, true, model.layers[il].ffn_up_gate_exps);
}
// residual and context vector
@@ -8016,7 +8016,7 @@ ggml_cgraph * llm_build_context::build_dots1() {
LLM_FFN_SILU, hparams.expert_weights_norm,
true, hparams.expert_weights_scale,
(enum llm_expert_gating_func_type) hparams.expert_gating_func,
-cb, il, gf);
+cb, il, gf, false, model.layers[il].ffn_up_gate_exps);
cb(moe_out, "ffn_moe_out", il);
{
@@ -8286,7 +8286,7 @@ ggml_cgraph * llm_build_context::build_hunyuan_moe() {
n_expert, n_expert_used,
LLM_FFN_SILU, true, false, 0.0f,
LLM_EXPERT_GATING_FUNC_SOFTMAX,
-LLM_FFN_SILU, cb, il, gf, true);
+LLM_FFN_SILU, cb, il, gf, true, model.layers[il].ffn_up_gate_exps);
cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);
@@ -8359,7 +8359,7 @@ ggml_cgraph * llm_build_context::build_mimo2() {
n_expert, n_expert_used,
LLM_FFN_SILU, true, false, 0.0f,
LLM_EXPERT_GATING_FUNC_SIGMOID,
-LLM_FFN_SILU, cb, il, gf, true);
+LLM_FFN_SILU, cb, il, gf, true, model.layers[il].ffn_up_gate_exps);
}
cur = lctx.cvec.apply_to(ctx0, cur, il);
@@ -8536,7 +8536,7 @@ ggml_cgraph * llm_build_context::build_bailingmoe2() {
LLM_FFN_SILU, hparams.expert_weights_norm,
true, hparams.expert_weights_scale,
(llm_expert_gating_func_type) hparams.expert_gating_func,
-cb, il, gf);
+cb, il, gf, false, model.layers[il].ffn_up_gate_exps);
cb(moe_out, "ffn_moe_out", il);
ggml_tensor * ffn_shexp = llm_build_ffn(ctx0, lctx, nullptr, cur,
@@ -8668,7 +8668,7 @@ ggml_cgraph* llm_build_context::build_minimaxm2() {
LLM_FFN_SILU, true,
false, 0,
(llm_expert_gating_func_type)hparams.expert_gating_func,
-cb, il, gf);
+cb, il, gf, false, model.layers[il].ffn_up_gate_exps);
cb(cur, "ffn_moe_out", il);
cur = ggml_add(ctx0, cur, ffn_inp);