Ernie-4.5-MoE split mode graph

Kawrakow
2026-01-08 08:08:46 +00:00
parent 0456aa47d3
commit 8e1a625aaa
2 changed files with 136 additions and 105 deletions


@@ -8178,121 +8178,150 @@ ggml_cgraph * llm_build_context::build_ernie4_5_moe() {
     GGML_ASSERT(hparams.n_moe_layer_step > 0 && "Ernie 4.5 MoE requires n_moe_layer_step > 0");
     for (int il = 0; il < n_layer; ++il) {
-        ggml_tensor * inpSA = inpL;
-        // norm
-        // Pre-attention norm
-        cur = llm_build_norm(ctx0, inpL, hparams, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, cb, il);
-        cb(cur, "attn_norm", il);
-        // self-attention
-        {
-            // Q, K, V projections
-            ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
-            cb(Qcur, "Qcur", il);
-            if (model.layers[il].bq) {
-                Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
-                cb(Qcur, "Qcur", il);
-            }
-            ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
-            cb(Kcur, "Kcur", il);
-            if (model.layers[il].bk) {
-                Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
-                cb(Kcur, "Kcur", il);
-            }
-            cb(Kcur, "Kcur", il);
-            ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
-            cb(Vcur, "Vcur", il);
-            if (model.layers[il].bv) {
-                Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
-                cb(Vcur, "Vcur", il);
-            }
-            // reshape for multi-head
-            Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
-            Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
-            // Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
-            // apply RoPE
-            Qcur = ggml_rope_ext(ctx0, Qcur, inp_pos, nullptr,
-                n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
-                ext_factor, attn_factor, beta_fast, beta_slow);
-            Kcur = ggml_rope_ext(ctx0, Kcur, inp_pos, nullptr,
-                n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
-                ext_factor, attn_factor, beta_fast, beta_slow);
-            cb(Qcur, "Qcur", il);
-            cb(Kcur, "Kcur", il);
-            cb(Vcur, "Vcur", il);
-            cur = llm_build_kv(ctx0, lctx, kv_self, gf,
-                model.layers[il].wo, NULL,
-                Kcur, Vcur, Qcur, KQ_mask,
-                n_tokens, kv_head, n_kv,
-                1.0f / sqrtf(float(n_embd_head)), cb, il);
-        }
+        //ggml_tensor * inpSA = inpL;
+        cur = build_std_attention(gf, model.layers[il].attn_norm, inpL, inp_pos, nullptr, KQ_mask, nullptr, nullptr,
+                1.0f/sqrtf(float(n_embd_head)), 0.0f, 0, il, true, false, true);
+        //// norm
+        //// Pre-attention norm
+        //cur = llm_build_norm(ctx0, inpL, hparams, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, cb, il);
+        //cb(cur, "attn_norm", il);
+        //// self-attention
+        //{
+        //    // Q, K, V projections
+        //    ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
+        //    cb(Qcur, "Qcur", il);
+        //    if (model.layers[il].bq) {
+        //        Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
+        //        cb(Qcur, "Qcur", il);
+        //    }
+        //    ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
+        //    cb(Kcur, "Kcur", il);
+        //    if (model.layers[il].bk) {
+        //        Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
+        //        cb(Kcur, "Kcur", il);
+        //    }
+        //    cb(Kcur, "Kcur", il);
+        //    ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
+        //    cb(Vcur, "Vcur", il);
+        //    if (model.layers[il].bv) {
+        //        Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
+        //        cb(Vcur, "Vcur", il);
+        //    }
+        //    // reshape for multi-head
+        //    Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
+        //    Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
+        //    // Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
+        //    // apply RoPE
+        //    Qcur = ggml_rope_ext(ctx0, Qcur, inp_pos, nullptr,
+        //        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+        //        ext_factor, attn_factor, beta_fast, beta_slow);
+        //    Kcur = ggml_rope_ext(ctx0, Kcur, inp_pos, nullptr,
+        //        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+        //        ext_factor, attn_factor, beta_fast, beta_slow);
+        //    cb(Qcur, "Qcur", il);
+        //    cb(Kcur, "Kcur", il);
+        //    cb(Vcur, "Vcur", il);
+        //    cur = llm_build_kv(ctx0, lctx, kv_self, gf,
+        //        model.layers[il].wo, NULL,
+        //        Kcur, Vcur, Qcur, KQ_mask,
+        //        n_tokens, kv_head, n_kv,
+        //        1.0f / sqrtf(float(n_embd_head)), cb, il);
+        //}
         if (il == n_layer - 1 && inp_out_ids) {
             cur = ggml_get_rows(ctx0, cur, inp_out_ids);
-            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
+            //inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
         }
-        // residual connection for attention output
-        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
-        cb(ffn_inp, "ffn_inp", il);
+        //// residual connection for attention output
+        //ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
+        //cb(ffn_inp, "ffn_inp", il);
         // feed-forward network
         bool is_moe_layer = static_cast<uint32_t>(il) >= hparams.n_layer_dense_lead && (il + 1) % hparams.n_moe_layer_step == 0;
         if (!is_moe_layer) {
-            cur = llm_build_ffn(ctx0, lctx, model.layers[il].ffn_norm, ffn_inp,
+            // dense FFN
+            cur = llm_build_ffn(ctx0, lctx, model.layers[il].ffn_norm, cur,
                 model.layers[il].ffn_up, NULL, NULL,
                 model.layers[il].ffn_gate, NULL, NULL,
                 model.layers[il].ffn_down, NULL, NULL,
                 NULL,
-                LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
+                LLM_FFN_SILU, LLM_FFN_PAR, cb, il, gf, true);
             cb(cur, "ffn_out", il);
-        }
-        else {
-            // MoE branch
-            cur = llm_build_norm(ctx0, ffn_inp, hparams, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, cb, il);
-            cb(cur, "ffn_norm", il);
-            ggml_tensor * moe_out = llm_build_moe_ffn(ctx0, lctx, cur,
-                model.layers[il].ffn_gate_inp,
-                model.layers[il].ffn_up_exps,
-                model.layers[il].ffn_gate_exps,
-                model.layers[il].ffn_down_exps,
-                model.layers[il].ffn_exp_probs_b,
-                n_expert, n_expert_used,
-                LLM_FFN_SILU, true,
-                false, 0.0,
-                LLM_EXPERT_GATING_FUNC_SOFTMAX,
-                cb, il, gf);
-            cb(moe_out, "ffn_moe_out", il);
-            // Shared expert (if present)
-            if (hparams.n_ff_shexp > 0) {
-                ggml_tensor * ffn_shexp = llm_build_ffn(ctx0, lctx, nullptr, cur,
-                    model.layers[il].ffn_up_shexp, NULL, NULL,
-                    model.layers[il].ffn_gate_shexp, NULL, NULL,
-                    model.layers[il].ffn_down_shexp, NULL, NULL,
-                    NULL,
-                    LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
-                cb(ffn_shexp, "ffn_shexp", il);
-                cur = ggml_add(ctx0, moe_out, ffn_shexp);
-            }
-            else {
-                cur = moe_out;
-            }
-            cb(cur, "ffn_out", il);
+        } else {
+            cur = llm_build_std_moe_ffn(ctx0, lctx, model.layers[il].ffn_norm, cur,
+                model.layers[il].ffn_gate_inp, model.layers[il].ffn_gate_inp_b,
+                model.layers[il].ffn_up_exps, model.layers[il].ffn_up_exps_b,
+                model.layers[il].ffn_gate_exps, model.layers[il].ffn_gate_exps_b,
+                model.layers[il].ffn_down_exps, model.layers[il].ffn_down_exps_b,
+                model.layers[il].ffn_exp_probs_b,
+                model.layers[il].ffn_up_shexp, nullptr, // we don't have shared expert biases?
+                model.layers[il].ffn_gate_shexp, nullptr,
+                model.layers[il].ffn_down_shexp, nullptr,
+                n_expert, n_expert_used,
+                LLM_FFN_SILU, true, false, 0.0f,
+                LLM_EXPERT_GATING_FUNC_SOFTMAX,
+                LLM_FFN_SILU, cb, il, gf, true);
         }
-        cur = ggml_add(ctx0, cur, ffn_inp);
-        cb(cur, "ffn_out", il);
+        //if (!is_moe_layer) {
+        //    cur = llm_build_ffn(ctx0, lctx, model.layers[il].ffn_norm, ffn_inp,
+        //        model.layers[il].ffn_up, NULL, NULL,
+        //        model.layers[il].ffn_gate, NULL, NULL,
+        //        model.layers[il].ffn_down, NULL, NULL,
+        //        NULL,
+        //        LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
+        //    cb(cur, "ffn_out", il);
+        //}
+        //else {
+        //    // MoE branch
+        //    cur = llm_build_norm(ctx0, ffn_inp, hparams, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, cb, il);
+        //    cb(cur, "ffn_norm", il);
+        //    ggml_tensor * moe_out = llm_build_moe_ffn(ctx0, lctx, cur,
+        //        model.layers[il].ffn_gate_inp,
+        //        model.layers[il].ffn_up_exps,
+        //        model.layers[il].ffn_gate_exps,
+        //        model.layers[il].ffn_down_exps,
+        //        model.layers[il].ffn_exp_probs_b,
+        //        n_expert, n_expert_used,
+        //        LLM_FFN_SILU, true,
+        //        false, 0.0,
+        //        LLM_EXPERT_GATING_FUNC_SOFTMAX,
+        //        cb, il, gf);
+        //    cb(moe_out, "ffn_moe_out", il);
+        //    // Shared expert (if present)
+        //    if (hparams.n_ff_shexp > 0) {
+        //        ggml_tensor * ffn_shexp = llm_build_ffn(ctx0, lctx, nullptr, cur,
+        //            model.layers[il].ffn_up_shexp, NULL, NULL,
+        //            model.layers[il].ffn_gate_shexp, NULL, NULL,
+        //            model.layers[il].ffn_down_shexp, NULL, NULL,
+        //            NULL,
+        //            LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
+        //        cb(ffn_shexp, "ffn_shexp", il);
+        //        cur = ggml_add(ctx0, moe_out, ffn_shexp);
+        //    }
+        //    else {
+        //        cur = moe_out;
+        //    }
+        //    cb(cur, "ffn_out", il);
+        //}
+        //cur = ggml_add(ctx0, cur, ffn_inp);
+        //cb(cur, "ffn_out", il);
         cur = lctx.cvec.apply_to(ctx0, cur, il);
         cb(cur, "l_out", il);
@@ -8301,15 +8330,18 @@ ggml_cgraph * llm_build_context::build_ernie4_5_moe() {
         inpL = cur;
     }
-    cur = inpL;
-    cur = llm_build_norm(ctx0, cur, hparams, model.output_norm, NULL, LLM_NORM_RMS, cb, -1);
-    cb(cur, "result_norm", -1);
-    // lm_head
-    cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
+    cur = build_output(lctx, ctx0, inpL, model.output, model.output_norm, cb);
     cb(cur, "result_output", -1);
+    //cur = inpL;
+    //cur = llm_build_norm(ctx0, cur, hparams, model.output_norm, NULL, LLM_NORM_RMS, cb, -1);
+    //cb(cur, "result_norm", -1);
+    //// lm_head
+    //cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
+    //cb(cur, "result_output", -1);
     ggml_build_forward_expand(gf, cur);
     return gf;
 }
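
Net effect of the two hunks above: the hand-rolled attention block, the dense/MoE FFN pair, and the final norm + lm_head are each collapsed into a single call to a shared builder (build_std_attention, llm_build_std_moe_ffn, build_output), with the old inline code kept commented out for reference. Presumably these helpers are where the split-mode graph handling lives; this diff does not show their definitions. One detail that carries over unchanged is the attention scale, the standard 1/sqrt(d_head). A trivial standalone check (the head size below is illustrative):

    #include <cmath>
    #include <cstdio>

    int main() {
        const int n_embd_head = 128;  // illustrative head dimension
        // Same expression as the scale argument of build_std_attention()/llm_build_kv():
        const float scale = 1.0f / std::sqrt((float) n_embd_head);
        std::printf("attention scale for d_head=%d: %.6f\n", n_embd_head, scale);  // ~0.088388
        return 0;
    }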


@@ -2490,10 +2490,9 @@ bool create_tensors_helper::create_ernie45_tensors(const LLM_TN & tn) {
     for (int i = 0; i < n_layer; ++i) {
         auto& layer = model.layers[i];
-        ggml_context* ctx_layer = ctx_for_layer(i);
         ggml_context* ctx_split = ctx_for_layer_split(i);
-        layer.attn_norm = create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
+        layer.attn_norm = create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
         layer.wq = create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), { n_embd, n_embd_head_k * n_head }, 0);
         layer.wk = create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), { n_embd, n_embd_gqa }, 0);
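
This hunk and the next make the same change: per-layer weights that used to be created in the layer context now go into the split context, and the unused ctx_layer handle is dropped. The commit does not state the intent, but presumably the new split-mode graph expects every weight it touches, including the 1-D norms and biases, to follow the split placement. For the bias shapes in the next hunk, recall the grouped-query bookkeeping: K/V are n_embd_gqa wide, not n_embd. A standalone check with illustrative sizes:

    #include <cstdio>

    int main() {
        // Illustrative GQA sizes; the real values come from the model's hparams.
        const int n_embd_head_k = 128;  // per-head K dimension
        const int n_head        = 32;   // query heads
        const int n_head_kv     = 4;    // key/value heads (GQA)
        const int n_embd        = n_embd_head_k * n_head;     // 4096: width of wq, attn_norm, bq
        const int n_embd_gqa    = n_embd_head_k * n_head_kv;  //  512: width of wk/wv, bk/bv
        std::printf("wq: %d x %d, wk/wv: %d x %d\n", n_embd, n_embd, n_embd, n_embd_gqa);
        return 0;
    }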
@@ -2501,18 +2500,18 @@ bool create_tensors_helper::create_ernie45_tensors(const LLM_TN & tn) {
         layer.wo = create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd_head_k * n_head, n_embd }, 0);
         // optional bias tensors
-        layer.bq = create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), { n_embd }, llama_model_loader::TENSOR_NOT_REQUIRED);
-        layer.bk = create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), { n_embd_gqa }, llama_model_loader::TENSOR_NOT_REQUIRED);
-        layer.bv = create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), { n_embd_gqa }, llama_model_loader::TENSOR_NOT_REQUIRED);
-        layer.bo = create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), { n_embd }, llama_model_loader::TENSOR_NOT_REQUIRED);
-        layer.ffn_norm = create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }, 0);
+        layer.bq = create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "bias", i), { n_embd }, llama_model_loader::TENSOR_NOT_REQUIRED);
+        layer.bk = create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "bias", i), { n_embd_gqa }, llama_model_loader::TENSOR_NOT_REQUIRED);
+        layer.bv = create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "bias", i), { n_embd_gqa }, llama_model_loader::TENSOR_NOT_REQUIRED);
+        layer.bo = create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "bias", i), { n_embd }, llama_model_loader::TENSOR_NOT_REQUIRED);
+        layer.ffn_norm = create_tensor(ctx_split, tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }, 0);
         if (model.arch == LLM_ARCH_ERNIE4_5_MOE && static_cast<uint32_t>(i) >= hparams.n_layer_dense_lead) { // MoE layers
             int n_ff_exp = hparams.n_ff_exp;
-            layer.ffn_gate_inp = create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), { n_embd, n_expert }, 0);
-            layer.ffn_exp_probs_b = create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), { n_expert }, llama_model_loader::TENSOR_NOT_REQUIRED);
+            layer.ffn_gate_inp = create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), { n_embd, n_expert }, 0);
+            layer.ffn_exp_probs_b = create_tensor(ctx_split, tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), { n_expert }, llama_model_loader::TENSOR_NOT_REQUIRED);
             layer.ffn_gate_exps = create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert }, llama_model_loader::TENSOR_NOT_REQUIRED);
             layer.ffn_down_exps = create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff_exp, n_embd, n_expert }, 0);
             layer.ffn_up_exps = create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert }, 0);
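
The 3-D expert tensors at the end carry almost all of a MoE layer's parameters; their shapes as created above are { n_embd, n_ff_exp, n_expert } for gate/up and { n_ff_exp, n_embd, n_expert } for down. A quick parameter-count sanity check with illustrative sizes (not ERNIE-4.5's real ones):

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Illustrative sizes; the real ones come from hparams / the GGUF file.
        const uint64_t n_embd   = 1024;
        const uint64_t n_ff_exp = 512;   // per-expert FFN width
        const uint64_t n_expert = 64;
        const uint64_t per_proj  = n_embd * n_ff_exp * n_expert;  // gate, up, or down
        const uint64_t per_layer = 3 * per_proj;                  // gate + up + down
        std::printf("expert weights per MoE layer: %llu params\n",
                    (unsigned long long) per_layer);  // 100663296
        return 0;
    }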