Fused rope+rope

Author: Iwan Kawrakow
Date:   2025-11-02 06:57:08 +02:00
Parent: ea97dc3a1c
Commit: f5ac78de5c
4 changed files with 186 additions and 21 deletions


@@ -7779,6 +7779,9 @@ ggml_cgraph * llm_build_context::build_hunyuan_moe() {
 ggml_cgraph * llm_build_context::build_openai_moe() {
     struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
     const int64_t n_embd_head = hparams.n_embd_head_v;
     GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
     ggml_tensor * cur;
     ggml_tensor * inpL;
@@ -7796,6 +7799,9 @@ ggml_cgraph * llm_build_context::build_openai_moe() {
     const int sliding_window_pattern = 2;
+    auto rope_cache = ggml_rope_cache(ctx0, inp_pos, nullptr, n_embd_head, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+            ext_factor, attn_factor, beta_fast, beta_slow);
     for (int il = 0; il < n_layer; ++il) {
         const bool is_sliding = il % sliding_window_pattern < (sliding_window_pattern - 1);
         ggml_tensor * inpSA = inpL;
@@ -7815,14 +7821,17 @@ ggml_cgraph * llm_build_context::build_openai_moe() {
                 model.layers[il].wv, model.layers[il].bv,
                 nullptr, nullptr, 0.0f, il);
-        Qcur = ggml_rope_ext(ctx0, Qcur, inp_pos, nullptr,
-                n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor,
-                beta_fast, beta_slow);
+        Qcur = ggml_rope_fast(ctx0, Qcur, rope_cache);
+        Kcur = ggml_rope_fast(ctx0, Kcur, rope_cache);
+        //Qcur = ggml_rope_ext(ctx0, Qcur, inp_pos, nullptr,
+        //        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor,
+        //        beta_fast, beta_slow);
         cb(Qcur, "Qcur", il);
-        Kcur = ggml_rope_ext(ctx0, Kcur, inp_pos, nullptr,
-                n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor,
-                attn_factor, beta_fast, beta_slow);
+        //Kcur = ggml_rope_ext(ctx0, Kcur, inp_pos, nullptr,
+        //        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor,
+        //        attn_factor, beta_fast, beta_slow);
         cb(Kcur, "Kcur", il);
         //auto [Qcur, Kcur, Vcur] = llm_build_mul_mat_qkv(gf, cur, model.layers[il].wq, model.layers[il].bq,
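
The hunks above replace the two per-layer ggml_rope_ext calls in build_openai_moe with a single rope cache that is built once per graph and then applied to both Q and K. Stripped of the diff context, the pattern is roughly the sketch below: the ggml_rope_cache and ggml_rope_fast calls are copied verbatim from this commit, while the wrapper function, its name, and its parameter list are hypothetical scaffolding added only for illustration.

#include "ggml.h"

// Hypothetical helper, not part of the commit; it only isolates the call pattern.
static void apply_fused_rope(
        struct ggml_context * ctx0,
        struct ggml_tensor  * inp_pos,   // token positions
        struct ggml_tensor ** Qcur,      // one Q tensor per layer
        struct ggml_tensor ** Kcur,      // one K tensor per layer
        int n_layer, int n_embd_head, int n_rot, int rope_type, int n_ctx_orig,
        float freq_base, float freq_scale, float ext_factor, float attn_factor,
        float beta_fast, float beta_slow) {
    // Built once per graph, before the layer loop (previously the equivalent
    // work was redone inside every ggml_rope_ext call, twice per layer).
    struct ggml_tensor * rope_cache = ggml_rope_cache(ctx0, inp_pos, nullptr, n_embd_head,
            n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
            ext_factor, attn_factor, beta_fast, beta_slow);

    for (int il = 0; il < n_layer; ++il) {
        // Each layer now only applies the cached rotation to its Q and K tensors.
        Qcur[il] = ggml_rope_fast(ctx0, Qcur[il], rope_cache);
        Kcur[il] = ggml_rope_fast(ctx0, Kcur[il], rope_cache);
    }
}

Since the cached tensor depends only on inp_pos and the RoPE hyperparameters, hoisting it out of the layer loop presumably trades one extra tensor in the graph for avoiding the repeated per-layer rope setup; the actual op implementations are in the other changed files of this commit, not shown here.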
@@ -7926,6 +7935,9 @@ ggml_cgraph * llm_build_context::build_bailingmoe2() {
     const int n_transformer_layers = n_layer - hparams.nextn_predict_layers;
+    auto rope_cache = ggml_rope_cache(ctx0, inp_pos, nullptr, n_embd_head, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+            ext_factor, attn_factor, beta_fast, beta_slow);
     for (int il = 0; il < n_transformer_layers; ++il) {
         ggml_tensor * inpSA = inpL;
@@ -7950,16 +7962,19 @@ ggml_cgraph * llm_build_context::build_bailingmoe2() {
         //Qcur = llm_build_norm(ctx0, Qcur, hparams, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, cb, il);
         //cb(Qcur, "Qcur_normed", il);
-        Qcur = ggml_rope_ext(ctx0, Qcur, inp_pos, nullptr,
-                n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
-                ext_factor, attn_factor, beta_fast, beta_slow);
-        //Kcur = llm_build_norm(ctx0, Kcur, hparams, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, cb, il);
-        //cb(Kcur, "Kcur_normed", il);
-        Kcur = ggml_rope_ext(ctx0, Kcur, inp_pos, nullptr,
-                n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
-                ext_factor, attn_factor, beta_fast, beta_slow);
+        Qcur = ggml_rope_fast(ctx0, Qcur, rope_cache);
+        Kcur = ggml_rope_fast(ctx0, Kcur, rope_cache);
+        //Qcur = ggml_rope_ext(ctx0, Qcur, inp_pos, nullptr,
+        //        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+        //        ext_factor, attn_factor, beta_fast, beta_slow);
+        ////Kcur = llm_build_norm(ctx0, Kcur, hparams, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, cb, il);
+        ////cb(Kcur, "Kcur_normed", il);
+        //Kcur = ggml_rope_ext(ctx0, Kcur, inp_pos, nullptr,
+        //        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+        //        ext_factor, attn_factor, beta_fast, beta_slow);
         cb(Qcur, "Qcur", il);
         cb(Kcur, "Kcur", il);
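
For intuition about why one cache can serve both projections (purely illustrative, not code from this commit): in standard RoPE the rotation angle depends only on the token position and the dimension index, never on whether the tensor being rotated is Q or K, so the cos/sin table can be computed once and reused for any number of tensors. The reference below sketches that idea for the plain interleaved-pair RoPE variant; it ignores the YaRN-style parameters (ext_factor, attn_factor, beta_fast, beta_slow) that the real ggml ops accept, and all names in it are made up.

#include <cmath>
#include <vector>

// Build a [n_pos x n_rot] table of interleaved cos/sin values, one pair per
// rotated dimension pair, for every token position.
static std::vector<float> make_rope_cache(const std::vector<int> & pos,
                                          int n_rot, float freq_base, float freq_scale) {
    std::vector<float> cache(pos.size() * n_rot);
    for (size_t p = 0; p < pos.size(); ++p) {
        for (int i = 0; i < n_rot; i += 2) {
            // theta_i = scaled_position * freq_base^(-i / n_rot)
            const float theta = freq_scale * pos[p] * std::pow(freq_base, -(float)i / n_rot);
            cache[p*n_rot + i + 0] = std::cos(theta);
            cache[p*n_rot + i + 1] = std::sin(theta);
        }
    }
    return cache;
}

// Rotate one head vector x (length >= n_rot) in place, using the cache entry
// for position index p. The same call works for both Q and K.
static void apply_rope_fast(float * x, const std::vector<float> & cache, size_t p, int n_rot) {
    for (int i = 0; i < n_rot; i += 2) {
        const float c  = cache[p*n_rot + i + 0];
        const float s  = cache[p*n_rot + i + 1];
        const float x0 = x[i + 0];
        const float x1 = x[i + 1];
        x[i + 0] = x0*c - x1*s;
        x[i + 1] = x0*s + x1*c;
    }
}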