Avoid ggml_get_rows for TG

Kawrakow
2026-01-18 11:31:35 +00:00
parent fb5c340e17
commit a26adbcf5d
4 changed files with 105 additions and 86 deletions
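
What changes: build_std_attention() gains an inp_out_ids argument; when it is non-null, the builder applies ggml_get_rows itself -- before the output projection (wo) in the flash-attention path, and also on the residual input when add_input is set -- so several of the per-model "skip computing output for unused tokens" blocks go away. Call sites across the model builders are updated to the new signature (most simply pass nullptr); build_llama (rope path) and build_qwen3moe route their last-layer inp_out_ids through it, and build_qwen3moe now only builds inp_out_ids when n_tokens > 1, so a token-generation (TG) graph with a single token does not get a ggml_get_rows node at all.

For reference, a minimal standalone sketch of the op being avoided (illustration only, not part of this commit; the 4-token/3-float sizes are made up, and depending on the ggml version ggml_graph_compute_with_ctx() is declared in ggml.h or ggml-cpu.h): ggml_get_rows gathers the rows of a tensor selected by an I32 index tensor -- useful when a prompt batch needs outputs for only some positions, but a pointless copy when a TG batch has exactly one row.

#include "ggml.h"
#include <cstdio>

int main() {
    struct ggml_init_params params = {
        /*.mem_size   =*/ 16*1024*1024,
        /*.mem_buffer =*/ nullptr,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(params);

    // "hidden states" for 4 tokens, 3 floats per token (one ggml row per token)
    struct ggml_tensor * cur = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 3, 4);
    for (int t = 0; t < 4; ++t) {
        for (int i = 0; i < 3; ++i) {
            ggml_set_f32_nd(cur, i, t, 0, 0, 10.0f*t + i);
        }
    }

    // inp_out_ids: keep only the last token's row (typical prompt-processing case)
    struct ggml_tensor * ids = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1);
    ggml_set_i32_1d(ids, 0, 3);

    // the gather this commit moves into build_std_attention() and skips for TG
    struct ggml_tensor * out = ggml_get_rows(ctx, cur, ids); // -> shape [3, 1]

    struct ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, out);
    ggml_graph_compute_with_ctx(ctx, gf, 1);

    // prints "30 31 32", the row of token 3; for a TG batch (n_tokens == 1) the
    // gather would only copy the single existing row, which is why it is skipped
    printf("%g %g %g\n",
           ggml_get_f32_nd(out, 0, 0, 0, 0),
           ggml_get_f32_nd(out, 1, 0, 0, 0),
           ggml_get_f32_nd(out, 2, 0, 0, 0));

    ggml_free(ctx);
    return 0;
}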


@@ -1830,6 +1830,8 @@ ggml_cgraph * llm_build_context::build_llama() {
KQ_mask_swa = build_inp_KQ_mask_swa();
}
ggml_tensor * inp_out_ids = build_inp_out_ids();
//const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;
const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : 1.f;
for (int il = 0; il < n_layer; ++il) {
@@ -1845,7 +1847,7 @@ ggml_cgraph * llm_build_context::build_llama() {
// self-attention
if (use_rope) {
cur = build_std_attention(gf, model.layers[il].attn_norm, inpL, inp_pos, nullptr,
cur = build_std_attention(gf, model.layers[il].attn_norm, inpL, inp_pos, il == n_layer - 1 ? inp_out_ids : nullptr, nullptr,
this_KQ_mask, nullptr, nullptr, kq_scale, hparams.f_attention_scale, this_n_swa, il, true, false, true);
}
else {
@@ -1895,9 +1897,8 @@ ggml_cgraph * llm_build_context::build_llama() {
}
//printf("%s: attn result for layer %d is %s, %s\n", __func__, il, cur->name, ggml_op_name(cur->op));
if (il == n_layer - 1) {
if (il == n_layer - 1 && use_rope) {
// skip computing output for unused tokens
struct ggml_tensor * inp_out_ids = build_inp_out_ids();
n_tokens = n_outputs;
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
cb(cur, "last_attn", il);
@@ -2047,7 +2048,7 @@ ggml_cgraph * llm_build_context::build_mistral3() {
auto rope_factors = build_rope_factors(il);
cur = build_std_attention(gf, model.layers[il].attn_norm, inpL, inp_pos, rope_factors, KQ_mask,
cur = build_std_attention(gf, model.layers[il].attn_norm, inpL, inp_pos, nullptr, rope_factors, KQ_mask,
nullptr, inp_attn_scale, kq_scale, hparams.f_attention_scale, 0, il);
if (il == n_layer - 1 && inp_out_ids) {
@@ -3947,7 +3948,7 @@ ggml_cgraph * llm_build_context::build_qwen3() {
struct ggml_tensor * inpSA = inpL;
if (!rope_cache) {
cur = build_std_attention(gf, model.layers[il].attn_norm, inpL, inp_pos, nullptr, KQ_mask, nullptr, nullptr,
cur = build_std_attention(gf, model.layers[il].attn_norm, inpL, inp_pos, nullptr, nullptr, KQ_mask, nullptr, nullptr,
1.0f/sqrtf(float(n_embd_head)), 0.0f, 0, il, true, false, true);
} else {
@@ -4034,27 +4035,25 @@ ggml_cgraph * llm_build_context::build_qwen3moe() {
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
ggml_tensor * inp_out_ids = nullptr; //build_inp_out_ids();
for (int il = 0; il < n_layer; ++il) {
//struct ggml_tensor * inpSA = inpL;
// norm
//cur = llm_build_norm(ctx0, inpL, hparams, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, cb, il);
//cb(cur, "attn_norm", il);
cur = build_std_attention(gf, model.layers[il].attn_norm, inpL, inp_pos, nullptr, KQ_mask, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), 0.0f, 0,
il, true, false, true);
//printf("%s: attn = %s(%s)\n", __func__, cur->name, ggml_op_name(cur->op));
if (il == n_layer - 1) {
// skip computing output for unused tokens
struct ggml_tensor * inp_out_ids = build_inp_out_ids();
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
//inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
if (il == n_layer - 1 && n_tokens > 1) {
inp_out_ids = build_inp_out_ids();
}
cur = build_std_attention(gf, model.layers[il].attn_norm, inpL, inp_pos, inp_out_ids, nullptr,
KQ_mask, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), 0.0f, 0, il, true, false, true);
//if (il == n_layer - 1) {
// // skip computing output for unused tokens
// struct ggml_tensor * inp_out_ids = build_inp_out_ids();
// cur = ggml_get_rows(ctx0, cur, inp_out_ids);
// //inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
//}
auto ffn_inp = cur;
//struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
//cb(ffn_inp, "ffn_inp", il);
cur = llm_build_std_moe_ffn(ctx0, lctx, model.layers[il].ffn_norm, ffn_inp,
model.layers[il].ffn_gate_inp, nullptr,
@@ -4071,9 +4070,6 @@ ggml_cgraph * llm_build_context::build_qwen3moe() {
LLM_FFN_SILU, cb, il, gf, true,
model.layers[il].ffn_up_gate_exps);
//printf("%s: ffn = %s(%s)\n", __func__, cur->name, ggml_op_name(cur->op));
//cur = ggml_add(ctx0, cur, ffn_inp);
cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);
@@ -4130,7 +4126,7 @@ ggml_cgraph * llm_build_context::build_qwen3vl() {
for (int il = 0; il < n_layer; ++il) {
cur = build_std_attention(gf, model.layers[il].attn_norm, inpL, inp_pos, nullptr, KQ_mask,
cur = build_std_attention(gf, model.layers[il].attn_norm, inpL, inp_pos, nullptr, nullptr, KQ_mask,
nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), 0.0f, 0, il, true, false, true, false, true);
if (il == n_layer - 1) {
@@ -6867,7 +6863,7 @@ ggml_cgraph * llm_build_context::build_glm4_moe() {
// self-attention
if (rope_cache == nullptr) {
cur = build_std_attention(gf, model.layers[il].attn_norm, inpL, inp_pos, nullptr, KQ_mask, nullptr, nullptr, kq_scale, 0.0f, 0, il, true, false, true);
cur = build_std_attention(gf, model.layers[il].attn_norm, inpL, inp_pos, nullptr, nullptr, KQ_mask, nullptr, nullptr, kq_scale, 0.0f, 0, il, true, false, true);
} else {
// Pre-attention norm
cur = llm_build_norm(ctx0, inpL, hparams, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, cb, il);
@@ -7256,7 +7252,7 @@ ggml_cgraph * llm_build_context::build_cohere2() {
struct ggml_tensor * KQ_mask_l = is_sliding ? KQ_mask_swa : KQ_mask;
// self-attention
auto attn_out = build_std_attention(gf, model.layers[il].attn_norm, inpL, inp_pos, nullptr, KQ_mask_l, nullptr, nullptr, 1.0f / sqrtf(float(n_embd_head)), 0.f,
auto attn_out = build_std_attention(gf, model.layers[il].attn_norm, inpL, inp_pos, nullptr, nullptr, KQ_mask_l, nullptr, nullptr, 1.0f / sqrtf(float(n_embd_head)), 0.f,
is_sliding ? hparams.n_swa : 0, il, is_sliding, false, true, true);
cb(attn_out, "attn_out", il);
@@ -8201,7 +8197,7 @@ ggml_cgraph * llm_build_context::build_ernie4_5_moe() {
GGML_ASSERT(hparams.n_moe_layer_step > 0 && "Ernie 4.5 MoE requires n_moe_layer_step > 0");
for (int il = 0; il < n_layer; ++il) {
cur = build_std_attention(gf, model.layers[il].attn_norm, inpL, inp_pos, nullptr, KQ_mask, nullptr, nullptr,
cur = build_std_attention(gf, model.layers[il].attn_norm, inpL, inp_pos, nullptr, nullptr, KQ_mask, nullptr, nullptr,
1.0f/sqrtf(float(n_embd_head)), 0.0f, 0, il, true, false, true);
if (il == n_layer - 1 && inp_out_ids) {
@@ -8273,7 +8269,7 @@ ggml_cgraph * llm_build_context::build_hunyuan_moe() {
for (int il = 0; il < n_layer; ++il) {
cur = build_std_attention(gf, model.layers[il].attn_norm, inpL, inp_pos, nullptr, KQ_mask,
cur = build_std_attention(gf, model.layers[il].attn_norm, inpL, inp_pos, nullptr, nullptr, KQ_mask,
nullptr, nullptr, kq_scale, 0.0f, 0, il, true, false, true);
if (il == n_layer - 1 && inp_out_ids) {
@@ -8334,7 +8330,7 @@ ggml_cgraph * llm_build_context::build_mimo2() {
const bool is_sliding = model.hparams.swa_layers[il];
auto KQ_mask_l = is_sliding ? KQ_mask_swa : KQ_mask;
cur = build_std_attention(gf, model.layers[il].attn_norm, inpL, inp_pos, nullptr, KQ_mask_l, model.layers[il].attn_sinks,
cur = build_std_attention(gf, model.layers[il].attn_norm, inpL, inp_pos, nullptr, nullptr, KQ_mask_l, model.layers[il].attn_sinks,
nullptr, 1.0f/sqrtf(float(n_embd_head_k)), 0.0f, is_sliding ? hparams.n_swa : 0, il, true, false, true);
if (il == n_layer - 1) {
@@ -8409,7 +8405,7 @@ ggml_cgraph * llm_build_context::build_openai_moe() {
struct ggml_tensor * KQ_mask_l = is_sliding ? KQ_mask_swa : KQ_mask;
cur = build_std_attention(gf, model.layers[il].attn_norm, inpL, inp_pos, nullptr, KQ_mask_l,
cur = build_std_attention(gf, model.layers[il].attn_norm, inpL, inp_pos, nullptr, nullptr, KQ_mask_l,
model.layers[il].attn_sinks, nullptr, kq_scale, 0.0f, is_sliding ? hparams.n_swa : 0, il, true, false, true);
if (il == n_layer - 1) {
@@ -9176,7 +9172,7 @@ ggml_cgraph * llm_build_context::llama_build_graph(
}
ggml_tensor * llm_build_context::build_std_attention(ggml_cgraph * gf, ggml_tensor * the_attn_norm,
ggml_tensor * input, ggml_tensor * inp_pos, ggml_tensor * rope_factors_in,
ggml_tensor * input, ggml_tensor * inp_pos, ggml_tensor * inp_out_ids, ggml_tensor * rope_factors_in,
ggml_tensor * KQ_mask, ggml_tensor * sinks, ggml_tensor * inp_attn_scale, float KQ_scale, float f_attn_scale,
int n_swa, int il, bool do_rope, bool add_graph_split, bool add_input, bool is_norm, bool is_multi) {
@@ -9353,6 +9349,11 @@ ggml_tensor * llm_build_context::build_std_attention(ggml_cgraph * gf, ggml_tens
cur = ggml_reshape_2d(ctx0, cur, split_wo->ne[0], n_tokens);
cb(cur, "flash_attn_reshaped", il_cb);
if (inp_out_ids) { // && ggml_nrows(inp_out_ids) > 1) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
cb(cur, "fa_get_rows", il_cb);
}
cur = llm_build_lora_mm(lctx, ctx0, split_wo, cur);
if (lctx.model.arch == LLM_ARCH_GLM4 || lctx.model.arch == LLM_ARCH_GLM4_MOE) {
// GLM4 and GLM4_MOE seem to have numerical issues with half-precision accumulators
@@ -9373,6 +9374,10 @@ ggml_tensor * llm_build_context::build_std_attention(ggml_cgraph * gf, ggml_tens
}
GGML_ASSERT(id_last >= 0);
if (add_input) {
if (inp_out_ids) { // && ggml_nrows(inp_out_ids) > 1) {
input = ggml_get_rows(ctx0, input, inp_out_ids);
cb(input, "sainp_get_rows", il);
}
attn[id_last] = ggml_add(ctx0, attn[id_last], input);
cb(attn[id_last], "attn_out_with_input", il);
}
@@ -9424,6 +9429,15 @@ ggml_tensor * llm_build_context::build_std_attention(ggml_cgraph * gf, ggml_tens
model.layers[il].wo, model.layers[il].bo,
Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, KQ_scale, cb, il, sinks, n_swa);
if (inp_out_ids) { // && ggml_nrows(inp_out_ids) > 1) {
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
cb(cur, "sa_get_rows", il);
if (add_input) {
input = ggml_get_rows(ctx0, input, inp_out_ids);
cb(input, "sainp_get_rows", il);
}
}
if (add_input) {
cb(cur, "attn_out", il);
cur = ggml_add(ctx0, cur, input);