Merge Q and K into a single tensor (#892)

* Merge Q and K into a single tensor

* Make V mul mat follow QK mul mat

so they can be fused, which gives slightly better TG performance (see the sketch below).

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Kawrakow authored on 2025-11-05 10:54:36 +02:00, committed by GitHub
commit cb30f8e057 (parent e68f50be9a)
4 changed files with 81 additions and 1 deletion
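For orientation, here is a minimal sketch of the idea (hypothetical shapes and names, not the exact code from this commit): one mul mat over a row-wise stacked W_qk produces Q and K together, the V mul mat is built immediately after it so the two projections of `cur` sit adjacent in the graph for a backend that can fuse them, and Q and K are then recovered as zero-copy views into the fused output.

// Hedged sketch only: assumes f32 activations and ggml's layout, where
// ne[0] of the mul mat result is the output dimension and nb[1] is the
// per-token stride; wqk stacks the rows of W_q first, then those of W_k.
#include "ggml.h"

static void build_fused_qk_v(ggml_context * ctx, ggml_cgraph * gf,
                             ggml_tensor * wqk,  // [n_embd, (n_head + n_head_kv)*n_embd_head]
                             ggml_tensor * wv,   // [n_embd, n_head_kv*n_embd_head]
                             ggml_tensor * cur,  // [n_embd, n_tokens]
                             int64_t n_embd_head, int64_t n_head, int64_t n_head_kv, int64_t n_tokens,
                             ggml_tensor ** q, ggml_tensor ** k, ggml_tensor ** v) {
    // one projection for Q and K instead of two
    ggml_tensor * qk = ggml_mul_mat(ctx, wqk, cur);
    // build V right after, so the two mul mats over cur are adjacent in the graph
    *v = ggml_mul_mat(ctx, wv, cur);
    ggml_build_forward_expand(gf, qk);
    ggml_build_forward_expand(gf, *v);
    // Q is the first n_head*n_embd_head floats of each token's fused output,
    // K the remaining n_head_kv*n_embd_head floats; both are zero-copy views
    *q = ggml_view_3d(ctx, qk, n_embd_head, n_head, n_tokens,
                      n_embd_head*sizeof(float), qk->nb[1], 0);
    *k = ggml_view_3d(ctx, qk, n_embd_head, n_head_kv, n_tokens,
                      n_embd_head*sizeof(float), qk->nb[1],
                      (size_t)(n_embd_head*n_head)*sizeof(float));
}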


@@ -1270,6 +1270,7 @@ std::tuple<ggml_tensor*, ggml_tensor*, ggml_tensor*> llm_build_context::llm_build_mul_mat_qkv
    std::tuple<ggml_tensor*, ggml_tensor*, ggml_tensor*> llm_build_context::llm_build_mul_mat_qkv(ggml_cgraph * gf, ggml_tensor * cur,
            ggml_tensor * wqkv, ggml_tensor * bqkv,
            ggml_tensor * wqk, ggml_tensor * bqk,
            ggml_tensor * wq, ggml_tensor * bq,
            ggml_tensor * wk, ggml_tensor * bk,
            ggml_tensor * wv, ggml_tensor * bv,
@@ -1307,6 +1308,40 @@ std::tuple<ggml_tensor*, ggml_tensor*, ggml_tensor*> llm_build_context::llm_build_mul_mat_qkv
        //ggml_build_forward_expand(gf, Vcur);
    }
    if (wqk) {
        auto qk = llm_build_lora_mm(lctx, ctx0, wqk, cur);
        cb(qk, "qkv", il);
        if (bqk) {
            qk = ggml_add(ctx0, qk, bqk);
            cb(qk, "qkv_b", il);
        }
        auto Vcur = llm_build_lora_mm(lctx, ctx0, wv, cur);
        cb(Vcur, "Vcur", il);
        if (bv) {
            Vcur = ggml_add(ctx0, Vcur, bv);
            cb(Vcur, "Vcur", il);
        }
        ggml_build_forward_expand(gf, qk);
        ggml_build_forward_expand(gf, Vcur);
        auto Qcur = ggml_view_3d(ctx0, qk, n_embd_head, n_head, n_tokens, n_embd_head*sizeof(float), qk->nb[1], 0*sizeof(float)*(n_embd));
        auto Kcur = ggml_view_3d(ctx0, qk, n_embd_head, n_head_kv, n_tokens, n_embd_head*sizeof(float), qk->nb[1], 1*sizeof(float)*Qcur->ne[0]*Qcur->ne[1]);
        cb(Qcur, "Qcur", il);
        cb(Kcur, "Kcur", il);
        if (q_norm) {
            Qcur = llm_build_norm(ctx0, Qcur, hparams, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, cb, il);
            cb(Qcur, "Qcur_normed", il);
            ggml_build_forward_expand(gf, Qcur);
        }
        if (k_norm) {
            Kcur = llm_build_norm(ctx0, Kcur, hparams, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, cb, il);
            cb(Kcur, "Kcur_normed", il);
            ggml_build_forward_expand(gf, Kcur);
        }
        return {Qcur, Kcur, Vcur};
    }
    auto [Q, K, V] = llm_build_mul_mat_qkv(gf, cur, wq, bq, wk, bk, wv, bv, attention_scale, il);
    auto Qcur = ggml_reshape_3d(ctx0, Q, n_embd_head, n_head, n_tokens);
    if (q_norm) {
@@ -1374,6 +1409,7 @@ ggml_cgraph * llm_build_context::build_llama() {
        auto [Qcur, Kcur, Vcur] = llm_build_mul_mat_qkv(gf, cur,
                model.layers[il].wqkv, model.layers[il].bqkv,
                model.layers[il].wqk, model.layers[il].bqk,
                model.layers[il].wq, model.layers[il].bq,
                model.layers[il].wk, model.layers[il].bk,
                model.layers[il].wv, model.layers[il].bv,
@@ -3400,6 +3436,7 @@ ggml_cgraph * llm_build_context::build_qwen3() {
        {
            auto [Qcur, Kcur, Vcur] = llm_build_mul_mat_qkv(gf, cur,
                    model.layers[il].wqkv, nullptr,
                    model.layers[il].wqk, nullptr,
                    model.layers[il].wq, nullptr,
                    model.layers[il].wk, nullptr,
                    model.layers[il].wv, nullptr,
@@ -3502,6 +3539,7 @@ ggml_cgraph * llm_build_context::build_qwen3moe() {
        {
            auto [Qcur, Kcur, Vcur] = llm_build_mul_mat_qkv(gf, cur,
                    model.layers[il].wqkv, nullptr,
                    model.layers[il].wqk, nullptr,
                    model.layers[il].wq, nullptr, model.layers[il].wk, nullptr, model.layers[il].wv, nullptr,
                    model.layers[il].attn_q_norm, model.layers[il].attn_k_norm, 0, il);
@@ -6403,6 +6441,7 @@ ggml_cgraph * llm_build_context::build_glm4_moe() {
        {
            auto [Qcur, Kcur, Vcur] = llm_build_mul_mat_qkv(gf, cur,
                    model.layers[il].wqkv, model.layers[il].bqkv,
                    model.layers[il].wqk, model.layers[il].bqk,
                    model.layers[il].wq, model.layers[il].bq,
                    model.layers[il].wk, model.layers[il].bk,
                    model.layers[il].wv, model.layers[il].bv,
@@ -6814,6 +6853,7 @@ ggml_cgraph * llm_build_context::build_cohere2() {
        auto [Qcur, Kcur, Vcur] = llm_build_mul_mat_qkv(gf, cur,
                model.layers[il].wqkv, model.layers[il].bqkv,
                model.layers[il].wqk, model.layers[il].bqk,
                model.layers[il].wq, model.layers[il].bq,
                model.layers[il].wk, model.layers[il].bk,
                model.layers[il].wv, model.layers[il].bv, nullptr, nullptr, 0.f, il);
@@ -8116,6 +8156,7 @@ ggml_cgraph * llm_build_context::build_openai_moe() {
        {
            auto [Qcur, Kcur, Vcur] = llm_build_mul_mat_qkv(gf, cur,
                    model.layers[il].wqkv, model.layers[il].bqkv,
                    model.layers[il].wqk, model.layers[il].bqk,
                    model.layers[il].wq, model.layers[il].bq,
                    model.layers[il].wk, model.layers[il].bk,
                    model.layers[il].wv, model.layers[il].bv,
@@ -8234,7 +8275,7 @@ ggml_cgraph * llm_build_context::build_bailingmoe2() {
        // self_attention
        {
            auto [Qcur, Kcur, Vcur] = llm_build_mul_mat_qkv(gf, cur, model.layers[il].wqkv, model.layers[il].bqkv,
-                   nullptr, nullptr, nullptr, nullptr, nullptr, nullptr,
+                   nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr,
                    model.layers[il].attn_q_norm, model.layers[il].attn_k_norm, 0.0f, il);
            if (rope_cache) {


@@ -152,6 +152,7 @@ struct llm_build_context {
    std::tuple<ggml_tensor*, ggml_tensor*, ggml_tensor*> llm_build_mul_mat_qkv(ggml_cgraph * gf, ggml_tensor * cur,
            ggml_tensor * wqkv, ggml_tensor * bqkv,
            ggml_tensor * wqk, ggml_tensor * bqk,
            ggml_tensor * wq, ggml_tensor * bq,
            ggml_tensor * wk, ggml_tensor * bk,
            ggml_tensor * wv, ggml_tensor * bv,


@@ -2495,6 +2495,40 @@ bool create_tensors_helper::merge_qkv(const LLM_TN & tn, int i, int bias) {
            }
        }
    }
    if (!fused_qkv && ml.merge_qkv && wq->type == wk->type && hparams.f_attention_scale == 0.0f) {
        GGML_ASSERT(wq->ne[0] == n_embd && wq->ne[1] == n_head * n_embd_head_k);
        GGML_ASSERT(wk->ne[0] == n_embd && wk->ne[1] == n_embd_gqa);
        layer.wqk = ggml_new_tensor_2d(ctx_split, wq->type, n_embd, n_embd_head_k * (n_head + n_head_kv));
        snprintf(layer.wqk->name, GGML_MAX_NAME, "blk.%d.attn_qk.weight", i);
        layer.wq = ml.create_tensor_as_view(ctx_split, layer.wqk, wq_name.c_str(), { wq->ne[0], wq->ne[1] }, 0);
        layer.wk = ml.create_tensor_as_view(ctx_split, layer.wqk, wk_name.c_str(), { wk->ne[0], wk->ne[1] }, wq->ne[1]*wq->nb[1]);
        layer.wv = create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
        printf("====================== Merged only Q and K in layer %d because V is of different type\n", i);
        fused_qkv = true;
        if (bias) {
            auto bq_name = tn(LLM_TENSOR_ATTN_Q, "bias", i);
            auto bk_name = tn(LLM_TENSOR_ATTN_K, "bias", i);
            auto bv_name = tn(LLM_TENSOR_ATTN_V, "bias", i);
            auto bq = ml.get_tensor_meta(bq_name.c_str());
            auto bk = ml.get_tensor_meta(bk_name.c_str());
            auto bv = ml.get_tensor_meta(bv_name.c_str());
            if (bias == 2) {
                GGML_ASSERT(bq && bk && bv);
            } else {
                GGML_ASSERT(!bq && !bk && !bv);
            }
            if (bq && bk && bv) {
                GGML_ASSERT(bq->type == GGML_TYPE_F32 && bk->type == GGML_TYPE_F32);
                GGML_ASSERT(ggml_nrows(bq) == 1 && bq->ne[0] == wq->ne[1]);
                GGML_ASSERT(ggml_nrows(bk) == 1 && bk->ne[0] == wk->ne[1]);
                layer.bqk = ggml_new_tensor_1d(ctx_layer, bq->type, n_embd_head_k * (n_head + n_head_kv));
                snprintf(layer.bqk->name, GGML_MAX_NAME, "blk.%d.attn_qk.bias", i);
                layer.bq = ml.create_tensor_as_view(ctx_layer, layer.bqk, bq_name.c_str(), { bq->ne[0] }, 0);
                layer.bk = ml.create_tensor_as_view(ctx_layer, layer.bqk, bk_name.c_str(), { bk->ne[0] }, bq->ne[0]*bq->nb[0]);
                layer.bv = create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {layer.wv->ne[1]});
            }
        }
    }
    if (!fused_qkv) {
        if (ml.merge_qkv) {
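The loader-side counterpart, sketched below under assumed shapes: the merged tensor owns the storage and the per-projection tensors become zero-copy views into it, so the regular per-tensor load paths can still fill the Q and K rows independently. Plain ggml_view_2d stands in for the loader-internal ml.create_tensor_as_view; names and the f32 type are illustrative.

// Hedged sketch only: n_q_rows = n_head*n_embd_head, n_k_rows = n_head_kv*n_embd_head.
#include "ggml.h"

static void alias_merged_qk(ggml_context * ctx,
                            int64_t n_embd, int64_t n_q_rows, int64_t n_k_rows,
                            ggml_tensor ** wqk, ggml_tensor ** wq, ggml_tensor ** wk) {
    // the merged tensor owns the weights of both projections
    *wqk = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_q_rows + n_k_rows);
    // Q aliases the first n_q_rows rows ...
    *wq = ggml_view_2d(ctx, *wqk, n_embd, n_q_rows, (*wqk)->nb[1], 0);
    // ... and K the n_k_rows rows that follow, at a byte offset of n_q_rows full rows
    *wk = ggml_view_2d(ctx, *wqk, n_embd, n_k_rows, (*wqk)->nb[1], (size_t)n_q_rows*(*wqk)->nb[1]);
}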


@@ -154,6 +154,8 @@ struct llama_layer {
    struct ggml_tensor * wv = nullptr;
    struct ggml_tensor * wo = nullptr;
    struct ggml_tensor * wqkv = nullptr;
    struct ggml_tensor * wqk = nullptr;
    struct ggml_tensor * wkv = nullptr;
    struct ggml_tensor * wq_a = nullptr;
    struct ggml_tensor * wq_b = nullptr;
    struct ggml_tensor * wkv_a_mqa = nullptr;
@@ -176,6 +178,8 @@ struct llama_layer {
    struct ggml_tensor * bv = nullptr;
    struct ggml_tensor * bo = nullptr;
    struct ggml_tensor * bqkv = nullptr;
    struct ggml_tensor * bqk = nullptr;
    struct ggml_tensor * bkv = nullptr;
    // relative position bias
    struct ggml_tensor * attn_rel_b = nullptr;