Fused delta-net (#1315)

* Revive fused delta-net

* Add command line argument for fused delta-net

* Simplify/improve CUDA delta-net

* Add -fdn to llama-bench

* More CUDA fused delta-net optimizations

* CPU optimizations

* Much faster fused delta-net on the CPU

It seems it is faster than the chunked implementation!

* Change meaning of fdn from bool flag to threshold value

* Use eps = 1e-6

* Give some nodes a name
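
A note on the threshold change above: after this commit, `fused_delta_net` is no longer an on/off switch but a batch-size threshold. A minimal sketch of the resulting semantics (names taken from the diff below; treating 8 as a sensible value is an assumption based on the in-code comment in the last hunk):

    #include <cstdint>
    // Sketch only: interpretation of the -fdn value after this change. 0 disables
    // the fused path entirely; a positive value N routes batches of up to N tokens
    // through build_fused_delta_net (see the dispatch hunk at the bottom of this diff).
    static bool use_fused_delta_net(int64_t n_tok, int64_t fused_delta_net /* e.g. 8 via `-fdn 8` */) {
        return n_tok <= fused_delta_net;
    }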
Author: Kawrakow
Date:   2026-02-25 14:12:48 +01:00 (committed by GitHub)
Parent: 0bf7043a7b
Commit: c77ec4b8b8
15 changed files with 1002 additions and 13 deletions

@@ -372,6 +372,83 @@ std::pair<ggml_tensor *, ggml_tensor *> delta_net::build_delta_net_autoregressiv
     return {core_attn_out, state};
 }
+
+std::pair<ggml_tensor *, ggml_tensor *> delta_net::build_fused_delta_net(ggml_context * ctx0,
+        ggml_tensor * q, ggml_tensor * k, ggml_tensor * v,
+        ggml_tensor * g, ggml_tensor * beta, ggml_tensor * state,
+        int il, const llm_build_cb & cb) {
+    const int64_t S_k      = q->ne[0];
+    const int64_t H_k      = q->ne[1];
+    const int64_t n_tokens = q->ne[2];
+    const int64_t n_seqs   = q->ne[3];
+    const int64_t S_v      = v->ne[0];
+    const int64_t H_v      = v->ne[1];
+
+    GGML_ASSERT(q->ne[0] == S_k && q->ne[1] == H_k && q->ne[2] == n_tokens && q->ne[3] == n_seqs);
+    GGML_ASSERT(k->ne[0] == S_k && k->ne[1] == H_k && k->ne[2] == n_tokens && k->ne[3] == n_seqs);
+    GGML_ASSERT(v->ne[2] == n_tokens);
+    GGML_ASSERT(k->ne[2] == n_tokens);
+    GGML_ASSERT(g->ne[0] == H_v && g->ne[1] == n_tokens && g->ne[2] == n_seqs);
+    GGML_ASSERT(beta->ne[0] == H_v && beta->ne[2] == n_tokens && beta->ne[3] == n_seqs);
+    GGML_ASSERT(state->ne[0] == S_v && state->ne[1] == S_v && state->ne[2] == H_v && state->ne[3] == n_seqs);
+    GGML_ASSERT(H_k == H_v);
+
+    cb(q, "q_in", il);
+    cb(k, "k_in", il);
+    cb(v, "v_in", il);
+    cb(beta, "beta_in", il);
+    cb(g, "g_in", il);
+    cb(state, "state_in", il);
+
+    q    = ggml_permute(ctx0, q, 0, 2, 1, 3);
+    k    = ggml_permute(ctx0, k, 0, 2, 1, 3);
+    v    = ggml_permute(ctx0, v, 0, 2, 1, 3);
+    g    = ggml_permute(ctx0, g, 2, 0, 3, 1);
+    beta = ggml_permute(ctx0, beta, 2, 0, 1, 3);
+    if (n_seqs > 1 || n_tokens > 1) {
+        q    = ggml_cont_4d(ctx0, q, S_k, n_tokens, H_k, n_seqs);
+        k    = ggml_cont_4d(ctx0, k, S_k, n_tokens, H_k, n_seqs);
+        v    = ggml_cont_4d(ctx0, v, S_v, n_tokens, H_v, n_seqs);
+        g    = ggml_cont_4d(ctx0, g, n_tokens, 1, H_k, n_seqs);
+        beta = ggml_cont_4d(ctx0, beta, 1, n_tokens, H_k, n_seqs);
+    }
+
+    ggml_tensor * state_flat = ggml_reshape_4d(ctx0, state, S_v, S_v * H_v, 1, n_seqs);
+    if (!ggml_is_contiguous(state_flat)) {
+        state_flat = ggml_cont_4d(ctx0, state_flat, S_v, S_v * H_v, 1, n_seqs);
+    }
+
+    cb(q, "q_fused", il);
+    cb(k, "k_fused", il);
+    cb(v, "v_fused", il);
+    cb(g, "g_fused", il);
+    cb(beta, "beta_fused", il);
+    cb(state_flat, "state_fused", il);
+
+    ggml_tensor * fused_result = ggml_delta_net(ctx0, q, k, v, g, beta, state_flat);
+    cb(fused_result, "delta_net_fused_raw", il);
+
+    const int64_t output_size = S_v * H_v * n_tokens * n_seqs;
+    const int64_t state_size  = S_v * S_v * H_v * n_seqs;
+
+    ggml_tensor * output_tokens = ggml_view_4d(ctx0, fused_result,
+            S_v, H_v, n_tokens, n_seqs,
+            ggml_row_size(fused_result->type, S_v),
+            ggml_row_size(fused_result->type, S_v * H_v),
+            ggml_row_size(fused_result->type, S_v * H_v * n_tokens), 0);
+    output_tokens = ggml_cont_4d(ctx0, output_tokens, S_v, H_v, n_tokens, n_seqs);
+
+    ggml_tensor * new_state_flat = ggml_view_1d(ctx0, fused_result, state_size,
+            output_size * ggml_element_size(fused_result));
+    ggml_tensor * new_state = ggml_reshape_4d(ctx0, new_state_flat, S_v, S_v, H_v, n_seqs);
+
+    cb(output_tokens, "output_tokens", il);
+    cb(new_state, "new_state", il);
+
+    return {output_tokens, new_state};
+}
+
 std::pair<ggml_tensor *, ggml_tensor *> delta_net::build_qkvz(ggml_context * ctx0, ggml_tensor * input, int il, const llm_build_cb & cb) const {
     auto & model = lctx.model;
     const int64_t n_tok = input->ne[1];
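
Before the next hunk, a quick reference point for what the new fused op computes. The snippet below is a standalone scalar sketch of one common formulation of the gated delta rule, written here purely for illustration: decay the state by exp(g_t), apply a rank-1 delta-rule correction scaled by beta_t, then read out with q_t. The names S_k/S_v mirror the diff above; the exact gating and memory-layout conventions of ggml_delta_net itself may differ.

    // Scalar reference (a sketch under assumptions, not ik_llama.cpp code):
    //   S <- exp(g_t) * S                        (gated decay)
    //   S <- S + beta_t * k_t (v_t - S^T k_t)^T  (rank-1 delta-rule correction)
    //   o_t = S^T q_t                            (readout)
    #include <cmath>
    #include <cstdio>
    #include <vector>

    int main() {
        const int S_k = 4, S_v = 4, T = 3;             // tiny head dims / token count
        std::vector<float> S(S_k * S_v, 0.0f);         // state: S_k x S_v, row-major
        std::vector<float> q(T * S_k, 0.1f), k(T * S_k, 0.2f), v(T * S_v, 0.3f);
        std::vector<float> g(T, -0.1f), beta(T, 0.5f); // log-decay and update strength
        for (int t = 0; t < T; ++t) {
            const float decay = std::exp(g[t]);
            for (float & s : S) s *= decay;            // gated decay of the whole state
            std::vector<float> pred(S_v, 0.0f);        // pred = S^T k_t, the current prediction for v_t
            for (int i = 0; i < S_k; ++i)
                for (int j = 0; j < S_v; ++j)
                    pred[j] += S[i * S_v + j] * k[t * S_k + i];
            for (int i = 0; i < S_k; ++i)              // S += beta_t * k_t (v_t - pred)^T
                for (int j = 0; j < S_v; ++j)
                    S[i * S_v + j] += beta[t] * k[t * S_k + i] * (v[t * S_v + j] - pred[j]);
            std::vector<float> o(S_v, 0.0f);           // o_t = S^T q_t
            for (int i = 0; i < S_k; ++i)
                for (int j = 0; j < S_v; ++j)
                    o[j] += S[i * S_v + j] * q[t * S_k + i];
            std::printf("token %d: o[0] = %g\n", t, o[0]);
        }
        return 0;
    }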
@@ -497,10 +574,14 @@ ggml_tensor * delta_net::build_layer_attn_linear_core(ggml_context * ctx0, ggml_
         alpha = ggml_cont_3d(ctx0, a, num_v_heads, n_tok, 1);
     } else {
         beta = llm_build_context::llm_build_lora_mm(lctx, ctx0, model.layers[il].ssm_beta, cur);
+        cb(beta, "beta", il);
         beta = ggml_reshape_4d(ctx0, beta, num_v_heads, 1, n_tok, 1);
+        cb(beta, "beta_reshaped", il);
         alpha = llm_build_context::llm_build_lora_mm(lctx, ctx0, model.layers[il].ssm_alpha, cur);
-        // Why???
+        cb(alpha, "alpha", il);
+        // Why? Don't think this ggml_cont_3d is needed, but let's leave it in for now just in case.
         alpha = ggml_cont_3d(ctx0, alpha, num_v_heads, n_seq_tokens, n_seqs);
+        cb(alpha, "alpha_cont", il);
     }
     cb(beta, "beta", il);
     cb(alpha, "alpha", il);
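
The additions in this hunk are pure instrumentation ("Give some nodes a name"): each new cb(...) call tags an intermediate tensor so it can be located in graph dumps. In the llama.cpp family of codebases the callback is typically a thin wrapper over ggml's naming API; a hypothetical sketch of its shape, not the exact code here:

    // Hypothetical llm_build_cb: attach a layer-qualified, human-readable name
    // to a graph node, e.g. "alpha_cont-12" for layer 12.
    const llm_build_cb cb = [&](ggml_tensor * cur, const char * name, int il) {
        ggml_format_name(cur, "%s-%d", name, il);
    };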
@@ -603,15 +684,16 @@ ggml_tensor * delta_net::build_layer_attn_linear_core(ggml_context * ctx0, ggml_
     cb(k_conv, "k_conv_predelta", il);
     cb(v_conv, "v_conv_predelta", il);
-    std::pair<ggml_tensor *, ggml_tensor *> attn_out;
     GGML_ASSERT(causal_mask != nullptr);
     GGML_ASSERT(identity != nullptr);
     GGML_ASSERT(diag_mask != nullptr);
-    attn_out = n_tok == 1
-        ? build_delta_net_autoregressive(ctx0, q_conv, k_conv, v_conv, gate, beta, state, il, cb)
-        : build_delta_net_chunking(ctx0, q_conv, k_conv, v_conv, gate, beta, state, causal_mask, identity, diag_mask, il, cb);
+    std::pair<ggml_tensor *, ggml_tensor *> attn_out;
+    // The fused delta-net implementation is only faster than chunked for n_tok <= 8, so use it only in that case
+    attn_out = n_tok <= lctx.cparams.fused_delta_net ? build_fused_delta_net(ctx0, q_conv, k_conv, v_conv, gate, beta, state, il, cb) :
+               n_tok == 1                            ? build_delta_net_autoregressive(ctx0, q_conv, k_conv, v_conv, gate, beta, state, il, cb)
+                                                     : build_delta_net_chunking(ctx0, q_conv, k_conv, v_conv, gate, beta, state, causal_mask, identity, diag_mask, il, cb);
     ggml_tensor * output = attn_out.first;
     ggml_tensor * new_state = attn_out.second;
     cb(output, "attn_output", il);
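
With the threshold semantics in place, the crossover point mentioned in the comment (n_tok <= 8) can be checked empirically rather than hard-coded: for example, running ./llama-bench -m model.gguf -p 1,2,4,8,16,32 -n 0 once with -fdn 0 (fused path disabled) and once with -fdn 8, and comparing the prompt-processing speeds. The exact invocation is illustrative; only the -fdn flag itself is introduced by this commit.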