This is better than the chunked implementation

Kawrakow
2026-02-27 09:01:08 +00:00
parent 1e6d36b1b4
commit 06727c50be
2 changed files with 329 additions and 85 deletions

View File

@@ -27,6 +27,125 @@ __device__ __forceinline__ float reduce_sum(float x, float * s) {
return x;
}
template <int HEAD_DIM, int block_size>
__global__ void delta_net_recurrent_f32_a(
const float * __restrict__ q, // [HEAD_DIM, n_tokens, n_heads, n_seqs]
const float * __restrict__ k, // [HEAD_DIM, n_tokens, n_heads, n_seqs]
const float * __restrict__ v, // [HEAD_DIM, n_tokens, n_heads, n_seqs]
const float * __restrict__ g, // [n_tokens, 1, n_heads, n_seqs]
const float * __restrict__ beta_in, // [1, n_tokens, n_heads, n_seqs]
const float * __restrict__ state_in, // [HEAD_DIM, HEAD_DIM*n_heads, 1, n_seqs]
float * __restrict__ dst, // output + new_state concatenated
const int64_t n_heads,
const int64_t n_tokens,
const int64_t n_seqs,
const int64_t output_offset, // offset where state starts in output
const float eps) {
constexpr int warps_per_head = HEAD_DIM/WARP_SIZE;
const int batch_idx = blockIdx.x / (warps_per_head*n_heads);
const int sub_head_idx = blockIdx.x % (warps_per_head*n_heads);
const int head_idx = sub_head_idx / warps_per_head;
const int sub_idx = sub_head_idx % warps_per_head;
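// Illustrative decode of the grid mapping: with HEAD_DIM = 128 (warps_per_head = 4) and n_heads = 16,
// blockIdx.x = 70 gives batch_idx = 1, head_idx = 1, sub_idx = 2, i.e. each block owns one
// WARP_SIZE-row slice of one head of one sequence.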
const int tid = threadIdx.x;
// Strides for input tensors (column-major)
// Q/K/V: [HEAD_DIM, n_tokens, n_heads, n_seqs]
const int64_t qkv_stride_token = HEAD_DIM;
const int64_t qkv_stride_head = HEAD_DIM * n_tokens;
const int64_t qkv_stride_batch = HEAD_DIM * n_tokens * n_heads;
// G/Beta: [n_tokens, 1, n_heads, n_seqs] / [1, n_tokens, n_heads, n_seqs]
const int64_t g_stride_head = n_tokens;
const int64_t g_stride_batch = n_tokens * n_heads;
// State: [HEAD_DIM, HEAD_DIM*n_heads, 1, n_seqs]
// For head h: columns h*HEAD_DIM to (h+1)*HEAD_DIM
// state[row, col] for head h = state[row, h*HEAD_DIM + col]
// Linear index: row + (h*HEAD_DIM + col) * HEAD_DIM = row + h*HEAD_DIM^2 + col*HEAD_DIM
const int64_t state_head_offset = head_idx * HEAD_DIM * HEAD_DIM;
const int64_t state_batch_stride = HEAD_DIM * HEAD_DIM * n_heads;
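// Worked example (illustrative): with HEAD_DIM = 64, element (row = 3, col = 5) of head 1 sits at
// 3 + 1*64*64 + 5*64 = 4419 floats past this sequence's state base.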
// Pointers for this batch/head
const float * q_ptr = q + batch_idx * qkv_stride_batch + head_idx * qkv_stride_head;
const float * k_ptr = k + batch_idx * qkv_stride_batch + head_idx * qkv_stride_head;
const float * v_ptr = v + batch_idx * qkv_stride_batch + head_idx * qkv_stride_head;
const float * g_ptr = g + batch_idx * g_stride_batch + head_idx * g_stride_head;
const float * beta_ptr = beta_in + batch_idx * g_stride_batch + head_idx * g_stride_head;
const float * state_src = state_in + batch_idx * state_batch_stride + state_head_offset;
// Output layout: [head_v_dim, num_v_heads, n_seq_tokens, n_seqs]
// For [dim, head, token, batch]: index = dim + head*S_v + token*S_v*H_v + batch*S_v*H_v*n_tokens
float * out_base = dst + batch_idx * (HEAD_DIM * n_heads * n_tokens) + head_idx * HEAD_DIM;
const int64_t out_token_stride = HEAD_DIM * n_heads; // stride between tokens
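// Illustrative: with HEAD_DIM = 128 and n_heads = 16, out_token_stride = 2048, so element
// [dim, head, token, batch] lands at batch*2048*n_tokens + token*2048 + head*128 + dim floats into dst.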
float * state_dst = dst + output_offset + batch_idx * state_batch_stride + state_head_offset;
// Shared memory for current token's Q, K, V (normalized), and intermediate results
extern __shared__ float smem[];
float * sQ = smem; // HEAD_DIM
float * sK = sQ + HEAD_DIM; // HEAD_DIM
const float scale = rsqrtf((float)HEAD_DIM);
__shared__ float sum_helper[block_size/WARP_SIZE];
constexpr int num_warps = block_size/WARP_SIZE;
const int row = tid % WARP_SIZE;
const int col_idx_0 = tid / WARP_SIZE;
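// Each block owns rows [sub_idx*WARP_SIZE, (sub_idx+1)*WARP_SIZE) of this head's state across all
// HEAD_DIM columns; copy that slice from the incoming state into the output state buffer, which is
// then updated in place token by token below.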
for (int col = col_idx_0; col < HEAD_DIM; col += num_warps) {
state_dst[col*HEAD_DIM + row + sub_idx * WARP_SIZE] = state_src[col*HEAD_DIM + row + sub_idx * WARP_SIZE];
}
constexpr int WARP_SIZE_S = WARP_SIZE + 1;
constexpr int num_stored_rows = block_size/WARP_SIZE;
__shared__ float all_sum[2*WARP_SIZE_S*num_stored_rows];
auto all_sum1 = all_sum;
auto all_sum2 = all_sum1 + WARP_SIZE_S*num_stored_rows;
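// all_sum1/all_sum2 collect the per-warp partial dot products of the state rows with k and with
// q*scale for the WARP_SIZE rows this block owns; they are reduced across warps after the
// __syncthreads() in the token loop. The +1 in WARP_SIZE_S pads each row, presumably the usual
// shared-memory bank-conflict precaution.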
for (int64_t t = 0; t < n_tokens; t++) {
float sum_kq = 0.0f;
for (int i = tid; i < HEAD_DIM; i += block_size) {
sQ[i] = q_ptr[t * qkv_stride_token + i] * scale;
sK[i] = k_ptr[t * qkv_stride_token + i];
sum_kq += sK[i] * sQ[i];
}
float attn_score = reduce_sum<block_size>(sum_kq, sum_helper);
float beta_val = sigmoid_f(beta_ptr[t]);
float decay = expf(fminf(g_ptr[t], 50.0f));
float sum1 = 0, sum2 = 0;
#pragma unroll
for (int col = col_idx_0; col < HEAD_DIM; col += num_warps) {
float sval = state_dst[row + sub_idx * WARP_SIZE + col * HEAD_DIM];
sum1 += sval * sK[col];
sum2 += sval * sQ[col];
}
all_sum1[col_idx_0*WARP_SIZE_S + row] = sum1;
all_sum2[col_idx_0*WARP_SIZE_S + row] = sum2;
__syncthreads();
sum1 = sum2 = 0;
#pragma unroll
for (int i = 0; i < block_size/WARP_SIZE; ++i) {
sum1 += all_sum1[i*WARP_SIZE_S + row];
sum2 += all_sum2[i*WARP_SIZE_S + row];
}
float sv_new = beta_val * (v_ptr[t * qkv_stride_token + row + sub_idx*WARP_SIZE] - sum1 * decay);
if (col_idx_0 == 0) {
out_base[t * out_token_stride + row + sub_idx*WARP_SIZE] = sum2 * decay + sv_new * attn_score;
}
for (int col = col_idx_0; col < HEAD_DIM; col += num_warps) {
float state_val = state_dst[row + sub_idx*WARP_SIZE + col * HEAD_DIM];
float new_state_val = decay * state_val + sv_new * sK[col];
new_state_val = fminf(fmaxf(new_state_val, -1e6f), 1e6f);
state_dst[row + sub_idx*WARP_SIZE + col * HEAD_DIM] = new_state_val;
}
}
}
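For reference, here is a minimal CPU sketch (illustrative only, not part of this commit; the function name and signature are hypothetical) of the per-token delta-rule update that delta_net_recurrent_f32_a performs for a single head. S is the HEAD_DIM x HEAD_DIM state in the same row + col*HEAD_DIM layout as the kernel:

#include <math.h>
// Reference for one token of one head: mirrors the token loop of the kernel above.
static void delta_step_ref(const float * q, const float * k, const float * v,
                           float g, float beta, float * S, float * out, int head_dim) {
    const float scale = 1.0f / sqrtf((float)head_dim);
    const float decay = expf(fminf(g, 50.0f));         // same clamp as the kernel
    const float b     = 1.0f / (1.0f + expf(-beta));   // sigmoid_f(beta)
    float attn = 0.0f;                                  // attn_score = k . (q*scale)
    for (int i = 0; i < head_dim; ++i) attn += k[i] * q[i] * scale;
    for (int row = 0; row < head_dim; ++row) {
        float sk = 0.0f, sq = 0.0f;                     // this row of state . k and state . (q*scale)
        for (int col = 0; col < head_dim; ++col) {
            sk += S[row + col*head_dim] * k[col];
            sq += S[row + col*head_dim] * q[col] * scale;
        }
        const float v_new = b * (v[row] - sk * decay);  // sv_new in the kernel
        out[row] = sq * decay + v_new * attn;
        for (int col = 0; col < head_dim; ++col) {      // decayed state plus rank-1 update, clamped
            float s = decay * S[row + col*head_dim] + v_new * k[col];
            S[row + col*head_dim] = fminf(fmaxf(s, -1e6f), 1e6f);
        }
    }
}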
template <int HEAD_DIM, int block_size>
__global__ void delta_net_recurrent_f32(
const float * __restrict__ q, // [HEAD_DIM, n_tokens, n_heads, n_seqs]
@@ -101,6 +220,58 @@ __global__ void delta_net_recurrent_f32(
auto all_sum1 = all_sum;
auto all_sum2 = all_sum1 + HEAD_DIM_S*num_stored_rows;
if constexpr (block_size >= HEAD_DIM && block_size % HEAD_DIM == 0) {
int idx = tid / HEAD_DIM;
int row_out = tid % HEAD_DIM;
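// Illustrative: with block_size = 512 and HEAD_DIM = 128, idx runs over [0,4) and row_out over
// [0,128); each group of HEAD_DIM threads handles every (block_size/HEAD_DIM)-th state column below.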
for (int64_t t = 0; t < n_tokens; t++) {
if (idx == 0) {
sQ[row_out] = q_ptr[t * qkv_stride_token + row_out] * scale;
sK[row_out] = k_ptr[t * qkv_stride_token + row_out];
float kq = sQ[row_out]*sK[row_out];
kq = warp_reduce_sum(kq);
if (row_out % WARP_SIZE == 0) {
sum_helper[row_out/WARP_SIZE] = kq;
}
}
__syncthreads();
float attn_score = 0;
for (int i = 0; i < HEAD_DIM/WARP_SIZE; ++i) {
attn_score += sum_helper[i];
}
float beta_val = sigmoid_f(beta_ptr[t]);
float decay = expf(fminf(g_ptr[t], 50.0f));
float sum1 = 0, sum2 = 0;
#pragma unroll
for (int col = idx; col < HEAD_DIM; col += block_size/HEAD_DIM) {
float sval = state_dst[row_out + col * HEAD_DIM];
sum1 += sval * sK[col];
sum2 += sval * sQ[col];
}
all_sum1[idx*HEAD_DIM_S + row_out] = sum1;
all_sum2[idx*HEAD_DIM_S + row_out] = sum2;
__syncthreads();
sum1 = sum2 = 0;
#pragma unroll
for (int i = 0; i < block_size/HEAD_DIM; ++i) {
sum1 += all_sum1[i*HEAD_DIM_S + row_out];
sum2 += all_sum2[i*HEAD_DIM_S + row_out];
}
float sv_new = beta_val * (v_ptr[t * qkv_stride_token + row_out] - sum1 * decay);
if (idx == 0) {
out_base[t * out_token_stride + row_out] = sum2 * decay + sv_new * attn_score;
}
for (int col = idx; col < HEAD_DIM; col += block_size/HEAD_DIM) {
float state_val = state_dst[row_out + col * HEAD_DIM];
float new_state_val = decay * state_val + sv_new * sK[col];
new_state_val = fminf(fmaxf(new_state_val, -1e6f), 1e6f);
state_dst[row_out + col * HEAD_DIM] = new_state_val;
}
}
} else {
// Process each token sequentially
for (int64_t t = 0; t < n_tokens; t++) {
@@ -117,33 +288,76 @@ __global__ void delta_net_recurrent_f32(
float beta_val = sigmoid_f(beta_ptr[t]);
float decay = expf(fminf(g_ptr[t], 50.0f));
if constexpr (block_size >= HEAD_DIM && block_size % HEAD_DIM == 0) {
int idx = tid / HEAD_DIM;
int row_out = tid % HEAD_DIM;
float sum1 = 0, sum2 = 0;
#pragma unroll
for (int col = idx; col < HEAD_DIM; col += block_size/HEAD_DIM) {
float sval = state_dst[row_out + col * HEAD_DIM];
sum1 += sval * sK[col];
sum2 += sval * sQ[col];
}
all_sum1[idx*HEAD_DIM_S + row_out] = sum1;
all_sum2[idx*HEAD_DIM_S + row_out] = sum2;
//if constexpr (block_size >= HEAD_DIM && block_size % HEAD_DIM == 0) {
// int idx = tid / HEAD_DIM;
// int row_out = tid % HEAD_DIM;
// float sum1 = 0, sum2 = 0;
// #pragma unroll
// for (int col = idx; col < HEAD_DIM; col += block_size/HEAD_DIM) {
// float sval = state_dst[row_out + col * HEAD_DIM];
// sum1 += sval * sK[col];
// sum2 += sval * sQ[col];
// }
// all_sum1[idx*HEAD_DIM_S + row_out] = sum1;
// all_sum2[idx*HEAD_DIM_S + row_out] = sum2;
__syncthreads();
// __syncthreads();
if (idx == 0) {
#pragma unroll
for (int i = 1; i < block_size/HEAD_DIM; ++i) {
sum1 += all_sum1[i*HEAD_DIM_S + row_out];
sum2 += all_sum2[i*HEAD_DIM_S + row_out];
}
sVNew[row_out] = sV[row_out] * beta_val - sum1 * beta_val * decay;
float v_attn = sVNew[row_out] * attn_score;
out_base[t * out_token_stride + row_out] = sum2 * decay + v_attn;
}
__syncthreads();
} else {
// sum1 = sum2 = 0;
// #pragma unroll
// for (int i = 0; i < block_size/HEAD_DIM; ++i) {
// sum1 += all_sum1[i*HEAD_DIM_S + row_out];
// sum2 += all_sum2[i*HEAD_DIM_S + row_out];
// }
// float sv_new = sV[row_out] * beta_val - sum1 * beta_val * decay;
// if (idx == 0) {
// out_base[t * out_token_stride + row_out] = sum2 * decay + sv_new * attn_score;
// }
// for (int col = idx; col < HEAD_DIM; col += block_size/HEAD_DIM) {
// float state_val = state_dst[row_out + col * HEAD_DIM];
// float new_state_val = decay * state_val + sv_new * sK[col];
// new_state_val = fminf(fmaxf(new_state_val, -1e6f), 1e6f);
// state_dst[row_out + col * HEAD_DIM] = new_state_val;
// }
// //if (idx == 0) {
// // #pragma unroll
// // for (int i = 1; i < block_size/HEAD_DIM; ++i) {
// // sum1 += all_sum1[i*HEAD_DIM_S + row_out];
// // sum2 += all_sum2[i*HEAD_DIM_S + row_out];
// // }
// // sVNew[row_out] = sV[row_out] * beta_val - sum1 * beta_val * decay;
// // float v_attn = sVNew[row_out] * attn_score;
// // out_base[t * out_token_stride + row_out] = sum2 * decay + v_attn;
// //}
// //__syncthreads();
// //for (int col = idx; col < HEAD_DIM; col += block_size/HEAD_DIM) {
// // float state_val = state_dst[row_out + col * HEAD_DIM];
// // float new_state_val = decay * state_val + sVNew[row_out] * sK[col];
// // new_state_val = fminf(fmaxf(new_state_val, -1e6f), 1e6f);
// // state_dst[row_out + col * HEAD_DIM] = new_state_val;
// //}
// sum1 = sum2 = 0;
// #pragma unroll
// for (int i = 0; i < block_size/HEAD_DIM; ++i) {
// sum1 += all_sum1[i*HEAD_DIM_S + row_out];
// sum2 += all_sum2[i*HEAD_DIM_S + row_out];
// }
// float sv_new = sV[row_out] * beta_val - sum1 * beta_val * decay;
// if (idx == 0) {
// out_base[t * out_token_stride + row_out] = sum2 * decay + sv_new * attn_score;
// }
// for (int col = idx; col < HEAD_DIM; col += block_size/HEAD_DIM) {
// float state_val = state_dst[row_out + col * HEAD_DIM];
// float new_state_val = decay * state_val + sv_new * sK[col];
// new_state_val = fminf(fmaxf(new_state_val, -1e6f), 1e6f);
// state_dst[row_out + col * HEAD_DIM] = new_state_val;
// }
//} else {
for (int row_out = lane_id; row_out < HEAD_DIM; row_out += WARP_SIZE) {
float sum1 = 0.0f;
float sum2 = 0.0f;
@@ -171,16 +385,16 @@ __global__ void delta_net_recurrent_f32(
out_base[t * out_token_stride + row_out] = sum2 * decay + v_attn;
}
__syncthreads();
}
for (int out_dim = warp_id; out_dim < HEAD_DIM; out_dim += NUM_WARPS) {
float k_col = sK[out_dim];
#pragma unroll
for (int row = lane_id; row < HEAD_DIM; row += WARP_SIZE) {
float state_val = state_dst[row + out_dim * HEAD_DIM];
float new_state_val = decay * state_val + sVNew[row] * k_col; //sK[out_dim];
new_state_val = fminf(fmaxf(new_state_val, -1e6f), 1e6f);
state_dst[row + out_dim * HEAD_DIM] = new_state_val;
}
}
}
}
@@ -417,25 +631,66 @@ static void delta_net_f32_cuda(
const int64_t output_offset = head_dim * n_tokens * n_heads * n_seqs;
// One block per (batch, head) pair
const int num_blocks = n_seqs * n_heads;
constexpr int threads_per_block = 512; //256;
//const int num_blocks = n_seqs * n_heads;
//constexpr int threads_per_block = 512; //256;
const size_t smem_size = 4 * head_dim * sizeof(float);
//const size_t smem_size = 4 * head_dim * sizeof(float);
// Use templated kernel for common head dimensions, generic for others
if (head_dim == 64) {
delta_net_recurrent_f32<64, threads_per_block><<<num_blocks, threads_per_block, smem_size, stream>>>(
q, k, v, g, beta, state_in, dst, n_heads, n_tokens, n_seqs, output_offset, eps);
} else if (head_dim == 128) {
GGML_ASSERT(num_blocks % 8 == 0);
delta_net_recurrent_f32<128, threads_per_block><<<num_blocks, threads_per_block, smem_size, stream>>>(
q, k, v, g, beta, state_in, dst, n_heads, n_tokens, n_seqs, output_offset, eps);
} else {
GGML_ASSERT("Unsupported delta net head size");
delta_net_recurrent_generic_f32<<<num_blocks, threads_per_block, smem_size, stream>>>(
q, k, v, g, beta, state_in, dst, head_dim, n_tokens, n_heads, n_seqs, output_offset, eps);
//// Use templated kernel for common head dimensions, generic for others
//if (head_dim == 64) {
// delta_net_recurrent_f32<64, threads_per_block><<<num_blocks, threads_per_block, smem_size, stream>>>(
// q, k, v, g, beta, state_in, dst, n_heads, n_tokens, n_seqs, output_offset, eps);
//} else if (head_dim == 128) {
// GGML_ASSERT(num_blocks % 8 == 0);
// delta_net_recurrent_f32<128, threads_per_block><<<num_blocks, threads_per_block, smem_size, stream>>>(
// q, k, v, g, beta, state_in, dst, n_heads, n_tokens, n_seqs, output_offset, eps);
//} else {
// GGML_ASSERT("Unsupported delta net head size");
// delta_net_recurrent_generic_f32<<<num_blocks, threads_per_block, smem_size, stream>>>(
// q, k, v, g, beta, state_in, dst, head_dim, n_tokens, n_heads, n_seqs, output_offset, eps);
//}
if (head_dim != 64 && head_dim != 128) {
GGML_ABORT("Unsupported delta net head size");
}
GGML_ASSERT(head_dim % WARP_SIZE == 0);
const int num_blocks = n_seqs * n_heads * (head_dim/WARP_SIZE);
const size_t smem_size = 2 * head_dim * sizeof(float);
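// Illustrative sizing: with n_seqs = 1, n_heads = 16, head_dim = 128 this launches 1*16*4 = 64 blocks,
// each with 2*128*4 = 1024 bytes of dynamic shared memory (sQ and sK). The updated per-sequence states
// are written starting at output_offset = head_dim*n_tokens*n_heads*n_seqs floats into dst, right after
// the token outputs.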
if (n_tokens <= 8) {
constexpr int threads_per_block = 256;
if (head_dim == 64) {
delta_net_recurrent_f32_a<64, threads_per_block><<<num_blocks, threads_per_block, smem_size, stream>>>(
q, k, v, g, beta, state_in, dst, n_heads, n_tokens, n_seqs, output_offset, eps);
} else {
delta_net_recurrent_f32_a<128, threads_per_block><<<num_blocks, threads_per_block, smem_size, stream>>>(
q, k, v, g, beta, state_in, dst, n_heads, n_tokens, n_seqs, output_offset, eps);
}
} else {
constexpr int threads_per_block = 128;
if (head_dim == 64) {
delta_net_recurrent_f32_a<64, threads_per_block><<<num_blocks, threads_per_block, smem_size, stream>>>(
q, k, v, g, beta, state_in, dst, n_heads, n_tokens, n_seqs, output_offset, eps);
} else {
delta_net_recurrent_f32_a<128, threads_per_block><<<num_blocks, threads_per_block, smem_size, stream>>>(
q, k, v, g, beta, state_in, dst, n_heads, n_tokens, n_seqs, output_offset, eps);
}
}
//// Use templated kernel for common head dimensions, generic for others
//if (head_dim == 64) {
// delta_net_recurrent_f32_a<64, threads_per_block><<<num_blocks, threads_per_block, smem_size, stream>>>(
// q, k, v, g, beta, state_in, dst, n_heads, n_tokens, n_seqs, output_offset, eps);
//} else if (head_dim == 128) {
// delta_net_recurrent_f32_a<128, threads_per_block><<<num_blocks, threads_per_block, smem_size, stream>>>(
// q, k, v, g, beta, state_in, dst, n_heads, n_tokens, n_seqs, output_offset, eps);
//} else {
// GGML_ASSERT("Unsupported delta net head size");
// delta_net_recurrent_generic_f32<<<num_blocks, threads_per_block, smem_size, stream>>>(
// q, k, v, g, beta, state_in, dst, head_dim, n_tokens, n_heads, n_seqs, output_offset, eps);
//}
CUDA_CHECK(cudaGetLastError());
}

View File

@@ -9,7 +9,7 @@
#include <algorithm>
#include <unordered_set>
#define QWEN3NEXT_CHUNK_SIZE 64
#define DELTA_CHUNK_SIZE 64
delta_net::delta_net(llama_context & _lctx, const llama_batch & _batch) : lctx(_lctx), batch(_batch) {
auto & model = lctx.model;
@@ -111,7 +111,7 @@ std::pair<ggml_tensor *, ggml_tensor *> delta_net::build_delta_net_chunking(ggml
cb(g, "g_in", il);
cb(state,"state_in", il);
const int64_t chunk_size = QWEN3NEXT_CHUNK_SIZE;
const int64_t chunk_size = DELTA_CHUNK_SIZE;
const int64_t pad = (chunk_size - n_tokens % chunk_size) % chunk_size;
const int64_t n_chunks = (n_tokens + pad) / chunk_size;
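// Illustrative: n_tokens = 150 with chunk_size = 64 gives pad = (64 - 150 % 64) % 64 = 42 and
// n_chunks = (150 + 42) / 64 = 3.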
@@ -296,8 +296,8 @@ std::pair<ggml_tensor *, ggml_tensor *> delta_net::build_delta_net_chunking(ggml
ggml_tensor * output_tokens = ggml_view_4d(ctx0, core_attn_out,
S_v, n_tokens, H_v, n_seqs,
ggml_row_size(core_attn_out->type, S_v),
ggml_row_size(core_attn_out->type, S_v * QWEN3NEXT_CHUNK_SIZE * n_chunks),
ggml_row_size(core_attn_out->type, S_v * QWEN3NEXT_CHUNK_SIZE * n_chunks * H_v), 0);
ggml_row_size(core_attn_out->type, S_v * DELTA_CHUNK_SIZE * n_chunks),
ggml_row_size(core_attn_out->type, S_v * DELTA_CHUNK_SIZE * n_chunks * H_v), 0);
cb(output_tokens, "output_tokens", il);
output_tokens = ggml_permute(ctx0, output_tokens, 0, 2, 1, 3);
@@ -572,19 +572,20 @@ ggml_tensor * delta_net::build_layer_attn_linear_core(ggml_context * ctx0, ggml_
beta = ggml_cont_4d(ctx0, b, num_v_heads, 1, n_tok, 1);
alpha = ggml_cont_3d(ctx0, a, num_v_heads, n_tok, 1);
cb(beta, "beta", il);
cb(alpha, "alpha", il);
} else {
beta = llm_build_context::llm_build_lora_mm(lctx, ctx0, model.layers[il].ssm_beta, cur);
alpha = llm_build_context::llm_build_lora_mm(lctx, ctx0, model.layers[il].ssm_alpha, cur);
ggml_build_forward_expand(gf, beta);
ggml_build_forward_expand(gf, alpha);
cb(beta, "beta", il);
cb(alpha, "alpha", il);
beta = ggml_reshape_4d(ctx0, beta, num_v_heads, 1, n_tok, 1);
cb(beta, "beta_reshaped", il);
alpha = llm_build_context::llm_build_lora_mm(lctx, ctx0, model.layers[il].ssm_alpha, cur);
cb(alpha, "alpha", il);
// Why? Don't think this ggml_cont_3d is needed, but let's leave it in for now just in case.
alpha = ggml_cont_3d(ctx0, alpha, num_v_heads, n_seq_tokens, n_seqs);
cb(alpha, "alpha_cont", il);
alpha = ggml_reshape_3d(ctx0, alpha, num_v_heads, n_seq_tokens, n_seqs);
cb(alpha, "alpha_reshaped", il);
}
cb(beta, "beta", il);
cb(alpha, "alpha", il);
ggml_build_forward_expand(gf, beta);
ggml_build_forward_expand(gf, alpha);
@@ -606,18 +607,13 @@ ggml_tensor * delta_net::build_layer_attn_linear_core(ggml_context * ctx0, ggml_
state_all = ggml_view_2d(ctx0, state_storage, state_dim, qnext_state_slots, state_row_size, 0);
ggml_tensor * state_dst = ggml_view_2d(ctx0, state_all, state_dim, 1, state_row_size, state_seq_id_local * state_row_size);
ggml_tensor * state_f32 = state_dst;
if (state_f32->type != GGML_TYPE_F32) {
state_f32 = ggml_cast(ctx0, state_f32, GGML_TYPE_F32);
}
if (reset_state_local) {
state_f32 = ggml_scale(ctx0, state_f32, 0.0f);
cb(state_f32, "state_reset", il);
state_dst = ggml_scale(ctx0, state_dst, 0.0f);
cb(state_dst, "state_reset", il);
}
ggml_tensor * conv_state_flat = ggml_view_2d(ctx0, state_f32, conv_state_dim, 1, state_f32->nb[1], 0);
ggml_tensor * ssm_state_flat = ggml_view_2d(ctx0, state_f32, ssm_state_dim, 1, state_f32->nb[1],
conv_state_dim * ggml_element_size(state_f32));
ggml_tensor * conv_state_flat = ggml_view_2d(ctx0, state_dst, conv_state_dim, 1, state_dst->nb[1], 0);
ggml_tensor * ssm_state_flat = ggml_view_2d(ctx0, state_dst, ssm_state_dim, 1, state_dst->nb[1], conv_state_dim * ggml_element_size(state_dst));
ggml_tensor * conv_states = ggml_reshape_3d(ctx0, conv_state_flat, hparams.ssm_d_conv - 1, conv_dim, 1);
ggml_tensor * state = ggml_reshape_4d(ctx0, ssm_state_flat, head_v_dim, head_v_dim, num_v_heads, 1);
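// i.e. each per-sequence state row stores the conv state first ((ssm_d_conv - 1) * conv_dim elements)
// followed by the delta-net state (head_v_dim * head_v_dim * num_v_heads elements).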
@@ -628,8 +624,6 @@ ggml_tensor * delta_net::build_layer_attn_linear_core(ggml_context * ctx0, ggml_
ggml_tensor * conv_output_raw = ggml_ssm_conv(ctx0, conv_states, qkv_mixed, model.layers[il].ssm_conv1d, inp_s_seq_qnext);
cb(conv_output_raw, "conv_output_raw", il);
//ggml_tensor * conv_output = ggml_view_2d(ctx0, conv_output_raw, conv_dim, n_tok, conv_dim * ggml_element_size(conv_output_raw), 0);
//ggml_tensor * conv_output_silu = ggml_silu(ctx0, conv_output);
ggml_tensor * conv_output_silu = ggml_silu(ctx0, conv_output_raw);
cb(conv_output_silu, "conv_output_silu", il);
@@ -639,27 +633,24 @@ ggml_tensor * delta_net::build_layer_attn_linear_core(ggml_context * ctx0, ggml_
// Extract the convolved Q, K, V from conv_output
ggml_tensor * q_conv = ggml_view_4d(ctx0, conv_output_silu, head_k_dim, num_k_heads, n_tok, 1,
ggml_row_size(conv_output_silu->type, head_k_dim),
nb1_qkv, nb1_qkv * n_tok, 0);
ggml_row_size(conv_output_silu->type, head_k_dim), nb1_qkv, nb1_qkv * n_tok, 0);
ggml_tensor * k_conv = ggml_view_4d(ctx0, conv_output_silu, head_k_dim, num_k_heads, n_tok, 1,
ggml_row_size(conv_output_silu->type, head_k_dim),
nb1_qkv, nb1_qkv * n_tok,
ggml_row_size(conv_output_silu->type, head_k_dim), nb1_qkv, nb1_qkv * n_tok,
head_k_dim * num_k_heads * ggml_element_size(conv_output_silu));
ggml_tensor * v_conv = ggml_view_4d(ctx0, conv_output_silu, head_v_dim, num_v_heads, n_tok, 1,
ggml_row_size(conv_output_silu->type, head_v_dim),
nb1_qkv, nb1_qkv * n_tok,
ggml_row_size(conv_output_silu->type, head_v_dim), nb1_qkv, nb1_qkv * n_tok,
ggml_row_size(conv_output_silu->type, 2 * head_k_dim * num_k_heads));
cb(q_conv, "q_conv", il);
cb(k_conv, "k_conv", il);
cb(v_conv, "v_conv", il);
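// Per-token layout of conv_output_silu implied by the offsets above: Q at 0, K at
// head_k_dim * num_k_heads, V at 2 * head_k_dim * num_k_heads, i.e. conv_dim is presumably
// 2 * head_k_dim * num_k_heads + head_v_dim * num_v_heads.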
const float eps_norm = hparams.f_norm_rms_eps;
q_conv = ggml_l2_norm(ctx0, q_conv, eps_norm);
k_conv = ggml_l2_norm(ctx0, k_conv, eps_norm);
q_conv = ggml_l2_norm(ctx0, q_conv, hparams.f_norm_rms_eps);
k_conv = ggml_l2_norm(ctx0, k_conv, hparams.f_norm_rms_eps);
cb(q_conv, "q_conv_normed", il);
cb(k_conv, "k_conv_normed", il);
if (num_k_heads != num_v_heads) {
GGML_ASSERT(num_v_heads % num_k_heads == 0);
@@ -709,9 +700,6 @@ ggml_tensor * delta_net::build_layer_attn_linear_core(ggml_context * ctx0, ggml_
ggml_tensor * new_state_flat = ggml_concat(ctx0, new_conv_flat, new_ssm_flat, 0);
ggml_tensor * state_update = new_state_flat;
if (state_dst->type != GGML_TYPE_F32) {
state_update = ggml_cast(ctx0, state_update, state_dst->type);
}
ggml_build_forward_expand(gf, ggml_cpy(ctx0, state_update, state_dst));
ggml_tensor * attn_out_2d = ggml_reshape_2d(ctx0, output, head_v_dim, num_v_heads * n_tok);
@@ -728,7 +716,8 @@ ggml_tensor * delta_net::build_layer_attn_linear_core(ggml_context * ctx0, ggml_
ggml_tensor * out = llm_build_context::llm_build_lora_mm(lctx, ctx0, model.layers[il].ssm_out, final_output);
cb(out, "linear_attn_out", il);
return ggml_reshape_2d(ctx0, out, hparams.n_embd, n_tok);
return out;
//return ggml_reshape_2d(ctx0, out, hparams.n_embd, n_tok);
}