mirror of https://github.com/ikawrakow/ik_llama.cpp.git (synced 2026-02-28 17:14:17 +00:00)
@@ -1533,7 +1533,7 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
}
if (arg == "-fdn" || arg == "--fused-delta-net") {
CHECK_ARG
params.fused_delta_net = std::stoi(argv[i]);
fprintf(stderr, "=================== %s has been deprecated\n", arg.c_str());
return true;
}
if (arg == "-smf16" || arg == "--split-mode-f16") {
@@ -2276,7 +2276,6 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
options.push_back({ "*", "-grt, --graph-reduce-type", "Type for data exchange between GPUs (default: %s)", "f32"});
options.push_back({ "*", "-smgs, --split-mode-graph-scheduling,", "Force Split Mode Graph Scheduling (default: %d)", params.split_mode_graph_scheduling});
options.push_back({ "*", "-sas, --scheduler_async,", "Async evaluation of compute graphs: %d)", params.scheduler_async});
options.push_back({ "*", "-fdn, --fused-delta-net N", "Use fused delta-net when batch size is <= N with recurrent models: %d)", params.fused_delta_net});
options.push_back({ "*", "-vq, --validate-quants", "validate quantized data while loading the model (default: %d)", params.validate_quants});
options.push_back({ "*", "-p, --prompt PROMPT", "prompt to start generation with\n"
"in conversation mode, this will be used as system prompt\n"
@@ -3355,7 +3354,6 @@ struct llama_context_params common_context_params_to_llama(const gpt_params & pa
cparams.split_mode_graph_scheduling = params.split_mode_graph_scheduling;
//cparams.split_mode_f16 = params.split_mode_f16;
cparams.scheduler_async = params.scheduler_async;
cparams.fused_delta_net = params.fused_delta_net;
cparams.min_experts = params.min_experts;
cparams.thresh_experts = params.thresh_experts;
cparams.only_active_experts = params.only_active_exps;
@@ -4366,7 +4364,6 @@ void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const l
//fprintf(stream, "split_mode_f16: %s # default: true\n", params.split_mode_f16 ? "true" : "false");
fprintf(stream, "reduce_type: %s # default f16\n", params.reduce_type.c_str());
fprintf(stream, "scheduler_async: %s # default: false\n", params.scheduler_async ? "true" : "false");
fprintf(stream, "fused_delta_net: %d # default: 0\n", params.fused_delta_net );
fprintf(stream, "ser: %d,%g # default: -1,0\n", params.min_experts, params.thresh_experts);
fprintf(stream, "temp: %f # default: 0.8\n", sparams.temp);

@@ -359,7 +359,6 @@ struct gpt_params {
bool split_mode_graph_scheduling = false; // if true, force split mode graph scheduling
//bool split_mode_f16 = true; // if true, intermediate results will be cast to f16 before copying to other GPUs to perform reduce ops
bool scheduler_async = false; // if true, in split mode graph the scheduler will use multiple threads to evaluate the graph
int fused_delta_net = 65536; // use fused delta-net if number of tokens in the batch is less than this value
bool has_mtp = false; // enable MTP if supported by the model

std::string cache_type_k = "f16"; // KV cache data type for the K

@@ -271,7 +271,6 @@ struct cmd_params {
bool muge = false;
bool rcache = false;
bool sas = false;
int fdn = 65536; // fdn = fused delta net
bool print_overrides = false;
output_formats output_format;
output_formats output_format_stderr;
@@ -317,7 +316,6 @@ static const cmd_params cmd_params_defaults = {
/* muge */ false,
/* rcache */ false,
/* sas */ false,
/* fdn */ 65536,
/* print_overrides */ false,
/* output_format */ MARKDOWN,
/* output_format_stderr */ NONE,
@@ -371,7 +369,6 @@ static void print_usage(int /* argc */, char ** argv) {
printf(" -no-fug, --no-fused-up-gate <0|1> (default: %s)\n", cmd_params_defaults.no_fug? "1" : "0");
printf(" -no-ooae, --no-offload-only-active-experts <0|1> (default: %s)\n", cmd_params_defaults.no_ooae? "1" : "0");
printf(" -sas, --scheduler-async <0|1> (default: %s)\n", cmd_params_defaults.sas ? "1" : "0");
printf(" -fdn, --fused-delta-net <n> (default: %d)\n", cmd_params_defaults.fdn);
printf(" --print-overrides <0|1> (default: %s)\n", cmd_params_defaults.print_overrides ? "1" : "0");
printf("\n");
printf("Multiple values can be given for each parameter by separating them with ',' or by specifying the parameter multiple times.\n");
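For orientation: -fdn is read with std::stoi as a single integer threshold (see the parser below), so a hypothetical invocation, assuming the benchmark binary is named llama-bench and using a placeholder model path, would look like

    ./llama-bench -m model.gguf -p 512 -n 128 -fdn 32

With the default of 65536 the fused delta-net path is effectively always taken, while -fdn 0 would force the non-fused paths.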
@@ -813,12 +810,6 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
break;
}
params.sas = std::stoi(argv[i]);
} else if (arg == "-fdn" || arg == "--fused-delta-net") {
if (++i >= argc) {
invalid_param = true;
break;
}
params.fdn = std::stoi(argv[i]);
} else if (arg == "-rcache" || arg == "--rope-cache") {
if (++i >= argc) {
invalid_param = true;
@@ -965,7 +956,6 @@ struct cmd_params_instance {
bool muge = false;
bool rcache = false;
bool sas = false;
int fdn = 0;
const llama_model_tensor_buft_override* buft_overrides;

llama_model_params to_llama_mparams() const {
@@ -1001,7 +991,6 @@ struct cmd_params_instance {
muge == other.muge &&
use_thp == other.use_thp &&
sas == other.sas &&
fdn == other.fdn &&
tensor_split == other.tensor_split;
}

@@ -1028,7 +1017,6 @@ struct cmd_params_instance {
cparams.embeddings = embeddings;
cparams.cuda_params = (void *)cuda_params.data();
cparams.scheduler_async = sas;
cparams.fused_delta_net = fdn;

return cparams;
}
@@ -1095,7 +1083,6 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
/* .muge = */ params.muge,
/* .rcache = */ params.rcache,
/* .sas = */ params.sas,
/* .fdn = */ params.fdn,
/* .buft_overrides=*/ params.buft_overrides.data(),
};
instances.push_back(instance);
@@ -1139,7 +1126,6 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
/* .muge = */ params.muge,
/* .rcache = */ params.rcache,
/* .sas = */ params.sas,
/* .fdn = */ params.fdn,
/* .buft_overrides=*/ params.buft_overrides.data(),
};
instances.push_back(instance);
@@ -1183,7 +1169,6 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
/* .muge = */ params.muge,
/* .rcache = */ params.rcache,
/* .sas = */ params.sas,
/* .fdn = */ params.fdn,
/* .buft_overrides=*/ params.buft_overrides.data(),
};
instances.push_back(instance);
@@ -1227,7 +1212,6 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
/* .muge = */ params.muge,
/* .rcache = */ params.rcache,
/* .sas = */ params.sas,
/* .fdn = */ params.fdn,
/* .buft_overrides=*/ params.buft_overrides.data(),
};
instances.push_back(instance);
@@ -1282,7 +1266,6 @@ struct test {
bool muge = false;
bool rcache = false;
bool sas = false;
int fdn = 0;
std::string override_tensor;
int n_prompt;
int n_gen;
@@ -1324,7 +1307,6 @@ struct test {
ger = inst.ger;
rcache = inst.rcache;
sas = inst.sas;
fdn = inst.fdn;
no_fug = inst.no_fug;
use_thp = inst.use_thp;
no_ooae = inst.no_ooae;
@@ -1429,7 +1411,7 @@ struct test {
field == "model_size" || field == "model_n_params" ||
field == "n_gpu_layers" || field == "main_gpu" ||
field == "n_prompt" || field == "n_gen" || field == "mla_attn" || field == "attn_max_batch" ||
field == "avg_ns" || field == "stddev_ns" || field == "fdn") {
field == "avg_ns" || field == "stddev_ns") {
return INT;
}
if (field == "cuda" || field == "vulkan" || field == "kompute" || field == "metal" ||
@@ -1480,7 +1462,7 @@ struct test {
std::to_string(mla_attn), std::to_string(attn_max_batch), ser_to_string(ser), std::to_string(reuse),
tensor_split_str, std::to_string(use_mmap), std::to_string(embeddings),
std::to_string(repack), std::to_string(mqkv), std::to_string(muge), std::to_string(fmoe), std::to_string(ger),
std::to_string(no_fug), std::to_string(use_thp), std::to_string(no_ooae), std::to_string(rcache), std::to_string(sas), std::to_string(fdn),
std::to_string(no_fug), std::to_string(use_thp), std::to_string(no_ooae), std::to_string(rcache), std::to_string(sas),
cuda_params, override_tensor,
std::to_string(n_prompt), std::to_string(n_gen), test_time,
std::to_string(avg_ns()), std::to_string(stdev_ns()),
@@ -1501,7 +1483,7 @@ struct test {
"n_gpu_layers", "split_mode",
"main_gpu", "no_kv_offload", "flash_attn", "mla_attn", "attn_max_batch", "ser", "reuse",
"tensor_split", "use_mmap", "embeddings", "repack", "mqkv", "muge", "fused_moe", "grouped_er",
"no_fused_up_gate", "use_thp", "no_ooae", "rcache", "sas", "fdn", "cuda_params", "override_tensor",
"no_fused_up_gate", "use_thp", "no_ooae", "rcache", "sas", "cuda_params", "override_tensor",
"n_prompt", "n_gen", "test_time",
"avg_ns", "stddev_ns",
"avg_ts", "stddev_ts", "test",
@@ -1691,9 +1673,6 @@ struct markdown_printer : public printer {
if (field == "sas") {
return 3;
}
if (field == "fdn") {
return 4;
}
if (field == "use_thp") {
return 3;
}
@@ -1767,9 +1746,6 @@ struct markdown_printer : public printer {
if (field == "sas") {
return "sas";
}
if (field == "fdn") {
return "fdn";
}
if (field == "use_thp") {
return "thp";
}
@@ -1880,9 +1856,6 @@ struct markdown_printer : public printer {
if (params.sas != cmd_params_defaults.sas) {
fields.emplace_back("sas");
}
if (params.fdn != cmd_params_defaults.fdn) {
fields.emplace_back("fdn");
}
if (params.muge != cmd_params_defaults.muge) {
fields.emplace_back("muge");
}

@@ -456,7 +456,6 @@ extern "C" {
bool split_mode_graph_scheduling; // if true, force split mode graph scheduling
//bool split_mode_f16; // if true, cast intermediate results to f16 before copying to other GPUs
bool scheduler_async; // if true, with split mode "graph" graph evaluation will be done using multiple threads
int fused_delta_net;
bool mtp; // Activate MTP if supported
enum llama_mtp_op_type mtp_op_type;

@@ -43,7 +43,6 @@ struct llama_cparams {
bool split_mode_graph_scheduling;
//bool split_mode_f16;
bool scheduler_async;
int fused_delta_net;
int min_experts;
float thresh_experts;
bool mtp;

@@ -74,304 +74,6 @@ delta_net::delta_net(llama_context & _lctx, const llama_batch & _batch) : lctx(_

delta_net::~delta_net() = default;

std::pair<ggml_tensor *, ggml_tensor *> delta_net::build_delta_net_chunking(ggml_context * ctx0,
ggml_tensor * q, ggml_tensor * k, ggml_tensor * v,
ggml_tensor * g, ggml_tensor * beta, ggml_tensor * state,
ggml_tensor * causal_mask, ggml_tensor * identity,
ggml_tensor * diag_mask, int il, const llm_build_cb & cb) {

const int64_t S_k = q->ne[0];
const int64_t H_k = q->ne[1];
const int64_t n_tokens = q->ne[2];
const int64_t n_seqs = q->ne[3];

const int64_t S_v = v->ne[0];
const int64_t H_v = v->ne[1];

GGML_ASSERT(n_seqs == 1);
GGML_ASSERT(v->ne[2] == n_tokens);
GGML_ASSERT(k->ne[2] == n_tokens);
GGML_ASSERT(g->ne[0] == H_v && g->ne[1] == n_tokens && g->ne[2] == n_seqs);
if (beta->ne[0] != H_v || beta->ne[2] != n_tokens || beta->ne[3] != n_seqs) {
printf("beta: %ld x %ld x %ld, expected %ld x %ld x %ld\n", beta->ne[0], beta->ne[2], beta->ne[3], H_v, n_tokens, n_seqs);
}
GGML_ASSERT(beta->ne[0] == H_v && beta->ne[2] == n_tokens && beta->ne[3] == n_seqs);
GGML_ASSERT(state->ne[0] == S_v && state->ne[1] == S_v && state->ne[2] == H_v && state->ne[3] == n_seqs);
GGML_ASSERT(H_k == H_v);

const float scale = 1.0f / sqrtf(S_v);
q = ggml_scale(ctx0, q, scale);

beta = ggml_sigmoid(ctx0, beta);

cb(q, "q_in", il);
cb(k, "k_in", il);
cb(v, "v_in", il);
cb(beta, "beta_in", il);
cb(g, "g_in", il);
cb(state,"state_in", il);

const int64_t chunk_size = DELTA_CHUNK_SIZE;
const int64_t pad = (chunk_size - n_tokens % chunk_size) % chunk_size;
const int64_t n_chunks = (n_tokens + pad) / chunk_size;

q = ggml_permute(ctx0, q, 0, 2, 1, 3);
k = ggml_permute(ctx0, k, 0, 2, 1, 3);
v = ggml_permute(ctx0, v, 0, 2, 1, 3);
g = ggml_permute(ctx0, g, 2, 0, 3, 1);
beta = ggml_permute(ctx0, beta, 2, 0, 1, 3);

q = ggml_pad(ctx0, q, 0, pad, 0, 0);
k = ggml_pad(ctx0, k, 0, pad, 0, 0);
v = ggml_pad(ctx0, v, 0, pad, 0, 0);
beta = ggml_pad(ctx0, beta, 0, pad, 0, 0);
g = ggml_pad(ctx0, g, pad, 0, 0, 0);

cb(q, "q_pad", il);
cb(k, "k_pad", il);
cb(v, "v_pad", il);
cb(beta, "beta_pad", il);
cb(g, "g_pad", il);

ggml_tensor * v_beta = ggml_mul(ctx0, v, beta);
ggml_tensor * k_beta = ggml_mul(ctx0, k, beta);

cb(v_beta, "v_beta", il);
cb(k_beta, "k_beta", il);

q = ggml_reshape_4d(ctx0, q, S_k, chunk_size, n_chunks, H_k * n_seqs);
k = ggml_reshape_4d(ctx0, k, S_k, chunk_size, n_chunks, H_k * n_seqs);
k_beta = ggml_reshape_4d(ctx0, k_beta, S_k, chunk_size, n_chunks, H_v * n_seqs);
v = ggml_reshape_4d(ctx0, v, S_v, chunk_size, n_chunks, H_v * n_seqs);
v_beta = ggml_reshape_4d(ctx0, v_beta, S_v, chunk_size, n_chunks, H_v * n_seqs);

g = ggml_reshape_4d(ctx0, g, chunk_size, 1, n_chunks, H_v * n_seqs);
beta = ggml_reshape_4d(ctx0, beta, 1, chunk_size, n_chunks, H_v * n_seqs);

ggml_tensor * g_cumsum = ggml_cumsum(ctx0, g);
cb(g_cumsum, "g_cumsum", il);

ggml_tensor * gcs_i =
ggml_repeat_4d(ctx0, g_cumsum, chunk_size, chunk_size, n_chunks, H_v * n_seqs);
ggml_tensor * gcs_j = ggml_reshape_4d(ctx0, g_cumsum, 1, chunk_size, n_chunks, H_v * n_seqs);

ggml_tensor * gcs_j_broadcast =
ggml_repeat_4d(ctx0, gcs_j, chunk_size, chunk_size, n_chunks, H_v * n_seqs);
ggml_tensor * decay_mask = ggml_sub(ctx0, gcs_j_broadcast, gcs_i);
cb(decay_mask, "decay_mask", il);

decay_mask = ggml_mul(ctx0, decay_mask, diag_mask);
cb(decay_mask, "decay_mask_1", il);
decay_mask = ggml_exp(ctx0, decay_mask);
cb(decay_mask, "decay_mask_exp", il);
decay_mask = ggml_mul(ctx0, decay_mask, diag_mask);
cb(decay_mask, "decay_mask_2", il);

ggml_tensor * kmulkbeta = ggml_mul_mat(ctx0, k, k_beta);
cb(kmulkbeta, "kk_beta", il);

ggml_tensor * k_decay = ggml_mul(ctx0, kmulkbeta, decay_mask);
cb(k_decay, "k_decay_1", il);
k_decay = ggml_mul(ctx0, k_decay, causal_mask);
cb(k_decay, "k_decay_2", il);
ggml_tensor * attn = ggml_neg(ctx0, k_decay);
cb(attn, "attn_pre_solve", il);

ggml_tensor * attn_lower = ggml_mul(ctx0, attn, causal_mask);
cb(attn_lower, "attn_lower", il);
ggml_tensor * identity_repeat =
ggml_repeat_4d(ctx0, identity, attn_lower->ne[0], attn_lower->ne[1], attn_lower->ne[2], attn_lower->ne[3]);
ggml_tensor * lhs = ggml_neg(ctx0, ggml_sub(ctx0, attn_lower, identity_repeat));

ggml_tensor * lin_solve = ggml_solve_tri(ctx0, lhs, attn, true, true, false);
attn = ggml_mul(ctx0, lin_solve, causal_mask);
cb(attn, "attn_mul", il);
attn = ggml_add(ctx0, attn, identity);
cb(attn, "attn_solved", il);
auto v_beta_t = ggml_cont(ctx0, ggml_transpose(ctx0, v_beta));
cb(v_beta_t, "v_beta_t", il);
v = ggml_mul_mat(ctx0, v_beta_t, attn);
cb(v, "v_beta", il);

ggml_tensor * g_cumsum_t = ggml_cont(ctx0, ggml_transpose(ctx0, g_cumsum));
cb(g_cumsum_t, "g_cumsum_t", il);
ggml_tensor * gexp = ggml_exp(ctx0, g_cumsum_t);
cb(gexp, "gexp", il);

ggml_tensor * kbeta_gexp = ggml_mul(ctx0, k_beta, gexp);
cb(kbeta_gexp, "kbeta_gexp", il);

auto kbeta_gexp_t = ggml_cont(ctx0, ggml_transpose(ctx0, kbeta_gexp));
cb(kbeta_gexp_t, "kbeta_gexp_t", il);
auto attn_kbeta = ggml_mul_mat(ctx0, attn, kbeta_gexp_t);
cb(attn_kbeta, "attn_kbeta", il);
ggml_tensor * k_cumdecay = ggml_cont(ctx0, ggml_transpose(ctx0, attn_kbeta));
cb(k_cumdecay, "k_cumdecay", il);

ggml_tensor * attn_kq = ggml_mul_mat(ctx0, k, q);
cb(attn_kq, "attn_kq_pre", il);
attn_kq = ggml_mul(ctx0, decay_mask, attn_kq);
cb(attn_kq, "attn_kq_0", il);
attn_kq = ggml_mul(ctx0, attn_kq, diag_mask);
cb(attn_kq, "attn_kq", il);

ggml_tensor * g_last = ggml_view_4d(ctx0, g_cumsum, 1, 1, g_cumsum->ne[2], g_cumsum->ne[3],
g_cumsum->nb[1], g_cumsum->nb[2], g_cumsum->nb[3],
(g_cumsum->ne[0] - 1) * ggml_element_size(g_cumsum));
g_last = ggml_cont(ctx0, g_last);
cb(g_last, "g_last", il);

ggml_tensor * g_last_exp = ggml_exp(ctx0, g_last);
cb(g_last_exp, "g_last_exp", il);

ggml_tensor * g_last_repeat =
ggml_repeat_4d(ctx0, g_last, chunk_size, 1, n_chunks, H_v * n_seqs);
ggml_tensor * g_diff = ggml_neg(ctx0, ggml_sub(ctx0, g_cumsum, g_last_repeat));
cb(g_diff, "g_diff", il);

ggml_tensor * g_diff_exp = ggml_exp(ctx0, g_diff);
cb(g_diff_exp, "g_diff_exp", il);
ggml_tensor * g_diff_exp_t = ggml_reshape_4d(ctx0, g_diff_exp, 1, chunk_size, n_chunks, g_diff_exp->ne[3]);

ggml_tensor * key_gdiff = ggml_mul(ctx0, k, g_diff_exp_t);
cb(key_gdiff, "key_gdiff", il);

ggml_tensor * key_gdiff_t = ggml_cont(ctx0, ggml_transpose(ctx0, key_gdiff));
cb(key_gdiff_t, "key_gdiff_t", il);

cb(state, "new_state", il);

auto get_slice_2d = [ctx0](ggml_tensor * t, int64_t c) -> ggml_tensor * {
return ggml_view_4d(ctx0, t, t->ne[0], t->ne[1], 1, t->ne[3],
t->nb[1], t->nb[2], t->nb[3], t->nb[2] * c);
};

ggml_tensor * core_attn_out = nullptr;

for (int64_t chunk = 0; chunk < n_chunks; chunk++) {
ggml_tensor * q_chunk = get_slice_2d(q, chunk);
ggml_tensor * v_chunk = get_slice_2d(v, chunk);
ggml_tensor * gexp_chunk = get_slice_2d(gexp, chunk);
ggml_tensor * k_cumdecay_chunk = get_slice_2d(k_cumdecay, chunk);
ggml_tensor * attn_chunk = get_slice_2d(attn_kq, chunk);
cb(attn_chunk, "attn_chunk", il);

ggml_tensor * state_t = ggml_cont_4d(ctx0, ggml_permute(ctx0, state, 1, 0, 2, 3), S_v, S_v, 1, H_v * n_seqs);
cb(state_t, "state_t", il);

ggml_tensor * v_prime = ggml_mul_mat(ctx0, state_t, k_cumdecay_chunk);
cb(v_prime, "v_prime_chunk", il);

ggml_tensor * v_new = ggml_sub(ctx0, v_prime, v_chunk);
ggml_tensor * v_new_t = ggml_cont(ctx0, ggml_transpose(ctx0, v_new));
cb(v_new, "v_new_chunk", il);

ggml_tensor * q_g_exp = ggml_mul(ctx0, q_chunk, gexp_chunk);
cb(q_g_exp, "q_g_exp", il);
ggml_tensor * attn_inter = ggml_mul_mat(ctx0, state_t, q_g_exp);
cb(attn_inter, "attn_inter_chunk", il);

ggml_tensor * v_attn = ggml_mul_mat(ctx0, v_new_t, attn_chunk);
cb(v_attn, "v_attn_chunk", il);

ggml_tensor * core_attn_out_chunk = ggml_sub(ctx0, attn_inter, v_attn);
cb(core_attn_out_chunk, "core_attn_out_chunk", il);

core_attn_out = core_attn_out == nullptr
? core_attn_out_chunk
: ggml_concat(ctx0, core_attn_out, core_attn_out_chunk, 2);

ggml_tensor * k_gdiff_t = get_slice_2d(key_gdiff_t, chunk);
ggml_tensor * kgdmulvnew = ggml_mul_mat(ctx0, v_new_t, k_gdiff_t);
cb(kgdmulvnew, "kgdmulvnew", il);

ggml_tensor * gexp_last_chunk = ggml_cont(ctx0, get_slice_2d(g_last_exp, chunk));
cb(gexp_last_chunk, "gexp_last_chunk", il);
auto s_mul = ggml_mul(ctx0, state, ggml_reshape_4d(ctx0, gexp_last_chunk, gexp_last_chunk->ne[0], gexp_last_chunk->ne[1], H_v, n_seqs));
cb(s_mul, "s_mul", il);
state = ggml_sub(ctx0, s_mul, ggml_reshape_4d(ctx0, kgdmulvnew, kgdmulvnew->ne[0], kgdmulvnew->ne[1], H_v, n_seqs));
}

ggml_tensor * output_tokens = ggml_view_4d(ctx0, core_attn_out,
S_v, n_tokens, H_v, n_seqs,
ggml_row_size(core_attn_out->type, S_v),
ggml_row_size(core_attn_out->type, S_v * DELTA_CHUNK_SIZE * n_chunks),
ggml_row_size(core_attn_out->type, S_v * DELTA_CHUNK_SIZE * n_chunks * H_v), 0);
cb(output_tokens, "output_tokens", il);

output_tokens = ggml_permute(ctx0, output_tokens, 0, 2, 1, 3);
output_tokens = ggml_cont(ctx0, output_tokens);
cb(output_tokens, "output_tokens", il);

return {output_tokens, state};
}

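A rough sketch of what the chunked path above computes, assuming the standard chunked delta-rule formulation and glossing over ggml's transposition conventions: within each chunk of C = DELTA_CHUNK_SIZE tokens, the per-token rank-1 updates are replaced by a single triangular solve,

    $A = -\left(K_\beta K^{\top} \odot D \odot M\right), \qquad T = \left((I - A)^{-1} A\right) \odot M + I,$

where $K_\beta$ are the beta-scaled keys, $D$ is the exponentiated gate-cumsum decay mask, and $M$ is the lower-triangular causal mask; ggml_solve_tri performs the $(I - A)^{-1}$ step, and the per-chunk loop then carries the S_v x S_v recurrent state from one chunk to the next.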
std::pair<ggml_tensor *, ggml_tensor *> delta_net::build_delta_net_autoregressive(ggml_context * ctx0,
ggml_tensor * q, ggml_tensor * k, ggml_tensor * v,
ggml_tensor * g, ggml_tensor * beta, ggml_tensor * state,
int il, const llm_build_cb & cb) {
const int64_t H_k = q->ne[1];
const int64_t n_tokens = q->ne[2];
const int64_t n_seqs = q->ne[3];

const int64_t S_v = v->ne[0];
const int64_t H_v = v->ne[1];

GGML_ASSERT(n_tokens == 1);
GGML_ASSERT(n_seqs == 1);
GGML_ASSERT(H_k == H_v);
GGML_ASSERT(state->ne[0] == S_v && state->ne[1] == S_v && state->ne[2] == H_v && state->ne[3] == n_seqs);

const float scale = 1.0f / sqrtf(S_v);

q = ggml_scale(ctx0, q, scale);
beta = ggml_sigmoid(ctx0, beta);

cb(q, "q_in", il);
cb(k, "k_in", il);
cb(v, "v_in", il);
cb(beta, "beta_in", il);
cb(g, "g_in", il);

ggml_tensor * g_t = ggml_reshape_4d(ctx0, ggml_transpose(ctx0, g), 1, 1, H_k, n_seqs);
ggml_tensor * beta_t = ggml_reshape_4d(ctx0, ggml_transpose(ctx0, beta), 1, 1, H_k, n_seqs);

g_t = ggml_exp(ctx0, g_t);
cb(g_t, "g_t", il);
state = ggml_mul(ctx0, state, g_t);
cb(state, "state", il);

ggml_tensor * k_t_unsqueezed = ggml_reshape_4d(ctx0, k, 1, S_v, H_v, n_seqs);
ggml_tensor * kv_mem = ggml_mul(ctx0, state, k_t_unsqueezed);
cb(kv_mem, "kv_mem", il);
kv_mem = ggml_cont(ctx0, ggml_transpose(ctx0, kv_mem));
cb(kv_mem, "kv_mem_t_cont", il);
kv_mem = ggml_transpose(ctx0, ggml_sum_rows(ctx0, kv_mem));

ggml_tensor * v_t = ggml_reshape_4d(ctx0, v, S_v, 1, H_v, n_seqs);
ggml_tensor * v_diff = ggml_sub(ctx0, v_t, kv_mem);
cb(v_diff, "v_diff", il);
ggml_tensor * delta = ggml_mul(ctx0, v_diff, beta_t);
cb(delta, "delta", il);

ggml_tensor * k_t_delta = ggml_mul(ctx0, ggml_repeat_4d(ctx0, k_t_unsqueezed, S_v, S_v, H_v, n_seqs), delta);
cb(k_t_delta, "k_t_delta", il);
state = ggml_add(ctx0, state, k_t_delta);

ggml_tensor * q_t_unsqueezed = ggml_reshape_4d(ctx0, q, 1, S_v, H_v, n_seqs);
ggml_tensor * state_q = ggml_mul(ctx0, state, q_t_unsqueezed);
cb(state_q, "state_q", il);
state_q = ggml_cont(ctx0, ggml_transpose(ctx0, state_q));
cb(state_q, "state_q_t_cont", il);
ggml_tensor * core_attn_out = ggml_transpose(ctx0, ggml_sum_rows(ctx0, state_q));

cb(core_attn_out, "output_tokens", il);
cb(state, "new_state", il);

return {core_attn_out, state};
}

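For reference, a sketch of the per-token update the autoregressive path above implements, assuming the gated delta-rule formulation and ignoring head/sequence broadcasting and transposition conventions:

    $S \leftarrow e^{g_t} S, \qquad S \leftarrow S + \beta_t\, k_t \left(v_t - S^{\top} k_t\right)^{\top}, \qquad o_t = S^{\top} q_t$

i.e. decay the S_v x S_v state by the gate, apply a beta-weighted rank-1 delta-rule correction built from the current key/value, and read the output out with the query; the kv_mem and state_q sum_rows reductions above are the $S^{\top} k_t$ and $S^{\top} q_t$ contractions.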
std::pair<ggml_tensor *, ggml_tensor *> delta_net::build_fused_delta_net(ggml_context * ctx0,
ggml_tensor * q, ggml_tensor * k, ggml_tensor * v,
ggml_tensor * g, ggml_tensor * beta, ggml_tensor * state,
@@ -544,9 +246,7 @@ ggml_tensor * delta_net::build_layer_attn_linear_core(ggml_context * ctx0, ggml_
const int64_t n_seqs = 1;
const int64_t n_seq_tokens = n_tok;

auto qkvz = build_qkvz(ctx0, cur, il, cb);
ggml_tensor * qkv_mixed = qkvz.first;
ggml_tensor * z = qkvz.second;
auto [qkv_mixed, z] = build_qkvz(ctx0, cur, il, cb);

ggml_tensor *alpha, *beta;
if (model.layers[il].ssm_beta_alpha) {
@@ -679,14 +379,8 @@ ggml_tensor * delta_net::build_layer_attn_linear_core(ggml_context * ctx0, ggml_
GGML_ASSERT(identity != nullptr);
GGML_ASSERT(diag_mask != nullptr);

std::pair<ggml_tensor *, ggml_tensor *> attn_out;
// The fused delta-net implementation is only faster than chunked for n_tok <= 8, so use it only in that case
attn_out = n_tok <= lctx.cparams.fused_delta_net ? build_fused_delta_net(ctx0, q_conv, k_conv, v_conv, gate, beta, state, il, cb) :
n_tok == 1 ? build_delta_net_autoregressive(ctx0, q_conv, k_conv, v_conv, gate, beta, state, il, cb)
: build_delta_net_chunking(ctx0, q_conv, k_conv, v_conv, gate, beta, state, causal_mask, identity, diag_mask, il, cb);
auto [output, new_state] = build_fused_delta_net(ctx0, q_conv, k_conv, v_conv, gate, beta, state, il, cb);

ggml_tensor * output = attn_out.first;
ggml_tensor * new_state = attn_out.second;
cb(output, "attn_output", il);
cb(new_state, "new_state", il);

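The removed comment gives the rationale for the old dispatch: the fused kernel was only expected to beat the chunked path for small batches, around n_tok <= 8. As a concrete reading of the removed ternary: with -fdn 8 a 512-token prompt went through build_delta_net_chunking, a single generated token went through build_fused_delta_net, and with -fdn 0 single-token steps fell back to build_delta_net_autoregressive. After this change build_fused_delta_net is called unconditionally.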
@@ -699,8 +393,7 @@ ggml_tensor * delta_net::build_layer_attn_linear_core(ggml_context * ctx0, ggml_
ggml_tensor * new_ssm_flat = ggml_reshape_2d(ctx0, new_state, ssm_state_dim, 1);
ggml_tensor * new_state_flat = ggml_concat(ctx0, new_conv_flat, new_ssm_flat, 0);

ggml_tensor * state_update = new_state_flat;
ggml_build_forward_expand(gf, ggml_cpy(ctx0, state_update, state_dst));
ggml_build_forward_expand(gf, ggml_cpy(ctx0, new_state_flat, state_dst));

ggml_tensor * attn_out_2d = ggml_reshape_2d(ctx0, output, head_v_dim, num_v_heads * n_tok);
ggml_tensor * z_2d = ggml_reshape_2d(ctx0, z, head_v_dim, num_v_heads * n_tok);
@@ -717,7 +410,6 @@ ggml_tensor * delta_net::build_layer_attn_linear_core(ggml_context * ctx0, ggml_
cb(out, "linear_attn_out", il);

return out;
//return ggml_reshape_2d(ctx0, out, hparams.n_embd, n_tok);

}

@@ -8,17 +8,6 @@ struct delta_net {
delta_net(llama_context & lctx, const llama_batch & batch);
~delta_net();

static std::pair<ggml_tensor *, ggml_tensor *> build_delta_net_chunking(ggml_context * ctx0,
ggml_tensor * q, ggml_tensor * k, ggml_tensor * v,
ggml_tensor * g, ggml_tensor * beta, ggml_tensor * state,
ggml_tensor * causal_mask, ggml_tensor * identity,
ggml_tensor * diag_mask, int il, const llm_build_cb & cb);

static std::pair<ggml_tensor *, ggml_tensor *> build_delta_net_autoregressive(ggml_context * ctx0,
ggml_tensor * q, ggml_tensor * k, ggml_tensor * v,
ggml_tensor * g, ggml_tensor * beta, ggml_tensor * state,
int il, const llm_build_cb & cb);

static std::pair<ggml_tensor *, ggml_tensor *> build_fused_delta_net(ggml_context * ctx0,
ggml_tensor * q, ggml_tensor * k, ggml_tensor * v,
ggml_tensor * g, ggml_tensor * beta, ggml_tensor * state,

@@ -1512,6 +1512,7 @@ static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
LLAMA_LOG_INFO("%s: ssm_d_inner = %u\n", __func__, hparams.ssm_d_inner);
LLAMA_LOG_INFO("%s: ssm_d_state = %u\n", __func__, hparams.ssm_d_state);
LLAMA_LOG_INFO("%s: ssm_dt_rank = %u\n", __func__, hparams.ssm_dt_rank);
LLAMA_LOG_INFO("%s: ssm_n_group = %u\n", __func__, hparams.ssm_n_group);
}

LLAMA_LOG_INFO("%s: model type = %s\n", __func__, llama_model_type_name(model.type));
@@ -4394,7 +4395,6 @@ struct llama_context_params llama_context_default_params() {
/*.split_mode_graph_scheduling =*/ false,
// /*.split_mode_f16 =*/ true,
/*.scheduler_async =*/ false,
/*.fused_delta_net =*/ 65536,
/*.mtp =*/ false,
/*.mtp_op_type =*/ MTP_OP_NONE,
/*.abort_callback =*/ nullptr,
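A minimal usage sketch, not taken from the commit: fused_delta_net sits in llama_context_params next to scheduler_async, so an embedder could override the 65536 default when creating a context. The snippet assumes the usual (model, params) signature of llama_init_from_model; the threshold value is a placeholder.

    llama_model * model = /* loaded elsewhere */;
    llama_context_params cparams = llama_context_default_params();
    cparams.fused_delta_net = 32;  // hypothetical: fuse delta-net only for batches of at most 32 tokens
    llama_context * ctx = llama_init_from_model(model, cparams);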
@@ -4766,7 +4766,6 @@ struct llama_context * llama_init_from_model(
cparams.split_mode_graph_scheduling = params.split_mode_graph_scheduling;
//cparams.split_mode_f16 = params.split_mode_f16;
cparams.scheduler_async = params.scheduler_async;
cparams.fused_delta_net = params.fused_delta_net;
cparams.min_experts = params.min_experts;
cparams.thresh_experts = params.thresh_experts;
cparams.cuda_params = params.cuda_params;
@@ -4873,7 +4872,6 @@ struct llama_context * llama_init_from_model(
//LLAMA_LOG_INFO("%s: split_mode_f16= %d\n", __func__, cparams.split_mode_f16);
LLAMA_LOG_INFO("%s: reduce_type = %s\n", __func__, ggml_type_name(cparams.reduce_type));
LLAMA_LOG_INFO("%s: sched_async = %d\n", __func__, cparams.scheduler_async);
LLAMA_LOG_INFO("%s: fused_delta = %d\n", __func__, cparams.fused_delta_net);
LLAMA_LOG_INFO("%s: ser = %d, %g\n", __func__, cparams.min_experts, cparams.thresh_experts);
LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, cparams.rope_freq_base);
LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, cparams.rope_freq_scale);