diff --git a/common/common.cpp b/common/common.cpp
index f0a4d3dd..83c8d562 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -1531,6 +1531,10 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         params.scheduler_async = true;
         return true;
     }
+    if (arg == "-fdn" || arg == "--fused-delta-net") {
+        params.fused_delta_net = true;
+        return true;
+    }
     if (arg == "-smf16" || arg == "--split-mode-f16") {
         params.reduce_type = "f16";
         //params.split_mode_f16 = true;
@@ -2258,6 +2262,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
     options.push_back({ "*", "-grt, --graph-reduce-type", "Type for data exchange between GPUs (default: %s)", "f32"});
     options.push_back({ "*", "-smgs, --split-mode-graph-scheduling,", "Force Split Mode Graph Scheduling (default: %d)", params.split_mode_graph_scheduling});
     options.push_back({ "*", "-sas, --scheduler_async,", "Async evaluation of compute graphs: %d)", params.scheduler_async});
+    options.push_back({ "*", "-fdn, --fused-delta-net", "Use fused delta-net for TG with recurrent models: %d)", params.fused_delta_net});
     options.push_back({ "*", "-vq, --validate-quants", "validate quantized data while loading the model (default: %d)", params.validate_quants});
     options.push_back({ "*", "-p, --prompt PROMPT", "prompt to start generation with\n"
                                                     "in conversation mode, this will be used as system prompt\n"
@@ -3336,6 +3341,7 @@ struct llama_context_params common_context_params_to_llama(const gpt_params & pa
     cparams.split_mode_graph_scheduling = params.split_mode_graph_scheduling;
     //cparams.split_mode_f16 = params.split_mode_f16;
     cparams.scheduler_async = params.scheduler_async;
+    cparams.fused_delta_net = params.fused_delta_net;
     cparams.min_experts = params.min_experts;
     cparams.thresh_experts = params.thresh_experts;
     cparams.only_active_experts = params.only_active_exps;
@@ -4346,6 +4352,7 @@ void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const l
     //fprintf(stream, "split_mode_f16: %s # default: true\n", params.split_mode_f16 ? "true" : "false");
     fprintf(stream, "reduce_type: %s # default f16\n", params.reduce_type.c_str());
     fprintf(stream, "scheduler_async: %s # default: false\n", params.scheduler_async ? "true" : "false");
+    fprintf(stream, "fused_delta_net: %s # default: false\n", params.fused_delta_net ? "true" : "false");
     fprintf(stream, "ser: %d,%g # defaulr: -1,0\n", params.min_experts, params.thresh_experts);
     fprintf(stream, "temp: %f # default: 0.8\n", sparams.temp);
diff --git a/common/common.h b/common/common.h
index 26cd520e..b29c9d97 100644
--- a/common/common.h
+++ b/common/common.h
@@ -357,6 +357,7 @@ struct gpt_params {
     bool split_mode_graph_scheduling = false; // if true, force split mode graph scheduling
     //bool split_mode_f16 = true; // if true, intermediate results will be cast to f16 before copying to other GPUs to perform reduce ops
     bool scheduler_async = false; // if true, in split mode graph the scheduler will use multiple threads to evaluate the graph
+    bool fused_delta_net = false; // if true, use fused delta-net for TG with hybrid/recurrent models
     bool has_mtp = false; // enable MTP if supported by the model

     std::string cache_type_k = "f16"; // KV cache data type for the K
diff --git a/include/llama.h b/include/llama.h
index 104f5d40..f1b43a5a 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -456,6 +456,7 @@ extern "C" {
         bool split_mode_graph_scheduling; // if true, force split mode graph scheduling
         //bool split_mode_f16; // if true, cast intermediate results to f16 before copying to other GPUs
         bool scheduler_async; // if true, with split mode "graph" graph evaluation will be done using multiple threads
+        bool fused_delta_net;
         bool mtp; // Activate MTP if supported
         enum llama_mtp_op_type mtp_op_type;
diff --git a/src/llama-cparams.h b/src/llama-cparams.h
index b178059f..05bda231 100644
--- a/src/llama-cparams.h
+++ b/src/llama-cparams.h
@@ -43,6 +43,7 @@ struct llama_cparams {
     bool split_mode_graph_scheduling;
     //bool split_mode_f16;
     bool scheduler_async;
+    bool fused_delta_net;
     int min_experts;
     float thresh_experts;
     bool mtp;
diff --git a/src/llama-delta-net.cpp b/src/llama-delta-net.cpp
index 0a15a900..a0949d5a 100644
--- a/src/llama-delta-net.cpp
+++ b/src/llama-delta-net.cpp
@@ -679,10 +679,9 @@ ggml_tensor * delta_net::build_layer_attn_linear_core(ggml_context * ctx0, ggml_
     GGML_ASSERT(identity != nullptr);
     GGML_ASSERT(diag_mask != nullptr);

-    attn_out = n_tok == 1
-        //? build_delta_net_autoregressive(ctx0, q_conv, k_conv, v_conv, gate, beta, state, il, cb)
-        ? build_fused_delta_net(ctx0, q_conv, k_conv, v_conv, gate, beta, state, il, cb)
-        : build_delta_net_chunking(ctx0, q_conv, k_conv, v_conv, gate, beta, state, causal_mask, identity, diag_mask, il, cb);
+    attn_out = n_tok == 1 ? lctx.cparams.fused_delta_net ? build_fused_delta_net(ctx0, q_conv, k_conv, v_conv, gate, beta, state, il, cb)
+                                                         : build_delta_net_autoregressive(ctx0, q_conv, k_conv, v_conv, gate, beta, state, il, cb)
+                          : build_delta_net_chunking(ctx0, q_conv, k_conv, v_conv, gate, beta, state, causal_mask, identity, diag_mask, il, cb);
     ggml_tensor * output = attn_out.first;
     ggml_tensor * new_state = attn_out.second;
     cb(output, "attn_output", il);
diff --git a/src/llama.cpp b/src/llama.cpp
index 4442e2dd..00f24c2c 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -4378,8 +4378,9 @@ struct llama_context_params llama_context_default_params() {
         /*.only_active_experts =*/ false,
         /*.k_cache_hadamard =*/ false,
         /*.split_mode_graph_scheduling =*/ false,
-        // /*.split_mode_f16 =*/ true,
+        // /*.split_mode_f16 =*/ true,
         /*.scheduler_async =*/ false,
+        /*.fused_delta_net =*/ false,
         /*.mtp =*/ false,
         /*.mtp_op_type =*/ MTP_OP_NONE,
         /*.abort_callback =*/ nullptr,
@@ -4750,6 +4751,7 @@ struct llama_context * llama_init_from_model(
     cparams.split_mode_graph_scheduling = params.split_mode_graph_scheduling;
     //cparams.split_mode_f16 = params.split_mode_f16;
     cparams.scheduler_async = params.scheduler_async;
+    cparams.fused_delta_net = params.fused_delta_net;
     cparams.min_experts = params.min_experts;
     cparams.thresh_experts = params.thresh_experts;
     cparams.cuda_params = params.cuda_params;
@@ -4835,7 +4837,7 @@ struct llama_context * llama_init_from_model(
         cparams.mtp = 0;
     }

-    cparams.mtp_op_type = params.mtp_op_type;
+    cparams.mtp_op_type = params.mtp_op_type;

     LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, cparams.n_ctx);
     LLAMA_LOG_INFO("%s: n_batch = %u\n", __func__, cparams.n_batch);
@@ -4856,6 +4858,7 @@ struct llama_context * llama_init_from_model(
     //LLAMA_LOG_INFO("%s: split_mode_f16= %d\n", __func__, cparams.split_mode_f16);
     LLAMA_LOG_INFO("%s: reduce_type = %s\n", __func__, ggml_type_name(cparams.reduce_type));
     LLAMA_LOG_INFO("%s: sched_async = %d\n", __func__, cparams.scheduler_async);
+    LLAMA_LOG_INFO("%s: fused_delta = %d\n", __func__, cparams.fused_delta_net);
     LLAMA_LOG_INFO("%s: ser = %d, %g\n", __func__, cparams.min_experts, cparams.thresh_experts);
     LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, cparams.rope_freq_base);
     LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, cparams.rope_freq_scale);
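For reference, below is a minimal sketch (not part of the patch) of how a caller could opt in to the new path through the `llama_context_params` field this diff adds. The wrapper function name and the assumption that a `llama_model *` has already been loaded elsewhere are illustrative only; the field, `llama_context_default_params()`, and `llama_init_from_model()` come from the diff itself.

```cpp
// Hedged sketch: enable the fused delta-net path for token generation via the
// new llama_context_params::fused_delta_net field (equivalent to passing
// -fdn / --fused-delta-net on the command line). The helper name and the
// pre-loaded `model` pointer are assumptions for illustration.
#include "llama.h"

llama_context * init_ctx_with_fused_delta_net(llama_model * model) {
    llama_context_params cparams = llama_context_default_params();
    cparams.fused_delta_net = true; // default is false

    // llama_init_from_model() copies the flag into the internal cparams and
    // logs it as "fused_delta"; single-token delta-net layers then take the
    // fused path instead of build_delta_net_autoregressive().
    return llama_init_from_model(model, cparams);
}
```

On the command line the same behavior is reached with `-fdn` / `--fused-delta-net`, which `gpt_params_find_arg` records in `gpt_params` and `common_context_params_to_llama` forwards to this field.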