Graph reuse (#947)

* Add mainline-compatible FA command line option

* Graph reuse: add command line argument to turn it on

* WIP

* This seems to work

* This is perhaps cleaner

* Change the command line option to -gr

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Author: Kawrakow
Date: 2025-11-14 06:58:19 +02:00
Committed by: GitHub
Parent: 22c20fcd6d
Commit: 6b9d1bf4b4

9 changed files with 174 additions and 38 deletions
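The commit message above describes wiring a new `-gr` / `--graph-reuse` boolean through `gpt_params` into the context parameters. For reference, here is a minimal, hedged C++ sketch of enabling the same option directly through the C API instead of the CLI flag; it assumes the `graph_reuse` field that `llama_context_params_from_gpt_params` copies in the hunk below, and the surrounding model/context boilerplate is standard llama.cpp usage, not part of this diff.

```cpp
// Sketch only: enable graph reuse programmatically, equivalent to -gr / --graph-reuse.
// Assumes llama_context_params carries the graph_reuse field introduced by this change.
#include "llama.h"

int main(int argc, char ** argv) {
    if (argc < 2) return 1;

    llama_backend_init();

    llama_model_params mparams = llama_model_default_params();
    llama_model * model = llama_load_model_from_file(argv[1], mparams);
    if (!model) return 1;

    llama_context_params cparams = llama_context_default_params();
    cparams.graph_reuse = true;   // what the new -gr / --graph-reuse flag sets

    llama_context * ctx = llama_new_context_with_model(model, cparams);
    if (!ctx) { llama_free_model(model); return 1; }

    // ... decode batches as usual; compute graphs may now be reused across calls ...

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}
```

The struct default stays `false`, so graph reuse remains opt-in both on the command line and through the API.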

common/common.cpp

@@ -1135,6 +1135,7 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
params.flash_attn = false;
return true;
}
if (arg == "-fa" || arg == "--flash-attn") {
CHECK_ARG
std::string next_arg{argv[i]};
@@ -1180,6 +1181,10 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
params.rope_cache = true;
return true;
}
if (arg == "-gr" || arg == "--graph-reuse") {
params.graph_reuse = true;
return true;
}
if (arg == "-ser" || arg == "--smart-expert-reduction") {
CHECK_ARG
auto values = string_split_pairs<int,float>(argv[i], ',');
@@ -2004,6 +2009,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
options.push_back({ "*", "-no-fug, --no-fused-up-gate", "disaable fused up-gate (default: %s)", params.fused_up_gate ? "enabled" : "disabled" });
options.push_back({ "*", "-no-mmad, --no-fused-mul-multiadd", "disaable fused mul-multi_add (default: %s)", params.fused_mmad? "enabled" : "disabled" });
options.push_back({ "*", "-rcache, --rope-cache", "enable RoPE cache (default: %s)", params.rope_cache ? "enabled" : "disabled" });
options.push_back({ "*", "-gr, --graph-reuse", "enable graph reuse (default: %s)", params.graph_reuse ? "enabled" : "disabled" });
options.push_back({ "*", "-ser, --smart-expert-reduction,","experts reduction (default: %d,%g)", params.min_experts, params.thresh_experts});
options.push_back({ "*", "-mqkv, --merge-qkv,", "merge Q,K,V (default: %d)", params.merge_qkv});
options.push_back({ "*", "-p, --prompt PROMPT", "prompt to start generation with\n"
@@ -2979,6 +2985,7 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
cparams.fused_up_gate = params.fused_up_gate;
cparams.fused_mmad = params.fused_mmad;
cparams.rope_cache = params.rope_cache;
cparams.graph_reuse = params.graph_reuse;
cparams.min_experts = params.min_experts;
cparams.thresh_experts = params.thresh_experts;
cparams.only_active_experts = params.only_active_exps;
@@ -4123,7 +4130,8 @@ void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const l
fprintf(stream, "grouped_expert_routing: %s # default: false\n", params.grouped_expert_routing ? "true" : "false");
fprintf(stream, "fused_up_gate: %s # default: true\n", params.fused_up_gate ? "true" : "false");
fprintf(stream, "fused_mmad: %s # default: true\n", params.fused_mmad ? "true" : "false");
fprintf(stream, "rope_cache: %s # default: true\n", params.rope_cache ? "true" : "false");
fprintf(stream, "rope_cache: %s # default: false\n", params.rope_cache ? "true" : "false");
fprintf(stream, "graph_reuse: %s # default: false\n", params.graph_reuse ? "true" : "false");
fprintf(stream, "ser: %d,%g # defaulr: -1,0\n", params.min_experts, params.thresh_experts);
fprintf(stream, "temp: %f # default: 0.8\n", sparams.temp);

common/common.h

@@ -254,6 +254,7 @@ struct gpt_params {
bool fused_mmad = true; // fused mul+multi_add op
bool grouped_expert_routing = false; // if to use grouped expert routing (BailingMoeV2 arch)
bool rope_cache = false; // if to use RoPE cache (for supported models)
bool graph_reuse = false; // if to reuse compute graphs
int min_experts = -1;
float thresh_experts = 0;