Disable some fusion, RoPE cache off by default (#894)

* Disable some fusion and turn the RoPE cache off by default

* Minor

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Author: Kawrakow
Date: 2025-11-04 07:50:14 +02:00
Committed by: GitHub
Parent: fb0d5a995c
Commit: c23fda2103
4 changed files with 7 additions and 11 deletions
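Note: with this commit the RoPE cache is opt-in rather than opt-out. A hypothetical invocation enabling it explicitly (binary name and model path are placeholders, not taken from this commit):

    ./llama-cli -m /path/to/model.gguf --rope-cache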


@@ -1106,8 +1106,8 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         params.fused_mmad = false;
         return true;
     }
-    if (arg == "-no-rcache" || arg == "--no-rope-cache") {
-        params.rope_cache = false;
+    if (arg == "-rcache" || arg == "--rope-cache") {
+        params.rope_cache = true;
         return true;
     }
     if (arg == "-ser" || arg == "--smart-expert-reduction") {
@@ -1918,7 +1918,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
     options.push_back({ "*", "-ger, --grouped-expert-routing", "enable grouped expert routing (default: %s)", params.grouped_expert_routing ? "enabled" : "disabled" });
     options.push_back({ "*", "-no-fug, --no-fused-up-gate", "disable fused up-gate (default: %s)", params.fused_up_gate ? "enabled" : "disabled" });
     options.push_back({ "*", "-no-mmad, --no-fused-mul-multiadd", "disable fused mul-multi_add (default: %s)", params.fused_mmad ? "enabled" : "disabled" });
-    options.push_back({ "*", "-no-rcache, --no-rope-cache", "disable RoPE cache (default: %s)", params.rope_cache ? "enabled" : "disabled" });
+    options.push_back({ "*", "-rcache, --rope-cache", "enable RoPE cache (default: %s)", params.rope_cache ? "enabled" : "disabled" });
     options.push_back({ "*", "-ser, --smart-expert-reduction,", "experts reduction (default: %d,%g)", params.min_experts, params.thresh_experts });
     options.push_back({ "*", "-mqkv, --merge-qkv,", "merge Q,K,V (default: %d)", params.merge_qkv });
     options.push_back({ "*", "-p, --prompt PROMPT", "prompt to start generation with\n"


@@ -249,7 +249,7 @@ struct gpt_params {
     bool fused_up_gate = true; // fused up*unary(gate) op
     bool fused_mmad = true; // fused mul+multi_add op
     bool grouped_expert_routing = false; // whether to use grouped expert routing (BailingMoeV2 arch)
-    bool rope_cache = true; // whether to use RoPE cache (for supported models)
+    bool rope_cache = false; // whether to use RoPE cache (for supported models)
     int min_experts = -1;
     float thresh_experts = 0;
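Note: the help line in the second hunk derives its default string from this struct member, so with the new default it reports "disabled" until the flag is passed. A small sketch of that interplay (struct name simplified; not the full gpt_params):

    #include <cstdio>

    // Simplified stand-in for gpt_params; only the field touched by this commit.
    struct gpt_params_sketch {
        bool rope_cache = false; // new default after this commit
    };

    int main() {
        gpt_params_sketch params;
        // Mirrors the ternary in gpt_params_print_usage(): with the new default
        // the help text reports "disabled" until -rcache/--rope-cache is passed.
        printf("-rcache, --rope-cache    enable RoPE cache (default: %s)\n",
               params.rope_cache ? "enabled" : "disabled");
        return 0;
    }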