Change fmoe to be on by default

Iwan Kawrakow
2025-10-25 09:18:48 +03:00
parent 2522c97dc9
commit 03a0f4d3cc
3 changed files with 5 additions and 5 deletions


@@ -1008,8 +1008,8 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_params & params)
         params.attn_max_batch = std::stoi(argv[i]);
         return true;
     }
-    if (arg == "-fmoe" || arg == "--fused-moe") {
-        params.fused_moe_up_gate = true;
+    if (arg == "-no-fmoe" || arg == "--no-fused-moe") {
+        params.fused_moe_up_gate = false;
         return true;
     }
     if (arg == "-ger" || arg == "--grouped-expert-routing") {
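
The hunk above inverts the option rather than dropping it: fused MoE is now on unless the user opts out with -no-fmoe / --no-fused-moe. A minimal standalone restatement of the new flag handling (simplified for illustration, not taken verbatim from this commit):

#include <string>

// Simplified sketch: the default is already true, so the only thing the flag
// can do is turn the fused MoE path off.
struct opts { bool fused_moe_up_gate = true; };

static bool parse_fmoe_flag(opts & o, const std::string & arg) {
    if (arg == "-no-fmoe" || arg == "--no-fused-moe") {
        o.fused_moe_up_gate = false;   // opt out of fused MoE
        return true;
    }
    return false;
}

int main(int argc, char ** argv) {
    opts o;
    for (int i = 1; i < argc; ++i) parse_fmoe_flag(o, argv[i]);
    return o.fused_moe_up_gate ? 0 : 1;   // exit code reflects the final setting
}
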
@@ -1807,7 +1807,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & params)
     options.push_back({ "*", "-fa, --flash-attn", "enable Flash Attention (default: %s)", params.flash_attn ? "enabled" : "disabled" });
     options.push_back({ "*", "-mla, --mla-use", "enable MLA (default: %d)", params.mla_attn });
     options.push_back({ "*", "-amb, --attention-max-batch", "max batch size for attention computations (default: %d)", params.attn_max_batch});
-    options.push_back({ "*", "-fmoe, --fused-moe", "enable fused MoE (default: %s)", params.fused_moe_up_gate ? "enabled" : "disabled" });
+    options.push_back({ "*", "-no-fmoe, --no-fused-moe", "disable fused MoE (default: %s)", params.fused_moe_up_gate ? "enabled" : "disabled" });
     options.push_back({ "*", "-ger, --grouped-expert-routing", "enable grouped expert routing (default: %s)", params.grouped_expert_routing ? "enabled" : "disabled" });
     options.push_back({ "*", "-no-fug, --no-fused-up-gate", "disable fused up-gate (default: %s)", params.fused_up_gate ? "enabled" : "disabled" });
     options.push_back({ "*", "-no-mmad, --no-fused-mul-multiadd", "disable fused mul-multi_add (default: %s)", params.fused_mmad? "enabled" : "disabled" });
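
With the default flipped, the help entry above now reports the feature as enabled. A small standalone illustration of what the updated usage line resolves to (the real help output is formatted by gpt_params_print_usage and may differ in layout):

#include <cstdio>

int main() {
    bool fused_moe_up_gate = true;   // new default introduced by this commit
    std::printf("-no-fmoe, --no-fused-moe   disable fused MoE (default: %s)\n",
                fused_moe_up_gate ? "enabled" : "disabled");
    // prints: -no-fmoe, --no-fused-moe   disable fused MoE (default: enabled)
    return 0;
}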


@@ -233,7 +233,7 @@ struct gpt_params {
     bool flash_attn = false; // flash attention
     int mla_attn = 0; // MLA 0: standard attention, 1: MLA with K and transposed V cache, 2: MLA with just K cache
     int attn_max_batch = 0; // Max batch size to use when computing attention (only applicable if flash_attn = false)
-    bool fused_moe_up_gate = false; // fused up*unary(gate) op for MoE models
+    bool fused_moe_up_gate = true; // fused up*unary(gate) op for MoE models
     bool fused_up_gate = true; // fused up*unary(gate) op
     bool fused_mmad = true; // fused mul+multi_add op
     bool grouped_expert_routing = false; // whether to use grouped expert routing (BailingMoeV2 arch)
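
Code that constructs gpt_params directly, bypassing argument parsing, sees the new default immediately. A minimal sketch (the "common.h" include path is assumed from the usual llama.cpp layout and is not part of this commit):

#include "common.h"

int main() {
    gpt_params params;
    // fused_moe_up_gate is now true out of the box; fused_up_gate and
    // fused_mmad were already true before this commit.
    if (params.fused_moe_up_gate) {
        params.fused_moe_up_gate = false;   // what -no-fmoe / --no-fused-moe does
    }
    return 0;
}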


@@ -3753,7 +3753,7 @@ struct llama_context_params llama_context_default_params() {
         /*.flash_attn =*/ false,
         /*.mla_attn =*/ 0,
         /*.attn_max_batch =*/ 0,
-        /*.fused_moe_up_gate =*/ false,
+        /*.fused_moe_up_gate =*/ true,
         /*.grouped_expert_routing =*/ false,
         /*.fused_up_gate =*/ true,
         /*.fused_mmad =*/ true,
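
API consumers get the same default through llama_context_default_params(), so callers that relied on the fused MoE path being off must now clear the field explicitly. A minimal sketch (the "llama.h" header name is assumed from the usual llama.cpp layout):

#include "llama.h"

int main() {
    llama_context_params cparams = llama_context_default_params();
    // cparams.fused_moe_up_gate is now true by default.
    cparams.fused_moe_up_gate = false;   // restore the pre-commit behaviour
    return 0;
}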