From 03a0f4d3cc4724b8bf8396099b8b1fa2977367a9 Mon Sep 17 00:00:00 2001
From: Iwan Kawrakow
Date: Sat, 25 Oct 2025 09:18:48 +0300
Subject: [PATCH] Change fmoe to be on by default

---
 common/common.cpp | 6 +++---
 common/common.h   | 2 +-
 src/llama.cpp     | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/common/common.cpp b/common/common.cpp
index df4d5d0f..0e3da14d 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -1008,8 +1008,8 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         params.attn_max_batch = std::stoi(argv[i]);
         return true;
     }
-    if (arg == "-fmoe" || arg == "--fused-moe") {
-        params.fused_moe_up_gate = true;
+    if (arg == "-no-fmoe" || arg == "--no-fused-moe") {
+        params.fused_moe_up_gate = false;
         return true;
     }
     if (arg == "-ger" || arg == "--grouped-expert-routing") {
@@ -1807,7 +1807,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
     options.push_back({ "*", "-fa, --flash-attn", "enable Flash Attention (default: %s)", params.flash_attn ? "enabled" : "disabled" });
     options.push_back({ "*", "-mla, --mla-use", "enable MLA (default: %d)", params.mla_attn });
     options.push_back({ "*", "-amb, --attention-max-batch", "max batch size for attention computations (default: %d)", params.attn_max_batch});
-    options.push_back({ "*", "-fmoe, --fused-moe", "enable fused MoE (default: %s)", params.fused_moe_up_gate ? "enabled" : "disabled" });
+    options.push_back({ "*", "-no-fmoe, --no-fused-moe", "disable fused MoE (default: %s)", params.fused_moe_up_gate ? "enabled" : "disabled" });
     options.push_back({ "*", "-ger, --grouped-expert-routing", "enable grouped expert routing (default: %s)", params.grouped_expert_routing ? "enabled" : "disabled" });
     options.push_back({ "*", "-no-fug, --no-fused-up-gate", "disaable fused up-gate (default: %s)", params.fused_up_gate ? "enabled" : "disabled" });
     options.push_back({ "*", "-no-mmad, --no-fused-mul-multiadd", "disaable fused mul-multi_add (default: %s)", params.fused_mmad? "enabled" : "disabled" });
diff --git a/common/common.h b/common/common.h
index 327afb24..2deece17 100644
--- a/common/common.h
+++ b/common/common.h
@@ -233,7 +233,7 @@ struct gpt_params {
     bool flash_attn = false; // flash attention
     int mla_attn = 0; // MLA 0: standard attention, 1: MLA with K and transposed V cache, 2: MLA with just K cache
     int attn_max_batch = 0; // Max batch size to use when computing attention (only applicable if flash_attn = false)
-    bool fused_moe_up_gate = false; // fused up*unary(gate) op for MoE models
+    bool fused_moe_up_gate = true; // fused up*unary(gate) op for MoE models
     bool fused_up_gate = true; // fused up*unary(gate) op
     bool fused_mmad = true; // fused mul+multi_add op
     bool grouped_expert_routing = false; // if to use grouped expert routing (BailingMoeV2 arch)
diff --git a/src/llama.cpp b/src/llama.cpp
index fe3bfbce..0011e59b 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -3753,7 +3753,7 @@ struct llama_context_params llama_context_default_params() {
         /*.flash_attn =*/ false,
         /*.mla_attn =*/ 0,
         /*.attn_max_batch =*/ 0,
-        /*.fused_moe_up_gate =*/ false,
+        /*.fused_moe_up_gate =*/ true,
         /*.grouped_expert_routing =*/ false,
         /*.fused_up_gate =*/ true,
         /*.fused_mmad =*/ true,
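
Note for downstream API users: the same flipped default is visible through
llama_context_default_params(), so callers that relied on fused MoE being off
must now clear the flag explicitly. A minimal sketch, assuming a build against
this fork's llama.h (the model loading and context creation calls are elided):

    #include "llama.h"

    int main(void) {
        // After this patch, fused MoE (the fused up*unary(gate) op for MoE
        // models) is enabled by default.
        struct llama_context_params cparams = llama_context_default_params();
        // cparams.fused_moe_up_gate == true here; clear it to restore the old
        // behaviour, mirroring the new -no-fmoe / --no-fused-moe CLI flags.
        cparams.fused_moe_up_gate = false;
        // ... pass cparams to llama_new_context_with_model() as usual ...
        return 0;
    }

On the command line the old -fmoe/--fused-moe flag is removed rather than made
a no-op: fused MoE is simply on unless -no-fmoe/--no-fused-moe is passed.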