From 7671335ac901bf3af94b6d5779c0db5193fecd20 Mon Sep 17 00:00:00 2001
From: Kawrakow
Date: Mon, 12 Jan 2026 12:49:18 +0200
Subject: [PATCH] Add command line option to merge experts up/gate

---
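Notes (commentary after the "---" marker, not part of the commit
message): a MoE FFN layer with a gated activation computes two
independent matrix multiplications per expert, one with ffn_up_exps and
one with ffn_gate_exps, over the same input activations. Storing the
two tensors as a single contiguous tensor lets both projections come
out of one larger matmul, which can reduce per-op overhead and improve
memory locality. The sketch below is illustrative only (plain C++, no
ggml; the names fused_up_gate, w_fused, n_embd, n_ff and the row layout
are inventions for the example). It shows the idea for one token and
one expert, assuming the up rows are stored first and the activation is
silu(gate) * up; the actual layout and activation may differ per
architecture.

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // One pass over the merged [2*n_ff, n_embd] weight produces both the
    // up and the gate projections; the two halves are then recombined.
    static std::vector<float> fused_up_gate(const std::vector<float> & w_fused, // merged weight, up rows first
                                            const std::vector<float> & x,       // input activations [n_embd]
                                            int n_embd, int n_ff) {
        std::vector<float> y(2 * n_ff, 0.0f);
        for (int r = 0; r < 2 * n_ff; ++r) {            // single matmul instead of two
            for (int c = 0; c < n_embd; ++c) {
                y[r] += w_fused[(std::size_t)r * n_embd + c] * x[c];
            }
        }
        std::vector<float> out(n_ff);
        for (int i = 0; i < n_ff; ++i) {                // split: up = y[0..n_ff), gate = y[n_ff..2*n_ff)
            const float up   = y[i];
            const float gate = y[n_ff + i];
            out[i] = up * (gate / (1.0f + std::exp(-gate))); // silu(gate) * up
        }
        return out;
    }

One consequence visible in the diff: when the merge is performed, the
merged tensor is built in memory instead of being a view into the
memory-mapped file, so the affected code paths clear use_mmap_buffer
(see the src/llama-load-tensors.cpp hunks).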
"true" : "false"); fprintf(stream, "ppl_output_type: %d # default: 0\n", params.ppl_output_type); diff --git a/common/common.h b/common/common.h index 79d12773..9addabe3 100644 --- a/common/common.h +++ b/common/common.h @@ -287,6 +287,7 @@ struct gpt_params { bool validate_quants = false; // if true, check for NaNs while loading the model bool only_active_exps = true; // if true, offload only active experts (relevant only for hybrid CPU/GPU) bool merge_qkv = false; // if true, merge separate Q, K, V tensors into a single, contiguous tensor + bool merge_up_gate_exps= false; // if true, merge ffn_up_exps and ffn_gate_exps into a single, contiguous tensor bool k_cache_hadamard = false; // if true, use Hadamard transform for the K-cache (only makes sense with quantized cache) bool split_mode_graph_scheduling = false; // if true, force split mode graph scheduling bool split_mode_f16 = true; // if true, intermediate results will be cast to f16 before copying to other GPUs to perform reduce ops diff --git a/include/llama.h b/include/llama.h index 8364a616..9a52c51c 100644 --- a/include/llama.h +++ b/include/llama.h @@ -392,6 +392,7 @@ extern "C" { bool use_thp; // use transparent huge pages (linux only) bool validate_quants; // if true, check for NaNs while loading the model bool merge_qkv; // if true, merge separate Q, K, V tensors into a single, contiguous tensor + bool merge_up_gate_exps; // if true, merge ffn_up_exps and ffn_gate_exps tensors into a single, contiguous tensor }; // NOTE: changing the default values of parameters marked as [EXPERIMENTAL] may cause crashes or incorrect results in certain configurations diff --git a/src/llama-load-tensors.cpp b/src/llama-load-tensors.cpp index 739403b6..c502fe53 100644 --- a/src/llama-load-tensors.cpp +++ b/src/llama-load-tensors.cpp @@ -1176,7 +1176,7 @@ bool create_tensors_helper::create_qwen3_moe_tensors(const LLM_TN & tn) { // MoE branch const int64_t n_ff_exp = hparams.n_ff_exp ? 
hparams.n_ff_exp : n_ff / n_expert_used; - bool merged = merge_up_gate_exps(tn, i, 0); + bool merged = ml.merge_up_gate_exps && merge_up_gate_exps(tn, i, 0); if (merged) { use_mmap_buffer = false; } else { @@ -2585,7 +2585,7 @@ bool create_tensors_helper::create_openai_moe_tensors(const LLM_TN & tn) { ggml_context *ctx_ffn_gate, *ctx_ffn_up, *ctx_ffn_down; layer.ffn_gate_inp = create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), { n_embd, n_expert}, 0); - bool merged = merge_up_gate_exps(tn, i, 2); + bool merged = ml.merge_up_gate_exps && merge_up_gate_exps(tn, i, 2); use_mmap_buffer &= !merged; if (merged) { ctx_ffn_gate = ctx_ffn_up = ctx_split; diff --git a/src/llama-model-loader.cpp b/src/llama-model-loader.cpp index d7c68b33..4e200211 100644 --- a/src/llama-model-loader.cpp +++ b/src/llama-model-loader.cpp @@ -204,7 +204,7 @@ namespace GGUFMeta { } llama_model_loader::llama_model_loader(const std::string & fname, bool use_mmap, bool check_tensors, - bool repack_tensors, bool use_thp, bool merge_qkv, + bool repack_tensors, bool use_thp, bool merge_qkv, bool merge_up_gate_exps, const llama_model_kv_override * param_overrides_p, const llama_model_tensor_buft_override * param_tensor_buft_overrides_p) { int trace = 0; @@ -497,6 +497,7 @@ llama_model_loader::llama_model_loader(const std::string & fname, bool use_mmap, this->repack_tensors = repack_tensors; this->use_thp = use_thp; this->merge_qkv = merge_qkv; + this->merge_up_gate_exps = merge_up_gate_exps; } llama_model_loader::~llama_model_loader() { diff --git a/src/llama-model-loader.h b/src/llama-model-loader.h index 366dea41..c59eaf4f 100644 --- a/src/llama-model-loader.h +++ b/src/llama-model-loader.h @@ -45,6 +45,7 @@ struct llama_model_loader { bool repack_tensors = false; bool use_thp = false; bool merge_qkv = false; + bool merge_up_gate_exps = false; llama_files files; llama_ftype ftype; @@ -79,7 +80,8 @@ struct llama_model_loader { std::string arch_name; LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN); - llama_model_loader(const std::string & fname, bool use_mmap, bool check_tensors, bool repack_tensors, bool use_thp, bool merge_qkv, + llama_model_loader(const std::string & fname, bool use_mmap, bool check_tensors, bool repack_tensors, bool use_thp, + bool merge_qkv, bool merge_up_gate_exps, const llama_model_kv_override * param_overrides_p, const llama_model_tensor_buft_override * param_tensor_buft_overrides_p); diff --git a/src/llama-quantize.cpp b/src/llama-quantize.cpp index c9938b38..927c3f31 100644 --- a/src/llama-quantize.cpp +++ b/src/llama-quantize.cpp @@ -1009,7 +1009,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s kv_overrides = v->data(); } llama_model_loader ml(fname_inp, use_mmap, /*check_tensors*/ true, /* repack_tensors */ false, - /* use_thp */ false, /* merge_qkv */ false, kv_overrides, nullptr); + /* use_thp */ false, /* merge_qkv */ false, /* merge_up_gate_exps */ false, kv_overrides, nullptr); ml.init_mappings(false); // no prefetching llama_model model; diff --git a/src/llama.cpp b/src/llama.cpp index 4c8f7b03..9e906157 100644 --- a/src/llama.cpp +++ b/src/llama.cpp @@ -2107,7 +2107,8 @@ static bool llm_load_tensors( static int llama_model_load(const std::string & fname, llama_model & model, llama_model_params & params) { try { llama_model_loader ml(fname, params.use_mmap, params.check_tensors, - params.repack_tensors, params.use_thp, params.merge_qkv, params.kv_overrides, params.tensor_buft_overrides); + params.repack_tensors, params.use_thp, params.merge_qkv, 
params.merge_up_gate_exps, + params.kv_overrides, params.tensor_buft_overrides); model.hparams.vocab_only = params.vocab_only; @@ -4020,6 +4021,7 @@ struct llama_model_params llama_model_default_params() { /*.use_thp =*/ false, /*.validate_quants =*/ false, /*.merge_qkv =*/ false, + /*.merge_up_gate_exps =*/ false, }; #ifdef GGML_USE_METAL
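-- 
Usage note (illustrative, not part of the patch): once applied, the
merge is requested at model load time with the new flag, e.g.

    ./llama-cli -m model.gguf -muge -p "hello"

or via the long form --merge-up-gate-experts (the exact binary name
depends on the build). The option defaults to off, so existing command
lines keep their current behavior, and quantization always constructs
its llama_model_loader with merging disabled, as the
src/llama-quantize.cpp hunk shows.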