Mirror of https://github.com/ikawrakow/ik_llama.cpp.git (synced 2026-01-26 17:20:01 +00:00)
Grouped expert routing (CPU only) (#836)
* Better argsort (CPU)
* Attempt at grouped topk
* This seems to do the trick for grouped experts routing
* Cleanup
* Trying to merge, something is not right
* Working merged grouped top_k (CPU)
* Add command line option to enable grouped expert routing
* Add grouped expert routing option to llama-bench

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
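The commit message refers to a grouped top_k for expert routing (used by the BailingMoeV2 architecture, per the comment on the new option in common.h). As rough orientation only, below is a minimal standalone sketch of what grouped top-k routing typically looks like: experts are partitioned into groups, each group is ranked by its strongest experts, only the best groups survive, and the final top-k experts are drawn from those groups. The group-scoring rule (sum of the two best experts per group), all names, and the sizes are illustrative assumptions; this is not the ggml implementation added by this commit.

// Illustrative sketch of grouped top-k expert routing (not the code from this commit).
// Assumes equal-sized groups with at least two experts each.
#include <algorithm>
#include <cstdio>
#include <functional>
#include <utility>
#include <vector>

static std::vector<int> grouped_top_k(const std::vector<float> & scores,
                                      int n_group, int n_group_used, int top_k) {
    const int n_expert  = (int)scores.size();
    const int per_group = n_expert / n_group;

    // Score each group by the sum of its two best experts (assumed rule).
    std::vector<std::pair<float, int>> group_scores(n_group);
    for (int g = 0; g < n_group; ++g) {
        std::vector<float> grp(scores.begin() + g*per_group, scores.begin() + (g + 1)*per_group);
        std::partial_sort(grp.begin(), grp.begin() + 2, grp.end(), std::greater<float>());
        group_scores[g] = { grp[0] + grp[1], g };
    }

    // Keep only the n_group_used best groups.
    std::partial_sort(group_scores.begin(), group_scores.begin() + n_group_used,
                      group_scores.end(), std::greater<std::pair<float, int>>());

    // Candidate experts come from the surviving groups only.
    std::vector<int> candidates;
    for (int i = 0; i < n_group_used; ++i) {
        const int g = group_scores[i].second;
        for (int j = 0; j < per_group; ++j) {
            candidates.push_back(g*per_group + j);
        }
    }

    // Final routing: top_k experts among the candidates.
    std::partial_sort(candidates.begin(), candidates.begin() + top_k, candidates.end(),
                      [&scores](int a, int b) { return scores[a] > scores[b]; });
    candidates.resize(top_k);
    return candidates;
}

int main() {
    // 8 experts in 4 groups of 2; keep the 2 best groups, route to 3 experts.
    const std::vector<float> scores = { 0.1f, 0.9f, 0.2f, 0.3f, 0.8f, 0.7f, 0.05f, 0.4f };
    for (int e : grouped_top_k(scores, /*n_group=*/4, /*n_group_used=*/2, /*top_k=*/3)) {
        std::printf("expert %d  score %.2f\n", e, scores[e]);
    }
    return 0;
}

Restricting the final selection to a few groups keeps the routing cheap; this is the kind of scheme the commit enables with -ger / --grouped-expert-routing.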
@@ -1012,6 +1012,10 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         params.fused_moe_up_gate = true;
         return true;
     }
+    if (arg == "-ger" || arg == "--grouped-expert-routing") {
+        params.grouped_expert_routing = true;
+        return true;
+    }
     if (arg == "-no-fug" || arg == "--no-fused-up-gate") {
         params.fused_up_gate = false;
         return true;

@@ -1800,6 +1804,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
     options.push_back({ "*", "-mla, --mla-use", "enable MLA (default: %d)", params.mla_attn });
     options.push_back({ "*", "-amb, --attention-max-batch", "max batch size for attention computations (default: %d)", params.attn_max_batch});
     options.push_back({ "*", "-fmoe, --fused-moe", "enable fused MoE (default: %s)", params.fused_moe_up_gate ? "enabled" : "disabled" });
+    options.push_back({ "*", "-ger, --grouped-expert-routing", "enable grouped expert routing (default: %s)", params.grouped_expert_routing ? "enabled" : "disabled" });
     options.push_back({ "*", "-no-fug, --no-fused-up-gate", "disable fused up-gate (default: %s)", params.fused_up_gate ? "enabled" : "disabled" });
     options.push_back({ "*", "-ser, --smart-expert-reduction,","experts reduction (default: %d,%g)", params.min_experts, params.thresh_experts});
     options.push_back({ "*", "-p, --prompt PROMPT", "prompt to start generation with\n"

@@ -2755,6 +2760,7 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
     cparams.mla_attn = params.mla_attn;
     cparams.attn_max_batch = params.attn_max_batch;
     cparams.fused_moe_up_gate = params.fused_moe_up_gate;
+    cparams.grouped_expert_routing = params.grouped_expert_routing;
     cparams.fused_up_gate = params.fused_up_gate;
     cparams.min_experts = params.min_experts;
     cparams.thresh_experts = params.thresh_experts;

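For orientation, here is a hedged sketch of the programmatic equivalent of passing -ger on the command line, using the parameter plumbing shown in the hunk above; the header names are assumptions and the snippet is illustrative only, not code from this commit.

// Illustrative sketch only: set the new field directly instead of via -ger.
#include "common.h"   // gpt_params, llama_context_params_from_gpt_params (assumed header)
#include "llama.h"    // llama_context_params (assumed header)

llama_context_params make_grouped_routing_cparams() {
    gpt_params params;
    params.grouped_expert_routing = true;   // same field the -ger flag sets
    // The field is copied into the context params exactly as in the diff above.
    return llama_context_params_from_gpt_params(params);
}
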
@@ -3871,6 +3877,7 @@ void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const l
     fprintf(stream, "mla_attn: %d # default: 0\n", params.mla_attn);
     fprintf(stream, "attn_max_batch: %d # default: 0\n", params.attn_max_batch);
     fprintf(stream, "fused_moe: %s # default: false\n", params.fused_moe_up_gate ? "true" : "false");
+    fprintf(stream, "grouped_expert_routing: %s # default: false\n", params.grouped_expert_routing ? "true" : "false");
     fprintf(stream, "fused_up_gate: %s # default: true\n", params.fused_up_gate ? "true" : "false");
     fprintf(stream, "ser: %d,%g # default: -1,0\n", params.min_experts, params.thresh_experts);
     fprintf(stream, "temp: %f # default: 0.8\n", sparams.temp);

@@ -235,6 +235,7 @@ struct gpt_params {
     int attn_max_batch = 0; // Max batch size to use when computing attention (only applicable if flash_attn = false)
     bool fused_moe_up_gate = false; // fused up*unary(gate) op for MoE models
     bool fused_up_gate = true; // fused up*unary(gate) op
+    bool grouped_expert_routing = false; // whether to use grouped expert routing (BailingMoeV2 arch)
     int min_experts = -1;
     float thresh_experts = 0;