Grouped expert routing (CPU only) (#836)

* Better argsort (CPU)

* Attempt at grouped topk

* This seems to do the trick for grouped experts routing

* Cleanup

* Trying to merge, something is not right

* Working merged grouped top_k (CPU)

* Add command line option to enable grouped expert routing

* Add grouped expert routing option to llama-bench

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
This commit is contained in:
Kawrakow
2025-10-16 14:57:02 +03:00
committed by GitHub
parent e66d307e13
commit cde642e591
11 changed files with 221 additions and 44 deletions

View File

@@ -261,6 +261,7 @@ struct cmd_params {
bool warmup;
bool repack = false;
bool fmoe = false;
bool ger = false; // ger = Grouped Expert Routing
bool no_fug = false;
bool use_thp = false;
output_formats output_format;
@@ -296,9 +297,10 @@ static const cmd_params cmd_params_defaults = {
/* verbose */ false,
/* warmup */ true,
/* repack */ false,
/* use_thp */ false,
/* fmoe */ false,
/* ger */ false,
/* no_fug */ false,
/* use_thp */ false,
/* output_format */ MARKDOWN,
/* output_format_stderr */ NONE,
};
@@ -341,6 +343,7 @@ static void print_usage(int /* argc */, char ** argv) {
printf(" -thp, --transparent-huge-pages <0|1> (default: %s)\n", cmd_params_defaults.use_thp? "1" : "0");
printf(" -ot, --override-tensor pattern (default: none)\n");
printf(" -fmoe, --fused-moe <0|1> (default: %s)\n", cmd_params_defaults.fmoe? "1" : "0");
printf(" -ger, --grouped-expert-routing <0|1> (default: %s)\n", cmd_params_defaults.ger ? "1" : "0");
printf(" -no-fug, --no-fused-up-gate <0|1> (default: %s)\n", cmd_params_defaults.no_fug? "1" : "0");
printf("\n");
printf("Multiple values can be given for each parameter by separating them with ',' or by specifying the parameter multiple times.\n");
@@ -739,6 +742,12 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
break;
}
params.fmoe = std::stoi(argv[i]);
} else if (arg == "-ger" || arg == "--grouped-expert-routing") {
if (++i >= argc) {
invalid_param = true;
break;
}
params.ger = std::stoi(argv[i]);
} else if (arg == "-no-fug" || arg == "--no-fused-up-gate") {
if (++i >= argc) {
invalid_param = true;
@@ -829,6 +838,7 @@ struct cmd_params_instance {
bool embeddings;
bool repack = false;
bool fmoe = false;
bool ger = false;
bool no_fug = false;
bool use_thp = false;
const llama_model_tensor_buft_override* buft_overrides;
@@ -876,6 +886,7 @@ struct cmd_params_instance {
cparams.mla_attn = mla_attn;
cparams.attn_max_batch = attn_max_batch;
cparams.fused_moe_up_gate = fmoe;
cparams.grouped_expert_routing = ger;
cparams.fused_up_gate = !no_fug;
cparams.min_experts = ser.first;
cparams.thresh_experts = ser.second;
@@ -935,6 +946,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
/* .embeddings = */ embd,
/* .repack = */ params.repack,
/* .fmoe = */ params.fmoe,
/* .ger = */ params.ger,
/* .no_fug = */ params.no_fug,
/* .use_thp = */ params.use_thp,
/* .buft_overrides=*/ params.buft_overrides.data(),
@@ -970,6 +982,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
/* .embeddings = */ embd,
/* .repack = */ params.repack,
/* .fmoe = */ params.fmoe,
/* .ger = */ params.ger,
/* .no_fug = */ params.no_fug,
/* .use_thp = */ params.use_thp,
/* .buft_overrides=*/ params.buft_overrides.data(),
@@ -1005,6 +1018,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
/* .embeddings = */ embd,
/* .repack = */ params.repack,
/* .fmoe = */ params.fmoe,
/* .ger = */ params.ger,
/* .no_fug = */ params.no_fug,
/* .use_thp = */ params.use_thp,
/* .buft_overrides=*/ params.buft_overrides.data(),
@@ -1040,6 +1054,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
/* .embeddings = */ embd,
/* .repack = */ params.repack,
/* .fmoe = */ params.fmoe,
/* .ger = */ params.ger,
/* .no_fug = */ params.no_fug,
/* .use_thp = */ params.use_thp,
/* .buft_overrides=*/ params.buft_overrides.data(),
@@ -1086,6 +1101,7 @@ struct test {
bool embeddings;
bool repack = false;
bool fmoe = false;
bool ger = false;
bool no_fug = false;
bool use_thp = false;
int n_prompt;
@@ -1120,6 +1136,8 @@ struct test {
use_mmap = inst.use_mmap;
embeddings = inst.embeddings;
repack = inst.repack;
fmoe = inst.fmoe;
ger = inst.ger;
no_fug = inst.no_fug;
use_thp = inst.use_thp;
n_prompt = inst.n_prompt;
@@ -1212,7 +1230,7 @@ struct test {
"n_threads", "type_k", "type_v",
"n_gpu_layers", "split_mode",
"main_gpu", "no_kv_offload", "flash_attn", "mla_attn", "attn_max_batch", "ser",
"tensor_split", "use_mmap", "embeddings", "repack", "fused_moe", "fused_up_gate", "use_thp",
"tensor_split", "use_mmap", "embeddings", "repack", "fused_moe", "grouped_er", "fused_up_gate", "use_thp",
"n_prompt", "n_gen", "test_time",
"avg_ns", "stddev_ns",
"avg_ts", "stddev_ts", "test",
@@ -1234,7 +1252,7 @@ struct test {
if (field == "cuda" || field == "vulkan" || field == "kompute" || field == "metal" ||
field == "gpu_blas" || field == "blas" || field == "sycl" ||field == "f16_kv" || field == "no_kv_offload" ||
field == "flash_attn" || field == "use_mmap" || field == "embeddings" || field == "repack" || field == "use_thp" ||
field == "fused_moe" || field == "fused_up_gate") {
field == "fused_moe" || field == "grouped_er" || field == "fused_up_gate") {
return BOOL;
}
if (field == "avg_ts" || field == "stddev_ts") {
@@ -1277,7 +1295,8 @@ struct test {
std::to_string(main_gpu), std::to_string(no_kv_offload), std::to_string(flash_attn),
std::to_string(mla_attn), std::to_string(attn_max_batch), ser_to_string(ser),
tensor_split_str, std::to_string(use_mmap), std::to_string(embeddings),
std::to_string(repack), std::to_string(fmoe), std::to_string(no_fug), std::to_string(use_thp),
std::to_string(repack), std::to_string(fmoe), std::to_string(ger),
std::to_string(no_fug), std::to_string(use_thp),
std::to_string(n_prompt), std::to_string(n_gen), test_time,
std::to_string(avg_ns()), std::to_string(stdev_ns()),
std::to_string(avg_ts()), std::to_string(stdev_ts()),
@@ -1461,6 +1480,9 @@ struct markdown_printer : public printer {
if (field == "fused_moe") {
return 4;
}
if (field == "grouped_er") {
return 3;
}
if (field == "fused_up_gate") {
return 6;
}
@@ -1513,6 +1535,12 @@ struct markdown_printer : public printer {
if (field == "fused_moe") {
return "fmoe";
}
if (field == "grouped_er") {
return "ger";
}
if (field == "fused_up_gate") {
return "no-fug";
}
@@ -1589,6 +1617,9 @@ struct markdown_printer : public printer {
if (params.fmoe != cmd_params_defaults.fmoe) {
fields.emplace_back("fused_moe");
}
if (params.ger != cmd_params_defaults.ger) {
fields.emplace_back("grouped_er");
}
if (params.no_fug != cmd_params_defaults.no_fug) {
fields.emplace_back("fused_up_gate");
}