Graph parallel for Qwen-3.5-MoE (#1347)

* Graph parallel for Qwen3.5-MoE

* Add --max-gpu to llama-bench

* Fix graph reuse when not all GPUs participate in self-attention
Author: Kawrakow
Date: 2026-03-02 07:48:43 +01:00
Committed by: GitHub
Parent: 8f9e19d57c
Commit: d239dabcc6
4 changed files with 58 additions and 18 deletions
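
For reference, a hypothetical invocation of the new option (the model path is illustrative only), e.g. `./llama-bench -m model.gguf --max-gpu 2`. Since the parsing hunk below stores the value with std::stoi into a single int, --max-gpu takes one integer per run rather than a comma-separated list of values to sweep.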

@@ -271,7 +271,7 @@ struct cmd_params {
bool muge = false;
bool rcache = false;
bool sas = false;
int fdn = 0; // fdn = fused delta net
int max_gpu = 0;
bool print_overrides = false;
output_formats output_format;
output_formats output_format_stderr;
@@ -317,7 +317,7 @@ static const cmd_params cmd_params_defaults = {
/* muge */ false,
/* rcache */ false,
/* sas */ false,
/* fdn */ 0,
/* max_gpu */ 0,
/* print_overrides */ false,
/* output_format */ MARKDOWN,
/* output_format_stderr */ NONE,
@@ -371,6 +371,7 @@ static void print_usage(int /* argc */, char ** argv) {
printf(" -no-fug, --no-fused-up-gate <0|1> (default: %s)\n", cmd_params_defaults.no_fug? "1" : "0");
printf(" -no-ooae, --no-offload-only-active-experts <0|1> (default: %s)\n", cmd_params_defaults.no_ooae? "1" : "0");
printf(" -sas, --scheduler-async <0|1> (default: %s)\n", cmd_params_defaults.sas ? "1" : "0");
printf(" --max-gpu <N> (default: %d)\n", cmd_params_defaults.max_gpu);
printf(" --print-overrides <0|1> (default: %s)\n", cmd_params_defaults.print_overrides ? "1" : "0");
printf("\n");
printf("Multiple values can be given for each parameter by separating them with ',' or by specifying the parameter multiple times.\n");
@@ -812,6 +813,12 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
break;
}
params.sas = std::stoi(argv[i]);
} else if (arg == "--max-gpu") {
if (++i >= argc) {
invalid_param = true;
break;
}
params.max_gpu = std::stoi(argv[i]);
} else if (arg == "-rcache" || arg == "--rope-cache") {
if (++i >= argc) {
invalid_param = true;
@@ -958,6 +965,7 @@ struct cmd_params_instance {
bool muge = false;
bool rcache = false;
bool sas = false;
int max_gpu = 0;
const llama_model_tensor_buft_override* buft_overrides;
llama_model_params to_llama_mparams() const {
@@ -977,6 +985,7 @@ struct cmd_params_instance {
mparams.merge_up_gate_exps = muge;
mparams.tensor_buft_overrides = buft_overrides;
mparams.mla = mla_attn;
mparams.max_gpu = max_gpu;
return mparams;
}
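
For context, a minimal sketch (not part of this diff) of setting the same field directly on llama_model_params, mirroring what to_llama_mparams() does above; the meaning of the value (0 = default, i.e. no explicit cap on the number of GPUs) is an assumption based on the option's default:

    // Sketch only: wire the new field into the model parameters by hand.
    llama_model_params mparams = llama_model_default_params();
    mparams.n_gpu_layers = 99;   // offload all layers
    mparams.max_gpu      = 2;    // new field from this PR: cap the GPUs used for the graph (assumed semantics)
    // ... pass mparams to the usual model-loading call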
@@ -993,6 +1002,7 @@ struct cmd_params_instance {
muge == other.muge &&
use_thp == other.use_thp &&
sas == other.sas &&
max_gpu == other.max_gpu &&
tensor_split == other.tensor_split;
}
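
Including max_gpu in this comparison presumably keeps llama-bench from reusing an already-loaded model across test instances that differ only in max_gpu, forcing a reload with the new setting instead.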
@@ -1085,6 +1095,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
/* .muge = */ params.muge,
/* .rcache = */ params.rcache,
/* .sas = */ params.sas,
/* .max_gpu = */ params.max_gpu,
/* .buft_overrides=*/ params.buft_overrides.data(),
};
instances.push_back(instance);
@@ -1128,6 +1139,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
/* .muge = */ params.muge,
/* .rcache = */ params.rcache,
/* .sas = */ params.sas,
/* .max_gpu = */ params.max_gpu,
/* .buft_overrides=*/ params.buft_overrides.data(),
};
instances.push_back(instance);
@@ -1171,6 +1183,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
/* .muge = */ params.muge,
/* .rcache = */ params.rcache,
/* .sas = */ params.sas,
/* .max_gpu = */ params.max_gpu,
/* .buft_overrides=*/ params.buft_overrides.data(),
};
instances.push_back(instance);
@@ -1214,6 +1227,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
/* .muge = */ params.muge,
/* .rcache = */ params.rcache,
/* .sas = */ params.sas,
/* .max_gpu = */ params.max_gpu,
/* .buft_overrides=*/ params.buft_overrides.data(),
};
instances.push_back(instance);
@@ -1268,6 +1282,7 @@ struct test {
bool muge = false;
bool rcache = false;
bool sas = false;
int max_gpu = 0;
std::string override_tensor;
int n_prompt;
int n_gen;
@@ -1309,6 +1324,7 @@ struct test {
ger = inst.ger;
rcache = inst.rcache;
sas = inst.sas;
max_gpu = inst.max_gpu;
no_fug = inst.no_fug;
use_thp = inst.use_thp;
no_ooae = inst.no_ooae;
@@ -1413,7 +1429,7 @@ struct test {
field == "model_size" || field == "model_n_params" ||
field == "n_gpu_layers" || field == "main_gpu" ||
field == "n_prompt" || field == "n_gen" || field == "mla_attn" || field == "attn_max_batch" ||
field == "avg_ns" || field == "stddev_ns") {
field == "avg_ns" || field == "stddev_ns" || field == "max_gpu") {
return INT;
}
if (field == "cuda" || field == "vulkan" || field == "kompute" || field == "metal" ||
@@ -1465,6 +1481,7 @@ struct test {
tensor_split_str, std::to_string(use_mmap), std::to_string(embeddings),
std::to_string(repack), std::to_string(mqkv), std::to_string(muge), std::to_string(fmoe), std::to_string(ger),
std::to_string(no_fug), std::to_string(use_thp), std::to_string(no_ooae), std::to_string(rcache), std::to_string(sas),
std::to_string(max_gpu),
cuda_params, override_tensor,
std::to_string(n_prompt), std::to_string(n_gen), test_time,
std::to_string(avg_ns()), std::to_string(stdev_ns()),
@@ -1485,7 +1502,7 @@ struct test {
"n_gpu_layers", "split_mode",
"main_gpu", "no_kv_offload", "flash_attn", "mla_attn", "attn_max_batch", "ser", "reuse",
"tensor_split", "use_mmap", "embeddings", "repack", "mqkv", "muge", "fused_moe", "grouped_er",
"no_fused_up_gate", "use_thp", "no_ooae", "rcache", "sas", "cuda_params", "override_tensor",
"no_fused_up_gate", "use_thp", "no_ooae", "rcache", "sas", "max_gpu", "cuda_params", "override_tensor",
"n_prompt", "n_gen", "test_time",
"avg_ns", "stddev_ns",
"avg_ts", "stddev_ts", "test",
@@ -1675,6 +1692,9 @@ struct markdown_printer : public printer {
if (field == "sas") {
return 3;
}
if (field == "max_gpu") {
return 7;
}
if (field == "use_thp") {
return 3;
}
@@ -1748,6 +1768,9 @@ struct markdown_printer : public printer {
if (field == "sas") {
return "sas";
}
if (field == "max_gpu") {
return "max_gpu";
}
if (field == "use_thp") {
return "thp";
}
@@ -1858,6 +1881,9 @@ struct markdown_printer : public printer {
if (params.sas != cmd_params_defaults.sas) {
fields.emplace_back("sas");
}
if (params.max_gpu != cmd_params_defaults.max_gpu) {
fields.emplace_back("max_gpu");
}
if (params.muge != cmd_params_defaults.muge) {
fields.emplace_back("muge");
}