mirror of
https://github.com/ikawrakow/ik_llama.cpp.git
synced 2026-03-06 03:50:08 +00:00
Graph parallel for Qwen-3.5-MoE (#1347)
* Graph parallel for Qwen3.5-MoE * Add --max-gpu to llama-bench * Fix graph reuse when not all GPUs participate in self-attention
This commit is contained in:
@@ -271,7 +271,7 @@ struct cmd_params {
|
||||
bool muge = false;
|
||||
bool rcache = false;
|
||||
bool sas = false;
|
||||
int fdn = 0; // fdn = fused delta net
|
||||
int max_gpu = 0;
|
||||
bool print_overrides = false;
|
||||
output_formats output_format;
|
||||
output_formats output_format_stderr;
|
||||
@@ -317,7 +317,7 @@ static const cmd_params cmd_params_defaults = {
|
||||
/* muge */ false,
|
||||
/* rcache */ false,
|
||||
/* sas */ false,
|
||||
/* fdn */ 0,
|
||||
/* max_gpu */ 0,
|
||||
/* print_overrides */ false,
|
||||
/* output_format */ MARKDOWN,
|
||||
/* output_format_stderr */ NONE,
|
||||
@@ -371,6 +371,7 @@ static void print_usage(int /* argc */, char ** argv) {
|
||||
printf(" -no-fug, --no-fused-up-gate <0|1> (default: %s)\n", cmd_params_defaults.no_fug? "1" : "0");
|
||||
printf(" -no-ooae, --no-offload-only-active-experts <0|1> (default: %s)\n", cmd_params_defaults.no_ooae? "1" : "0");
|
||||
printf(" -sas, --scheduler-async <0|1> (default: %s)\n", cmd_params_defaults.sas ? "1" : "0");
|
||||
printf(" --max-gpu <N> (default: %d)\n", cmd_params_defaults.max_gpu);
|
||||
printf(" --print-overrides <0|1> (default: %s)\n", cmd_params_defaults.print_overrides ? "1" : "0");
|
||||
printf("\n");
|
||||
printf("Multiple values can be given for each parameter by separating them with ',' or by specifying the parameter multiple times.\n");
|
||||
@@ -812,6 +813,12 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
|
||||
break;
|
||||
}
|
||||
params.sas = std::stoi(argv[i]);
|
||||
} else if (arg == "--max-gpu") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.max_gpu = std::stoi(argv[i]);
|
||||
} else if (arg == "-rcache" || arg == "--rope-cache") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
@@ -958,6 +965,7 @@ struct cmd_params_instance {
|
||||
bool muge = false;
|
||||
bool rcache = false;
|
||||
bool sas = false;
|
||||
int max_gpu = 0;
|
||||
const llama_model_tensor_buft_override* buft_overrides;
|
||||
|
||||
llama_model_params to_llama_mparams() const {
|
||||
@@ -977,6 +985,7 @@ struct cmd_params_instance {
|
||||
mparams.merge_up_gate_exps = muge;
|
||||
mparams.tensor_buft_overrides = buft_overrides;
|
||||
mparams.mla = mla_attn;
|
||||
mparams.max_gpu = max_gpu;
|
||||
|
||||
return mparams;
|
||||
}
|
||||
@@ -993,6 +1002,7 @@ struct cmd_params_instance {
|
||||
muge == other.muge &&
|
||||
use_thp == other.use_thp &&
|
||||
sas == other.sas &&
|
||||
max_gpu == other.max_gpu &&
|
||||
tensor_split == other.tensor_split;
|
||||
}
|
||||
|
||||
@@ -1085,6 +1095,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
|
||||
/* .muge = */ params.muge,
|
||||
/* .rcache = */ params.rcache,
|
||||
/* .sas = */ params.sas,
|
||||
/* .max_gpu = */ params.max_gpu,
|
||||
/* .buft_overrides=*/ params.buft_overrides.data(),
|
||||
};
|
||||
instances.push_back(instance);
|
||||
@@ -1128,6 +1139,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
|
||||
/* .muge = */ params.muge,
|
||||
/* .rcache = */ params.rcache,
|
||||
/* .sas = */ params.sas,
|
||||
/* .max_gpu = */ params.max_gpu,
|
||||
/* .buft_overrides=*/ params.buft_overrides.data(),
|
||||
};
|
||||
instances.push_back(instance);
|
||||
@@ -1171,6 +1183,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
|
||||
/* .muge = */ params.muge,
|
||||
/* .rcache = */ params.rcache,
|
||||
/* .sas = */ params.sas,
|
||||
/* .max_gpu = */ params.max_gpu,
|
||||
/* .buft_overrides=*/ params.buft_overrides.data(),
|
||||
};
|
||||
instances.push_back(instance);
|
||||
@@ -1214,6 +1227,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
|
||||
/* .muge = */ params.muge,
|
||||
/* .rcache = */ params.rcache,
|
||||
/* .sas = */ params.sas,
|
||||
/* .max_gpu = */ params.max_gpu,
|
||||
/* .buft_overrides=*/ params.buft_overrides.data(),
|
||||
};
|
||||
instances.push_back(instance);
|
||||
@@ -1268,6 +1282,7 @@ struct test {
|
||||
bool muge = false;
|
||||
bool rcache = false;
|
||||
bool sas = false;
|
||||
int max_gpu = 0;
|
||||
std::string override_tensor;
|
||||
int n_prompt;
|
||||
int n_gen;
|
||||
@@ -1309,6 +1324,7 @@ struct test {
|
||||
ger = inst.ger;
|
||||
rcache = inst.rcache;
|
||||
sas = inst.sas;
|
||||
max_gpu = inst.max_gpu;
|
||||
no_fug = inst.no_fug;
|
||||
use_thp = inst.use_thp;
|
||||
no_ooae = inst.no_ooae;
|
||||
@@ -1413,7 +1429,7 @@ struct test {
|
||||
field == "model_size" || field == "model_n_params" ||
|
||||
field == "n_gpu_layers" || field == "main_gpu" ||
|
||||
field == "n_prompt" || field == "n_gen" || field == "mla_attn" || field == "attn_max_batch" ||
|
||||
field == "avg_ns" || field == "stddev_ns") {
|
||||
field == "avg_ns" || field == "stddev_ns" || field == "max_gpu") {
|
||||
return INT;
|
||||
}
|
||||
if (field == "cuda" || field == "vulkan" || field == "kompute" || field == "metal" ||
|
||||
@@ -1465,6 +1481,7 @@ struct test {
|
||||
tensor_split_str, std::to_string(use_mmap), std::to_string(embeddings),
|
||||
std::to_string(repack), std::to_string(mqkv), std::to_string(muge), std::to_string(fmoe), std::to_string(ger),
|
||||
std::to_string(no_fug), std::to_string(use_thp), std::to_string(no_ooae), std::to_string(rcache), std::to_string(sas),
|
||||
std::to_string(max_gpu),
|
||||
cuda_params, override_tensor,
|
||||
std::to_string(n_prompt), std::to_string(n_gen), test_time,
|
||||
std::to_string(avg_ns()), std::to_string(stdev_ns()),
|
||||
@@ -1485,7 +1502,7 @@ struct test {
|
||||
"n_gpu_layers", "split_mode",
|
||||
"main_gpu", "no_kv_offload", "flash_attn", "mla_attn", "attn_max_batch", "ser", "reuse",
|
||||
"tensor_split", "use_mmap", "embeddings", "repack", "mqkv", "muge", "fused_moe", "grouped_er",
|
||||
"no_fused_up_gate", "use_thp", "no_ooae", "rcache", "sas", "cuda_params", "override_tensor",
|
||||
"no_fused_up_gate", "use_thp", "no_ooae", "rcache", "sas", "max_gpu", "cuda_params", "override_tensor",
|
||||
"n_prompt", "n_gen", "test_time",
|
||||
"avg_ns", "stddev_ns",
|
||||
"avg_ts", "stddev_ts", "test",
|
||||
@@ -1675,6 +1692,9 @@ struct markdown_printer : public printer {
|
||||
if (field == "sas") {
|
||||
return 3;
|
||||
}
|
||||
if (field == "max_gpu") {
|
||||
return 7;
|
||||
}
|
||||
if (field == "use_thp") {
|
||||
return 3;
|
||||
}
|
||||
@@ -1748,6 +1768,9 @@ struct markdown_printer : public printer {
|
||||
if (field == "sas") {
|
||||
return "sas";
|
||||
}
|
||||
if (field == "max_gpu") {
|
||||
return "max_gpu";
|
||||
}
|
||||
if (field == "use_thp") {
|
||||
return "thp";
|
||||
}
|
||||
@@ -1858,6 +1881,9 @@ struct markdown_printer : public printer {
|
||||
if (params.sas != cmd_params_defaults.sas) {
|
||||
fields.emplace_back("sas");
|
||||
}
|
||||
if (params.max_gpu != cmd_params_defaults.max_gpu) {
|
||||
fields.emplace_back("max_gpu");
|
||||
}
|
||||
if (params.muge != cmd_params_defaults.muge) {
|
||||
fields.emplace_back("muge");
|
||||
}
|
||||
|
||||
@@ -4568,16 +4568,25 @@ ggml_cgraph * llm_build_context::build_qwen35moe() {
|
||||
|
||||
if (hparams.is_recurrent(il)) {
|
||||
ggml_tensor * inpSA = inpL;
|
||||
|
||||
cur = llm_build_norm(ctx0, inpL, hparams, model.layers[il].attn_norm, nullptr, LLM_NORM_RMS, cb, il);
|
||||
int idx = model.default_layer_device[il];
|
||||
if (inpL->op == GGML_OP_REDUCE) {
|
||||
if (kv_self.s_l[il]) {
|
||||
// This shouldn't be necessary, but just in case.
|
||||
int idx_s_l = ggml_backend_sched_get_backend_idx(lctx.sched, kv_self.s_l[il]->buffer);
|
||||
if (idx_s_l >= 0) idx = idx_s_l;
|
||||
}
|
||||
if (inpL->src[idx]) {
|
||||
inpL->view_src = inpL->src[idx];
|
||||
}
|
||||
}
|
||||
auto norm = model.layers[il].attn_norm->extra ? ((ggml_split_tensor_t *)model.layers[il].attn_norm->extra)->splits[idx] : model.layers[il].attn_norm;
|
||||
cur = llm_build_norm(ctx0, inpL, hparams, norm, nullptr, LLM_NORM_RMS, cb, il);
|
||||
cb(cur, "attn_norm", il);
|
||||
|
||||
cur = delta.build_layer_attn_linear(ctx0, gf, cur, causal_mask, identity, diag_mask, il, cb);
|
||||
if (il == n_layer - 1 && inp_out_ids) {
|
||||
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
|
||||
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
|
||||
}
|
||||
|
||||
cur = ggml_add(ctx0, cur, inpSA);
|
||||
cb(cur, "attn_residual", il);
|
||||
} else {
|
||||
|
||||
@@ -1417,7 +1417,9 @@ bool create_tensors_helper::create_qwen35moe_tensors(const LLM_TN & tn) {
|
||||
const int64_t conv_dim = key_dim * 2 + value_dim;
|
||||
|
||||
for (int i = 0; i < n_layer; ++i) {
|
||||
ggml_context * ctx_split = ctx_for_layer_split(i);
|
||||
auto ctx_split = ctx_for_layer_split(i);
|
||||
auto ctx_layer = ctx_for_layer(i);
|
||||
|
||||
|
||||
auto & layer = model.layers[i];
|
||||
|
||||
@@ -1438,15 +1440,15 @@ bool create_tensors_helper::create_qwen35moe_tensors(const LLM_TN & tn) {
|
||||
} else {
|
||||
// Linear attention (gated delta net) specific tensors
|
||||
// Create tensors with calculated dimensions
|
||||
layer.wqkv = create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), { n_embd, key_dim * 2 + value_dim }, llama_model_loader::TENSOR_NOT_REQUIRED);
|
||||
layer.wqkv_gate = create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_GATE, "weight", i), { n_embd, value_dim }, llama_model_loader::TENSOR_NOT_REQUIRED);
|
||||
layer.ssm_conv1d = create_tensor(ctx_split, tn(LLM_TENSOR_SSM_CONV1D, "weight", i), { hparams.ssm_d_conv, conv_dim }, 0);
|
||||
layer.ssm_dt = create_tensor(ctx_split, tn(LLM_TENSOR_SSM_DT, "bias", i), { hparams.ssm_dt_rank }, 0);
|
||||
layer.ssm_a = create_tensor(ctx_split, tn(LLM_TENSOR_SSM_A_NOSCAN, i), { hparams.ssm_dt_rank }, 0);
|
||||
layer.ssm_beta = create_tensor(ctx_split, tn(LLM_TENSOR_SSM_BETA, "weight", i), { n_embd, n_v_heads }, 0);
|
||||
layer.ssm_alpha = create_tensor(ctx_split, tn(LLM_TENSOR_SSM_ALPHA, "weight", i), { n_embd, n_v_heads }, 0);
|
||||
layer.ssm_norm = create_tensor(ctx_split, tn(LLM_TENSOR_SSM_NORM, "weight", i), { head_v_dim }, 0);
|
||||
layer.ssm_out = create_tensor(ctx_split, tn(LLM_TENSOR_SSM_OUT, "weight", i), { value_dim, n_embd }, 0);
|
||||
layer.wqkv = create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "weight", i), { n_embd, key_dim * 2 + value_dim }, llama_model_loader::TENSOR_NOT_REQUIRED);
|
||||
layer.wqkv_gate = create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_GATE, "weight", i), { n_embd, value_dim }, llama_model_loader::TENSOR_NOT_REQUIRED);
|
||||
layer.ssm_conv1d = create_tensor(ctx_layer, tn(LLM_TENSOR_SSM_CONV1D, "weight", i), { hparams.ssm_d_conv, conv_dim }, 0);
|
||||
layer.ssm_dt = create_tensor(ctx_layer, tn(LLM_TENSOR_SSM_DT, "bias", i), { hparams.ssm_dt_rank }, 0);
|
||||
layer.ssm_a = create_tensor(ctx_layer, tn(LLM_TENSOR_SSM_A_NOSCAN, i), { hparams.ssm_dt_rank }, 0);
|
||||
layer.ssm_beta = create_tensor(ctx_layer, tn(LLM_TENSOR_SSM_BETA, "weight", i), { n_embd, n_v_heads }, 0);
|
||||
layer.ssm_alpha = create_tensor(ctx_layer, tn(LLM_TENSOR_SSM_ALPHA, "weight", i), { n_embd, n_v_heads }, 0);
|
||||
layer.ssm_norm = create_tensor(ctx_layer, tn(LLM_TENSOR_SSM_NORM, "weight", i), { head_v_dim }, 0);
|
||||
layer.ssm_out = create_tensor(ctx_layer, tn(LLM_TENSOR_SSM_OUT, "weight", i), { value_dim, n_embd }, 0);
|
||||
}
|
||||
|
||||
layer.ffn_gate_inp = create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), { n_embd, n_expert }, 0);
|
||||
|
||||
@@ -590,6 +590,7 @@ bool llama_context::update_cache_copies() {
|
||||
GGML_ASSERT(kl->n_device == vl->n_device);
|
||||
}
|
||||
for (int id = 0; id < kl->n_device; ++id) {
|
||||
if (!kl->splits[id]) continue;
|
||||
auto& c = cache_copies[2*model.splits.size()*il + 2*id + 0];
|
||||
if (!c.cpy || c.cpy->op != GGML_OP_CPY || c.cpy->view_src != kl->splits[id]) return false;
|
||||
c.cpy->view_offs = kv_self.head*c.step;
|
||||
@@ -598,6 +599,7 @@ bool llama_context::update_cache_copies() {
|
||||
}
|
||||
if (!vl) continue;
|
||||
for (int id = 0; id < vl->n_device; ++id) {
|
||||
if (!vl->splits[id]) continue;
|
||||
auto& c = cache_copies[2*model.splits.size()*il + 2*id + 1];
|
||||
if (!c.cpy || c.cpy->op != GGML_OP_CPY || c.cpy->view_src != vl->splits[id]) return false;
|
||||
c.cpy->view_offs = kv_self.head*c.step;
|
||||
@@ -1939,6 +1941,7 @@ static bool is_model_split_supported(const llama_model & model) {
|
||||
LLM_ARCH_STEP35,
|
||||
LLM_ARCH_QWEN3NEXT,
|
||||
LLM_ARCH_QWEN35,
|
||||
LLM_ARCH_QWEN35MOE,
|
||||
};
|
||||
auto it = k_supported.find(model.arch);
|
||||
return it != k_supported.end();
|
||||
|
||||
Reference in New Issue
Block a user