Mirror of https://github.com/ikawrakow/ik_llama.cpp.git, synced 2026-01-26 17:20:01 +00:00
Add merge up/gate command line parameter to llama-bench
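The change adds a -muge / --merge-up-gate-exps switch to llama-bench and a matching "muge" column in the benchmark output. A minimal invocation sketch (the model path is a placeholder, not part of this commit); the flag takes an integer, parsed with std::stoi as in the hunks below:

    ./llama-bench -m model.gguf -muge 1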
@@ -268,6 +268,7 @@ struct cmd_params {
     bool use_thp = false;
     bool no_ooae = false;
     bool mqkv = false;
+    bool muge = false;
     bool rcache = false;
     output_formats output_format;
     output_formats output_format_stderr;
@@ -293,7 +294,7 @@ static const cmd_params cmd_params_defaults = {
     /* mla_attn */ {3},
     /* attn_max_batch */ {0},
     /* ser */ {{-1,0.0f}},
-    /* reuse */ {false},
+    /* reuse */ {true},
     /* tensor_split */ {std::vector<float>(llama_max_devices(), 0.0f)},
     /* use_mmap */ {true},
     /* embeddings */ {false},
@@ -310,6 +311,7 @@ static const cmd_params cmd_params_defaults = {
     /* use_thp */ false,
     /* no_ooae */ false,
     /* mqkv */ false,
+    /* muge */ false,
     /* rcache */ false,
     /* output_format */ MARKDOWN,
     /* output_format_stderr */ NONE,
@@ -354,6 +356,7 @@ static void print_usage(int /* argc */, char ** argv) {
     printf(" -rtr, --run-time-repack <0|1> (default: %s)\n", cmd_params_defaults.repack ? "1" : "0");
     printf(" -cuda, --cuda-params <string> (default: %s)\n", cmd_params_defaults.cuda_params.c_str());
     printf(" -mqkv, --merge-qkv (default: %s)\n", cmd_params_defaults.mqkv ? "1" : "0");
+    printf(" -muge, --merge-up-gate-experts (default: %s)\n", cmd_params_defaults.muge ? "1" : "0");
     printf(" -rcache, --rope-cache (default: %s)\n", cmd_params_defaults.rcache ? "1" : "0");
     printf(" -thp, --transparent-huge-pages <0|1> (default: %s)\n", cmd_params_defaults.use_thp? "1" : "0");
     printf(" -ot, --override-tensor pattern (default: none)\n");
@@ -789,6 +792,12 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
                 break;
             }
             params.mqkv = std::stoi(argv[i]);
+        } else if (arg == "-muge" || arg == "--merge-up-gate-exps") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            params.muge = std::stoi(argv[i]);
         } else if (arg == "-rcache" || arg == "--rope-cache") {
             if (++i >= argc) {
                 invalid_param = true;
@@ -926,6 +935,7 @@ struct cmd_params_instance {
     bool use_thp = false;
     bool no_ooae = false;
     bool mqkv = false;
+    bool muge = false;
     bool rcache = false;
     const llama_model_tensor_buft_override* buft_overrides;

@@ -943,6 +953,7 @@ struct cmd_params_instance {
         mparams.repack_tensors = repack;
         mparams.use_thp = use_thp;
         mparams.merge_qkv = mqkv;
+        mparams.merge_up_gate_exps = muge;
         mparams.tensor_buft_overrides = buft_overrides;
         mparams.mla = mla_attn;

@@ -958,6 +969,7 @@ struct cmd_params_instance {
            use_mmap == other.use_mmap &&
            repack == other.repack &&
            mqkv == other.mqkv &&
+           muge == other.muge &&
            use_thp == other.use_thp &&
            tensor_split == other.tensor_split;
     }
@@ -1047,6 +1059,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
             /* .use_thp = */ params.use_thp,
             /* .no_ooae = */ params.no_ooae,
             /* .mqkv = */ params.mqkv,
+            /* .muge = */ params.muge,
             /* .rcache = */ params.rcache,
             /* .buft_overrides=*/ params.buft_overrides.data(),
         };
@@ -1088,6 +1101,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
             /* .use_thp = */ params.use_thp,
             /* .no_ooae = */ params.no_ooae,
             /* .mqkv = */ params.mqkv,
+            /* .muge = */ params.muge,
             /* .rcache = */ params.rcache,
             /* .buft_overrides=*/ params.buft_overrides.data(),
         };
@@ -1129,6 +1143,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
             /* .use_thp = */ params.use_thp,
             /* .no_ooae = */ params.no_ooae,
             /* .mqkv = */ params.mqkv,
+            /* .muge = */ params.muge,
             /* .rcache = */ params.rcache,
             /* .buft_overrides=*/ params.buft_overrides.data(),
         };
@@ -1170,6 +1185,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
             /* .use_thp = */ params.use_thp,
             /* .no_ooae = */ params.no_ooae,
             /* .mqkv = */ params.mqkv,
+            /* .muge = */ params.muge,
             /* .rcache = */ params.rcache,
             /* .buft_overrides=*/ params.buft_overrides.data(),
         };
@@ -1222,6 +1238,7 @@ struct test {
     bool use_thp = false;
     bool no_ooae = false;
     bool mqkv = false;
+    bool muge = false;
     bool rcache = false;
     std::string override_tensor;
     int n_prompt;
@@ -1259,6 +1276,7 @@ struct test {
         embeddings = inst.embeddings;
         repack = inst.repack;
         mqkv = inst.mqkv;
+        muge = inst.muge;
         fmoe = inst.fmoe;
         ger = inst.ger;
         rcache = inst.rcache;
@@ -1368,7 +1386,7 @@ struct test {
             "n_threads", "type_k", "type_v",
             "n_gpu_layers", "split_mode",
             "main_gpu", "no_kv_offload", "flash_attn", "mla_attn", "attn_max_batch", "ser", "reuse",
-            "tensor_split", "use_mmap", "embeddings", "repack", "mqkv", "fused_moe", "grouped_er",
+            "tensor_split", "use_mmap", "embeddings", "repack", "mqkv", "muge", "fused_moe", "grouped_er",
             "no_fused_up_gate", "use_thp", "no_ooae", "rcache", "cuda_params", "override_tensor",
             "n_prompt", "n_gen", "test_time",
             "avg_ns", "stddev_ns",
@@ -1392,7 +1410,7 @@ struct test {
            field == "gpu_blas" || field == "blas" || field == "sycl" || field == "no_kv_offload" ||
            field == "flash_attn" || field == "use_mmap" || field == "embeddings" || field == "repack" || field == "use_thp" ||
            field == "fused_moe" || field == "grouped_er" || field == "no_fused_up_gate" || field == "no_ooae" || field == "mqkv" ||
-           field == "rcache" || field == "reuse") {
+           field == "rcache" || field == "reuse" || field == "muge") {
             return BOOL;
         }
         if (field == "avg_ts" || field == "stddev_ts") {
@@ -1435,7 +1453,7 @@ struct test {
             std::to_string(main_gpu), std::to_string(no_kv_offload), std::to_string(flash_attn),
             std::to_string(mla_attn), std::to_string(attn_max_batch), ser_to_string(ser), std::to_string(reuse),
             tensor_split_str, std::to_string(use_mmap), std::to_string(embeddings),
-            std::to_string(repack), std::to_string(mqkv), std::to_string(fmoe), std::to_string(ger),
+            std::to_string(repack), std::to_string(mqkv), std::to_string(muge), std::to_string(fmoe), std::to_string(ger),
             std::to_string(no_fug), std::to_string(use_thp), std::to_string(no_ooae), std::to_string(rcache),
             cuda_params, override_tensor,
             std::to_string(n_prompt), std::to_string(n_gen), test_time,
@@ -1621,6 +1639,9 @@ struct markdown_printer : public printer {
         if (field == "mqkv") {
             return 4;
         }
+        if (field == "muge") {
+            return 4;
+        }
         if (field == "use_thp") {
             return 3;
         }
@@ -1688,6 +1709,9 @@ struct markdown_printer : public printer {
         if (field == "mqkv") {
             return "mqkv";
         }
+        if (field == "muge") {
+            return "muge";
+        }
         if (field == "use_thp") {
             return "thp";
         }
@@ -1791,6 +1815,9 @@ struct markdown_printer : public printer {
         if (params.mqkv != cmd_params_defaults.mqkv) {
             fields.emplace_back("mqkv");
         }
+        if (params.muge != cmd_params_defaults.muge) {
+            fields.emplace_back("muge");
+        }
         if (params.use_thp != cmd_params_defaults.use_thp) {
             fields.emplace_back("use_thp");
         }