Add mainline compatible FA command line option (#944)

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Author: Kawrakow
Date: 2025-11-13 08:55:33 +02:00
Committed by: GitHub
Parent: 32edcb4b74
Commit: 874926800f


@@ -1135,6 +1135,21 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         params.flash_attn = false;
         return true;
     }
+    if (arg == "-fa" || arg == "--flash-attention") {
+        CHECK_ARG
+        std::string next_arg{argv[i]};
+        for (auto& c : next_arg) c = std::tolower(c);
+        if (next_arg == "auto" || next_arg == "1" || next_arg == "on") {
+            params.flash_attn = true;
+        }
+        else if (next_arg == "off" || next_arg == "0") {
+            params.flash_attn = false;
+        }
+        else {
+            invalid_param = true;
+        }
+        return true;
+    }
     if (arg == "-mla" || arg == "--mla-use") {
         CHECK_ARG
         params.mla_attn = std::stoi(argv[i]);
@@ -1981,6 +1996,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
options.push_back({ "*", " --keep N", "number of tokens to keep from the initial prompt (default: %d, -1 = all)", params.n_keep });
options.push_back({ "*", " --chunks N", "max number of chunks to process (default: %d, -1 = all)", params.n_chunks });
options.push_back({ "*", "-no-fa, --no-flash-attn", "disable Flash Attention (default: %s)", params.flash_attn ? "enabled" : "disabled" });
options.push_back({ "*", "-fa, --flash-attn (auto|on|off|0|1)", "set Flash Attention (default: %s)", params.flash_attn ? "on" : "off" });
options.push_back({ "*", "-mla, --mla-use", "enable MLA (default: %d)", params.mla_attn });
options.push_back({ "*", "-amb, --attention-max-batch", "max batch size for attention computations (default: %d)", params.attn_max_batch});
options.push_back({ "*", "-no-fmoe, --no-fused-moe", "disable fused MoE (default: %s)", params.fused_moe_up_gate ? "enabled" : "disabled" });