mirror of https://github.com/ikawrakow/ik_llama.cpp.git (synced 2026-02-23 14:44:09 +00:00)
merge_qkv: add command line argument to enable
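This adds a -mqkv / --merge-qkv flag that makes the model loader merge the separate Q, K and V attention tensors into a single contiguous wqkv tensor, with Q, K and V kept accessible as views into it. Illustrative invocation (binary name, model path and prompt are examples, not taken from the commit):

    ./llama-cli -m model.gguf -mqkv -p "Hello"
    ./llama-cli -m model.gguf --merge-qkv -p "Hello"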
@@ -1272,6 +1272,10 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         params.validate_quants = true;
         return true;
     }
+    if (arg == "-mqkv" || arg == "--merge-qkv") {
+        params.merge_qkv = true;
+        return true;
+    }
     if (arg == "--numa") {
         CHECK_ARG
         std::string value(argv[i]);
@@ -1911,6 +1915,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
     options.push_back({ "*", "-no-fug, --no-fused-up-gate", "disaable fused up-gate (default: %s)", params.fused_up_gate ? "enabled" : "disabled" });
     options.push_back({ "*", "-no-mmad, --no-fused-mul-multiadd", "disaable fused mul-multi_add (default: %s)", params.fused_mmad? "enabled" : "disabled" });
     options.push_back({ "*", "-ser, --smart-expert-reduction,","experts reduction (default: %d,%g)", params.min_experts, params.thresh_experts});
+    options.push_back({ "*", "-mqkv, --merge-qkv,", "merge Q,K,V (default: %d)", params.merge_qkv});
     options.push_back({ "*", "-p, --prompt PROMPT", "prompt to start generation with\n"
                        "in conversation mode, this will be used as system prompt\n"
                        "(default: '%s')", params.prompt.c_str() });
@@ -2778,7 +2783,7 @@ void llama_lora_adapters_apply(struct llama_context * ctx, std::vector<llama_lor

 struct llama_model_params llama_model_params_from_gpt_params(const gpt_params & params) {
     auto mparams = llama_model_default_params();
-    mparams.devices = params.devices.c_str();
+    mparams.devices = params.devices.c_str();

     if (params.n_gpu_layers != -1) {
         mparams.n_gpu_layers = params.n_gpu_layers;
@@ -2794,6 +2799,7 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params &
     mparams.repack_tensors = params.repack_tensors;
     mparams.use_thp = params.use_thp;
     mparams.validate_quants = params.validate_quants;
+    mparams.merge_qkv = params.merge_qkv;
     if (params.kv_overrides.empty()) {
         mparams.kv_overrides = NULL;
     } else {
@@ -3965,6 +3971,7 @@ void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const l
     fprintf(stream, "repack: %s # default: false\n", params.repack_tensors ? "true" : "false");
     fprintf(stream, "use_thp: %s # default: false\n", params.use_thp ? "true" : "false");
     fprintf(stream, "validate_quants: %s # default: false\n", params.validate_quants ? "true" : "false");
+    fprintf(stream, "merge_qkv: %s # default: false\n", params.merge_qkv ? "true" : "false");
     fprintf(stream, "penalize_nl: %s # default: false\n", sparams.penalize_nl ? "true" : "false");
     fprintf(stream, "ppl_output_type: %d # default: 0\n", params.ppl_output_type);
     fprintf(stream, "ppl_stride: %d # default: 0\n", params.ppl_stride);

@@ -269,6 +269,7 @@ struct gpt_params {
     bool use_thp = false; // use transparent huge pages (linux only)
     bool validate_quants = false; // if true, check for NaNs while loading the model
     bool only_active_exps = true; // if true, offload only active experts (relevant only for hybrid CPU/GPU)
+    bool merge_qkv = false; // if true, merge separate Q, K, V tensors into a single, contiguous tensor

     std::string cache_type_k = "f16"; // KV cache data type for the K
     std::string cache_type_v = "f16"; // KV cache data type for the V

@@ -382,6 +382,7 @@ extern "C" {
         bool repack_tensors; // repack if available
         bool use_thp;        // use transparent huge pages (linux only)
         bool validate_quants; // if true, check for NaNs while loading the model
+        bool merge_qkv;       // if true, merge separate Q, K, V tensors into a single, contiguous tensor
     };

     // NOTE: changing the default values of parameters marked as [EXPERIMENTAL] may cause crashes or incorrect results in certain configurations
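Since the option is exposed through the public llama_model_params struct, it can also be set programmatically. A minimal sketch (the model path is illustrative; the API calls are the stock llama.h entry points, merge_qkv is the field added by this commit):

    #include "llama.h"

    int main(void) {
        llama_backend_init();
        // start from the defaults, which now include /*.merge_qkv =*/ false
        struct llama_model_params mparams = llama_model_default_params();
        mparams.merge_qkv = true; // merge separate Q, K, V tensors into one contiguous tensor
        struct llama_model * model = llama_load_model_from_file("model.gguf", mparams);
        if (model) {
            llama_free_model(model);
        }
        llama_backend_free();
        return 0;
    }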
@@ -2441,7 +2441,7 @@ bool create_tensors_helper::merge_qkv(const LLM_TN & tn, int i, int bias) {
     GGML_ASSERT(wq && wk && wv);

     bool fused_qkv = false;
-    if (wq->type == wk->type && wq->type == wv->type && hparams.f_attention_scale == 0.0f) {
+    if (ml.merge_qkv && wq->type == wk->type && wq->type == wv->type && hparams.f_attention_scale == 0.0f) {
         GGML_ASSERT(wq->ne[0] == n_embd && wq->ne[1] == n_head * n_embd_head_k);
         GGML_ASSERT(wk->ne[0] == n_embd && wk->ne[1] == n_embd_gqa);
         GGML_ASSERT(wv->ne[0] == n_embd && wv->ne[1] == n_embd_gqa);
@@ -2454,7 +2454,7 @@ bool create_tensors_helper::merge_qkv(const LLM_TN & tn, int i, int bias) {
         layer.wk = ml.create_tensor_as_view(ctx_split, layer.wqkv, wk_name.c_str(), { wk->ne[0], wk->ne[1] }, wq->ne[1]*wq->nb[1]);
         layer.wv = ml.create_tensor_as_view(ctx_split, layer.wqkv, wv_name.c_str(), { wv->ne[0], wv->ne[1] }, wq->ne[1]*wq->nb[1] + wk->ne[1]*wk->nb[1] );
         fused_qkv = true;
-        printf("Created fused qkv %s\n", layer.wqkv->name);
+        printf("Created merged qkv %s\n", layer.wqkv->name);
         if (bias) {
             auto bq_name = tn(LLM_TENSOR_ATTN_Q, "bias", i);
             auto bk_name = tn(LLM_TENSOR_ATTN_K, "bias", i);
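The byte offsets passed to create_tensor_as_view above follow from ggml's tensor layout, where ne[1] is the number of rows and nb[1] the row stride in bytes: the K view starts right after Q's rows in the merged tensor, and the V view after Q's and K's. A standalone sketch of that arithmetic (the shapes are illustrative fp16 sizes, not taken from any particular model):

    #include <cstddef>
    #include <cstdio>

    struct tensor2d {
        size_t ne[2]; // ne[0]: row length in elements, ne[1]: number of rows
        size_t nb[2]; // nb[0]: element size in bytes, nb[1]: row stride in bytes
    };

    int main() {
        // illustrative shapes: n_embd = 4096, n_embd_gqa = 1024, 2-byte elements
        tensor2d wq{{4096, 4096}, {2, 4096*2}};
        tensor2d wk{{4096, 1024}, {2, 4096*2}};
        tensor2d wv{{4096, 1024}, {2, 4096*2}};

        size_t off_q = 0;                                     // Q view starts at the base of wqkv
        size_t off_k = wq.ne[1]*wq.nb[1];                     // K view starts after Q's rows
        size_t off_v = wq.ne[1]*wq.nb[1] + wk.ne[1]*wk.nb[1]; // V view starts after Q and K

        printf("offsets: Q=%zu K=%zu V=%zu bytes\n", off_q, off_k, off_v);
        return 0;
    }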
@@ -203,9 +203,10 @@ namespace GGUFMeta {
     };
 }

-llama_model_loader::llama_model_loader(const std::string & fname, bool use_mmap, bool check_tensors, bool repack_tensors, bool use_thp,
-        const llama_model_kv_override * param_overrides_p,
-        const llama_model_tensor_buft_override * param_tensor_buft_overrides_p) {
+llama_model_loader::llama_model_loader(const std::string & fname, bool use_mmap, bool check_tensors,
+        bool repack_tensors, bool use_thp, bool merge_qkv,
+        const llama_model_kv_override * param_overrides_p,
+        const llama_model_tensor_buft_override * param_tensor_buft_overrides_p) {
     int trace = 0;
     if (getenv("LLAMA_TRACE")) {
         trace = atoi(getenv("LLAMA_TRACE"));
@@ -495,6 +496,7 @@ llama_model_loader::llama_model_loader(const std::string & fname, bool use_mmap,
     this->check_tensors = check_tensors;
     this->repack_tensors = repack_tensors;
     this->use_thp = use_thp;
+    this->merge_qkv = merge_qkv;
 }

 llama_model_loader::~llama_model_loader() {

@@ -44,6 +44,7 @@ struct llama_model_loader {
     bool check_tensors;
     bool repack_tensors = false;
     bool use_thp = false;
+    bool merge_qkv = false;

     llama_files files;
     llama_ftype ftype;
@@ -78,7 +79,7 @@ struct llama_model_loader {
     std::string arch_name;
     LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);

-    llama_model_loader(const std::string & fname, bool use_mmap, bool check_tensors, bool repack_tensors, bool use_thp,
+    llama_model_loader(const std::string & fname, bool use_mmap, bool check_tensors, bool repack_tensors, bool use_thp, bool merge_qkv,
                        const llama_model_kv_override * param_overrides_p,
                        const llama_model_tensor_buft_override * param_tensor_buft_overrides_p);

@@ -1007,7 +1007,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         auto v = (std::vector<llama_model_kv_override>*)params->kv_overrides;
         kv_overrides = v->data();
     }
-    llama_model_loader ml(fname_inp, use_mmap, /*check_tensors*/ true, /* repack_tensors */ false, /* use_thp */ false, kv_overrides, nullptr);
+    llama_model_loader ml(fname_inp, use_mmap, /*check_tensors*/ true, /* repack_tensors */ false,
+                          /* use_thp */ false, /* merge_qkv */ false, kv_overrides, nullptr);
     ml.init_mappings(false); // no prefetching

     llama_model model;

@@ -1896,7 +1896,7 @@ static bool llm_load_tensors(
 static int llama_model_load(const std::string & fname, llama_model & model, llama_model_params & params) {
     try {
         llama_model_loader ml(fname, params.use_mmap, params.check_tensors,
-                              params.repack_tensors, params.use_thp, params.kv_overrides, params.tensor_buft_overrides);
+                              params.repack_tensors, params.use_thp, params.merge_qkv, params.kv_overrides, params.tensor_buft_overrides);

         model.hparams.vocab_only = params.vocab_only;

@@ -3788,6 +3788,7 @@ struct llama_model_params llama_model_default_params() {
         /*.repack_tensors =*/ false,
         /*.use_thp =*/ false,
         /*.validate_quants =*/ false,
+        /*.merge_qkv =*/ false,
     };

 #ifdef GGML_USE_METAL