Mirror of https://github.com/ikawrakow/ik_llama.cpp.git (synced 2026-03-09 21:40:22 +00:00)

Commit: "Be able to quantize mmproj files (#1367)"

This commit is contained in:
@@ -39,7 +39,7 @@ static inline const char * llm_expert_gating_func_name(llm_expert_gating_func_ty
 void llm_load_hparams(
         llama_model_loader & ml,
-        llama_model & model) {
+        llama_model & model, bool ignore_vocab) {
     auto & hparams = model.hparams;
     const gguf_context * ctx = ml.meta;
 
@@ -54,11 +54,13 @@ void llm_load_hparams(
         model.gguf_kv.emplace(name, value);
     }
 
+    ml.get_key(LLM_KV_BLOCK_COUNT, hparams.n_layer);
+
     // get general kv
     ml.get_key(LLM_KV_GENERAL_NAME, model.name, false);
 
     // get hparams kv
-    ml.get_key(LLM_KV_VOCAB_SIZE, hparams.n_vocab, false) || ml.get_arr_n(LLM_KV_TOKENIZER_LIST, hparams.n_vocab);
+    ml.get_key(LLM_KV_VOCAB_SIZE, hparams.n_vocab, false) || ml.get_arr_n(LLM_KV_TOKENIZER_LIST, hparams.n_vocab, !ignore_vocab);
 
     // everything past this point is not vocab-related
     if (hparams.vocab_only) {
@@ -67,7 +69,6 @@ void llm_load_hparams(
 
     ml.get_key(LLM_KV_CONTEXT_LENGTH, hparams.n_ctx_train);
     ml.get_key(LLM_KV_EMBEDDING_LENGTH, hparams.n_embd);
-    ml.get_key(LLM_KV_BLOCK_COUNT, hparams.n_layer);
     ml.get_key(LLM_KV_EXPERT_COUNT, hparams.n_expert, false);
     ml.get_key(LLM_KV_EXPERT_USED_COUNT, hparams.n_expert_used, false);
 
Reference in New Issue
Block a user