diff --git a/src/llama.cpp b/src/llama.cpp
index 9a6ef293..8d7d3917 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -4105,7 +4105,7 @@ static void llm_load_hparams(
                 // that have no expert_gating_func model parameter set
                 hparams.expert_gating_func = LLM_EXPERT_GATING_FUNC_SOFTMAX;
             }
-            ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul);
+            ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul, false);
             switch (hparams.n_layer) {
                 case 27: model.type = e_model::MODEL_16B; break;
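
For context, the third argument to `get_key` is a `required` flag that defaults to `true`; passing `false` makes the lookup tolerate GGUF files that predate the `rope.scaling.yarn_log_multiplier` metadata key, leaving the destination at its default instead of aborting the load. A minimal sketch of that optional-key pattern (the `model_loader` type and the flat `std::map` store here are simplified stand-ins, not llama.cpp's actual `llama_model_loader`):

```cpp
#include <map>
#include <stdexcept>
#include <string>

// Simplified stand-in for the loader's key/value metadata lookup.
struct model_loader {
    std::map<std::string, float> kv; // hypothetical flat metadata store

    // Throws for a missing *required* key; with required == false it
    // leaves dst untouched and reports whether the key was present.
    bool get_key(const std::string & key, float & dst, bool required = true) const {
        auto it = kv.find(key);
        if (it == kv.end()) {
            if (required) {
                throw std::runtime_error("key not found in model: " + key);
            }
            return false; // optional key absent: dst keeps its default
        }
        dst = it->second;
        return true;
    }
};
```

Under that reading, the one-line change means models whose metadata lacks the key still load, with `hparams.rope_yarn_log_mul` keeping its default value rather than the loader throwing.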