Do not allocate KV cache for unused layers

This commit is contained in:
Iwan Kawrakow
2025-10-20 08:35:25 +03:00
parent 1789de5994
commit 599c812f12

View File

@@ -532,7 +532,7 @@ static bool llama_kv_cache_init(
const struct llama_hparams & hparams = model.hparams;
-const int64_t n_layer = hparams.n_layer;
+const int64_t n_layer = hparams.n_layer - hparams.nextn_predict_layers;
cache.has_shift = false;