From 04ca346732d1255a16f8b024b0e531144910cc0d Mon Sep 17 00:00:00 2001
From: turboderp <11859846+turboderp@users.noreply.github.com>
Date: Tue, 14 Oct 2025 03:11:59 +0200
Subject: [PATCH] Fix formatting

---
 backends/exllamav2/model.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/backends/exllamav2/model.py b/backends/exllamav2/model.py
index 447e9f4..1d32309 100644
--- a/backends/exllamav2/model.py
+++ b/backends/exllamav2/model.py
@@ -310,8 +310,8 @@ class ExllamaV2Container(BaseModelContainer):

         if self.config.max_seq_len > cache_size:
             logger.warning(
-                f"The given max_seq_len ({self.config.max_seq_len}) is larger than the "
-                f"cache size and will be limited to {cache_size} tokens."
+                f"The given max_seq_len ({self.config.max_seq_len}) is larger than "
+                f"the cache size and will be limited to {cache_size} tokens."
             )
             self.config.max_seq_len = cache_size