Remove return values from load_gen
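This change makes ExLlamaV2.load() exhaust the load_gen() generator instead of returning its first yielded item, and comments out load_gen()'s final yield of gpu_split (and stats), so neither call hands a value back to the caller. A LoRA directory path in the example code is updated as well.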
@@ -35,7 +35,7 @@ cache = ExLlamaV2Cache(model)
 
 # Load LoRA
 
-lora_directory = "/mnt/str/models/_test_loras/tloen_alpaca-lora-7b/"
+lora_directory = "/mnt/str/models/_lora/tloen_alpaca-lora-7b/"
 lora = ExLlamaV2Lora.from_directory(model, lora_directory)
 
 # Initialize generators
@@ -245,7 +245,7 @@ class ExLlamaV2:
     def load(self, gpu_split = None, lazy = False, stats = False, callback = None, callback_gen = None):
 
         f = self.load_gen(gpu_split, lazy, stats, callback, callback_gen)
-        for item in f: return item
+        for item in f: x = item
 
 
     def load_gen(self, gpu_split = None, lazy = False, stats = False, callback = None, callback_gen = None):
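The one-line change in load() matters because of how Python treats the two loop bodies: "for item in f: return item" exits on the generator's first yield and propagates that value, while "for item in f: x = item" runs the generator to exhaustion and discards everything it yields. A minimal standalone sketch of the difference (the generator and function names below are illustrative, not code from the repo):

def steps():
    # stand-in for a generator such as load_gen()
    yield "a"
    yield "b"

def first_item(f):
    # old pattern: returns the first yielded item; the generator
    # is abandoned and never runs past its first yield
    for item in f: return item

def drain(f):
    # new pattern: consumes every yielded item and returns None
    for item in f: x = item

print(first_item(steps()))  # prints: a
print(drain(steps()))       # prints: None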
@@ -275,8 +275,8 @@ class ExLlamaV2:
         self.loaded = True
         cleanup_stfiles()
 
-        if stats: yield gpu_split, stats_
-        else: yield gpu_split
+        # if stats: yield gpu_split, stats_
+        # else: yield gpu_split
 
 
     def load_autosplit(self, cache, reserve_vram = None, last_id_only = False, callback = None, callback_gen = None):
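With the final yield commented out, load_gen() no longer produces the gpu_split (or the stats tuple) as its last item, and load(), which now merely drains the generator, returns None. A hedged sketch of the calling pattern after this commit (the gpu_split argument shown is an assumption for illustration; model is set up as in the example code above):

# load() no longer returns the resolved gpu_split after this commit
model.load(gpu_split = None)

# driving load_gen() directly: iterate purely for side effects, since
# any items it may still yield are not the final gpu_split/stats
for _ in model.load_gen(gpu_split = None):
    pass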