Mirror of https://github.com/lllyasviel/stable-diffusion-webui-forge.git
safe value for new memory peak

This commit widens the headroom subtracted from free VRAM before computing the low-VRAM model budget, from 1 GiB to 1.5 GiB, and removes the related TODO comments.
@@ -435,7 +435,7 @@ def load_models_gpu(models, memory_required=0):
         if lowvram_available and (vram_set_state == VRAMState.LOW_VRAM or vram_set_state == VRAMState.NORMAL_VRAM):
             model_size = loaded_model.model_memory_required(torch_dev)
             current_free_mem = get_free_memory(torch_dev)
-            lowvram_model_memory = int(max(64 * (1024 * 1024), (current_free_mem - 1024 * (1024 * 1024)) / 1.3 ))
+            lowvram_model_memory = int(max(64 * (1024 * 1024), (current_free_mem - 1.5 * 1024 * (1024 * 1024)) / 1.3))
             if model_size > (current_free_mem - inference_memory): #only switch to lowvram if really necessary
                 vram_set_state = VRAMState.LOW_VRAM
             else:
@@ -443,9 +443,6 @@ def load_models_gpu(models, memory_required=0):
 
         if vram_set_state == VRAMState.NO_VRAM:
             lowvram_model_memory = 64 * 1024 * 1024
 
-        # TODO: New offload system seems to have unpredicted risk to OOM on some 2GB devices
-        # maybe lowvram_model_memory *= 1.5 later if user reports that
-
         cur_loaded_model = loaded_model.model_load(lowvram_model_memory)
         current_loaded_models.insert(0, loaded_model)
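For context, the updated budget rule in the first hunk can be read as a standalone function. The sketch below assumes nothing beyond the diff itself: the helper name lowvram_budget and the sample free-memory values are illustrative, while the 64 MiB floor, the 1.5 GiB reserve, and the 1.3 safety divisor are taken verbatim from the added line.

# Minimal sketch of the post-commit low-VRAM budget rule.
# lowvram_budget is a hypothetical name; the constants mirror the diff.

MiB = 1024 * 1024
GiB = 1024 * MiB

def lowvram_budget(current_free_mem: int) -> int:
    """Bytes of model weights to keep resident on the GPU."""
    # Reserve 1.5 GiB of headroom (1.0 GiB before this commit), apply a
    # 1.3x safety divisor, and never budget below the 64 MiB floor.
    return int(max(64 * MiB, (current_free_mem - 1.5 * GiB) / 1.3))

for free_gib in (2, 4, 8):
    print(f"{free_gib} GiB free -> {lowvram_budget(free_gib * GiB) / MiB:.0f} MiB budget")

With less than roughly 1.6 GiB free, the max() clamps the budget to the same 64 MiB floor that the NO_VRAM branch assigns directly, which lines up with the removed TODO's concern about OOM on 2 GB devices.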