mirror of https://github.com/lllyasviel/stable-diffusion-webui-forge.git
exclude torch jit objects from space memory management
todo: fix a bug where torch jit module offload does not work on some torch versions
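For context, a minimal sketch (not part of the commit) of why the name-based check in the hunk below works: torch.jit.script wraps an nn.Module in a RecursiveScriptModule, so matching 'ScriptModule' against the class name catches scripted modules without an isinstance check. The Tiny class and is_torch_jit helper here are illustrative, not repo code.

import torch
import torch.nn as nn

class Tiny(nn.Module):
    def forward(self, x):
        return x * 2

def is_torch_jit(m):
    # The same test the commit introduces: match on the class name, which
    # covers ScriptModule subclasses such as RecursiveScriptModule.
    return 'ScriptModule' in type(m).__name__

plain = Tiny()
scripted = torch.jit.script(Tiny())

print(type(plain).__name__, is_torch_jit(plain))        # Tiny False
print(type(scripted).__name__, is_torch_jit(scripted))  # RecursiveScriptModule True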
@@ -95,7 +95,12 @@ def load_module(m):
 print(f"[Memory Management] Required Inference Memory: {inference_memory / (1024 * 1024):.2f} MB")
 print(f"[Memory Management] Estimated Remaining GPU Memory: {estimated_remaining_memory / (1024 * 1024):.2f} MB")
 
-if ALWAYS_SWAP or estimated_remaining_memory < 0:
+is_torch_jit = 'ScriptModule' in type(m).__name__
+
+if is_torch_jit:
+    print(f'Detected torch jit module: {type(m).__name__}')
+
+if (ALWAYS_SWAP or estimated_remaining_memory < 0) and not is_torch_jit:
     print(f'Move module to SWAP: {type(m).__name__}')
     DynamicSwapInstaller.install_model(m, target_device=gpu)
     model_gpu_memory_when_using_cpu_swap = memory_management.compute_model_gpu_memory_when_using_cpu_swap(current_free_mem, inference_memory)
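Read as a whole, the hunk makes the swap decision conditional on the module type. A minimal sketch of the resulting control flow (the should_swap name is illustrative, not the repo's API):

def should_swap(m, always_swap, estimated_remaining_memory):
    # JIT modules are reported but never swapped, even when GPU memory is
    # short or ALWAYS_SWAP is set; per the commit message, offload is broken
    # for them on some torch versions.
    is_torch_jit = 'ScriptModule' in type(m).__name__
    if is_torch_jit:
        print(f'Detected torch jit module: {type(m).__name__}')
    return (always_swap or estimated_remaining_memory < 0) and not is_torch_jit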