mirror of
https://github.com/lllyasviel/stable-diffusion-webui-forge.git
synced 2026-04-21 14:59:05 +00:00
Merge upstream PR 14855
This commit is contained in:
@@ -2,6 +2,7 @@ import torch
|
||||
from ldm_patched.modules.conds import CONDRegular, CONDCrossAttn
|
||||
from ldm_patched.modules.samplers import sampling_function
|
||||
from ldm_patched.modules import model_management
|
||||
from modules_forge.stream import synchronize_current_stream
|
||||
|
||||
|
||||
def cond_from_a1111_to_patched_ldm(cond):
|
||||
@@ -113,4 +114,5 @@ def sampling_prepare(unet, x):
|
||||
def sampling_cleanup(unet):
    """Tear down per-sampling state: clean up every attached ControlNet,
    then wait for the active device stream to finish its queued work.
    """
    controlnets = unet.list_controlnets()
    for controlnet in controlnets:
        controlnet.cleanup()
    synchronize_current_stream()
|
||||
|
||||
modules_forge/stream.py — 56 lines (new file)
@@ -0,0 +1,56 @@
|
||||
# https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14855
|
||||
|
||||
import torch
|
||||
|
||||
from modules import shared
|
||||
from ldm_patched.modules import model_management
|
||||
|
||||
|
||||
def stream_context():
    """Return the backend's stream context-manager factory, or None.

    Prefers CUDA; falls back to Intel XPU. The returned object is the
    backend's `*.stream` callable (used as `with stream_context()(s): ...`);
    None means no stream backend is available.
    """
    if torch.cuda.is_available():
        return torch.cuda.stream
    elif model_management.is_intel_xpu():
        return torch.xpu.stream
    else:
        return None
|
||||
|
||||
|
||||
def get_current_stream():
    """Return the current device stream (CUDA or Intel XPU), or None.

    Best-effort: any backend failure (runtime present but broken, helper
    unavailable, etc.) disables streaming rather than raising, so callers
    can treat a None result as "run without streams".
    """
    try:
        if torch.cuda.is_available():
            return torch.cuda.current_stream(torch.device(torch.cuda.current_device()))
        if model_management.is_intel_xpu():
            return torch.xpu.current_stream(torch.device("xpu"))
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; the stream-less fallback below is deliberate.
        pass
    print('Stream is not used.')
    return None
|
||||
|
||||
|
||||
def get_new_stream():
    """Create and return a fresh device stream (CUDA or Intel XPU), or None.

    Mirrors get_current_stream(): best-effort, returning None on any backend
    failure so the caller falls back to stream-less operation.
    """
    try:
        if torch.cuda.is_available():
            return torch.cuda.Stream(torch.device(torch.cuda.current_device()))
        if model_management.is_intel_xpu():
            return torch.xpu.Stream(torch.device("xpu"))
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; failure just disables streaming.
        pass
    print('Stream is not used.')
    return None
|
||||
|
||||
|
||||
def synchronize_current_stream():
    """Block until all work queued on the module-level `current_stream`
    has completed. No-op when streaming is disabled (stream is None).
    """
    stream = current_stream
    if stream is not None:
        stream.synchronize()
|
||||
|
||||
|
||||
# Module-level stream setup: the non-streamlined-lowvram option forces
# stream-less operation; otherwise try to acquire both the current stream
# and a dedicated mover stream, and only enable streaming if both exist.
if not shared.opts.use_non_streamlined_lowvram:
    current_stream = get_current_stream()
    mover_stream = get_new_stream()
else:
    current_stream = None
    mover_stream = None
using_stream = (current_stream is not None) and (mover_stream is not None)
|
||||
Reference in New Issue
Block a user