mirror of
https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
synced 2026-03-05 13:10:47 +00:00
Merge pull request #3 from 920232796/master
fix device support for MPS; update the support for SD2.0
This commit is contained in:
@@ -38,8 +38,8 @@ def get_optimal_device():
|
||||
if torch.cuda.is_available():
|
||||
return torch.device(get_cuda_device_string())
|
||||
|
||||
# if has_mps():
|
||||
# return torch.device("mps")
|
||||
if has_mps():
|
||||
return torch.device("mps")
|
||||
|
||||
return cpu
|
||||
|
||||
|
||||
@@ -29,7 +29,7 @@ diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.At
|
||||
# new memory efficient cross attention blocks do not support hypernets and we already
|
||||
# have memory efficient cross attention anyway, so this disables SD2.0's memory efficient cross attention
|
||||
ldm.modules.attention.MemoryEfficientCrossAttention = ldm.modules.attention.CrossAttention
|
||||
# ldm.modules.attention.BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = ldm.modules.attention.CrossAttention
|
||||
ldm.modules.attention.BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = ldm.modules.attention.CrossAttention
|
||||
|
||||
# silence new console spam from SD2
|
||||
ldm.modules.attention.print = lambda *args: None
|
||||
|
||||
@@ -110,7 +110,11 @@ restricted_opts = {
|
||||
from omegaconf import OmegaConf
|
||||
config = OmegaConf.load(f"{cmd_opts.config}")
|
||||
# XLMR-Large
|
||||
text_model_name = config.model.params.cond_stage_config.params.name
|
||||
try:
|
||||
text_model_name = config.model.params.cond_stage_config.params.name
|
||||
|
||||
except :
|
||||
text_model_name = "stable_diffusion"
|
||||
|
||||
cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access
|
||||
|
||||
|
||||
Reference in New Issue
Block a user