Mirror of https://github.com/lllyasviel/stable-diffusion-webui-forge.git, synced 2026-01-26 19:09:45 +00:00
Free WebUI from its Prison
Congratulations WebUI. Say Hello to freedom.
@@ -7,7 +7,6 @@ import torch
import modules.scripts as scripts
from modules import shared, script_callbacks, masking, images
from modules.ui_components import InputAccordion
from modules.api.api import decode_base64_to_image
import gradio as gr

from lib_controlnet import global_state, external_code

@@ -24,7 +24,6 @@ from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusion
from modules.textual_inversion.textual_inversion import create_embedding, train_embedding
from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork
from PIL import PngImagePlugin
from modules.sd_models_config import find_checkpoint_config_near_filename
from modules.realesrgan_model import get_realesrgan_models
from modules import devices
from typing import Any
@@ -725,7 +724,7 @@ class Api:

    def get_sd_models(self):
        import modules.sd_models as sd_models
        return [{"title": x.title, "model_name": x.model_name, "hash": x.shorthash, "sha256": x.sha256, "filename": x.filename, "config": find_checkpoint_config_near_filename(x)} for x in sd_models.checkpoints_list.values()]
        return [{"title": x.title, "model_name": x.model_name, "hash": x.shorthash, "sha256": x.sha256, "filename": x.filename} for x in sd_models.checkpoints_list.values()]

    def get_sd_vaes(self):
        import modules.sd_vae as sd_vae

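These dicts are what the /sdapi/v1/sd-models endpoint serves; a hypothetical client sketch, assuming a local instance launched with the API enabled:

import requests  # third-party; URL, port and endpoint are assumptions about a default local setup

models = requests.get("http://127.0.0.1:7860/sdapi/v1/sd-models").json()
for m in models:
    print(m["title"], m["hash"], m["filename"])  # fields match the dict built above; "config" is gone after this commit
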
@@ -10,16 +10,6 @@ from threading import Thread
from modules.timer import startup_timer


class HiddenPrints:
    def __enter__(self):
        self._original_stdout = sys.stdout
        sys.stdout = open(os.devnull, 'w')

    def __exit__(self, exc_type, exc_val, exc_tb):
        sys.stdout.close()
        sys.stdout = self._original_stdout


def imports():
    logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR)  # sshh...
    logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
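A minimal, self-contained sketch of the stdout-suppression pattern that the removed HiddenPrints class implemented:

import os
import sys

original = sys.stdout
sys.stdout = open(os.devnull, 'w')   # everything printed now goes to the null device
print("suppressed")
sys.stdout.close()
sys.stdout = original                # restore stdout, as __exit__ does above
print("visible again")
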
@@ -35,16 +25,8 @@ def imports():
    import gradio  # noqa: F401
    startup_timer.record("import gradio")

    with HiddenPrints():
        from modules import paths, timer, import_hook, errors  # noqa: F401
        startup_timer.record("setup paths")

        import ldm.modules.encoders.modules  # noqa: F401
        import ldm.modules.diffusionmodules.model
        startup_timer.record("import ldm")

        import sgm.modules.encoders.modules  # noqa: F401
        startup_timer.record("import sgm")
    from modules import paths, timer, import_hook, errors  # noqa: F401
    startup_timer.record("setup paths")

    from modules import shared_init
    shared_init.initialize()
@@ -141,11 +123,6 @@ def initialize_rest(*, reload_script_modules=False):
    textual_inversion.textual_inversion.list_textual_inversion_templates()
    startup_timer.record("refresh textual inversion templates")

    from modules import script_callbacks, sd_hijack_optimizations, sd_hijack
    script_callbacks.on_list_optimizers(sd_hijack_optimizations.list_optimizers)
    sd_hijack.list_optimizers()
    startup_timer.record("scripts list_optimizers")

    from modules import sd_unet
    sd_unet.list_unets()
    startup_timer.record("scripts list_unets")

@@ -391,15 +391,15 @@ def prepare_environment():
    openclip_package = os.environ.get('OPENCLIP_PACKAGE', "https://github.com/mlfoundations/open_clip/archive/bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b.zip")

    assets_repo = os.environ.get('ASSETS_REPO', "https://github.com/AUTOMATIC1111/stable-diffusion-webui-assets.git")
    stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/Stability-AI/stablediffusion.git")
    stable_diffusion_xl_repo = os.environ.get('STABLE_DIFFUSION_XL_REPO', "https://github.com/Stability-AI/generative-models.git")
    # stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/Stability-AI/stablediffusion.git")
    # stable_diffusion_xl_repo = os.environ.get('STABLE_DIFFUSION_XL_REPO', "https://github.com/Stability-AI/generative-models.git")
    k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/crowsonkb/k-diffusion.git')
    huggingface_guess_repo = os.environ.get('HUGGINGFACE_GUESS_REPO', 'https://github.com/lllyasviel/huggingface_guess.git')
    blip_repo = os.environ.get('BLIP_REPO', 'https://github.com/salesforce/BLIP.git')

    assets_commit_hash = os.environ.get('ASSETS_COMMIT_HASH', "6f7db241d2f8ba7457bac5ca9753331f0c266917")
    stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "cf1d67a6fd5ea1aa600c4df58e5b47da45f6bdbf")
    stable_diffusion_xl_commit_hash = os.environ.get('STABLE_DIFFUSION_XL_COMMIT_HASH', "45c443b316737a4ab6e40413d7794a7f5657c19f")
    # stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "cf1d67a6fd5ea1aa600c4df58e5b47da45f6bdbf")
    # stable_diffusion_xl_commit_hash = os.environ.get('STABLE_DIFFUSION_XL_COMMIT_HASH', "45c443b316737a4ab6e40413d7794a7f5657c19f")
    k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "ab527a9a6d347f364e3d185ba6d714e22d80cb3c")
    huggingface_guess_commit_hash = os.environ.get('HUGGINGFACE_GUESS_HASH', "78f7d1da6a00721a6670e33a9132fd73c4e987b4")
    blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
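Because every repo URL and commit pin is read through os.environ.get, each can be overridden before launch without editing the file; for example:

import os

# hypothetical override, exported before starting the WebUI:
#   export K_DIFFUSION_REPO=https://example.com/fork/k-diffusion.git
k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/crowsonkb/k-diffusion.git')
print(k_diffusion_repo)  # prints the override if set, otherwise the pinned default
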
@@ -456,8 +456,8 @@ def prepare_environment():
    os.makedirs(os.path.join(script_path, dir_repos), exist_ok=True)

    git_clone(assets_repo, repo_dir('stable-diffusion-webui-assets'), "assets", assets_commit_hash)
    git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", stable_diffusion_commit_hash)
    git_clone(stable_diffusion_xl_repo, repo_dir('generative-models'), "Stable Diffusion XL", stable_diffusion_xl_commit_hash)
    # git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", stable_diffusion_commit_hash)
    # git_clone(stable_diffusion_xl_repo, repo_dir('generative-models'), "Stable Diffusion XL", stable_diffusion_xl_commit_hash)
    git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash)
    git_clone(huggingface_guess_repo, repo_dir('huggingface_guess'), "huggingface_guess", huggingface_guess_commit_hash)
    git_clone(blip_repo, repo_dir('BLIP'), "BLIP", blip_commit_hash)

@@ -36,8 +36,8 @@ assert sd_path is not None, f"Couldn't find Stable Diffusion in any of: {possibl
mute_sdxl_imports()

path_dirs = [
    (sd_path, 'ldm', 'Stable Diffusion', []),
    (os.path.join(sd_path, '../generative-models'), 'sgm', 'Stable Diffusion XL', ["sgm"]),
    # (sd_path, 'ldm', 'Stable Diffusion', []),
    # (os.path.join(sd_path, '../generative-models'), 'sgm', 'Stable Diffusion XL', ["sgm"]),
    (os.path.join(sd_path, '../BLIP'), 'models/blip.py', 'BLIP', []),
    (os.path.join(sd_path, '../k-diffusion'), 'k_diffusion/sampling.py', 'k_diffusion', ["atstart"]),
    (os.path.join(sd_path, '../huggingface_guess'), 'huggingface_guess/detection.py', 'huggingface_guess', []),
@@ -53,13 +53,13 @@ for d, must_exist, what, options in path_dirs:
    d = os.path.abspath(d)
    if "atstart" in options:
        sys.path.insert(0, d)
    elif "sgm" in options:
        # Stable Diffusion XL repo has scripts dir with __init__.py in it which ruins every extension's scripts dir, so we
        # import sgm and remove it from sys.path so that when a script imports scripts.something, it doesn't use sgm's scripts dir.

        sys.path.insert(0, d)
        import sgm  # noqa: F401
        sys.path.pop(0)
    # elif "sgm" in options:
    #     # Stable Diffusion XL repo has scripts dir with __init__.py in it which ruins every extension's scripts dir, so we
    #     # import sgm and remove it from sys.path so that when a script imports scripts.something, it doesn't use sgm's scripts dir.
    #
    #     sys.path.insert(0, d)
    #     import sgm  # noqa: F401
    #     sys.path.pop(0)
    else:
        sys.path.append(d)
    paths[what] = d

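The "sgm" branch above is an instance of the general import-then-pop trick; a generic, self-contained sketch:

import importlib
import sys

def import_without_lingering_path(directory, module_name):
    # make `directory` importable just long enough to import one module,
    # then drop it so its subpackages (e.g. a scripts/ dir with __init__.py)
    # cannot shadow same-named packages elsewhere on sys.path
    sys.path.insert(0, directory)
    try:
        return importlib.import_module(module_name)
    finally:
        sys.path.pop(0)
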
@@ -1,124 +1,3 @@
import torch
from torch.nn.functional import silu
from types import MethodType

from modules import devices, sd_hijack_optimizations, shared, script_callbacks, errors, sd_unet, patches
from modules.hypernetworks import hypernetwork
from modules.shared import cmd_opts
from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr, xlmr_m18

import ldm.modules.attention
import ldm.modules.diffusionmodules.model
import ldm.modules.diffusionmodules.openaimodel
import ldm.models.diffusion.ddpm
import ldm.models.diffusion.ddim
import ldm.models.diffusion.plms
import ldm.modules.encoders.modules

import sgm.modules.attention
import sgm.modules.diffusionmodules.model
import sgm.modules.diffusionmodules.openaimodel
import sgm.modules.encoders.modules

attention_CrossAttention_forward = ldm.modules.attention.CrossAttention.forward
diffusionmodules_model_nonlinearity = ldm.modules.diffusionmodules.model.nonlinearity
diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.AttnBlock.forward

# new memory efficient cross attention blocks do not support hypernets and we already
# have memory efficient cross attention anyway, so this disables SD2.0's memory efficient cross attention
ldm.modules.attention.MemoryEfficientCrossAttention = ldm.modules.attention.CrossAttention
ldm.modules.attention.BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = ldm.modules.attention.CrossAttention

# silence new console spam from SD2
ldm.modules.attention.print = shared.ldm_print
ldm.modules.diffusionmodules.model.print = shared.ldm_print
ldm.util.print = shared.ldm_print
ldm.models.diffusion.ddpm.print = shared.ldm_print

optimizers = []
current_optimizer: sd_hijack_optimizations.SdOptimization = None

ldm_patched_forward = sd_unet.create_unet_forward(ldm.modules.diffusionmodules.openaimodel.UNetModel.forward)
ldm_original_forward = patches.patch(__file__, ldm.modules.diffusionmodules.openaimodel.UNetModel, "forward", ldm_patched_forward)

sgm_patched_forward = sd_unet.create_unet_forward(sgm.modules.diffusionmodules.openaimodel.UNetModel.forward)
sgm_original_forward = patches.patch(__file__, sgm.modules.diffusionmodules.openaimodel.UNetModel, "forward", sgm_patched_forward)


def list_optimizers():
    new_optimizers = script_callbacks.list_optimizers_callback()

    new_optimizers = [x for x in new_optimizers if x.is_available()]

    new_optimizers = sorted(new_optimizers, key=lambda x: x.priority, reverse=True)

    optimizers.clear()
    optimizers.extend(new_optimizers)


def apply_optimizations(option=None):
    return


def undo_optimizations():
    return


def fix_checkpoint():
    """checkpoints are now added and removed in embedding/hypernet code, since torch doesn't want
    checkpoints to be added when not training (there's a warning)"""

    pass


def weighted_loss(sd_model, pred, target, mean=True):
    # Calculate the weight normally, but ignore the mean
    loss = sd_model._old_get_loss(pred, target, mean=False)

    # Check if we have weights available
    weight = getattr(sd_model, '_custom_loss_weight', None)
    if weight is not None:
        loss *= weight

    # Return the loss, as mean if specified
    return loss.mean() if mean else loss

def weighted_forward(sd_model, x, c, w, *args, **kwargs):
    try:
        # Temporarily append weights to a place accessible during loss calc
        sd_model._custom_loss_weight = w

        # Replace 'get_loss' with a weight-aware one. Otherwise we need to reimplement 'forward' completely
        # Keep 'get_loss', but don't overwrite the previous old_get_loss if it's already set
        if not hasattr(sd_model, '_old_get_loss'):
            sd_model._old_get_loss = sd_model.get_loss
        sd_model.get_loss = MethodType(weighted_loss, sd_model)

        # Run the standard forward function, but with the patched 'get_loss'
        return sd_model.forward(x, c, *args, **kwargs)
    finally:
        try:
            # Delete temporary weights if appended
            del sd_model._custom_loss_weight
        except AttributeError:
            pass

        # If we have an old loss function, reset the loss function to the original one
        if hasattr(sd_model, '_old_get_loss'):
            sd_model.get_loss = sd_model._old_get_loss
            del sd_model._old_get_loss

def apply_weighted_forward(sd_model):
    # Add new function 'weighted_forward' that can be called to calc weighted loss
    sd_model.weighted_forward = MethodType(weighted_forward, sd_model)

def undo_weighted_forward(sd_model):
    try:
        del sd_model.weighted_forward
    except AttributeError:
        pass


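A usage sketch for the weighted-loss helpers above, run against a hypothetical stand-in model (DummyModel is not part of the codebase; this assumes the helpers above are in scope):

import torch

class DummyModel(torch.nn.Module):
    def get_loss(self, pred, target, mean=True):
        loss = (pred - target) ** 2
        return loss.mean() if mean else loss

    def forward(self, x, c):
        return self.get_loss(x, c)   # stand-in for the real diffusion forward

model = DummyModel()
apply_weighted_forward(model)        # installs model.weighted_forward
x, c = torch.ones(4), torch.zeros(4)
w = torch.tensor([0.1, 0.1, 1.0, 1.0])
print(model.weighted_forward(x, c, w))   # per-element loss scaled by w, then averaged: tensor(0.5500)
undo_weighted_forward(model)
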
class StableDiffusionModelHijack:
    fixes = None
    layers = None
@@ -156,74 +35,201 @@ class StableDiffusionModelHijack:
        pass


class EmbeddingsWithFixes(torch.nn.Module):
    def __init__(self, wrapped, embeddings, textual_inversion_key='clip_l'):
        super().__init__()
        self.wrapped = wrapped
        self.embeddings = embeddings
        self.textual_inversion_key = textual_inversion_key
        self.weight = self.wrapped.weight

    def forward(self, input_ids):
        batch_fixes = self.embeddings.fixes
        self.embeddings.fixes = None

        inputs_embeds = self.wrapped(input_ids)

        if batch_fixes is None or len(batch_fixes) == 0 or max([len(x) for x in batch_fixes]) == 0:
            return inputs_embeds

        vecs = []
        for fixes, tensor in zip(batch_fixes, inputs_embeds):
            for offset, embedding in fixes:
                vec = embedding.vec[self.textual_inversion_key] if isinstance(embedding.vec, dict) else embedding.vec
                emb = devices.cond_cast_unet(vec)
                emb_len = min(tensor.shape[0] - offset - 1, emb.shape[0])
                tensor = torch.cat([tensor[0:offset + 1], emb[0:emb_len], tensor[offset + 1 + emb_len:]]).to(dtype=inputs_embeds.dtype)

            vecs.append(tensor)

        return torch.stack(vecs)


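The splice in EmbeddingsWithFixes.forward keeps the sequence length constant; a self-contained sketch of that torch.cat:

import torch

tensor = torch.zeros(77, 768)   # one prompt's token embeddings (CLIP-like shape, illustrative)
emb = torch.ones(4, 768)        # a 4-vector textual-inversion embedding
offset = 5                      # position of the embedding's placeholder token
emb_len = min(tensor.shape[0] - offset - 1, emb.shape[0])
tensor = torch.cat([tensor[0:offset + 1], emb[0:emb_len], tensor[offset + 1 + emb_len:]])
print(tensor.shape)             # torch.Size([77, 768]) -- the sequence length is unchanged
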
class TextualInversionEmbeddings(torch.nn.Embedding):
    def __init__(self, num_embeddings: int, embedding_dim: int, textual_inversion_key='clip_l', **kwargs):
        super().__init__(num_embeddings, embedding_dim, **kwargs)

        self.embeddings = model_hijack
        self.textual_inversion_key = textual_inversion_key

    @property
    def wrapped(self):
        return super().forward

    def forward(self, input_ids):
        return EmbeddingsWithFixes.forward(self, input_ids)


def add_circular_option_to_conv_2d():
    conv2d_constructor = torch.nn.Conv2d.__init__

    def conv2d_constructor_circular(self, *args, **kwargs):
        return conv2d_constructor(self, *args, padding_mode='circular', **kwargs)

    torch.nn.Conv2d.__init__ = conv2d_constructor_circular


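add_circular_option_to_conv_2d forces padding_mode='circular' on every Conv2d constructed afterwards, which is what makes seamlessly tileable output possible; the effect in isolation:

import torch

conv = torch.nn.Conv2d(3, 3, kernel_size=3, padding=1, padding_mode='circular')
x = torch.randn(1, 3, 8, 8)
print(conv(x).shape)   # torch.Size([1, 3, 8, 8]); borders wrap around instead of zero-padding
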
model_hijack = StableDiffusionModelHijack()


def register_buffer(self, name, attr):
    """
    Fix register buffer bug for Mac OS.
    """

    if type(attr) == torch.Tensor:
        if attr.device != devices.device:
            attr = attr.to(device=devices.device, dtype=(torch.float32 if devices.device.type == 'mps' else None))

    setattr(self, name, attr)


ldm.models.diffusion.ddim.DDIMSampler.register_buffer = register_buffer
ldm.models.diffusion.plms.PLMSSampler.register_buffer = register_buffer
# import torch
# from torch.nn.functional import silu
# from types import MethodType
#
# from modules import devices, sd_hijack_optimizations, shared, script_callbacks, errors, sd_unet, patches
# from modules.hypernetworks import hypernetwork
# from modules.shared import cmd_opts
# from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr, xlmr_m18
#
# import ldm.modules.attention
# import ldm.modules.diffusionmodules.model
# import ldm.modules.diffusionmodules.openaimodel
# import ldm.models.diffusion.ddpm
# import ldm.models.diffusion.ddim
# import ldm.models.diffusion.plms
# import ldm.modules.encoders.modules
#
# import sgm.modules.attention
# import sgm.modules.diffusionmodules.model
# import sgm.modules.diffusionmodules.openaimodel
# import sgm.modules.encoders.modules
#
# attention_CrossAttention_forward = ldm.modules.attention.CrossAttention.forward
# diffusionmodules_model_nonlinearity = ldm.modules.diffusionmodules.model.nonlinearity
# diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.AttnBlock.forward
#
# # new memory efficient cross attention blocks do not support hypernets and we already
# # have memory efficient cross attention anyway, so this disables SD2.0's memory efficient cross attention
# ldm.modules.attention.MemoryEfficientCrossAttention = ldm.modules.attention.CrossAttention
# ldm.modules.attention.BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = ldm.modules.attention.CrossAttention
#
# # silence new console spam from SD2
# ldm.modules.attention.print = shared.ldm_print
# ldm.modules.diffusionmodules.model.print = shared.ldm_print
# ldm.util.print = shared.ldm_print
# ldm.models.diffusion.ddpm.print = shared.ldm_print
#
# optimizers = []
# current_optimizer: sd_hijack_optimizations.SdOptimization = None
#
# ldm_patched_forward = sd_unet.create_unet_forward(ldm.modules.diffusionmodules.openaimodel.UNetModel.forward)
# ldm_original_forward = patches.patch(__file__, ldm.modules.diffusionmodules.openaimodel.UNetModel, "forward", ldm_patched_forward)
#
# sgm_patched_forward = sd_unet.create_unet_forward(sgm.modules.diffusionmodules.openaimodel.UNetModel.forward)
# sgm_original_forward = patches.patch(__file__, sgm.modules.diffusionmodules.openaimodel.UNetModel, "forward", sgm_patched_forward)
#
#
# def list_optimizers():
#     new_optimizers = script_callbacks.list_optimizers_callback()
#
#     new_optimizers = [x for x in new_optimizers if x.is_available()]
#
#     new_optimizers = sorted(new_optimizers, key=lambda x: x.priority, reverse=True)
#
#     optimizers.clear()
#     optimizers.extend(new_optimizers)
#
#
# def apply_optimizations(option=None):
#     return
#
#
# def undo_optimizations():
#     return
#
#
# def fix_checkpoint():
#     """checkpoints are now added and removed in embedding/hypernet code, since torch doesn't want
#     checkpoints to be added when not training (there's a warning)"""
#
#     pass
#
#
# def weighted_loss(sd_model, pred, target, mean=True):
#     # Calculate the weight normally, but ignore the mean
#     loss = sd_model._old_get_loss(pred, target, mean=False)
#
#     # Check if we have weights available
#     weight = getattr(sd_model, '_custom_loss_weight', None)
#     if weight is not None:
#         loss *= weight
#
#     # Return the loss, as mean if specified
#     return loss.mean() if mean else loss
#
# def weighted_forward(sd_model, x, c, w, *args, **kwargs):
#     try:
#         # Temporarily append weights to a place accessible during loss calc
#         sd_model._custom_loss_weight = w
#
#         # Replace 'get_loss' with a weight-aware one. Otherwise we need to reimplement 'forward' completely
#         # Keep 'get_loss', but don't overwrite the previous old_get_loss if it's already set
#         if not hasattr(sd_model, '_old_get_loss'):
#             sd_model._old_get_loss = sd_model.get_loss
#         sd_model.get_loss = MethodType(weighted_loss, sd_model)
#
#         # Run the standard forward function, but with the patched 'get_loss'
#         return sd_model.forward(x, c, *args, **kwargs)
#     finally:
#         try:
#             # Delete temporary weights if appended
#             del sd_model._custom_loss_weight
#         except AttributeError:
#             pass
#
#         # If we have an old loss function, reset the loss function to the original one
#         if hasattr(sd_model, '_old_get_loss'):
#             sd_model.get_loss = sd_model._old_get_loss
#             del sd_model._old_get_loss
#
# def apply_weighted_forward(sd_model):
#     # Add new function 'weighted_forward' that can be called to calc weighted loss
#     sd_model.weighted_forward = MethodType(weighted_forward, sd_model)
#
# def undo_weighted_forward(sd_model):
#     try:
#         del sd_model.weighted_forward
#     except AttributeError:
#         pass
#
#
#
#
#
# class EmbeddingsWithFixes(torch.nn.Module):
#     def __init__(self, wrapped, embeddings, textual_inversion_key='clip_l'):
#         super().__init__()
#         self.wrapped = wrapped
#         self.embeddings = embeddings
#         self.textual_inversion_key = textual_inversion_key
#         self.weight = self.wrapped.weight
#
#     def forward(self, input_ids):
#         batch_fixes = self.embeddings.fixes
#         self.embeddings.fixes = None
#
#         inputs_embeds = self.wrapped(input_ids)
#
#         if batch_fixes is None or len(batch_fixes) == 0 or max([len(x) for x in batch_fixes]) == 0:
#             return inputs_embeds
#
#         vecs = []
#         for fixes, tensor in zip(batch_fixes, inputs_embeds):
#             for offset, embedding in fixes:
#                 vec = embedding.vec[self.textual_inversion_key] if isinstance(embedding.vec, dict) else embedding.vec
#                 emb = devices.cond_cast_unet(vec)
#                 emb_len = min(tensor.shape[0] - offset - 1, emb.shape[0])
#                 tensor = torch.cat([tensor[0:offset + 1], emb[0:emb_len], tensor[offset + 1 + emb_len:]]).to(dtype=inputs_embeds.dtype)
#
#             vecs.append(tensor)
#
#         return torch.stack(vecs)
#
#
# class TextualInversionEmbeddings(torch.nn.Embedding):
#     def __init__(self, num_embeddings: int, embedding_dim: int, textual_inversion_key='clip_l', **kwargs):
#         super().__init__(num_embeddings, embedding_dim, **kwargs)
#
#         self.embeddings = model_hijack
#         self.textual_inversion_key = textual_inversion_key
#
#     @property
#     def wrapped(self):
#         return super().forward
#
#     def forward(self, input_ids):
#         return EmbeddingsWithFixes.forward(self, input_ids)
#
#
# def add_circular_option_to_conv_2d():
#     conv2d_constructor = torch.nn.Conv2d.__init__
#
#     def conv2d_constructor_circular(self, *args, **kwargs):
#         return conv2d_constructor(self, *args, padding_mode='circular', **kwargs)
#
#     torch.nn.Conv2d.__init__ = conv2d_constructor_circular
#
#
# model_hijack = StableDiffusionModelHijack()
#
#
# def register_buffer(self, name, attr):
#     """
#     Fix register buffer bug for Mac OS.
#     """
#
#     if type(attr) == torch.Tensor:
#         if attr.device != devices.device:
#             attr = attr.to(device=devices.device, dtype=(torch.float32 if devices.device.type == 'mps' else None))
#
#     setattr(self, name, attr)
#
#
# ldm.models.diffusion.ddim.DDIMSampler.register_buffer = register_buffer
# ldm.models.diffusion.plms.PLMSSampler.register_buffer = register_buffer

File diff suppressed because it is too large
@@ -1,154 +1,154 @@
import torch
from packaging import version
from einops import repeat
import math

from modules import devices
from modules.sd_hijack_utils import CondFunc


class TorchHijackForUnet:
    """
    This is torch, but with cat that resizes tensors to appropriate dimensions if they do not match;
    this makes it possible to create pictures with dimensions that are multiples of 8 rather than 64
    """

    def __getattr__(self, item):
        if item == 'cat':
            return self.cat

        if hasattr(torch, item):
            return getattr(torch, item)

        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'")

    def cat(self, tensors, *args, **kwargs):
        if len(tensors) == 2:
            a, b = tensors
            if a.shape[-2:] != b.shape[-2:]:
                a = torch.nn.functional.interpolate(a, b.shape[-2:], mode="nearest")

            tensors = (a, b)

        return torch.cat(tensors, *args, **kwargs)


th = TorchHijackForUnet()


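What the hijacked cat does when skip-connection shapes disagree, in isolation:

import torch

a = torch.randn(1, 4, 40, 40)
b = torch.randn(1, 4, 38, 38)
# plain torch.cat would raise on the mismatched spatial dims; the hijack first
# resizes `a` to b's height/width with nearest-neighbour interpolation
a = torch.nn.functional.interpolate(a, b.shape[-2:], mode="nearest")
print(torch.cat((a, b), dim=1).shape)   # torch.Size([1, 8, 38, 38])
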
# Below are monkey patches to enable upcasting a float16 UNet for float32 sampling
def apply_model(orig_func, self, x_noisy, t, cond, **kwargs):
    """Always make sure inputs to unet are in correct dtype."""
    if isinstance(cond, dict):
        for y in cond.keys():
            if isinstance(cond[y], list):
                cond[y] = [x.to(devices.dtype_unet) if isinstance(x, torch.Tensor) else x for x in cond[y]]
            else:
                cond[y] = cond[y].to(devices.dtype_unet) if isinstance(cond[y], torch.Tensor) else cond[y]

    with devices.autocast():
        result = orig_func(self, x_noisy.to(devices.dtype_unet), t.to(devices.dtype_unet), cond, **kwargs)
        if devices.unet_needs_upcast:
            return result.float()
        else:
            return result


# Monkey patch to create timestep embed tensor on device, avoiding a block.
def timestep_embedding(_, timesteps, dim, max_period=10000, repeat_only=False):
    """
    Create sinusoidal timestep embeddings.
    :param timesteps: a 1-D Tensor of N indices, one per batch element.
                      These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :return: an [N x dim] Tensor of positional embeddings.
    """
    if not repeat_only:
        half = dim // 2
        freqs = torch.exp(
            -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32, device=timesteps.device) / half
        )
        args = timesteps[:, None].float() * freqs[None]
        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
        if dim % 2:
            embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
    else:
        embedding = repeat(timesteps, 'b -> b d', d=dim)
    return embedding


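In closed form, the non-repeat branch computes, with $h = \lfloor \mathrm{dim}/2 \rfloor$ and $P$ = max_period: $f_i = e^{-\ln(P)\,i/h}$ for $i = 0, \dots, h-1$, and $\mathrm{emb}(t) = [\cos(t f_0), \dots, \cos(t f_{h-1}), \sin(t f_0), \dots, \sin(t f_{h-1})]$, with a zero column appended when dim is odd. Building freqs directly on timesteps.device is what avoids the blocking host-to-device copy the comment above refers to.
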
# Monkey patch to SpatialTransformer removing unnecessary contiguous calls.
# Prevents a lot of unnecessary aten::copy_ calls
def spatial_transformer_forward(_, self, x: torch.Tensor, context=None):
    # note: if no context is given, cross-attention defaults to self-attention
    if not isinstance(context, list):
        context = [context]
    b, c, h, w = x.shape
    x_in = x
    x = self.norm(x)
    if not self.use_linear:
        x = self.proj_in(x)
    x = x.permute(0, 2, 3, 1).reshape(b, h * w, c)
    if self.use_linear:
        x = self.proj_in(x)
    for i, block in enumerate(self.transformer_blocks):
        x = block(x, context=context[i])
    if self.use_linear:
        x = self.proj_out(x)
    x = x.view(b, h, w, c).permute(0, 3, 1, 2)
    if not self.use_linear:
        x = self.proj_out(x)
    return x + x_in


class GELUHijack(torch.nn.GELU, torch.nn.Module):
    def __init__(self, *args, **kwargs):
        torch.nn.GELU.__init__(self, *args, **kwargs)

    def forward(self, x):
        if devices.unet_needs_upcast:
            return torch.nn.GELU.forward(self.float(), x.float()).to(devices.dtype_unet)
        else:
            return torch.nn.GELU.forward(self, x)


ddpm_edit_hijack = None


def hijack_ddpm_edit():
    global ddpm_edit_hijack
    if not ddpm_edit_hijack:
        CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.decode_first_stage', first_stage_sub, first_stage_cond)
        CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.encode_first_stage', first_stage_sub, first_stage_cond)
        ddpm_edit_hijack = CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.apply_model', apply_model)


unet_needs_upcast = lambda *args, **kwargs: devices.unet_needs_upcast
CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.apply_model', apply_model, unet_needs_upcast)
CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', timestep_embedding)
CondFunc('ldm.modules.attention.SpatialTransformer.forward', spatial_transformer_forward)
CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', lambda orig_func, timesteps, *args, **kwargs: orig_func(timesteps, *args, **kwargs).to(torch.float32 if timesteps.dtype == torch.int64 else devices.dtype_unet), unet_needs_upcast)

if version.parse(torch.__version__) <= version.parse("1.13.2") or torch.cuda.is_available():
    CondFunc('ldm.modules.diffusionmodules.util.GroupNorm32.forward', lambda orig_func, self, *args, **kwargs: orig_func(self.float(), *args, **kwargs), unet_needs_upcast)
    CondFunc('ldm.modules.attention.GEGLU.forward', lambda orig_func, self, x: orig_func(self.float(), x.float()).to(devices.dtype_unet), unet_needs_upcast)
    CondFunc('open_clip.transformer.ResidualAttentionBlock.__init__', lambda orig_func, *args, **kwargs: kwargs.update({'act_layer': GELUHijack}) and False or orig_func(*args, **kwargs), lambda _, *args, **kwargs: kwargs.get('act_layer') is None or kwargs['act_layer'] == torch.nn.GELU)

first_stage_cond = lambda _, self, *args, **kwargs: devices.unet_needs_upcast and self.model.diffusion_model.dtype == torch.float16
first_stage_sub = lambda orig_func, self, x, **kwargs: orig_func(self, x.to(devices.dtype_vae), **kwargs)
CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.decode_first_stage', first_stage_sub, first_stage_cond)
CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.encode_first_stage', first_stage_sub, first_stage_cond)
CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.get_first_stage_encoding', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).float(), first_stage_cond)

CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.apply_model', apply_model)
CondFunc('sgm.modules.diffusionmodules.wrappers.OpenAIWrapper.forward', apply_model)


def timestep_embedding_cast_result(orig_func, timesteps, *args, **kwargs):
    if devices.unet_needs_upcast and timesteps.dtype == torch.int64:
        dtype = torch.float32
    else:
        dtype = devices.dtype_unet
    return orig_func(timesteps, *args, **kwargs).to(dtype=dtype)


CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', timestep_embedding_cast_result)
CondFunc('sgm.modules.diffusionmodules.openaimodel.timestep_embedding', timestep_embedding_cast_result)
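CondFunc comes from modules.sd_hijack_utils; judging from the call sites above, it resolves a dotted target path and swaps the function for a wrapper that defers to the substitute only while a predicate holds. A simplified, self-contained sketch of that pattern (cond_patch is hypothetical, not the real implementation):

def cond_patch(module, name, sub_func, cond_func):
    orig = getattr(module, name)

    def wrapper(*args, **kwargs):
        # run the substitute only when the predicate fires; fall through otherwise
        if cond_func(orig, *args, **kwargs):
            return sub_func(orig, *args, **kwargs)
        return orig(*args, **kwargs)

    setattr(module, name, wrapper)
    return orig
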
# import torch
# from packaging import version
# from einops import repeat
# import math
#
# from modules import devices
# from modules.sd_hijack_utils import CondFunc
#
#
# class TorchHijackForUnet:
#     """
#     This is torch, but with cat that resizes tensors to appropriate dimensions if they do not match;
#     this makes it possible to create pictures with dimensions that are multiples of 8 rather than 64
#     """
#
#     def __getattr__(self, item):
#         if item == 'cat':
#             return self.cat
#
#         if hasattr(torch, item):
#             return getattr(torch, item)
#
#         raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'")
#
#     def cat(self, tensors, *args, **kwargs):
#         if len(tensors) == 2:
#             a, b = tensors
#             if a.shape[-2:] != b.shape[-2:]:
#                 a = torch.nn.functional.interpolate(a, b.shape[-2:], mode="nearest")
#
#             tensors = (a, b)
#
#         return torch.cat(tensors, *args, **kwargs)
#
#
# th = TorchHijackForUnet()
#
#
# # Below are monkey patches to enable upcasting a float16 UNet for float32 sampling
# def apply_model(orig_func, self, x_noisy, t, cond, **kwargs):
#     """Always make sure inputs to unet are in correct dtype."""
#     if isinstance(cond, dict):
#         for y in cond.keys():
#             if isinstance(cond[y], list):
#                 cond[y] = [x.to(devices.dtype_unet) if isinstance(x, torch.Tensor) else x for x in cond[y]]
#             else:
#                 cond[y] = cond[y].to(devices.dtype_unet) if isinstance(cond[y], torch.Tensor) else cond[y]
#
#     with devices.autocast():
#         result = orig_func(self, x_noisy.to(devices.dtype_unet), t.to(devices.dtype_unet), cond, **kwargs)
#         if devices.unet_needs_upcast:
#             return result.float()
#         else:
#             return result
#
#
# # Monkey patch to create timestep embed tensor on device, avoiding a block.
# def timestep_embedding(_, timesteps, dim, max_period=10000, repeat_only=False):
#     """
#     Create sinusoidal timestep embeddings.
#     :param timesteps: a 1-D Tensor of N indices, one per batch element.
#                       These may be fractional.
#     :param dim: the dimension of the output.
#     :param max_period: controls the minimum frequency of the embeddings.
#     :return: an [N x dim] Tensor of positional embeddings.
#     """
#     if not repeat_only:
#         half = dim // 2
#         freqs = torch.exp(
#             -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32, device=timesteps.device) / half
#         )
#         args = timesteps[:, None].float() * freqs[None]
#         embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
#         if dim % 2:
#             embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
#     else:
#         embedding = repeat(timesteps, 'b -> b d', d=dim)
#     return embedding
#
#
# # Monkey patch to SpatialTransformer removing unnecessary contiguous calls.
# # Prevents a lot of unnecessary aten::copy_ calls
# def spatial_transformer_forward(_, self, x: torch.Tensor, context=None):
#     # note: if no context is given, cross-attention defaults to self-attention
#     if not isinstance(context, list):
#         context = [context]
#     b, c, h, w = x.shape
#     x_in = x
#     x = self.norm(x)
#     if not self.use_linear:
#         x = self.proj_in(x)
#     x = x.permute(0, 2, 3, 1).reshape(b, h * w, c)
#     if self.use_linear:
#         x = self.proj_in(x)
#     for i, block in enumerate(self.transformer_blocks):
#         x = block(x, context=context[i])
#     if self.use_linear:
#         x = self.proj_out(x)
#     x = x.view(b, h, w, c).permute(0, 3, 1, 2)
#     if not self.use_linear:
#         x = self.proj_out(x)
#     return x + x_in
#
#
# class GELUHijack(torch.nn.GELU, torch.nn.Module):
#     def __init__(self, *args, **kwargs):
#         torch.nn.GELU.__init__(self, *args, **kwargs)
#     def forward(self, x):
#         if devices.unet_needs_upcast:
#             return torch.nn.GELU.forward(self.float(), x.float()).to(devices.dtype_unet)
#         else:
#             return torch.nn.GELU.forward(self, x)
#
#
# ddpm_edit_hijack = None
# def hijack_ddpm_edit():
#     global ddpm_edit_hijack
#     if not ddpm_edit_hijack:
#         CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.decode_first_stage', first_stage_sub, first_stage_cond)
#         CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.encode_first_stage', first_stage_sub, first_stage_cond)
#         ddpm_edit_hijack = CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.apply_model', apply_model)
#
#
# unet_needs_upcast = lambda *args, **kwargs: devices.unet_needs_upcast
# CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.apply_model', apply_model, unet_needs_upcast)
# CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', timestep_embedding)
# CondFunc('ldm.modules.attention.SpatialTransformer.forward', spatial_transformer_forward)
# CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', lambda orig_func, timesteps, *args, **kwargs: orig_func(timesteps, *args, **kwargs).to(torch.float32 if timesteps.dtype == torch.int64 else devices.dtype_unet), unet_needs_upcast)
#
# if version.parse(torch.__version__) <= version.parse("1.13.2") or torch.cuda.is_available():
#     CondFunc('ldm.modules.diffusionmodules.util.GroupNorm32.forward', lambda orig_func, self, *args, **kwargs: orig_func(self.float(), *args, **kwargs), unet_needs_upcast)
#     CondFunc('ldm.modules.attention.GEGLU.forward', lambda orig_func, self, x: orig_func(self.float(), x.float()).to(devices.dtype_unet), unet_needs_upcast)
#     CondFunc('open_clip.transformer.ResidualAttentionBlock.__init__', lambda orig_func, *args, **kwargs: kwargs.update({'act_layer': GELUHijack}) and False or orig_func(*args, **kwargs), lambda _, *args, **kwargs: kwargs.get('act_layer') is None or kwargs['act_layer'] == torch.nn.GELU)
#
# first_stage_cond = lambda _, self, *args, **kwargs: devices.unet_needs_upcast and self.model.diffusion_model.dtype == torch.float16
# first_stage_sub = lambda orig_func, self, x, **kwargs: orig_func(self, x.to(devices.dtype_vae), **kwargs)
# CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.decode_first_stage', first_stage_sub, first_stage_cond)
# CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.encode_first_stage', first_stage_sub, first_stage_cond)
# CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.get_first_stage_encoding', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).float(), first_stage_cond)
#
# CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.apply_model', apply_model)
# CondFunc('sgm.modules.diffusionmodules.wrappers.OpenAIWrapper.forward', apply_model)
#
#
# def timestep_embedding_cast_result(orig_func, timesteps, *args, **kwargs):
#     if devices.unet_needs_upcast and timesteps.dtype == torch.int64:
#         dtype = torch.float32
#     else:
#         dtype = devices.dtype_unet
#     return orig_func(timesteps, *args, **kwargs).to(dtype=dtype)
#
#
# CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', timestep_embedding_cast_result)
# CondFunc('sgm.modules.diffusionmodules.openaimodel.timestep_embedding', timestep_embedding_cast_result)

@@ -1,137 +1,137 @@
import os

import torch

from modules import shared, paths, sd_disable_initialization, devices

sd_configs_path = shared.sd_configs_path
sd_repo_configs_path = os.path.join(paths.paths['Stable Diffusion'], "configs", "stable-diffusion")
sd_xl_repo_configs_path = os.path.join(paths.paths['Stable Diffusion XL'], "configs", "inference")


config_default = shared.sd_default_config
# config_sd2 = os.path.join(sd_repo_configs_path, "v2-inference.yaml")
config_sd2v = os.path.join(sd_repo_configs_path, "v2-inference-v.yaml")
config_sd2_inpainting = os.path.join(sd_repo_configs_path, "v2-inpainting-inference.yaml")
config_sdxl = os.path.join(sd_xl_repo_configs_path, "sd_xl_base.yaml")
config_sdxl_refiner = os.path.join(sd_xl_repo_configs_path, "sd_xl_refiner.yaml")
config_sdxl_inpainting = os.path.join(sd_configs_path, "sd_xl_inpaint.yaml")
config_depth_model = os.path.join(sd_repo_configs_path, "v2-midas-inference.yaml")
config_unclip = os.path.join(sd_repo_configs_path, "v2-1-stable-unclip-l-inference.yaml")
config_unopenclip = os.path.join(sd_repo_configs_path, "v2-1-stable-unclip-h-inference.yaml")
config_inpainting = os.path.join(sd_configs_path, "v1-inpainting-inference.yaml")
config_instruct_pix2pix = os.path.join(sd_configs_path, "instruct-pix2pix.yaml")
config_alt_diffusion = os.path.join(sd_configs_path, "alt-diffusion-inference.yaml")
config_alt_diffusion_m18 = os.path.join(sd_configs_path, "alt-diffusion-m18-inference.yaml")
config_sd3 = os.path.join(sd_configs_path, "sd3-inference.yaml")


def is_using_v_parameterization_for_sd2(state_dict):
    """
    Detects whether unet in state_dict is using v-parameterization. Returns True if it is. You're welcome.
    """

    import ldm.modules.diffusionmodules.openaimodel

    device = devices.device

    with sd_disable_initialization.DisableInitialization():
        unet = ldm.modules.diffusionmodules.openaimodel.UNetModel(
            use_checkpoint=False,
            use_fp16=False,
            image_size=32,
            in_channels=4,
            out_channels=4,
            model_channels=320,
            attention_resolutions=[4, 2, 1],
            num_res_blocks=2,
            channel_mult=[1, 2, 4, 4],
            num_head_channels=64,
            use_spatial_transformer=True,
            use_linear_in_transformer=True,
            transformer_depth=1,
            context_dim=1024,
            legacy=False
        )
        unet.eval()

    with torch.no_grad():
        unet_sd = {k.replace("model.diffusion_model.", ""): v for k, v in state_dict.items() if "model.diffusion_model." in k}
        unet.load_state_dict(unet_sd, strict=True)
        unet.to(device=device, dtype=devices.dtype_unet)

        test_cond = torch.ones((1, 2, 1024), device=device) * 0.5
        x_test = torch.ones((1, 4, 8, 8), device=device) * 0.5

        with devices.autocast():
            out = (unet(x_test, torch.asarray([999], device=device), context=test_cond) - x_test).mean().cpu().item()

    return out < -1


def guess_model_config_from_state_dict(sd, filename):
    sd2_cond_proj_weight = sd.get('cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight', None)
    diffusion_model_input = sd.get('model.diffusion_model.input_blocks.0.0.weight', None)
    sd2_variations_weight = sd.get('embedder.model.ln_final.weight', None)

    if "model.diffusion_model.x_embedder.proj.weight" in sd:
        return config_sd3

    if sd.get('conditioner.embedders.1.model.ln_final.weight', None) is not None:
        if diffusion_model_input.shape[1] == 9:
            return config_sdxl_inpainting
        else:
            return config_sdxl

    if sd.get('conditioner.embedders.0.model.ln_final.weight', None) is not None:
        return config_sdxl_refiner
    elif sd.get('depth_model.model.pretrained.act_postprocess3.0.project.0.bias', None) is not None:
        return config_depth_model
    elif sd2_variations_weight is not None and sd2_variations_weight.shape[0] == 768:
        return config_unclip
    elif sd2_variations_weight is not None and sd2_variations_weight.shape[0] == 1024:
        return config_unopenclip

    if sd2_cond_proj_weight is not None and sd2_cond_proj_weight.shape[1] == 1024:
        if diffusion_model_input.shape[1] == 9:
            return config_sd2_inpainting
        # elif is_using_v_parameterization_for_sd2(sd):
        #     return config_sd2v
        else:
            return config_sd2v

    if diffusion_model_input is not None:
        if diffusion_model_input.shape[1] == 9:
            return config_inpainting
        if diffusion_model_input.shape[1] == 8:
            return config_instruct_pix2pix

    if sd.get('cond_stage_model.roberta.embeddings.word_embeddings.weight', None) is not None:
        if sd.get('cond_stage_model.transformation.weight').size()[0] == 1024:
            return config_alt_diffusion_m18
        return config_alt_diffusion

    return config_default


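All of these branches identify an architecture purely by probing key names and tensor shapes in the state dict; for example, a 9-channel first conv marks an inpainting UNet (commonly described as 4 latent + 4 masked-latent + 1 mask channels, an assumption about the breakdown). In isolation:

import torch

state_dict = {'model.diffusion_model.input_blocks.0.0.weight': torch.zeros(320, 9, 3, 3)}
w = state_dict.get('model.diffusion_model.input_blocks.0.0.weight')
if w is not None and w.shape[1] == 9:
    print("inpainting checkpoint")  # same test the branches above apply
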
def find_checkpoint_config(state_dict, info):
    if info is None:
        return guess_model_config_from_state_dict(state_dict, "")

    config = find_checkpoint_config_near_filename(info)
    if config is not None:
        return config

    return guess_model_config_from_state_dict(state_dict, info.filename)


def find_checkpoint_config_near_filename(info):
    if info is None:
        return None

    config = f"{os.path.splitext(info.filename)[0]}.yaml"
    if os.path.exists(config):
        return config

    return None

# import os
#
# import torch
#
# from modules import shared, paths, sd_disable_initialization, devices
#
# sd_configs_path = shared.sd_configs_path
# sd_repo_configs_path = os.path.join(paths.paths['Stable Diffusion'], "configs", "stable-diffusion")
# sd_xl_repo_configs_path = os.path.join(paths.paths['Stable Diffusion XL'], "configs", "inference")
#
#
# config_default = shared.sd_default_config
# # config_sd2 = os.path.join(sd_repo_configs_path, "v2-inference.yaml")
# config_sd2v = os.path.join(sd_repo_configs_path, "v2-inference-v.yaml")
# config_sd2_inpainting = os.path.join(sd_repo_configs_path, "v2-inpainting-inference.yaml")
# config_sdxl = os.path.join(sd_xl_repo_configs_path, "sd_xl_base.yaml")
# config_sdxl_refiner = os.path.join(sd_xl_repo_configs_path, "sd_xl_refiner.yaml")
# config_sdxl_inpainting = os.path.join(sd_configs_path, "sd_xl_inpaint.yaml")
# config_depth_model = os.path.join(sd_repo_configs_path, "v2-midas-inference.yaml")
# config_unclip = os.path.join(sd_repo_configs_path, "v2-1-stable-unclip-l-inference.yaml")
# config_unopenclip = os.path.join(sd_repo_configs_path, "v2-1-stable-unclip-h-inference.yaml")
# config_inpainting = os.path.join(sd_configs_path, "v1-inpainting-inference.yaml")
# config_instruct_pix2pix = os.path.join(sd_configs_path, "instruct-pix2pix.yaml")
# config_alt_diffusion = os.path.join(sd_configs_path, "alt-diffusion-inference.yaml")
# config_alt_diffusion_m18 = os.path.join(sd_configs_path, "alt-diffusion-m18-inference.yaml")
# config_sd3 = os.path.join(sd_configs_path, "sd3-inference.yaml")
#
#
# def is_using_v_parameterization_for_sd2(state_dict):
#     """
#     Detects whether unet in state_dict is using v-parameterization. Returns True if it is. You're welcome.
#     """
#
#     import ldm.modules.diffusionmodules.openaimodel
#
#     device = devices.device
#
#     with sd_disable_initialization.DisableInitialization():
#         unet = ldm.modules.diffusionmodules.openaimodel.UNetModel(
#             use_checkpoint=False,
#             use_fp16=False,
#             image_size=32,
#             in_channels=4,
#             out_channels=4,
#             model_channels=320,
#             attention_resolutions=[4, 2, 1],
#             num_res_blocks=2,
#             channel_mult=[1, 2, 4, 4],
#             num_head_channels=64,
#             use_spatial_transformer=True,
#             use_linear_in_transformer=True,
#             transformer_depth=1,
#             context_dim=1024,
#             legacy=False
#         )
#         unet.eval()
#
#     with torch.no_grad():
#         unet_sd = {k.replace("model.diffusion_model.", ""): v for k, v in state_dict.items() if "model.diffusion_model." in k}
#         unet.load_state_dict(unet_sd, strict=True)
#         unet.to(device=device, dtype=devices.dtype_unet)
#
#         test_cond = torch.ones((1, 2, 1024), device=device) * 0.5
#         x_test = torch.ones((1, 4, 8, 8), device=device) * 0.5
#
#         with devices.autocast():
#             out = (unet(x_test, torch.asarray([999], device=device), context=test_cond) - x_test).mean().cpu().item()
#
#     return out < -1
#
#
# def guess_model_config_from_state_dict(sd, filename):
#     sd2_cond_proj_weight = sd.get('cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight', None)
#     diffusion_model_input = sd.get('model.diffusion_model.input_blocks.0.0.weight', None)
#     sd2_variations_weight = sd.get('embedder.model.ln_final.weight', None)
#
#     if "model.diffusion_model.x_embedder.proj.weight" in sd:
#         return config_sd3
#
#     if sd.get('conditioner.embedders.1.model.ln_final.weight', None) is not None:
#         if diffusion_model_input.shape[1] == 9:
#             return config_sdxl_inpainting
#         else:
#             return config_sdxl
#
#     if sd.get('conditioner.embedders.0.model.ln_final.weight', None) is not None:
#         return config_sdxl_refiner
#     elif sd.get('depth_model.model.pretrained.act_postprocess3.0.project.0.bias', None) is not None:
#         return config_depth_model
#     elif sd2_variations_weight is not None and sd2_variations_weight.shape[0] == 768:
#         return config_unclip
#     elif sd2_variations_weight is not None and sd2_variations_weight.shape[0] == 1024:
#         return config_unopenclip
#
#     if sd2_cond_proj_weight is not None and sd2_cond_proj_weight.shape[1] == 1024:
#         if diffusion_model_input.shape[1] == 9:
#             return config_sd2_inpainting
#         # elif is_using_v_parameterization_for_sd2(sd):
#         #     return config_sd2v
#         else:
#             return config_sd2v
#
#     if diffusion_model_input is not None:
#         if diffusion_model_input.shape[1] == 9:
#             return config_inpainting
#         if diffusion_model_input.shape[1] == 8:
#             return config_instruct_pix2pix
#
#     if sd.get('cond_stage_model.roberta.embeddings.word_embeddings.weight', None) is not None:
#         if sd.get('cond_stage_model.transformation.weight').size()[0] == 1024:
#             return config_alt_diffusion_m18
#         return config_alt_diffusion
#
#     return config_default
#
#
# def find_checkpoint_config(state_dict, info):
#     if info is None:
#         return guess_model_config_from_state_dict(state_dict, "")
#
#     config = find_checkpoint_config_near_filename(info)
#     if config is not None:
#         return config
#
#     return guess_model_config_from_state_dict(state_dict, info.filename)
#
#
# def find_checkpoint_config_near_filename(info):
#     if info is None:
#         return None
#
#     config = f"{os.path.splitext(info.filename)[0]}.yaml"
#     if os.path.exists(config):
#         return config
#
#     return None
#

@@ -1,115 +1,115 @@
from __future__ import annotations

import torch

import sgm.models.diffusion
import sgm.modules.diffusionmodules.denoiser_scaling
import sgm.modules.diffusionmodules.discretizer
from modules import devices, shared, prompt_parser
from modules import torch_utils

from backend import memory_management


def get_learned_conditioning(self: sgm.models.diffusion.DiffusionEngine, batch: prompt_parser.SdConditioning | list[str]):

    for embedder in self.conditioner.embedders:
        embedder.ucg_rate = 0.0

    width = getattr(batch, 'width', 1024) or 1024
    height = getattr(batch, 'height', 1024) or 1024
    is_negative_prompt = getattr(batch, 'is_negative_prompt', False)
    aesthetic_score = shared.opts.sdxl_refiner_low_aesthetic_score if is_negative_prompt else shared.opts.sdxl_refiner_high_aesthetic_score

    devices_args = dict(device=self.forge_objects.clip.patcher.current_device, dtype=memory_management.text_encoder_dtype())

    sdxl_conds = {
        "txt": batch,
        "original_size_as_tuple": torch.tensor([height, width], **devices_args).repeat(len(batch), 1),
        "crop_coords_top_left": torch.tensor([shared.opts.sdxl_crop_top, shared.opts.sdxl_crop_left], **devices_args).repeat(len(batch), 1),
        "target_size_as_tuple": torch.tensor([height, width], **devices_args).repeat(len(batch), 1),
        "aesthetic_score": torch.tensor([aesthetic_score], **devices_args).repeat(len(batch), 1),
    }

    force_zero_negative_prompt = is_negative_prompt and all(x == '' for x in batch)
    c = self.conditioner(sdxl_conds, force_zero_embeddings=['txt'] if force_zero_negative_prompt else [])

    return c


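Each micro-conditioning entry is one row per prompt in the batch; for two prompts at 1024x1024 the tensors come out like this (device/dtype arguments omitted for the sketch):

import torch

batch_len, height, width = 2, 1024, 1024
original_size = torch.tensor([height, width]).repeat(batch_len, 1)  # shape [2, 2], one (H, W) row per prompt
crop_coords = torch.tensor([0, 0]).repeat(batch_len, 1)             # top-left crop per prompt
print(original_size.shape, crop_coords.shape)                       # torch.Size([2, 2]) torch.Size([2, 2])
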
def apply_model(self: sgm.models.diffusion.DiffusionEngine, x, t, cond, *args, **kwargs):
    if self.model.diffusion_model.in_channels == 9:
        x = torch.cat([x] + cond['c_concat'], dim=1)

    return self.model(x, t, cond, *args, **kwargs)


def get_first_stage_encoding(self, x):  # SDXL's encode_first_stage does everything so get_first_stage_encoding is just there for compatibility
    return x


sgm.models.diffusion.DiffusionEngine.get_learned_conditioning = get_learned_conditioning
sgm.models.diffusion.DiffusionEngine.apply_model = apply_model
sgm.models.diffusion.DiffusionEngine.get_first_stage_encoding = get_first_stage_encoding


def encode_embedding_init_text(self: sgm.modules.GeneralConditioner, init_text, nvpt):
    res = []

    for embedder in [embedder for embedder in self.embedders if hasattr(embedder, 'encode_embedding_init_text')]:
        encoded = embedder.encode_embedding_init_text(init_text, nvpt)
        res.append(encoded)

    return torch.cat(res, dim=1)


def tokenize(self: sgm.modules.GeneralConditioner, texts):
    for embedder in [embedder for embedder in self.embedders if hasattr(embedder, 'tokenize')]:
        return embedder.tokenize(texts)

    raise AssertionError('no tokenizer available')


def process_texts(self, texts):
    for embedder in [embedder for embedder in self.embedders if hasattr(embedder, 'process_texts')]:
        return embedder.process_texts(texts)


def get_target_prompt_token_count(self, token_count):
    for embedder in [embedder for embedder in self.embedders if hasattr(embedder, 'get_target_prompt_token_count')]:
        return embedder.get_target_prompt_token_count(token_count)


# those additions to GeneralConditioner make it possible to use it as model.cond_stage_model from SD1.5 in exist
sgm.modules.GeneralConditioner.encode_embedding_init_text = encode_embedding_init_text
sgm.modules.GeneralConditioner.tokenize = tokenize
sgm.modules.GeneralConditioner.process_texts = process_texts
sgm.modules.GeneralConditioner.get_target_prompt_token_count = get_target_prompt_token_count


def extend_sdxl(model):
    """this adds a bunch of parameters to make SDXL model look a bit more like SD1.5 to the rest of the codebase."""

    dtype = torch_utils.get_param(model.model.diffusion_model).dtype
    model.model.diffusion_model.dtype = dtype
    model.model.conditioning_key = 'crossattn'
    model.cond_stage_key = 'txt'
    # model.cond_stage_model will be set in sd_hijack

    model.parameterization = "v" if isinstance(model.denoiser.scaling, sgm.modules.diffusionmodules.denoiser_scaling.VScaling) else "eps"

    discretization = sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization()
    model.alphas_cumprod = torch.asarray(discretization.alphas_cumprod, device=devices.device, dtype=torch.float32)

    model.conditioner.wrapped = torch.nn.Module()


sgm.modules.attention.print = shared.ldm_print
sgm.modules.diffusionmodules.model.print = shared.ldm_print
sgm.modules.diffusionmodules.openaimodel.print = shared.ldm_print
sgm.modules.encoders.modules.print = shared.ldm_print

# this gets the code to load the vanilla attention that we override
sgm.modules.attention.SDP_IS_AVAILABLE = True
sgm.modules.attention.XFORMERS_IS_AVAILABLE = False
# from __future__ import annotations
#
# import torch
#
# import sgm.models.diffusion
# import sgm.modules.diffusionmodules.denoiser_scaling
# import sgm.modules.diffusionmodules.discretizer
# from modules import devices, shared, prompt_parser
# from modules import torch_utils
#
# from backend import memory_management
#
#
# def get_learned_conditioning(self: sgm.models.diffusion.DiffusionEngine, batch: prompt_parser.SdConditioning | list[str]):
#
#     for embedder in self.conditioner.embedders:
#         embedder.ucg_rate = 0.0
#
#     width = getattr(batch, 'width', 1024) or 1024
#     height = getattr(batch, 'height', 1024) or 1024
#     is_negative_prompt = getattr(batch, 'is_negative_prompt', False)
#     aesthetic_score = shared.opts.sdxl_refiner_low_aesthetic_score if is_negative_prompt else shared.opts.sdxl_refiner_high_aesthetic_score
#
#     devices_args = dict(device=self.forge_objects.clip.patcher.current_device, dtype=memory_management.text_encoder_dtype())
#
#     sdxl_conds = {
#         "txt": batch,
#         "original_size_as_tuple": torch.tensor([height, width], **devices_args).repeat(len(batch), 1),
#         "crop_coords_top_left": torch.tensor([shared.opts.sdxl_crop_top, shared.opts.sdxl_crop_left], **devices_args).repeat(len(batch), 1),
#         "target_size_as_tuple": torch.tensor([height, width], **devices_args).repeat(len(batch), 1),
#         "aesthetic_score": torch.tensor([aesthetic_score], **devices_args).repeat(len(batch), 1),
#     }
#
#     force_zero_negative_prompt = is_negative_prompt and all(x == '' for x in batch)
#     c = self.conditioner(sdxl_conds, force_zero_embeddings=['txt'] if force_zero_negative_prompt else [])
#
#     return c
#
#
# def apply_model(self: sgm.models.diffusion.DiffusionEngine, x, t, cond, *args, **kwargs):
#     if self.model.diffusion_model.in_channels == 9:
#         x = torch.cat([x] + cond['c_concat'], dim=1)
#
#     return self.model(x, t, cond, *args, **kwargs)
#
#
# def get_first_stage_encoding(self, x):  # SDXL's encode_first_stage does everything so get_first_stage_encoding is just there for compatibility
#     return x
#
#
# sgm.models.diffusion.DiffusionEngine.get_learned_conditioning = get_learned_conditioning
# sgm.models.diffusion.DiffusionEngine.apply_model = apply_model
# sgm.models.diffusion.DiffusionEngine.get_first_stage_encoding = get_first_stage_encoding
#
#
# def encode_embedding_init_text(self: sgm.modules.GeneralConditioner, init_text, nvpt):
#     res = []
#
#     for embedder in [embedder for embedder in self.embedders if hasattr(embedder, 'encode_embedding_init_text')]:
#         encoded = embedder.encode_embedding_init_text(init_text, nvpt)
#         res.append(encoded)
#
#     return torch.cat(res, dim=1)
#
#
# def tokenize(self: sgm.modules.GeneralConditioner, texts):
#     for embedder in [embedder for embedder in self.embedders if hasattr(embedder, 'tokenize')]:
#         return embedder.tokenize(texts)
#
#     raise AssertionError('no tokenizer available')
#
#
#
# def process_texts(self, texts):
#     for embedder in [embedder for embedder in self.embedders if hasattr(embedder, 'process_texts')]:
#         return embedder.process_texts(texts)
#
#
# def get_target_prompt_token_count(self, token_count):
#     for embedder in [embedder for embedder in self.embedders if hasattr(embedder, 'get_target_prompt_token_count')]:
#         return embedder.get_target_prompt_token_count(token_count)
#
#
# # those additions to GeneralConditioner make it possible to use it as model.cond_stage_model from SD1.5 in exist
# sgm.modules.GeneralConditioner.encode_embedding_init_text = encode_embedding_init_text
# sgm.modules.GeneralConditioner.tokenize = tokenize
# sgm.modules.GeneralConditioner.process_texts = process_texts
# sgm.modules.GeneralConditioner.get_target_prompt_token_count = get_target_prompt_token_count
#
#
# def extend_sdxl(model):
#     """this adds a bunch of parameters to make SDXL model look a bit more like SD1.5 to the rest of the codebase."""
#
#     dtype = torch_utils.get_param(model.model.diffusion_model).dtype
#     model.model.diffusion_model.dtype = dtype
#     model.model.conditioning_key = 'crossattn'
#     model.cond_stage_key = 'txt'
#     # model.cond_stage_model will be set in sd_hijack
#
#     model.parameterization = "v" if isinstance(model.denoiser.scaling, sgm.modules.diffusionmodules.denoiser_scaling.VScaling) else "eps"
#
#     discretization = sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization()
#     model.alphas_cumprod = torch.asarray(discretization.alphas_cumprod, device=devices.device, dtype=torch.float32)
#
#     model.conditioner.wrapped = torch.nn.Module()
#
#
# sgm.modules.attention.print = shared.ldm_print
# sgm.modules.diffusionmodules.model.print = shared.ldm_print
# sgm.modules.diffusionmodules.openaimodel.print = shared.ldm_print
# sgm.modules.encoders.modules.print = shared.ldm_print
#
# # this gets the code to load the vanilla attention that we override
# sgm.modules.attention.SDP_IS_AVAILABLE = True
# sgm.modules.attention.XFORMERS_IS_AVAILABLE = False

@@ -35,9 +35,7 @@ def refresh_vae_list():


def cross_attention_optimizations():
    import modules.sd_hijack

    return ["Automatic"] + [x.title() for x in modules.sd_hijack.optimizers] + ["None"]
    return ["Automatic"]


def sd_unet_items():
