Mirror of https://github.com/ostris/ai-toolkit.git, synced 2026-04-29 02:31:17 +00:00
Complete rework of how slider training works, heavily optimized. The entire algorithm can now run in 1 batch, using less than a quarter of the VRAM it used to take.
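To see why this saves memory (a toy, self-contained sketch, not code from this commit: a plain nn.Linear stands in for a LoRA branch and the multiplier values are invented), compare one forward pass per multiplier against a single batched pass with a per-item multiplier broadcast:

import torch
import torch.nn as nn

lora_branch = nn.Linear(8, 8, bias=False)   # stand-in for a LoRA projection
x = torch.randn(6, 8)                       # 3 negative + 3 positive items in one batch
mults = [1.0, 1.0, 1.0, -1.0, -1.0, -1.0]   # one multiplier per batch item

# before: a separate forward pass per multiplier
outs_old = torch.cat([lora_branch(x[i:i + 1]) * m for i, m in enumerate(mults)])

# after: one batched forward pass; the multiplier broadcasts per item
outs_new = lora_branch(x) * torch.tensor(mults).view(-1, 1)

assert torch.allclose(outs_old, outs_new)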
@@ -108,6 +108,7 @@ class SliderConfig:
        self.resolutions: List[List[int]] = kwargs.get('resolutions', [[512, 512]])
        self.prompt_file: str = kwargs.get('prompt_file', None)
        self.prompt_tensors: str = kwargs.get('prompt_tensors', None)
        self.batch_full_slide: bool = kwargs.get('batch_full_slide', True)


class GenerateImageConfig:
@@ -1,6 +1,7 @@
import torch
import torch.nn as nn
import numpy as np
from torch.utils.checkpoint import checkpoint


class ReductionKernel(nn.Module):
@@ -29,3 +30,15 @@ class ReductionKernel(nn.Module):

    def forward(self, x):
        return nn.functional.conv2d(x, self.kernel, stride=self.kernel_size, padding=0, groups=1)


class CheckpointGradients(nn.Module):
    def __init__(self, is_gradient_checkpointing=True):
        super(CheckpointGradients, self).__init__()
        self.is_gradient_checkpointing = is_gradient_checkpointing

    def forward(self, module, *args, num_chunks=1):
        # num_chunks is currently unused; torch's checkpoint() takes no such argument
        if self.is_gradient_checkpointing:
            # recompute the module's activations during backward to save memory
            return checkpoint(module, *args)
        else:
            return module(*args)
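A minimal usage sketch for CheckpointGradients (the toy module and shapes below are assumptions, not part of the commit):

import torch
import torch.nn as nn

block = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 16))
ckpt = CheckpointGradients(is_gradient_checkpointing=True)

x = torch.randn(4, 16, requires_grad=True)
y = ckpt(block, x)      # activations inside block are recomputed during backward
y.sum().backward()      # gradients flow as usual, with lower peak memory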
@@ -1,4 +1,6 @@
import math
import os
import re
import sys
from typing import List, Optional, Dict, Type, Union

@@ -9,7 +11,170 @@ from .paths import SD_SCRIPTS_ROOT

sys.path.append(SD_SCRIPTS_ROOT)

from networks.lora import LoRANetwork, get_block_index

from torch.utils.checkpoint import checkpoint

RE_UPDOWN = re.compile(r"(up|down)_blocks_(\d+)_(resnets|upsamplers|downsamplers|attentions)_(\d+)_")

class LoRAModule(torch.nn.Module):
    """
    replaces forward method of the original Linear, instead of replacing the original Linear module.
    """

    def __init__(
        self,
        lora_name,
        org_module: torch.nn.Module,
        multiplier=1.0,
        lora_dim=4,
        alpha=1,
        dropout=None,
        rank_dropout=None,
        module_dropout=None,
    ):
        """if alpha == 0 or None, alpha is rank (no scaling)."""
        super().__init__()
        self.lora_name = lora_name

        if org_module.__class__.__name__ == "Conv2d":
            in_dim = org_module.in_channels
            out_dim = org_module.out_channels
        else:
            in_dim = org_module.in_features
            out_dim = org_module.out_features

        # if limit_rank:
        #     self.lora_dim = min(lora_dim, in_dim, out_dim)
        #     if self.lora_dim != lora_dim:
        #         print(f"{lora_name} dim (rank) is changed to: {self.lora_dim}")
        # else:
        self.lora_dim = lora_dim

        if org_module.__class__.__name__ == "Conv2d":
            kernel_size = org_module.kernel_size
            stride = org_module.stride
            padding = org_module.padding
            self.lora_down = torch.nn.Conv2d(in_dim, self.lora_dim, kernel_size, stride, padding, bias=False)
            self.lora_up = torch.nn.Conv2d(self.lora_dim, out_dim, (1, 1), (1, 1), bias=False)
        else:
            self.lora_down = torch.nn.Linear(in_dim, self.lora_dim, bias=False)
            self.lora_up = torch.nn.Linear(self.lora_dim, out_dim, bias=False)

        if type(alpha) == torch.Tensor:
            alpha = alpha.detach().float().numpy()  # without casting, bf16 causes error
        alpha = self.lora_dim if alpha is None or alpha == 0 else alpha
        self.scale = alpha / self.lora_dim
        self.register_buffer("alpha", torch.tensor(alpha))  # can be treated as a constant

        # same as microsoft's
        torch.nn.init.kaiming_uniform_(self.lora_down.weight, a=math.sqrt(5))
        torch.nn.init.zeros_(self.lora_up.weight)

        self.multiplier: Union[float, List[float]] = multiplier
        self.org_module = org_module  # remove in applying
        self.dropout = dropout
        self.rank_dropout = rank_dropout
        self.module_dropout = module_dropout
        self.is_checkpointing = False
    def apply_to(self):
        self.org_forward = self.org_module.forward
        self.org_module.forward = self.forward
        del self.org_module

    # this allows us to set a different multiplier for each item in a batch,
    # letting us run positive and negative weights in the same batch.
    # really only useful for slider training for now
    def get_multiplier(self, lora_up):
        batch_size = lora_up.size(0)
        # the batch has all negative prompts first and all positive prompts second.
        # our multiplier list is per prompt pair, so we repeat it for the negative
        # and positive halves. if the batch holds more items than multipliers, it is
        # likely a batch size increase, so we interleave the multipliers to match
        if isinstance(self.multiplier, list):
            if len(self.multiplier) == 1:
                # single item, just return it
                return self.multiplier[0]
            else:
                # we have a list of multipliers, so we need to get the multiplier for this batch
                multiplier_tensor = torch.tensor(self.multiplier * 2).to(lora_up.device, dtype=lora_up.dtype)
                # num_interleaves is 1 if the batch size matches the multiplier list
                num_interleaves = (batch_size // 2) // len(self.multiplier)
                multiplier_tensor = multiplier_tensor.repeat_interleave(num_interleaves)

                # match lora_up rank
                if len(lora_up.size()) == 2:
                    multiplier_tensor = multiplier_tensor.view(-1, 1)
                elif len(lora_up.size()) == 3:
                    multiplier_tensor = multiplier_tensor.view(-1, 1, 1)
                elif len(lora_up.size()) == 4:
                    multiplier_tensor = multiplier_tensor.view(-1, 1, 1, 1)
                return multiplier_tensor

        else:
            return self.multiplier
    def _call_forward(self, x):
        # module dropout
        if self.module_dropout is not None and self.training:
            if torch.rand(1) < self.module_dropout:
                return 0.0  # added to original forward

        lx = self.lora_down(x)

        # normal dropout
        if self.dropout is not None and self.training:
            lx = torch.nn.functional.dropout(lx, p=self.dropout)

        # rank dropout
        if self.rank_dropout is not None and self.training:
            mask = torch.rand((lx.size(0), self.lora_dim), device=lx.device) > self.rank_dropout
            if len(lx.size()) == 3:
                mask = mask.unsqueeze(1)  # for Text Encoder
            elif len(lx.size()) == 4:
                mask = mask.unsqueeze(-1).unsqueeze(-1)  # for Conv2d
            lx = lx * mask

            # scaling for rank dropout: treat as if the rank is changed
            # this could be computed from the mask, but rank_dropout is used here
            # in the hope of an augmentation-like effect
            scale = self.scale * (1.0 / (1.0 - self.rank_dropout))  # redundant for readability
        else:
            scale = self.scale

        lx = self.lora_up(lx)

        multiplier = self.get_multiplier(lx)

        return lx * multiplier * scale
    def create_custom_forward(self):
        def custom_forward(*inputs):
            return self._call_forward(*inputs)

        return custom_forward

    def forward(self, x):
        org_forwarded = self.org_forward(x)
        # TODO this just loses the grad. Not sure why. Probably why no one else is doing it either
        # if torch.is_grad_enabled() and self.is_checkpointing and self.training:
        #     lora_output = checkpoint(
        #         self.create_custom_forward(),
        #         x,
        #     )
        # else:
        #     lora_output = self._call_forward(x)

        lora_output = self._call_forward(x)

        return org_forwarded + lora_output

    def enable_gradient_checkpointing(self):
        self.is_checkpointing = True

    def disable_gradient_checkpointing(self):
        self.is_checkpointing = False

class LoRASpecialNetwork(LoRANetwork):

@@ -70,6 +235,7 @@ class LoRASpecialNetwork(LoRANetwork):
        self.dropout = dropout
        self.rank_dropout = rank_dropout
        self.module_dropout = module_dropout
        self.is_checkpointing = False

        if modules_dim is not None:
            print(f"create LoRA network from weights")

@@ -236,14 +402,11 @@ class LoRASpecialNetwork(LoRANetwork):
        torch.save(state_dict, file)

    @property
    def multiplier(self) -> Union[float, List[float]]:
        return self._multiplier

    @multiplier.setter
    def multiplier(self, value: Union[float, List[float]]):
        self._multiplier = value
        self._update_lora_multiplier()

@@ -264,6 +427,8 @@ class LoRASpecialNetwork(LoRANetwork):
        for lora in self.text_encoder_loras:
            lora.multiplier = 0

    # called when the context manager is entered
    # ie: with network:
    def __enter__(self):
        self.is_active = True
        self._update_lora_multiplier()

@@ -281,3 +446,29 @@ class LoRASpecialNetwork(LoRANetwork):
        loras += self.text_encoder_loras
        for lora in loras:
            lora.to(device, dtype)

    def _update_checkpointing(self):
        if self.is_checkpointing:
            if hasattr(self, 'unet_loras'):
                for lora in self.unet_loras:
                    lora.enable_gradient_checkpointing()
            if hasattr(self, 'text_encoder_loras'):
                for lora in self.text_encoder_loras:
                    lora.enable_gradient_checkpointing()
        else:
            if hasattr(self, 'unet_loras'):
                for lora in self.unet_loras:
                    lora.disable_gradient_checkpointing()
            if hasattr(self, 'text_encoder_loras'):
                for lora in self.text_encoder_loras:
                    lora.disable_gradient_checkpointing()

    def enable_gradient_checkpointing(self):
        # checkpointing the LoRA branch itself is not supported (see LoRAModule.forward);
        # this just tracks the flag on the child modules
        self.is_checkpointing = True
        self._update_checkpointing()

    def disable_gradient_checkpointing(self):
        self.is_checkpointing = False
        self._update_checkpointing()
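A worked example of the multiplier expansion in LoRAModule.get_multiplier (standalone sketch; the list and batch size are illustrative):

import torch

multiplier = [1.0, -1.0]    # one multiplier per prompt pair
batch_size = 8              # 4 negative items followed by 4 positive items

multiplier_tensor = torch.tensor(multiplier * 2)          # repeat for the negative/positive halves
num_interleaves = (batch_size // 2) // len(multiplier)    # 2 here
multiplier_tensor = multiplier_tensor.repeat_interleave(num_interleaves)
print(multiplier_tensor)    # tensor([ 1.,  1., -1., -1.,  1.,  1., -1., -1.])
# viewed as (-1, 1, 1, 1), this broadcasts over conv activations of shape (8, C, H, W)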
toolkit/prompt_utils.py (new file, 387 lines)
@@ -0,0 +1,387 @@
import os
from typing import Optional, TYPE_CHECKING, List

import torch
from safetensors.torch import load_file, save_file
from tqdm import tqdm

from toolkit.stable_diffusion_model import PromptEmbeds
from toolkit.train_tools import get_torch_dtype


class ACTION_TYPES_SLIDER:
    ERASE_NEGATIVE = 0
    ENHANCE_NEGATIVE = 1


class EncodedPromptPair:
    def __init__(
        self,
        target_class,
        target_class_with_neutral,
        positive_target,
        positive_target_with_neutral,
        negative_target,
        negative_target_with_neutral,
        neutral,
        empty_prompt,
        both_targets,
        action=ACTION_TYPES_SLIDER.ERASE_NEGATIVE,
        action_list=None,
        multiplier=1.0,
        multiplier_list=None,
        weight=1.0
    ):
        self.target_class: PromptEmbeds = target_class
        self.target_class_with_neutral: PromptEmbeds = target_class_with_neutral
        self.positive_target: PromptEmbeds = positive_target
        self.positive_target_with_neutral: PromptEmbeds = positive_target_with_neutral
        self.negative_target: PromptEmbeds = negative_target
        self.negative_target_with_neutral: PromptEmbeds = negative_target_with_neutral
        self.neutral: PromptEmbeds = neutral
        self.empty_prompt: PromptEmbeds = empty_prompt
        self.both_targets: PromptEmbeds = both_targets
        self.multiplier: float = multiplier
        if multiplier_list is not None:
            self.multiplier_list: list[float] = multiplier_list
        else:
            self.multiplier_list: list[float] = [multiplier]
        self.action: int = action
        if action_list is not None:
            self.action_list: list[int] = action_list
        else:
            self.action_list: list[int] = [action]
        self.weight: float = weight
    # simulate torch .to() for the contained tensors
    def to(self, *args, **kwargs):
        self.target_class = self.target_class.to(*args, **kwargs)
        self.target_class_with_neutral = self.target_class_with_neutral.to(*args, **kwargs)
        self.positive_target = self.positive_target.to(*args, **kwargs)
        self.positive_target_with_neutral = self.positive_target_with_neutral.to(*args, **kwargs)
        self.negative_target = self.negative_target.to(*args, **kwargs)
        self.negative_target_with_neutral = self.negative_target_with_neutral.to(*args, **kwargs)
        self.neutral = self.neutral.to(*args, **kwargs)
        self.empty_prompt = self.empty_prompt.to(*args, **kwargs)
        self.both_targets = self.both_targets.to(*args, **kwargs)
        return self

def concat_prompt_embeds(prompt_embeds: list[PromptEmbeds]):
    text_embeds = torch.cat([p.text_embeds for p in prompt_embeds], dim=0)
    pooled_embeds = None
    if prompt_embeds[0].pooled_embeds is not None:
        pooled_embeds = torch.cat([p.pooled_embeds for p in prompt_embeds], dim=0)
    return PromptEmbeds([text_embeds, pooled_embeds])


def concat_prompt_pairs(prompt_pairs: list[EncodedPromptPair]):
    weight = prompt_pairs[0].weight
    target_class = concat_prompt_embeds([p.target_class for p in prompt_pairs])
    target_class_with_neutral = concat_prompt_embeds([p.target_class_with_neutral for p in prompt_pairs])
    positive_target = concat_prompt_embeds([p.positive_target for p in prompt_pairs])
    positive_target_with_neutral = concat_prompt_embeds([p.positive_target_with_neutral for p in prompt_pairs])
    negative_target = concat_prompt_embeds([p.negative_target for p in prompt_pairs])
    negative_target_with_neutral = concat_prompt_embeds([p.negative_target_with_neutral for p in prompt_pairs])
    neutral = concat_prompt_embeds([p.neutral for p in prompt_pairs])
    empty_prompt = concat_prompt_embeds([p.empty_prompt for p in prompt_pairs])
    both_targets = concat_prompt_embeds([p.both_targets for p in prompt_pairs])
    # combine all the lists
    action_list = []
    multiplier_list = []
    for p in prompt_pairs:
        action_list += p.action_list
        multiplier_list += p.multiplier_list
    return EncodedPromptPair(
        target_class=target_class,
        target_class_with_neutral=target_class_with_neutral,
        positive_target=positive_target,
        positive_target_with_neutral=positive_target_with_neutral,
        negative_target=negative_target,
        negative_target_with_neutral=negative_target_with_neutral,
        neutral=neutral,
        empty_prompt=empty_prompt,
        both_targets=both_targets,
        action_list=action_list,
        multiplier_list=multiplier_list,
        weight=weight
    )
def split_prompt_embeds(concatenated: PromptEmbeds, num_parts=None) -> List[PromptEmbeds]:
    if num_parts is None:
        # use batch size
        num_parts = concatenated.text_embeds.shape[0]
    text_embeds_splits = torch.chunk(concatenated.text_embeds, num_parts, dim=0)

    if concatenated.pooled_embeds is not None:
        pooled_embeds_splits = torch.chunk(concatenated.pooled_embeds, num_parts, dim=0)
    else:
        pooled_embeds_splits = [None] * num_parts

    prompt_embeds_list = [
        PromptEmbeds([text, pooled])
        for text, pooled in zip(text_embeds_splits, pooled_embeds_splits)
    ]

    return prompt_embeds_list


def split_prompt_pairs(concatenated: EncodedPromptPair, num_embeds=None) -> List[EncodedPromptPair]:
    target_class_splits = split_prompt_embeds(concatenated.target_class, num_embeds)
    target_class_with_neutral_splits = split_prompt_embeds(concatenated.target_class_with_neutral, num_embeds)
    positive_target_splits = split_prompt_embeds(concatenated.positive_target, num_embeds)
    positive_target_with_neutral_splits = split_prompt_embeds(concatenated.positive_target_with_neutral, num_embeds)
    negative_target_splits = split_prompt_embeds(concatenated.negative_target, num_embeds)
    negative_target_with_neutral_splits = split_prompt_embeds(concatenated.negative_target_with_neutral, num_embeds)
    neutral_splits = split_prompt_embeds(concatenated.neutral, num_embeds)
    empty_prompt_splits = split_prompt_embeds(concatenated.empty_prompt, num_embeds)
    both_targets_splits = split_prompt_embeds(concatenated.both_targets, num_embeds)

    prompt_pairs = []
    for i in range(len(target_class_splits)):
        action_list_split = concatenated.action_list[i::len(target_class_splits)]
        multiplier_list_split = concatenated.multiplier_list[i::len(target_class_splits)]

        prompt_pair = EncodedPromptPair(
            target_class=target_class_splits[i],
            target_class_with_neutral=target_class_with_neutral_splits[i],
            positive_target=positive_target_splits[i],
            positive_target_with_neutral=positive_target_with_neutral_splits[i],
            negative_target=negative_target_splits[i],
            negative_target_with_neutral=negative_target_with_neutral_splits[i],
            neutral=neutral_splits[i],
            empty_prompt=empty_prompt_splits[i],
            both_targets=both_targets_splits[i],
            action_list=action_list_split,
            multiplier_list=multiplier_list_split,
            weight=concatenated.weight
        )
        prompt_pairs.append(prompt_pair)

    return prompt_pairs
class PromptEmbedsCache:
    # note: defined at class level, so the cache dict is shared across instances
    prompts: dict[str, PromptEmbeds] = {}

    def __setitem__(self, __name: str, __value: PromptEmbeds) -> None:
        self.prompts[__name] = __value

    def __getitem__(self, __name: str) -> Optional[PromptEmbeds]:
        if __name in self.prompts:
            return self.prompts[__name]
        else:
            return None
class EncodedAnchor:
    def __init__(
        self,
        prompt,
        neg_prompt,
        multiplier=1.0,
        multiplier_list=None
    ):
        self.prompt = prompt
        self.neg_prompt = neg_prompt
        self.multiplier = multiplier

        if multiplier_list is not None:
            self.multiplier_list: list[float] = multiplier_list
        else:
            self.multiplier_list: list[float] = [multiplier]

    def to(self, *args, **kwargs):
        self.prompt = self.prompt.to(*args, **kwargs)
        self.neg_prompt = self.neg_prompt.to(*args, **kwargs)
        return self


def concat_anchors(anchors: list[EncodedAnchor]):
    prompt = concat_prompt_embeds([a.prompt for a in anchors])
    neg_prompt = concat_prompt_embeds([a.neg_prompt for a in anchors])
    return EncodedAnchor(
        prompt=prompt,
        neg_prompt=neg_prompt,
        multiplier_list=[a.multiplier for a in anchors]
    )


def split_anchors(concatenated: EncodedAnchor, num_anchors: int = 4) -> List[EncodedAnchor]:
    prompt_splits = split_prompt_embeds(concatenated.prompt, num_anchors)
    neg_prompt_splits = split_prompt_embeds(concatenated.neg_prompt, num_anchors)
    multiplier_list_splits = torch.chunk(torch.tensor(concatenated.multiplier_list), num_anchors)

    anchors = []
    for prompt, neg_prompt, multiplier in zip(prompt_splits, neg_prompt_splits, multiplier_list_splits):
        anchor = EncodedAnchor(
            prompt=prompt,
            neg_prompt=neg_prompt,
            multiplier=multiplier.tolist()
        )
        anchors.append(anchor)

    return anchors
if TYPE_CHECKING:
    from toolkit.stable_diffusion_model import StableDiffusion


@torch.no_grad()
def encode_prompts_to_cache(
    prompt_list: list[str],
    sd: "StableDiffusion",
    cache: Optional[PromptEmbedsCache] = None,
    prompt_tensor_file: Optional[str] = None,
) -> PromptEmbedsCache:
    # TODO: add support for larger prompts
    if cache is None:
        cache = PromptEmbedsCache()

    if prompt_tensor_file is not None:
        # check to see if it exists
        if os.path.exists(prompt_tensor_file):
            # load it
            print(f"Loading prompt tensors from {prompt_tensor_file}")
            prompt_tensors = load_file(prompt_tensor_file, device='cpu')
            # add them to the cache
            for prompt_txt, prompt_tensor in tqdm(prompt_tensors.items(), desc="Loading prompts", leave=False):
                if prompt_txt.startswith("te:"):
                    prompt = prompt_txt[3:]
                    # text_embeds
                    text_embeds = prompt_tensor
                    pooled_embeds = None
                    # find pooled embeds
                    if f"pe:{prompt}" in prompt_tensors:
                        pooled_embeds = prompt_tensors[f"pe:{prompt}"]

                    # build the prompt embeds
                    prompt_embeds = PromptEmbeds([text_embeds, pooled_embeds])
                    cache[prompt] = prompt_embeds.to(device='cpu', dtype=torch.float32)

    if len(cache.prompts) == 0:
        print("Prompt tensors not found. Encoding prompts...")
        empty_prompt = ""
        # encode the empty prompt
        cache[empty_prompt] = sd.encode_prompt(empty_prompt)

        for p in tqdm(prompt_list, desc="Encoding prompts", leave=False):
            # build the cache
            if cache[p] is None:
                cache[p] = sd.encode_prompt(p).to(device="cpu", dtype=torch.float16)

        # should we shard? It can get large
        if prompt_tensor_file:
            print(f"Saving prompt tensors to {prompt_tensor_file}")
            state_dict = {}
            for prompt_txt, prompt_embeds in cache.prompts.items():
                state_dict[f"te:{prompt_txt}"] = prompt_embeds.text_embeds.to(
                    "cpu", dtype=get_torch_dtype('fp16')
                )
                if prompt_embeds.pooled_embeds is not None:
                    state_dict[f"pe:{prompt_txt}"] = prompt_embeds.pooled_embeds.to(
                        "cpu",
                        dtype=get_torch_dtype('fp16')
                    )
            save_file(state_dict, prompt_tensor_file)

    return cache
if TYPE_CHECKING:
    from toolkit.config_modules import SliderTargetConfig


@torch.no_grad()
def build_prompt_pair_batch_from_cache(
    cache: PromptEmbedsCache,
    target: 'SliderTargetConfig',
    neutral: Optional[str] = '',
) -> list[EncodedPromptPair]:
    erase_negative = len(target.positive.strip()) == 0
    enhance_positive = len(target.negative.strip()) == 0

    both = not erase_negative and not enhance_positive

    prompt_pair_batch = []

    if both or erase_negative:
        print("Encoding erase negative")
        prompt_pair_batch += [
            # erase standard
            EncodedPromptPair(
                target_class=cache[target.target_class],
                target_class_with_neutral=cache[f"{target.target_class} {neutral}"],
                positive_target=cache[f"{target.positive}"],
                positive_target_with_neutral=cache[f"{target.positive} {neutral}"],
                negative_target=cache[f"{target.negative}"],
                negative_target_with_neutral=cache[f"{target.negative} {neutral}"],
                neutral=cache[neutral],
                action=ACTION_TYPES_SLIDER.ERASE_NEGATIVE,
                multiplier=target.multiplier,
                both_targets=cache[f"{target.positive} {target.negative}"],
                empty_prompt=cache[""],
                weight=target.weight
            ),
        ]
    if both or enhance_positive:
        print("Encoding enhance positive")
        prompt_pair_batch += [
            # enhance standard, swap pos neg
            EncodedPromptPair(
                target_class=cache[target.target_class],
                target_class_with_neutral=cache[f"{target.target_class} {neutral}"],
                positive_target=cache[f"{target.negative}"],
                positive_target_with_neutral=cache[f"{target.negative} {neutral}"],
                negative_target=cache[f"{target.positive}"],
                negative_target_with_neutral=cache[f"{target.positive} {neutral}"],
                neutral=cache[neutral],
                action=ACTION_TYPES_SLIDER.ENHANCE_NEGATIVE,
                multiplier=target.multiplier,
                both_targets=cache[f"{target.positive} {target.negative}"],
                empty_prompt=cache[""],
                weight=target.weight
            ),
        ]
    if both or enhance_positive:
        print("Encoding erase positive (inverse)")
        prompt_pair_batch += [
            # erase inverted
            EncodedPromptPair(
                target_class=cache[target.target_class],
                target_class_with_neutral=cache[f"{target.target_class} {neutral}"],
                positive_target=cache[f"{target.negative}"],
                positive_target_with_neutral=cache[f"{target.negative} {neutral}"],
                negative_target=cache[f"{target.positive}"],
                negative_target_with_neutral=cache[f"{target.positive} {neutral}"],
                neutral=cache[neutral],
                action=ACTION_TYPES_SLIDER.ERASE_NEGATIVE,
                both_targets=cache[f"{target.positive} {target.negative}"],
                empty_prompt=cache[""],
                multiplier=target.multiplier * -1.0,
                weight=target.weight
            ),
        ]
    if both or erase_negative:
        print("Encoding enhance negative (inverse)")
        prompt_pair_batch += [
            # enhance inverted
            EncodedPromptPair(
                target_class=cache[target.target_class],
                target_class_with_neutral=cache[f"{target.target_class} {neutral}"],
                positive_target=cache[f"{target.positive}"],
                positive_target_with_neutral=cache[f"{target.positive} {neutral}"],
                negative_target=cache[f"{target.negative}"],
                negative_target_with_neutral=cache[f"{target.negative} {neutral}"],
                both_targets=cache[f"{target.positive} {target.negative}"],
                neutral=cache[neutral],
                action=ACTION_TYPES_SLIDER.ENHANCE_NEGATIVE,
                empty_prompt=cache[""],
                multiplier=target.multiplier * -1.0,
                weight=target.weight
            ),
        ]

    return prompt_pair_batch
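A quick round-trip sketch for the helpers above (assumes the classes in this file are in scope; the tensor shapes are toy CLIP-like stand-ins, no real text encoder involved):

import torch

def toy_embeds():
    return PromptEmbeds([torch.randn(1, 77, 768), None])  # pooled embeds omitted, as for SD 1.x

kwargs = dict(
    target_class=toy_embeds(), target_class_with_neutral=toy_embeds(),
    positive_target=toy_embeds(), positive_target_with_neutral=toy_embeds(),
    negative_target=toy_embeds(), negative_target_with_neutral=toy_embeds(),
    neutral=toy_embeds(), empty_prompt=toy_embeds(), both_targets=toy_embeds(),
)
batch = concat_prompt_pairs([EncodedPromptPair(multiplier=1.0, **kwargs),
                             EncodedPromptPair(multiplier=-1.0, **kwargs)])
assert batch.target_class.text_embeds.shape[0] == 2
assert batch.multiplier_list == [1.0, -1.0]

pairs = split_prompt_pairs(batch)    # back to two single-item pairs
assert pairs[0].target_class.text_embeds.shape[0] == 1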
@@ -1,6 +1,6 @@
import gc
import typing
from typing import Union, OrderedDict, List, Tuple
import sys
import os

@@ -50,10 +50,10 @@ VAE_SCALE_FACTOR = 8  # 2 ** (len(vae.config.block_out_channels) - 1) = 8


class PromptEmbeds:
    text_embeds: torch.Tensor
    pooled_embeds: Union[torch.Tensor, None]

    def __init__(self, args: Union[Tuple[torch.Tensor], List[torch.Tensor], torch.Tensor]) -> None:
        if isinstance(args, list) or isinstance(args, tuple):
            # xl
            self.text_embeds = args[0]
@@ -139,12 +139,23 @@ class StableDiffusion:
            pipln = self.custom_pipeline
        else:
            pipln = CustomStableDiffusionXLPipeline

        # see if path exists
        if not os.path.exists(self.model_config.name_or_path):
            # try to load with default diffusers
            pipe = pipln.from_pretrained(
                self.model_config.name_or_path,
                dtype=dtype,
                scheduler_type='ddpm',
                device=self.device_torch,
            ).to(self.device_torch)
        else:
            pipe = pipln.from_single_file(
                self.model_config.name_or_path,
                dtype=dtype,
                scheduler_type='ddpm',
                device=self.device_torch,
            ).to(self.device_torch)

        text_encoders = [pipe.text_encoder, pipe.text_encoder_2]
        tokenizer = [pipe.tokenizer, pipe.tokenizer_2]
@@ -158,14 +169,27 @@ class StableDiffusion:
            pipln = self.custom_pipeline
        else:
            pipln = CustomStableDiffusionPipeline

        # see if path exists
        if not os.path.exists(self.model_config.name_or_path):
            # try to load with default diffusers
            pipe = pipln.from_pretrained(
                self.model_config.name_or_path,
                dtype=dtype,
                scheduler_type='dpm',
                device=self.device_torch,
                load_safety_checker=False,
                requires_safety_checker=False,
            ).to(self.device_torch)
        else:
            pipe = pipln.from_single_file(
                self.model_config.name_or_path,
                dtype=dtype,
                scheduler_type='dpm',
                device=self.device_torch,
                load_safety_checker=False,
                requires_safety_checker=False,
            ).to(self.device_torch)
        pipe.register_to_config(requires_safety_checker=False)
        text_encoder = pipe.text_encoder
        text_encoder.to(self.device_torch, dtype=dtype)
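Both loading hunks follow the same fallback pattern; distilled into a standalone sketch (load_pipeline is a hypothetical helper, pipln stands for any diffusers pipeline class):

import os

def load_pipeline(pipln, name_or_path, **kwargs):
    # hub model IDs do not exist on disk, so fall back to from_pretrained;
    # local checkpoint files still load via from_single_file
    if not os.path.exists(name_or_path):
        return pipln.from_pretrained(name_or_path, **kwargs)
    return pipln.from_single_file(name_or_path, **kwargs)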