mirror of https://github.com/ostris/ai-toolkit.git (synced 2026-04-30 19:21:39 +00:00)
Added LoCON from LyCORIS
@@ -9,6 +9,7 @@ from typing import List, Optional, Dict, Type, Union
import torch
from transformers import CLIPTextModel

from .network_mixins import ToolkitNetworkMixin, ToolkitModuleMixin
from .paths import SD_SCRIPTS_ROOT, KEYMAPS_ROOT
from .train_tools import get_torch_dtype

@@ -21,7 +22,7 @@ from torch.utils.checkpoint import checkpoint
RE_UPDOWN = re.compile(r"(up|down)_blocks_(\d+)_(resnets|upsamplers|downsamplers|attentions)_(\d+)_")


-class LoRAModule(torch.nn.Module):
+class LoRAModule(ToolkitModuleMixin, torch.nn.Module):
    """
    replaces forward method of the original Linear, instead of replacing the original Linear module.
    """

@@ -40,6 +41,7 @@ class LoRAModule(torch.nn.Module):
        """if alpha == 0 or None, alpha is rank (no scaling)."""
        super().__init__()
        self.lora_name = lora_name
        self.scalar = torch.tensor(1.0)

        if org_module.__class__.__name__ == "Conv2d":
            in_dim = org_module.in_channels

@@ -89,153 +91,8 @@ class LoRAModule(torch.nn.Module):
        self.org_module.forward = self.forward
        del self.org_module

    # this allows us to set a different multiplier for each item in a batch,
    # allowing us to run positive and negative weights in the same batch
    # really only useful for slider training for now
    def get_multiplier(self, lora_up):
        with torch.no_grad():
            batch_size = lora_up.size(0)
            # batch will have all negative prompts first and positive prompts second
            # our multiplier list is for a prompt pair, so we need to repeat it for positive and negative prompts
            # if the batch is larger than our multiplier list, it is likely a batch size increase,
            # so we need to interleave the multipliers
            if isinstance(self.multiplier, list):
                if len(self.multiplier) == 1:
                    # single item, just return it
                    return self.multiplier[0]
                elif len(self.multiplier) == batch_size:
                    # not doing CFG
                    multiplier_tensor = torch.tensor(self.multiplier).to(lora_up.device, dtype=lora_up.dtype)
                else:

                    # we have a list of multipliers, so we need to get the multiplier for this batch
                    multiplier_tensor = torch.tensor(self.multiplier * 2).to(lora_up.device, dtype=lora_up.dtype)
                    # should be 1 if the total batch size was 1
                    num_interleaves = (batch_size // 2) // len(self.multiplier)
                    multiplier_tensor = multiplier_tensor.repeat_interleave(num_interleaves)

                # match lora_up rank
                if len(lora_up.size()) == 2:
                    multiplier_tensor = multiplier_tensor.view(-1, 1)
                elif len(lora_up.size()) == 3:
                    multiplier_tensor = multiplier_tensor.view(-1, 1, 1)
                elif len(lora_up.size()) == 4:
                    multiplier_tensor = multiplier_tensor.view(-1, 1, 1, 1)
                return multiplier_tensor.detach()

            else:
                return self.multiplier
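
    # Illustrative note (not part of the original file): with CFG the batch holds all
    # negative prompts first and all positive prompts second, so a per-pair multiplier
    # list is doubled and then interleaved to line up with the batch. For example, with
    # self.multiplier = [0.5, -0.5] and batch_size = 8:
    #   doubled:          [0.5, -0.5, 0.5, -0.5]
    #   num_interleaves:  (8 // 2) // 2 = 2
    #   interleaved:      [0.5, 0.5, -0.5, -0.5, 0.5, 0.5, -0.5, -0.5]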

    def _call_forward(self, x):
        # module dropout
        if self.module_dropout is not None and self.training:
            if torch.rand(1) < self.module_dropout:
                return 0.0  # added to original forward

        lx = self.lora_down(x)

        # normal dropout
        if self.dropout is not None and self.training:
            lx = torch.nn.functional.dropout(lx, p=self.dropout)

        # rank dropout
        if self.rank_dropout is not None and self.training:
            mask = torch.rand((lx.size(0), self.lora_dim), device=lx.device) > self.rank_dropout
            if len(lx.size()) == 3:
                mask = mask.unsqueeze(1)  # for Text Encoder
            elif len(lx.size()) == 4:
                mask = mask.unsqueeze(-1).unsqueeze(-1)  # for Conv2d
            lx = lx * mask

            # scaling for rank dropout: treat as if the rank is changed
            # this could also be computed from the mask, but rank_dropout is used here in the hope of an augmentation-like effect
            scale = self.scale * (1.0 / (1.0 - self.rank_dropout))  # redundant for readability
        else:
            scale = self.scale

        lx = self.lora_up(lx)

        return lx * scale
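
    # Illustrative note (not part of the original file): the value returned above is the LoRA
    # delta lora_up(lora_down(x)) * scale; in kohya-style modules self.scale is alpha / rank,
    # so per the __init__ docstring ("if alpha == 0 or None, alpha is rank") alpha == rank
    # gives a scale of 1.0.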

    def forward(self, x):
        org_forwarded = self.org_forward(x)
        lora_output = self._call_forward(x)
        multiplier = self.get_multiplier(lora_output)

        if self.is_normalizing:
            with torch.no_grad():
                # do this calculation without the set multiplier; use the same polarity, but with a 1.0 multiplier
                if isinstance(multiplier, torch.Tensor):
                    norm_multiplier = multiplier.clone().detach() * 10
                    norm_multiplier = norm_multiplier.clamp(min=-1.0, max=1.0)
                else:
                    norm_multiplier = multiplier

                # get a dim array from orig forward that has the index of all dimensions except the batch and channel

                # Calculate the target magnitude for the combined output
                orig_max = torch.max(torch.abs(org_forwarded))

                # Calculate the additional increase in magnitude that lora_output would introduce
                potential_max_increase = torch.max(
                    torch.abs(org_forwarded + lora_output * norm_multiplier) - torch.abs(org_forwarded)
                )

                epsilon = 1e-6  # Small constant to avoid division by zero

                # Calculate the scaling factor for the lora_output
                # to ensure that the potential increase in magnitude doesn't change the original max
                normalize_scaler = orig_max / (orig_max + potential_max_increase + epsilon)
                normalize_scaler = normalize_scaler.detach()

                # save the scaler so it can be applied later
                self.normalize_scaler = normalize_scaler.clone().detach()

            lora_output *= normalize_scaler

        return org_forwarded + (lora_output * multiplier)
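
    # Illustrative note (not part of the original file): normalize_scaler is
    # orig_max / (orig_max + potential_max_increase + epsilon). For example, if orig_max is 2.0
    # and the LoRA output would raise the peak magnitude by 0.5, the LoRA output is scaled by
    # roughly 2.0 / 2.5 = 0.8 so the combined peak stays near the original activation's peak.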

    def enable_gradient_checkpointing(self):
        self.is_checkpointing = True

    def disable_gradient_checkpointing(self):
        self.is_checkpointing = False

    @torch.no_grad()
    def apply_stored_normalizer(self, target_normalize_scaler: float = 1.0):
        """
        Applies the previous normalization calculation to the module.
        This must be called before saving or the normalization will be lost.
        It is probably best to call it after each batch as well.
        We just scale the up/down weights to match this scaler.
        :return:
        """
        # get state dict
        state_dict = self.state_dict()
        dtype = state_dict['lora_up.weight'].dtype
        device = state_dict['lora_up.weight'].device

        # todo should we do this at fp32?
        if isinstance(self.normalize_scaler, torch.Tensor):
            scaler = self.normalize_scaler.clone().detach()
        else:
            scaler = torch.tensor(self.normalize_scaler).to(device, dtype=dtype)

        total_module_scale = scaler / target_normalize_scaler
        num_modules_layers = 2  # up and down
        up_down_scale = torch.pow(total_module_scale, 1.0 / num_modules_layers) \
            .to(device, dtype=dtype)

        # apply the scaler to the up and down weights
        for key in state_dict.keys():
            if key.endswith('lora_up.weight') or key.endswith('lora_down.weight'):
                # do it in place so the params are updated
                state_dict[key] *= up_down_scale

        # reset the normalization scaler
        self.normalize_scaler = target_normalize_scaler
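
    # Illustrative note (not part of the original file): since the LoRA delta is
    # lora_up(lora_down(x)), scaling both weight matrices by total_module_scale ** (1 / 2)
    # scales their product by total_module_scale, which is why up_down_scale takes the
    # square root above.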

-class LoRASpecialNetwork(LoRANetwork):
+class LoRASpecialNetwork(ToolkitNetworkMixin, LoRANetwork):
    NUM_OF_BLOCKS = 12  # number of up/down block layers in the full model

    UNET_TARGET_REPLACE_MODULE = ["Transformer2DModel"]

@@ -445,154 +302,3 @@ class LoRASpecialNetwork(LoRANetwork):
        for lora in self.text_encoder_loras + self.unet_loras:
            assert lora.lora_name not in names, f"duplicated lora name: {lora.lora_name}"
            names.add(lora.lora_name)

    def get_keymap(self):
        if self.is_sdxl:
            keymap_tail = 'sdxl'
        elif self.is_v2:
            keymap_tail = 'sd2'
        else:
            keymap_tail = 'sd1'
        # load keymap
        keymap_name = f"stable_diffusion_locon_{keymap_tail}.json"

        keymap = None
        # check if file exists
        if os.path.exists(keymap_name):
            with open(keymap_name, 'r') as f:
                keymap = json.load(f)

        return keymap

    def save_weights(self, file, dtype, metadata):
        keymap = self.get_keymap()

        save_keymap = {}
        if keymap is not None:
            for ldm_key, diffusers_key in keymap.items():
                # invert them
                save_keymap[diffusers_key] = ldm_key

        if metadata is not None and len(metadata) == 0:
            metadata = None

        state_dict = self.state_dict()
        save_dict = OrderedDict()

        if dtype is not None:
            for key in list(state_dict.keys()):
                v = state_dict[key]
                v = v.detach().clone().to("cpu").to(dtype)
                save_key = save_keymap[key] if key in save_keymap else key
                save_dict[save_key] = v

        if os.path.splitext(file)[1] == ".safetensors":
            from safetensors.torch import save_file
            save_file(save_dict, file, metadata)
        else:
            torch.save(save_dict, file)

    def load_weights(self, file):
        # allows us to save and load to and from ldm weights
        keymap = self.get_keymap()
        keymap = {} if keymap is None else keymap

        if os.path.splitext(file)[1] == ".safetensors":
            from safetensors.torch import load_file

            weights_sd = load_file(file)
        else:
            weights_sd = torch.load(file, map_location="cpu")

        load_sd = OrderedDict()
        for key, value in weights_sd.items():
            load_key = keymap[key] if key in keymap else key
            load_sd[load_key] = value

        info = self.load_state_dict(load_sd, False)
        return info
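
    # Illustrative note (not part of the original file): save_weights writes keys through the
    # inverted keymap (diffusers-style -> ldm-style) while load_weights maps them back, so the
    # same JSON keymap lets checkpoints round-trip between the two naming schemes.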

    @property
    def multiplier(self) -> Union[float, List[float]]:
        return self._multiplier

    @multiplier.setter
    def multiplier(self, value: Union[float, List[float]]):
        self._multiplier = value
        self._update_lora_multiplier()

    def _update_lora_multiplier(self):

        if self.is_active:
            if hasattr(self, 'unet_loras'):
                for lora in self.unet_loras:
                    lora.multiplier = self._multiplier
            if hasattr(self, 'text_encoder_loras'):
                for lora in self.text_encoder_loras:
                    lora.multiplier = self._multiplier
        else:
            if hasattr(self, 'unet_loras'):
                for lora in self.unet_loras:
                    lora.multiplier = 0
            if hasattr(self, 'text_encoder_loras'):
                for lora in self.text_encoder_loras:
                    lora.multiplier = 0

    # called when the context manager is entered
    # ie: with network:
    def __enter__(self):
        self.is_active = True
        self._update_lora_multiplier()

    def __exit__(self, exc_type, exc_value, tb):
        self.is_active = False
        self._update_lora_multiplier()
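
    # Usage sketch (not part of the original file; constructor args are elided):
    #
    #     network = LoRASpecialNetwork(...)
    #     network.multiplier = 1.0
    #     with network:            # __enter__ sets is_active = True and pushes the multiplier
    #         run_inference()      # hypothetical call; the LoRA weights are applied here
    #     # __exit__ sets is_active = False and pushes the multipliers back to 0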

    def force_to(self, device, dtype):
        self.to(device, dtype)
        loras = []
        if hasattr(self, 'unet_loras'):
            loras += self.unet_loras
        if hasattr(self, 'text_encoder_loras'):
            loras += self.text_encoder_loras
        for lora in loras:
            lora.to(device, dtype)

    def get_all_modules(self):
        loras = []
        if hasattr(self, 'unet_loras'):
            loras += self.unet_loras
        if hasattr(self, 'text_encoder_loras'):
            loras += self.text_encoder_loras
        return loras

    def _update_checkpointing(self):
        for module in self.get_all_modules():
            if self.is_checkpointing:
                module.enable_gradient_checkpointing()
            else:
                module.disable_gradient_checkpointing()

    def enable_gradient_checkpointing(self):
        # not supported
        self.is_checkpointing = True
        self._update_checkpointing()

    def disable_gradient_checkpointing(self):
        # not supported
        self.is_checkpointing = False
        self._update_checkpointing()

    @property
    def is_normalizing(self) -> bool:
        return self._is_normalizing

    @is_normalizing.setter
    def is_normalizing(self, value: bool):
        self._is_normalizing = value
        for module in self.get_all_modules():
            module.is_normalizing = self._is_normalizing

    def apply_stored_normalizer(self, target_normalize_scaler: float = 1.0):
        for module in self.get_all_modules():
            module.apply_stored_normalizer(target_normalize_scaler)
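
    # Workflow sketch (not part of the original file): is_normalizing toggles the per-module
    # output normalization in LoRAModule.forward, and apply_stored_normalizer() folds the stored
    # scaler into the lora_up / lora_down weights, which the module docstring says must happen
    # before saving.
    #
    #     network.is_normalizing = True        # modules compute and store normalize_scaler
    #     ...                                  # run a forward pass / training batch
    #     network.apply_stored_normalizer()    # bake the stored scalers into the weights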