mirror of https://github.com/lllyasviel/stable-diffusion-webui-forge.git
Gradio 4 + WebUI 1.10
@@ -7,6 +7,7 @@ import torch.nn as nn
 import torch.nn.functional as F
 
 from modules import sd_models, cache, errors, hashes, shared
+import modules.models.sd3.mmdit
 
 NetworkWeights = namedtuple('NetworkWeights', ['network_key', 'sd_key', 'w', 'sd_module'])
 
@@ -29,7 +30,6 @@ class NetworkOnDisk:
 
         def read_metadata():
             metadata = sd_models.read_metadata_from_safetensors(filename)
-            metadata.pop('ssmd_cover_images', None)  # those are cover images, and they are too big to display in UI as text
 
             return metadata
 
@@ -115,8 +115,17 @@ class NetworkModule:
         self.sd_key = weights.sd_key
         self.sd_module = weights.sd_module
 
-        if hasattr(self.sd_module, 'weight'):
+        if isinstance(self.sd_module, modules.models.sd3.mmdit.QkvLinear):
+            s = self.sd_module.weight.shape
+            self.shape = (s[0] // 3, s[1])
+        elif hasattr(self.sd_module, 'weight'):
             self.shape = self.sd_module.weight.shape
+        elif isinstance(self.sd_module, nn.MultiheadAttention):
+            # For now, only self-attn use Pytorch's MHA
+            # So assume all qkvo proj have same shape
+            self.shape = self.sd_module.out_proj.weight.shape
+        else:
+            self.shape = None
 
         self.ops = None
         self.extra_kwargs = {}
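Note (explanatory, not part of the diff; the file being changed appears to be the Lora extension's network.py): modules.models.sd3.mmdit.QkvLinear is SD3's fused attention projection, whose weight stacks the query, key and value projections along the output dimension, giving a shape of (3 * dim, dim). Dividing s[0] by 3 recovers the shape of a single projection, which is what the LoRA shape checks expect. A minimal sketch, using a plain nn.Linear as a stand-in for QkvLinear:

import torch.nn as nn

dim = 64
qkv = nn.Linear(dim, 3 * dim)               # stand-in: q, k, v stacked along the output dim

s = qkv.weight.shape                        # torch.Size([192, 64])
shape = (s[0] // 3, s[1])                   # (64, 64): the shape of one q/k/v projection
assert shape == (dim, dim)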
@@ -146,6 +155,9 @@ class NetworkModule:
         self.alpha = weights.w["alpha"].item() if "alpha" in weights.w else None
         self.scale = weights.w["scale"].item() if "scale" in weights.w else None
 
+        self.dora_scale = weights.w.get("dora_scale", None)
+        self.dora_norm_dims = len(self.shape) - 1
+
     def multiplier(self):
         if 'transformer' in self.sd_key[:20]:
             return self.network.te_multiplier
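Note (explanatory, not part of the diff): dora_norm_dims is the number of weight dimensions after the first; apply_weight_decompose below uses it to reshape the per-input-channel norm back to a shape that broadcasts against the weight (for example (1, in, 1, 1) for a conv kernel). Roughly:

import torch

linear_weight = torch.empty(320, 768)        # Linear: (out, in)
conv_weight = torch.empty(320, 4, 3, 3)      # Conv2d: (out, in, kh, kw)

print(len(linear_weight.shape) - 1)          # 1
print(len(conv_weight.shape) - 1)            # 3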
@@ -160,6 +172,27 @@ class NetworkModule:
 
         return 1.0
 
+    def apply_weight_decompose(self, updown, orig_weight):
+        # Match the device/dtype
+        orig_weight = orig_weight.to(updown.dtype)
+        dora_scale = self.dora_scale.to(device=orig_weight.device, dtype=updown.dtype)
+        updown = updown.to(orig_weight.device)
+
+        merged_scale1 = updown + orig_weight
+        merged_scale1_norm = (
+            merged_scale1.transpose(0, 1)
+            .reshape(merged_scale1.shape[1], -1)
+            .norm(dim=1, keepdim=True)
+            .reshape(merged_scale1.shape[1], *[1] * self.dora_norm_dims)
+            .transpose(0, 1)
+        )
+
+        dora_merged = (
+            merged_scale1 * (dora_scale / merged_scale1_norm)
+        )
+        final_updown = dora_merged - orig_weight
+        return final_updown
+
     def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None):
         if self.bias is not None:
             updown = updown.reshape(self.bias.shape)
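Note (explanatory, not part of the diff): as I read it, apply_weight_decompose implements the DoRA (weight-decomposed LoRA) merge: the merged weight W0 + delta is renormalised column by column, each column's magnitude being replaced by the learned dora_scale vector, i.e. W' = dora_scale * (W0 + delta) / ||W0 + delta||_c, and the function returns W' - W0 so callers can keep treating the result as an additive update. For a 2-D weight the transpose/reshape chain is simply a per-column norm over the output dimension; a small self-check sketch with hypothetical shapes and values:

import torch

out_dim, in_dim = 6, 4
orig_weight = torch.randn(out_dim, in_dim)            # W0
updown = 0.1 * torch.randn(out_dim, in_dim)           # LoRA delta, already alpha-scaled
dora_scale = torch.rand(1, in_dim) + 0.5              # learned per-column magnitude (hypothetical values)

merged = orig_weight + updown
col_norm = merged.norm(dim=0, keepdim=True)           # (1, in_dim): norm of each column

# the same norm via the transpose/reshape chain used in apply_weight_decompose (dora_norm_dims == 1)
chain_norm = (
    merged.transpose(0, 1)
    .reshape(merged.shape[1], -1)
    .norm(dim=1, keepdim=True)
    .reshape(merged.shape[1], 1)
    .transpose(0, 1)
)
assert torch.allclose(col_norm, chain_norm)

final_updown = merged * (dora_scale / col_norm) - orig_weight   # what the caller adds onto W0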
@@ -175,7 +208,12 @@ class NetworkModule:
         if ex_bias is not None:
             ex_bias = ex_bias * self.multiplier()
 
-        return updown * self.calc_scale() * self.multiplier(), ex_bias
+        updown = updown * self.calc_scale()
+
+        if self.dora_scale is not None:
+            updown = self.apply_weight_decompose(updown, orig_weight)
+
+        return updown * self.multiplier(), ex_bias
 
     def calc_updown(self, target):
         raise NotImplementedError()
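Note (explanatory, not part of the diff): the finalize_updown change splits the old single return so that the alpha-derived scale from calc_scale() is applied first, DoRA renormalisation is applied to that scaled delta when a dora_scale tensor was loaded, and the user-facing network multiplier is applied last. A rough standalone paraphrase (a hypothetical free function, not the method itself):

def finalize(updown, orig_weight, calc_scale, multiplier, dora_scale=None, decompose=None):
    updown = updown * calc_scale                      # alpha / dim scaling first
    if dora_scale is not None:
        updown = decompose(updown, orig_weight)       # DoRA renormalisation of the scaled delta
    return updown * multiplier                        # user multiplier applied last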