Mirror of https://github.com/lllyasviel/stable-diffusion-webui-forge.git
control rework
285  backend/nn/cnets/cldm.py  (new file)
@@ -0,0 +1,285 @@
import torch
import torch.nn as nn

from backend.nn.unet import timestep_embedding, exists, conv_nd, SpatialTransformer, TimestepEmbedSequential, ResBlock, Downsample


class ControlNet(nn.Module):
    def __init__(
        self,
        in_channels,
        model_channels,
        hint_channels,
        num_res_blocks,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        num_classes=None,
        use_checkpoint=False,
        dtype=torch.float32,
        num_heads=-1,
        num_head_channels=-1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        resblock_updown=False,
        use_new_attention_order=False,
        use_spatial_transformer=False,
        transformer_depth=1,
        context_dim=None,
        n_embed=None,
        disable_self_attentions=None,
        num_attention_blocks=None,
        disable_middle_self_attn=False,
        use_linear_in_transformer=False,
        adm_in_channels=None,
        transformer_depth_middle=None,
        transformer_depth_output=None,
        device=None,
        **kwargs,
    ):
        super().__init__()
        assert use_spatial_transformer == True, "use_spatial_transformer has to be true"
        if use_spatial_transformer:
            assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'

        if context_dim is not None:
            assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
            # from omegaconf.listconfig import ListConfig
            # if type(context_dim) == ListConfig:
            #     context_dim = list(context_dim)

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        if num_heads == -1:
            assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'

        if num_head_channels == -1:
            assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'

        self.dims = dims
        self.in_channels = in_channels
        self.model_channels = model_channels

        if isinstance(num_res_blocks, int):
            self.num_res_blocks = len(channel_mult) * [num_res_blocks]
        else:
            if len(num_res_blocks) != len(channel_mult):
                raise ValueError("provide num_res_blocks either as an int (globally constant) or "
                                 "as a list/tuple (per-level) with the same length as channel_mult")
            self.num_res_blocks = num_res_blocks

        if disable_self_attentions is not None:
            # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not
            assert len(disable_self_attentions) == len(channel_mult)
        if num_attention_blocks is not None:
            assert len(num_attention_blocks) == len(self.num_res_blocks)
            assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))

        transformer_depth = transformer_depth[:]

        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.num_classes = num_classes
        self.use_checkpoint = use_checkpoint
        self.dtype = dtype
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample
        self.predict_codebook_ids = n_embed is not None

        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            nn.Linear(model_channels, time_embed_dim, dtype=self.dtype, device=device),
            nn.SiLU(),
            nn.Linear(time_embed_dim, time_embed_dim, dtype=self.dtype, device=device),
        )

        if self.num_classes is not None:
            if isinstance(self.num_classes, int):
                self.label_emb = nn.Embedding(num_classes, time_embed_dim)
            elif self.num_classes == "continuous":
                print("setting up linear c_adm embedding layer")
                self.label_emb = nn.Linear(1, time_embed_dim)
            elif self.num_classes == "sequential":
                assert adm_in_channels is not None
                self.label_emb = nn.Sequential(
                    nn.Sequential(
                        nn.Linear(adm_in_channels, time_embed_dim, dtype=self.dtype, device=device),
                        nn.SiLU(),
                        nn.Linear(time_embed_dim, time_embed_dim, dtype=self.dtype, device=device),
                    )
                )
            else:
                raise ValueError()

        self.input_blocks = nn.ModuleList(
            [
                TimestepEmbedSequential(
                    nn.Conv2d(in_channels, model_channels, 3, padding=1, dtype=self.dtype, device=device)
                )
            ]
        )
        self.zero_convs = nn.ModuleList([self.make_zero_conv(model_channels, dtype=self.dtype, device=device)])

        self.input_hint_block = TimestepEmbedSequential(
            conv_nd(dims, hint_channels, 16, 3, padding=1, dtype=self.dtype, device=device),
            nn.SiLU(),
            conv_nd(dims, 16, 16, 3, padding=1, dtype=self.dtype, device=device),
            nn.SiLU(),
            conv_nd(dims, 16, 32, 3, padding=1, stride=2, dtype=self.dtype, device=device),
            nn.SiLU(),
            conv_nd(dims, 32, 32, 3, padding=1, dtype=self.dtype, device=device),
            nn.SiLU(),
            conv_nd(dims, 32, 96, 3, padding=1, stride=2, dtype=self.dtype, device=device),
            nn.SiLU(),
            conv_nd(dims, 96, 96, 3, padding=1, dtype=self.dtype, device=device),
            nn.SiLU(),
            conv_nd(dims, 96, 256, 3, padding=1, stride=2, dtype=self.dtype, device=device),
            nn.SiLU(),
            conv_nd(dims, 256, model_channels, 3, padding=1, dtype=self.dtype, device=device)
        )

        self._feature_size = model_channels
        input_block_chans = [model_channels]
        ch = model_channels
        ds = 1
        for level, mult in enumerate(channel_mult):
            for nr in range(self.num_res_blocks[level]):
                layers = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=mult * model_channels,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                        dtype=self.dtype,
                        device=device,
                    )
                ]
                ch = mult * model_channels
                num_transformers = transformer_depth.pop(0)
                if num_transformers > 0:
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels

                    if exists(disable_self_attentions):
                        disabled_sa = disable_self_attentions[level]
                    else:
                        disabled_sa = False

                    if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:
                        layers.append(
                            SpatialTransformer(
                                ch, num_heads, dim_head, depth=num_transformers, context_dim=context_dim,
                                disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
                                use_checkpoint=use_checkpoint, dtype=self.dtype, device=device
                            )
                        )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self.zero_convs.append(self.make_zero_conv(ch, dtype=self.dtype, device=device))
                self._feature_size += ch
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                out_ch = ch
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                            dtype=self.dtype,
                            device=device,
                        )
                        if resblock_updown
                        else Downsample(
                            ch, conv_resample, dims=dims, out_channels=out_ch, dtype=self.dtype, device=device
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                self.zero_convs.append(self.make_zero_conv(ch, dtype=self.dtype, device=device))
                ds *= 2
                self._feature_size += ch

        if num_head_channels == -1:
            dim_head = ch // num_heads
        else:
            num_heads = ch // num_head_channels
            dim_head = num_head_channels

        mid_block = [
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
                dtype=self.dtype,
                device=device,
            )]
        if transformer_depth_middle >= 0:
            mid_block += [
                SpatialTransformer(  # always uses a self-attn
                    ch, num_heads, dim_head, depth=transformer_depth_middle, context_dim=context_dim,
                    disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,
                    use_checkpoint=use_checkpoint, dtype=self.dtype, device=device
                ),
                ResBlock(
                    ch,
                    time_embed_dim,
                    dropout,
                    dims=dims,
                    use_checkpoint=use_checkpoint,
                    use_scale_shift_norm=use_scale_shift_norm,
                    dtype=self.dtype,
                    device=device,
                )]
        self.middle_block = TimestepEmbedSequential(*mid_block)
        self.middle_block_out = self.make_zero_conv(ch, dtype=self.dtype, device=device)
        self._feature_size += ch

    def make_zero_conv(self, channels, dtype=None, device=None):
        return TimestepEmbedSequential(conv_nd(self.dims, channels, channels, 1, padding=0, dtype=dtype, device=device))

    def forward(self, x, hint, timesteps, context, y=None, **kwargs):
        t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False).to(x.dtype)
        emb = self.time_embed(t_emb)

        guided_hint = self.input_hint_block(hint, emb, context)

        outs = []

        hs = []
        if self.num_classes is not None:
            assert y.shape[0] == x.shape[0]
            emb = emb + self.label_emb(y)

        h = x
        for module, zero_conv in zip(self.input_blocks, self.zero_convs):
            if guided_hint is not None:
                h = module(h, emb, context)
                h += guided_hint
                guided_hint = None
            else:
                h = module(h, emb, context)
            outs.append(zero_conv(h, emb, context))

        h = self.middle_block(h, emb, context)
        outs.append(self.middle_block_out(h, emb, context))

        return outs
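For orientation, here is a minimal usage sketch of the ControlNet module above. It is not part of the commit; the hyperparameters (an SD1.5-like configuration) and tensor shapes are assumptions chosen only to make the call signature concrete, and in Forge the module is normally constructed and weighted from a loaded control-model state dict.

    import torch
    from backend.nn.cnets.cldm import ControlNet

    # Assumed SD1.5-like hyperparameters, for illustration only.
    control = ControlNet(
        in_channels=4, model_channels=320, hint_channels=3,
        num_res_blocks=2, channel_mult=(1, 2, 4, 4),
        use_spatial_transformer=True, transformer_depth=[1] * 12,
        transformer_depth_middle=1, context_dim=768, num_heads=8,
    )

    x = torch.randn(1, 4, 64, 64)        # noisy latent
    hint = torch.randn(1, 3, 512, 512)   # preprocessed control image (hint_channels=3)
    t = torch.tensor([999])              # diffusion timestep
    context = torch.randn(1, 77, 768)    # text-encoder hidden states

    # One residual per input block plus one for the middle block; the caller
    # adds each entry onto the matching UNet activation during sampling.
    residuals = control(x, hint, t, context)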
293  backend/nn/cnets/t2i_adapter.py  (new file)
@@ -0,0 +1,293 @@
import torch
import torch.nn as nn

from collections import OrderedDict


def conv_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D convolution module.
    """
    if dims == 1:
        return nn.Conv1d(*args, **kwargs)
    elif dims == 2:
        return nn.Conv2d(*args, **kwargs)
    elif dims == 3:
        return nn.Conv3d(*args, **kwargs)
    raise ValueError(f"unsupported dimensions: {dims}")


def avg_pool_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D average pooling module.
    """
    if dims == 1:
        return nn.AvgPool1d(*args, **kwargs)
    elif dims == 2:
        return nn.AvgPool2d(*args, **kwargs)
    elif dims == 3:
        return nn.AvgPool3d(*args, **kwargs)
    raise ValueError(f"unsupported dimensions: {dims}")


class Downsample(nn.Module):
    """
    A downsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
                 downsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        stride = 2 if dims != 3 else (1, 2, 2)
        if use_conv:
            self.op = conv_nd(
                dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
            )
        else:
            assert self.channels == self.out_channels
            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)

    def forward(self, x):
        assert x.shape[1] == self.channels
        if not self.use_conv:
            padding = [x.shape[2] % 2, x.shape[3] % 2]
            self.op.padding = padding

        x = self.op(x)
        return x


class ResnetBlock(nn.Module):
    def __init__(self, in_c, out_c, down, ksize=3, sk=False, use_conv=True):
        super().__init__()
        ps = ksize // 2
        if in_c != out_c or sk == False:
            self.in_conv = nn.Conv2d(in_c, out_c, ksize, 1, ps)
        else:
            # print('n_in')
            self.in_conv = None
        self.block1 = nn.Conv2d(out_c, out_c, 3, 1, 1)
        self.act = nn.ReLU()
        self.block2 = nn.Conv2d(out_c, out_c, ksize, 1, ps)
        if sk == False:
            self.skep = nn.Conv2d(in_c, out_c, ksize, 1, ps)
        else:
            self.skep = None

        self.down = down
        if self.down == True:
            self.down_opt = Downsample(in_c, use_conv=use_conv)

    def forward(self, x):
        if self.down == True:
            x = self.down_opt(x)
        if self.in_conv is not None:  # edit
            x = self.in_conv(x)

        h = self.block1(x)
        h = self.act(h)
        h = self.block2(h)
        if self.skep is not None:
            return h + self.skep(x)
        else:
            return h + x


class Adapter(nn.Module):
    def __init__(self, channels=[320, 640, 1280, 1280], nums_rb=3, cin=64, ksize=3, sk=False, use_conv=True, xl=True):
        super(Adapter, self).__init__()
        self.unshuffle_amount = 8
        resblock_no_downsample = []
        resblock_downsample = [3, 2, 1]
        self.xl = xl
        if self.xl:
            self.unshuffle_amount = 16
            resblock_no_downsample = [1]
            resblock_downsample = [2]

        self.input_channels = cin // (self.unshuffle_amount * self.unshuffle_amount)
        self.unshuffle = nn.PixelUnshuffle(self.unshuffle_amount)
        self.channels = channels
        self.nums_rb = nums_rb
        self.body = []
        for i in range(len(channels)):
            for j in range(nums_rb):
                if (i in resblock_downsample) and (j == 0):
                    self.body.append(
                        ResnetBlock(channels[i - 1], channels[i], down=True, ksize=ksize, sk=sk, use_conv=use_conv))
                elif (i in resblock_no_downsample) and (j == 0):
                    self.body.append(
                        ResnetBlock(channels[i - 1], channels[i], down=False, ksize=ksize, sk=sk, use_conv=use_conv))
                else:
                    self.body.append(
                        ResnetBlock(channels[i], channels[i], down=False, ksize=ksize, sk=sk, use_conv=use_conv))
        self.body = nn.ModuleList(self.body)
        self.conv_in = nn.Conv2d(cin, channels[0], 3, 1, 1)

    def forward(self, x):
        # unshuffle
        x = self.unshuffle(x)
        # extract features
        features = []
        x = self.conv_in(x)
        for i in range(len(self.channels)):
            for j in range(self.nums_rb):
                idx = i * self.nums_rb + j
                x = self.body[idx](x)
            if self.xl:
                features.append(None)
                if i == 0:
                    features.append(None)
                    features.append(None)
                if i == 2:
                    features.append(None)
            else:
                features.append(None)
                features.append(None)
            features.append(x)

        return features


class LayerNorm(nn.LayerNorm):
    """Subclass torch's LayerNorm to handle fp16."""

    def forward(self, x: torch.Tensor):
        orig_type = x.dtype
        ret = super().forward(x.type(torch.float32))
        return ret.type(orig_type)


class QuickGELU(nn.Module):

    def forward(self, x: torch.Tensor):
        return x * torch.sigmoid(1.702 * x)


class ResidualAttentionBlock(nn.Module):

    def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
        super().__init__()

        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        self.mlp = nn.Sequential(
            OrderedDict([("c_fc", nn.Linear(d_model, d_model * 4)), ("gelu", QuickGELU()),
                         ("c_proj", nn.Linear(d_model * 4, d_model))]))
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask

    def attention(self, x: torch.Tensor):
        self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]

    def forward(self, x: torch.Tensor):
        x = x + self.attention(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x


class StyleAdapter(nn.Module):

    def __init__(self, width=1024, context_dim=768, num_head=8, n_layes=3, num_token=4):
        super().__init__()

        scale = width ** -0.5
        self.transformer_layes = nn.Sequential(*[ResidualAttentionBlock(width, num_head) for _ in range(n_layes)])
        self.num_token = num_token
        self.style_embedding = nn.Parameter(torch.randn(1, num_token, width) * scale)
        self.ln_post = LayerNorm(width)
        self.ln_pre = LayerNorm(width)
        self.proj = nn.Parameter(scale * torch.randn(width, context_dim))

    def forward(self, x):
        # x shape [N, HW+1, C]
        style_embedding = self.style_embedding + torch.zeros(
            (x.shape[0], self.num_token, self.style_embedding.shape[-1]), device=x.device)
        x = torch.cat([x, style_embedding], dim=1)
        x = self.ln_pre(x)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer_layes(x)
        x = x.permute(1, 0, 2)  # LND -> NLD

        x = self.ln_post(x[:, -self.num_token:, :])
        x = x @ self.proj

        return x


class ResnetBlock_light(nn.Module):
    def __init__(self, in_c):
        super().__init__()
        self.block1 = nn.Conv2d(in_c, in_c, 3, 1, 1)
        self.act = nn.ReLU()
        self.block2 = nn.Conv2d(in_c, in_c, 3, 1, 1)

    def forward(self, x):
        h = self.block1(x)
        h = self.act(h)
        h = self.block2(h)

        return h + x


class extractor(nn.Module):
    def __init__(self, in_c, inter_c, out_c, nums_rb, down=False):
        super().__init__()
        self.in_conv = nn.Conv2d(in_c, inter_c, 1, 1, 0)
        self.body = []
        for _ in range(nums_rb):
            self.body.append(ResnetBlock_light(inter_c))
        self.body = nn.Sequential(*self.body)
        self.out_conv = nn.Conv2d(inter_c, out_c, 1, 1, 0)
        self.down = down
        if self.down == True:
            self.down_opt = Downsample(in_c, use_conv=False)

    def forward(self, x):
        if self.down == True:
            x = self.down_opt(x)
        x = self.in_conv(x)
        x = self.body(x)
        x = self.out_conv(x)

        return x


class Adapter_light(nn.Module):
    def __init__(self, channels=[320, 640, 1280, 1280], nums_rb=3, cin=64):
        super(Adapter_light, self).__init__()
        self.unshuffle_amount = 8
        self.unshuffle = nn.PixelUnshuffle(self.unshuffle_amount)
        self.input_channels = cin // (self.unshuffle_amount * self.unshuffle_amount)
        self.channels = channels
        self.nums_rb = nums_rb
        self.body = []
        self.xl = False

        for i in range(len(channels)):
            if i == 0:
                self.body.append(extractor(in_c=cin, inter_c=channels[i] // 4, out_c=channels[i], nums_rb=nums_rb, down=False))
            else:
                self.body.append(extractor(in_c=channels[i - 1], inter_c=channels[i] // 4, out_c=channels[i], nums_rb=nums_rb, down=True))
        self.body = nn.ModuleList(self.body)

    def forward(self, x):
        # unshuffle
        x = self.unshuffle(x)
        # extract features
        features = []
        for i in range(len(self.channels)):
            x = self.body[i](x)
            features.append(None)
            features.append(None)
            features.append(x)

        return features
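Similarly, a rough usage sketch for the Adapter above (not part of the commit). The shapes assume the SDXL defaults with xl=True, i.e. a 16x pixel-unshuffle, so cin must equal 3 * 16 * 16 for a 3-channel hint. The returned list mixes None placeholders with feature maps; downstream code adds each non-None entry to the corresponding UNet input-block activation.

    import torch

    adapter = Adapter(cin=3 * 16 * 16, xl=True)   # other arguments left at their defaults

    hint = torch.randn(1, 3, 1024, 1024)          # control image in pixel space
    feats = adapter(hint)                         # per-level features interleaved with None placeholders

    for f in feats:
        print(None if f is None else tuple(f.shape))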
@@ -655,12 +655,32 @@ class IntegratedUNet2DConditionModel(nn.Module, ConfigMixin):
        device = unet_initial_device

        self.legacy_config = dict(
            in_channels=in_channels,
            out_channels=out_channels,
            model_channels=model_channels,
            num_res_blocks=num_res_blocks,
            dropout=dropout,
            channel_mult=channel_mult,
            conv_resample=conv_resample,
            dims=dims,
            num_classes=num_classes,
            dtype=dtype,
            num_heads=num_heads,
            num_head_channels=num_head_channels,
            num_heads_upsample=num_heads_upsample,
            use_scale_shift_norm=use_scale_shift_norm,
            resblock_updown=resblock_updown,
            use_spatial_transformer=use_spatial_transformer,
            transformer_depth=transformer_depth,
            context_dim=context_dim,
            disable_self_attentions=disable_self_attentions,
            num_attention_blocks=num_attention_blocks,
            disable_middle_self_attn=disable_middle_self_attn,
            use_linear_in_transformer=use_linear_in_transformer,
            adm_in_channels=adm_in_channels,
            transformer_depth_middle=transformer_depth_middle,
            transformer_depth_output=transformer_depth_output,
            device=device,
        )

        if context_dim is not None:
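A hedged reading of the hunk above: it expands self.legacy_config so the UNet records its full LDM-style construction kwargs, which makes it possible to instantiate a control model with a matching trunk. The helper below is hypothetical and not part of the commit; it only illustrates how the recorded config lines up with the ControlNet constructor (extra keys such as out_channels fall through to its **kwargs).

    from backend.nn.cnets.cldm import ControlNet

    def controlnet_from_unet(unet, hint_channels=3):
        # Hypothetical helper: reuse the UNet's recorded legacy_config so the
        # ControlNet trunk matches the UNet it will be paired with.
        return ControlNet(hint_channels=hint_channels, **unet.legacy_config)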