diff --git a/extensions_built_in/flex2/flex2.py b/extensions_built_in/flex2/flex2.py
index 3340f64a..7bb89506 100644
--- a/extensions_built_in/flex2/flex2.py
+++ b/extensions_built_in/flex2/flex2.py
@@ -424,7 +424,9 @@ class Flex2(BaseModel):
if self.random_blur_mask:
# blur the mask
# Give it a channel dim of 1
- inpainting_tensor_mask = inpainting_tensor_mask.unsqueeze(1)
+ if len(inpainting_tensor_mask.shape) == 3:
+ # if it is 3d, add a channel dim
+ inpainting_tensor_mask = inpainting_tensor_mask.unsqueeze(1)
# we are at latent size, so keep kernel smaller
inpainting_tensor_mask = random_blur(
inpainting_tensor_mask,
@@ -432,8 +434,6 @@ class Flex2(BaseModel):
max_kernel_size=8,
p=0.5
)
- # remove the channel dim
- inpainting_tensor_mask = inpainting_tensor_mask.squeeze(1)
do_mask_invert = False
if self.invert_inpaint_mask_chance > 0.0:
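
The flex2.py change above makes the mask's channel handling shape-aware: the unsqueeze only runs when the mask arrives as a 3-D (N, H, W) tensor, and the channel dim is kept after blurring instead of being squeezed back out. A minimal sketch of that pattern, with `blur_latent_mask` as a hypothetical stand-in for the toolkit's `random_blur` (a simple average-pool blur here):

```python
# Minimal sketch, not the toolkit's implementation: guard the unsqueeze so a mask
# that is already 4-D is left alone, and keep the channel dim after blurring.
import torch
import torch.nn.functional as F

def blur_latent_mask(mask: torch.Tensor, max_kernel_size: int = 8, p: float = 0.5) -> torch.Tensor:
    """Hypothetical stand-in for random_blur: randomly box-blur a latent-size mask."""
    if mask.dim() == 3:
        # (N, H, W) -> (N, 1, H, W): pooling/conv ops expect an explicit channel dim
        mask = mask.unsqueeze(1)
    if torch.rand(1).item() < p:
        k = int(torch.randint(2, max_kernel_size + 1, (1,)).item())
        h, w = mask.shape[-2:]
        mask = F.avg_pool2d(mask, kernel_size=k, stride=1, padding=k // 2)
        mask = mask[..., :h, :w]  # even kernels grow the output by one pixel; crop back
    return mask  # stays (N, 1, H, W); downstream code no longer squeezes the channel dim
```
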
diff --git a/scripts/update_sponsors.py b/scripts/update_sponsors.py
index 1e3bbf63..d80b4bc1 100644
--- a/scripts/update_sponsors.py
+++ b/scripts/update_sponsors.py
@@ -246,7 +246,7 @@ def generate_readme(supporters):
f.write("### GitHub Sponsors\n\n")
for sponsor in github_sponsors:
if sponsor['profile_image']:
- f.write(f"
")
+ f.write(f"
")
else:
f.write(f"[{sponsor['name']}]({sponsor['profile_url']}) ")
f.write("\n\n")
@@ -257,7 +257,7 @@ def generate_readme(supporters):
f.write("### Patreon Supporters\n\n")
for supporter in patreon_supporters:
if supporter['profile_image']:
- f.write(f"
")
+ f.write(f"
")
else:
f.write(f"[{supporter['name']}]({supporter['profile_url']}) ")
f.write("\n\n")
diff --git a/toolkit/lora_special.py b/toolkit/lora_special.py
index aa221480..c6691dec 100644
--- a/toolkit/lora_special.py
+++ b/toolkit/lora_special.py
@@ -361,6 +361,10 @@ class LoRASpecialNetwork(ToolkitNetworkMixin, LoRANetwork):
if self.transformer_only and is_unet and hasattr(root_module, 'blocks'):
if "blocks" not in lora_name:
skip = True
+
+ if self.transformer_only and is_unet and hasattr(root_module, 'single_blocks'):
+ if "single_blocks" not in lora_name and "double_blocks" not in lora_name:
+ skip = True
if (is_linear or is_conv2d) and not skip:
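
The lora_special.py addition extends the existing `transformer_only` filter to models whose transformer exposes `single_blocks` / `double_blocks` instead of a plain `blocks` attribute. A condensed sketch of the resulting skip decision; the `should_skip` wrapper is hypothetical, since the real checks run inline while the network is built:

```python
# Condensed sketch of the skip logic; should_skip is a hypothetical wrapper around
# the inline checks shown in the hunk above.
def should_skip(lora_name: str, transformer_only: bool, is_unet: bool, root_module) -> bool:
    """Return True when a transformer_only run should not attach LoRA to this module."""
    skip = False
    if transformer_only and is_unet and hasattr(root_module, 'blocks'):
        # block-based transformers: only modules inside the blocks get LoRA
        if "blocks" not in lora_name:
            skip = True
    if transformer_only and is_unet and hasattr(root_module, 'single_blocks'):
        # transformers split into single_blocks / double_blocks: keep only those modules
        if "single_blocks" not in lora_name and "double_blocks" not in lora_name:
            skip = True
    return skip
```
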
diff --git a/toolkit/prompt_utils.py b/toolkit/prompt_utils.py
index 52e15907..eb213cca 100644
--- a/toolkit/prompt_utils.py
+++ b/toolkit/prompt_utils.py
@@ -149,7 +149,12 @@ def concat_prompt_embeds(prompt_embeds: list[PromptEmbeds]):
pooled_embeds = None
if prompt_embeds[0].pooled_embeds is not None:
pooled_embeds = torch.cat([p.pooled_embeds for p in prompt_embeds], dim=0)
- return PromptEmbeds([text_embeds, pooled_embeds])
+ attention_mask = None
+ if prompt_embeds[0].attention_mask is not None:
+ attention_mask = torch.cat([p.attention_mask for p in prompt_embeds], dim=0)
+ pe = PromptEmbeds([text_embeds, pooled_embeds])
+ pe.attention_mask = attention_mask
+ return pe
def concat_prompt_pairs(prompt_pairs: list[EncodedPromptPair]):
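
With the prompt_utils.py change, `concat_prompt_embeds` carries the per-token attention mask through batch concatenation instead of dropping it. A behavioural sketch under the assumption that callers attach a boolean mask per prompt; `PromptEmbedsStub` and the tensor shapes are illustrative, not the toolkit's actual `PromptEmbeds` class:

```python
# Behavioural sketch; PromptEmbedsStub stands in for the toolkit's PromptEmbeds
# and the shapes below are illustrative.
import torch

class PromptEmbedsStub:
    def __init__(self, text_embeds, pooled_embeds=None, attention_mask=None):
        self.text_embeds = text_embeds
        self.pooled_embeds = pooled_embeds
        self.attention_mask = attention_mask

a = PromptEmbedsStub(torch.randn(1, 77, 768), attention_mask=torch.ones(1, 77, dtype=torch.bool))
b = PromptEmbedsStub(torch.randn(1, 77, 768), attention_mask=torch.ones(1, 77, dtype=torch.bool))

# what concat_prompt_embeds([a, b]) now produces, in effect:
text_embeds = torch.cat([a.text_embeds, b.text_embeds], dim=0)            # (2, 77, 768)
attention_mask = torch.cat([a.attention_mask, b.attention_mask], dim=0)   # (2, 77)
```
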