diff --git a/README.md b/README.md index feab90ec..b614201a 100644 --- a/README.md +++ b/README.md @@ -422,6 +422,7 @@ class ControlNetExampleForge(scripts.Script): B, C, H, W = kwargs['noise'].shape # latent_shape height = H * 8 width = W * 8 + batch_size = p.batch_size input_image = cv2.resize(input_image, (width, height)) @@ -446,10 +447,36 @@ class ControlNetExampleForge(scripts.Script): unet = p.sd_model.forge_objects.unet + # Unet has input, middle, output blocks, and we can give different weights to each layer in all blocks. + # Below is an example for stronger control in the middle block. + # This is helpful for some high-res fix passes. (p.is_hr_pass) + positive_advanced_weighting = { + 'input': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2], + 'middle': [1.0], + 'output': [1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1] + } + negative_advanced_weighting = { + 'input': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2], + 'middle': [1.0], + 'output': [1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1] + } + + # The advanced_frame_weighting is a weight applied to each image in a batch. + # The length of this list must be the same as the batch size + # For example, if batch size is 5, the below list is [0, 0.25, 0.5, 0.75, 1.0] + # If you view the 5 images as 5 frames in a video, this will lead to progressively stronger control over time. 
+ advanced_frame_weighting = [float(i) / float(batch_size - 1) for i in range(batch_size)] + + # But in this simple example we do not use them + positive_advanced_weighting = None + negative_advanced_weighting = None + advanced_frame_weighting = None + unet = apply_controlnet_advanced(unet=unet, controlnet=self.model, image_bhwc=control_image, strength=0.6, start_percent=0.0, end_percent=0.8, - positive_advanced_weighting=None, negative_advanced_weighting=None, - advanced_frame_weighting=None) + positive_advanced_weighting=positive_advanced_weighting, + negative_advanced_weighting=negative_advanced_weighting, + advanced_frame_weighting=advanced_frame_weighting) p.sd_model.forge_objects.unet = unet diff --git a/extensions-builtin/sd_forge_controlnet_example/scripts/sd_forge_controlnet_example.py b/extensions-builtin/sd_forge_controlnet_example/scripts/sd_forge_controlnet_example.py index f1aa5a18..4bc0cf2a 100644 --- a/extensions-builtin/sd_forge_controlnet_example/scripts/sd_forge_controlnet_example.py +++ b/extensions-builtin/sd_forge_controlnet_example/scripts/sd_forge_controlnet_example.py @@ -74,6 +74,7 @@ class ControlNetExampleForge(scripts.Script): B, C, H, W = kwargs['noise'].shape # latent_shape height = H * 8 width = W * 8 + batch_size = p.batch_size input_image = cv2.resize(input_image, (width, height)) @@ -98,10 +99,36 @@ class ControlNetExampleForge(scripts.Script): unet = p.sd_model.forge_objects.unet + # Unet has input, middle, output blocks, and we can give different weights to each layer in all blocks. + # Below is an example for stronger control in the middle block. + # This is helpful for some high-res fix passes. 
(p.is_hr_pass) + positive_advanced_weighting = { + 'input': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2], + 'middle': [1.0], + 'output': [1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1] + } + negative_advanced_weighting = { + 'input': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2], + 'middle': [1.0], + 'output': [1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1] + } + + # The advanced_frame_weighting is a weight applied to each image in a batch. + # The length of this list must be the same as the batch size + # For example, if batch size is 5, the below list is [0, 0.25, 0.5, 0.75, 1.0] + # If you view the 5 images as 5 frames in a video, this will lead to progressively stronger control over time. + advanced_frame_weighting = [float(i) / float(batch_size - 1) for i in range(batch_size)] + + # But in this simple example we do not use them + positive_advanced_weighting = None + negative_advanced_weighting = None + advanced_frame_weighting = None + unet = apply_controlnet_advanced(unet=unet, controlnet=self.model, image_bhwc=control_image, strength=0.6, start_percent=0.0, end_percent=0.8, - positive_advanced_weighting=None, negative_advanced_weighting=None, - advanced_frame_weighting=None) + positive_advanced_weighting=positive_advanced_weighting, + negative_advanced_weighting=negative_advanced_weighting, + advanced_frame_weighting=advanced_frame_weighting) p.sd_model.forge_objects.unet = unet diff --git a/modules_forge/controlnet.py b/modules_forge/controlnet.py index 49544442..151e0102 100644 --- a/modules_forge/controlnet.py +++ b/modules_forge/controlnet.py @@ -9,6 +9,33 @@ def apply_controlnet_advanced( negative_advanced_weighting=None, advanced_frame_weighting=None, ): + """ + + # positive_advanced_weighting or negative_advanced_weighting + + Unet has input, middle, output blocks, and we can give different weights to each layer in all blocks. + Below is an example for stronger control in the middle block. 
+ This is helpful for some high-res fix passes. + + positive_advanced_weighting = { + 'input': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2], + 'middle': [1.0], + 'output': [1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1] + } + negative_advanced_weighting = { + 'input': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2], + 'middle': [1.0], + 'output': [1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1] + } + + # advanced_frame_weighting + + The advanced_frame_weighting is a weight applied to each image in a batch. + The length of this list must be the same as the batch size + For example, if batch size is 5, you can use advanced_frame_weighting = [0, 0.25, 0.5, 0.75, 1.0] + If you view the 5 images as 5 frames in a video, this will lead to progressively stronger control over time. + + """ cnet = controlnet.copy().set_cond_hint(image_bhwc.movedim(-1, 1), strength, (start_percent, end_percent)) cnet.positive_advanced_weighting = positive_advanced_weighting