Make the hires-fix "soft projection" behavior opt-in per preprocessor instead of
unconditional: add a `use_soft_projection_in_hr_fix` flag (default False) on the
Preprocessor base class, enable it for Canny and the legacy MLSD preprocessor,
and split the controlnet.py condition so PROMPT control mode always uses soft
weighting while the HR pass uses it only when the active preprocessor opts in.

diff --git a/extensions-builtin/forge_legacy_preprocessors/legacy_preprocessors/preprocessor_compiled.py b/extensions-builtin/forge_legacy_preprocessors/legacy_preprocessors/preprocessor_compiled.py
index b96cc8c8..13901cc6 100644
--- a/extensions-builtin/forge_legacy_preprocessors/legacy_preprocessors/preprocessor_compiled.py
+++ b/extensions-builtin/forge_legacy_preprocessors/legacy_preprocessors/preprocessor_compiled.py
@@ -615,7 +615,8 @@ legacy_preprocessors = {
         "priority": 100,
         "tags": [
             "MLSD"
-        ]
+        ],
+        "use_soft_projection_in_hr_fix": True
     },
     # "normal_bae": {
     #     "label": "normal_bae",
diff --git a/extensions-builtin/forge_legacy_preprocessors/scripts/legacy_preprocessors.py b/extensions-builtin/forge_legacy_preprocessors/scripts/legacy_preprocessors.py
index a2d4f8be..b90fb61f 100644
--- a/extensions-builtin/forge_legacy_preprocessors/scripts/legacy_preprocessors.py
+++ b/extensions-builtin/forge_legacy_preprocessors/scripts/legacy_preprocessors.py
@@ -59,6 +59,9 @@ class LegacyPreprocessor(Preprocessor):
             'instant-iD': ['instant_id', 'instantid'],
         }
 
+        if legacy_dict.get('use_soft_projection_in_hr_fix', False):
+            self.use_soft_projection_in_hr_fix = True
+
         self.model_filename_filters = []
         for tag in self.tags:
             tag_lower = tag.lower()
diff --git a/extensions-builtin/sd_forge_controlnet/scripts/controlnet.py b/extensions-builtin/sd_forge_controlnet/scripts/controlnet.py
index f970dc8a..9815b989 100644
--- a/extensions-builtin/sd_forge_controlnet/scripts/controlnet.py
+++ b/extensions-builtin/sd_forge_controlnet/scripts/controlnet.py
@@ -467,8 +467,11 @@ class ControlNetForForgeOfficial(scripts.Script):
             params.model.positive_advanced_weighting = soft_weighting.copy()
             params.model.negative_advanced_weighting = zero_weighting.copy()
 
-        # high-ref fix pass always use softer injections
-        if is_hr_pass or unit.control_mode == external_code.ControlMode.PROMPT.value:
+        if unit.control_mode == external_code.ControlMode.PROMPT.value:
             params.model.positive_advanced_weighting = soft_weighting.copy()
             params.model.negative_advanced_weighting = soft_weighting.copy()
+
+        if is_hr_pass and params.preprocessor.use_soft_projection_in_hr_fix:
+            params.model.positive_advanced_weighting = soft_weighting.copy()
+            params.model.negative_advanced_weighting = soft_weighting.copy()
 
diff --git a/modules_forge/supported_preprocessor.py b/modules_forge/supported_preprocessor.py
index b9914080..1dd5485d 100644
--- a/modules_forge/supported_preprocessor.py
+++ b/modules_forge/supported_preprocessor.py
@@ -32,6 +32,7 @@ class Preprocessor:
         self.sorting_priority = 0  # higher goes to top in the list
         self.corp_image_with_a1111_mask_when_in_img2img_inpaint_tab = True
         self.fill_mask_with_one_when_resize_and_fill = False
+        self.use_soft_projection_in_hr_fix = False
 
     def setup_model_patcher(self, model, load_device=None, offload_device=None, dtype=torch.float32, **kwargs):
         if load_device is None:
@@ -86,6 +87,7 @@ class PreprocessorCanny(Preprocessor):
         self.slider_1 = PreprocessorParameter(minimum=0, maximum=256, step=1, value=100, label='Low Threshold', visible=True)
         self.slider_2 = PreprocessorParameter(minimum=0, maximum=256, step=1, value=200, label='High Threshold', visible=True)
         self.sorting_priority = 100
+        self.use_soft_projection_in_hr_fix = True
 
     def __call__(self, input_image, resolution, slider_1=None, slider_2=None, slider_3=None, **kwargs):
         input_image, remove_pad = resize_image_with_pad(input_image, resolution)