From ef356193255ddd2499759700596bbde69f6dd6ef Mon Sep 17 00:00:00 2001 From: Andray Date: Sun, 17 Mar 2024 14:14:12 +0400 Subject: [PATCH 01/65] Extras upscaler: option limit target resolution --- scripts/postprocessing_upscale.py | 56 +++++++++++++++++++++++-------- 1 file changed, 42 insertions(+), 14 deletions(-) diff --git a/scripts/postprocessing_upscale.py b/scripts/postprocessing_upscale.py index c2574346d..6259017b5 100644 --- a/scripts/postprocessing_upscale.py +++ b/scripts/postprocessing_upscale.py @@ -12,6 +12,19 @@ from modules.ui import switch_values_symbol upscale_cache = {} +def limitSizeByOneDemention(size: tuple, limit: int): + w, h = size + if h > w: + if h > limit: + w = limit / h * w + h = limit + else: + if w > limit: + h = limit / w * h + w = limit + return (int(w), int(h)) + + class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): name = "Upscale" order = 1000 @@ -31,6 +44,8 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): with gr.Tabs(elem_id="extras_resize_mode"): with gr.TabItem('Scale by', elem_id="extras_scale_by_tab") as tab_scale_by: upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4, elem_id="extras_upscaling_resize") + limit_target_resolution = gr.Slider(minimum=0, maximum=10000, step=8, label="Limit target resolution", value=0, elem_id="extras_upscale_limit_target_resolution", + tooltip="0 = no limit. Limit target resolution by one demension. Useful for batches where can be big images.") with gr.TabItem('Scale to', elem_id="extras_scale_to_tab") as tab_scale_to: with FormRow(): @@ -61,6 +76,7 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): "upscale_enabled": upscale_enabled, "upscale_mode": selected_tab, "upscale_by": upscaling_resize, + "limit_target_resolution": limit_target_resolution, "upscale_to_width": upscaling_resize_w, "upscale_to_height": upscaling_resize_h, "upscale_crop": upscaling_crop, @@ -69,12 +85,18 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): "upscaler_2_visibility": extras_upscaler_2_visibility, } - def upscale(self, image, info, upscaler, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop): + def upscale(self, image, info, upscaler, upscale_mode, upscale_by, limit_target_resolution, upscale_to_width, upscale_to_height, upscale_crop): if upscale_mode == 1: upscale_by = max(upscale_to_width/image.width, upscale_to_height/image.height) info["Postprocess upscale to"] = f"{upscale_to_width}x{upscale_to_height}" else: info["Postprocess upscale by"] = upscale_by + if limit_target_resolution != 0 and max(*image.size)*upscale_by > limit_target_resolution: + upscale_mode = 1 + upscale_crop = False + upscale_to_width, upscale_to_height = limitSizeByOneDemention((image.width*upscale_by, image.height*upscale_by), limit_target_resolution) + upscale_by = max(upscale_to_width/image.width, upscale_to_height/image.height) + info["Limit target resolution"] = limit_target_resolution cache_key = (hash(np.array(image.getdata()).tobytes()), upscaler.name, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop) cached_image = upscale_cache.pop(cache_key, None) @@ -96,18 +118,21 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): return image - def process_firstpass(self, pp: scripts_postprocessing.PostprocessedImage, upscale_enabled=True, upscale_mode=1, upscale_by=2.0, upscale_to_width=None, upscale_to_height=None, 
upscale_crop=False, upscaler_1_name=None, upscaler_2_name=None, upscaler_2_visibility=0.0): - if upscale_mode == 1: - pp.shared.target_width = upscale_to_width - pp.shared.target_height = upscale_to_height + def process_firstpass(self, pp: scripts_postprocessing.PostprocessedImage, **args): + if args['upscale_mode'] == 1: + pp.shared.target_width = args['upscale_to_width'] + pp.shared.target_height = args['upscale_to_height'] else: - pp.shared.target_width = int(pp.image.width * upscale_by) - pp.shared.target_height = int(pp.image.height * upscale_by) + pp.shared.target_width = int(pp.image.width * args['upscale_by']) + pp.shared.target_height = int(pp.image.height * args['upscale_by']) + if args['limit_target_resolution'] != 0: + pp.shared.target_width, pp.shared.target_height = limitSizeByOneDemention((pp.shared.target_width, pp.shared.target_height), args['limit_target_resolution']) - def process(self, pp: scripts_postprocessing.PostprocessedImage, upscale_enabled=True, upscale_mode=1, upscale_by=2.0, upscale_to_width=None, upscale_to_height=None, upscale_crop=False, upscaler_1_name=None, upscaler_2_name=None, upscaler_2_visibility=0.0): - if not upscale_enabled: + def process(self, pp: scripts_postprocessing.PostprocessedImage, **args): + if not args['upscale_enabled']: return + upscaler_1_name = args['upscaler_1_name'] if upscaler_1_name == "None": upscaler_1_name = None @@ -117,18 +142,21 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): if not upscaler1: return + upscaler_2_name = args['upscaler_2_name'] if upscaler_2_name == "None": upscaler_2_name = None upscaler2 = next(iter([x for x in shared.sd_upscalers if x.name == upscaler_2_name and x.name != "None"]), None) assert upscaler2 or (upscaler_2_name is None), f'could not find upscaler named {upscaler_2_name}' - upscaled_image = self.upscale(pp.image, pp.info, upscaler1, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop) + upscaled_image = self.upscale(pp.image, pp.info, upscaler1, args['upscale_mode'], args['upscale_by'], args['limit_target_resolution'], args['upscale_to_width'], + args['upscale_to_height'], args['upscale_crop']) pp.info["Postprocess upscaler"] = upscaler1.name - if upscaler2 and upscaler_2_visibility > 0: - second_upscale = self.upscale(pp.image, pp.info, upscaler2, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop) - upscaled_image = Image.blend(upscaled_image, second_upscale, upscaler_2_visibility) + if upscaler2 and args['upscaler_2_visibility'] > 0: + second_upscale = self.upscale(pp.image, pp.info, upscaler2, args['upscale_mode'], args['upscale_by'], args['upscale_to_width'], + args['upscale_to_height'], args['upscale_crop']) + upscaled_image = Image.blend(upscaled_image, second_upscale, args['upscaler_2_visibility']) pp.info["Postprocess upscaler 2"] = upscaler2.name @@ -163,5 +191,5 @@ class ScriptPostprocessingUpscaleSimple(ScriptPostprocessingUpscale): upscaler1 = next(iter([x for x in shared.sd_upscalers if x.name == upscaler_name]), None) assert upscaler1, f'could not find upscaler named {upscaler_name}' - pp.image = self.upscale(pp.image, pp.info, upscaler1, 0, upscale_by, 0, 0, False) + pp.image = self.upscale(pp.image, pp.info, upscaler1, 0, upscale_by, 0, 0, 0, False) pp.info["Postprocess upscaler"] = upscaler1.name From 06c5dd0907ab15a1ef608b73ec0541fc258712c8 Mon Sep 17 00:00:00 2001 From: Andray Date: Sun, 17 Mar 2024 14:28:26 +0400 Subject: [PATCH 02/65] maybe fix tests --- modules/postprocessing.py | 3 ++- 1 
file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/postprocessing.py b/modules/postprocessing.py index 754cc9e3a..0818eeebc 100644 --- a/modules/postprocessing.py +++ b/modules/postprocessing.py @@ -133,13 +133,14 @@ def run_postprocessing_webui(id_task, *args, **kwargs): return run_postprocessing(*args, **kwargs) -def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_dir, show_extras_results, gfpgan_visibility, codeformer_visibility, codeformer_weight, upscaling_resize, upscaling_resize_w, upscaling_resize_h, upscaling_crop, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility, upscale_first: bool, save_output: bool = True): +def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_dir, show_extras_results, gfpgan_visibility, codeformer_visibility, codeformer_weight, upscaling_resize, upscaling_resize_w, upscaling_resize_h, upscaling_crop, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility, upscale_first: bool, save_output: bool = True, limit_target_resolution = 0): """old handler for API""" args = scripts.scripts_postproc.create_args_for_run({ "Upscale": { "upscale_mode": resize_mode, "upscale_by": upscaling_resize, + "limit_target_resolution": limit_target_resolution, "upscale_to_width": upscaling_resize_w, "upscale_to_height": upscaling_resize_h, "upscale_crop": upscaling_crop, From 81be357925241359e6d40dd603923182faa8a2da Mon Sep 17 00:00:00 2001 From: Andray Date: Sun, 17 Mar 2024 14:51:19 +0400 Subject: [PATCH 03/65] hide limit target resolution under option --- modules/shared_options.py | 1 + scripts/postprocessing_upscale.py | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index fc9f13d6f..73ec93c22 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -102,6 +102,7 @@ options_templates.update(options_section(('upscaling', "Upscaling", "postprocess "DAT_tile_overlap": OptionInfo(8, "Tile overlap for DAT upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"), "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in shared.sd_upscalers]}), "set_scale_by_when_changing_upscaler": OptionInfo(False, "Automatically set the Scale by factor based on the name of the selected Upscaler."), + "show_limit_target_resolution_in_extras_upscale": OptionInfo(False, 'Show "Limit target resolution" slider in "Upscale" extras script. Useful for batches where can be big images.'), })) options_templates.update(options_section(('face-restoration', "Face restoration", "postprocessing"), { diff --git a/scripts/postprocessing_upscale.py b/scripts/postprocessing_upscale.py index 6259017b5..485ff7848 100644 --- a/scripts/postprocessing_upscale.py +++ b/scripts/postprocessing_upscale.py @@ -44,8 +44,11 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): with gr.Tabs(elem_id="extras_resize_mode"): with gr.TabItem('Scale by', elem_id="extras_scale_by_tab") as tab_scale_by: upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4, elem_id="extras_upscaling_resize") - limit_target_resolution = gr.Slider(minimum=0, maximum=10000, step=8, label="Limit target resolution", value=0, elem_id="extras_upscale_limit_target_resolution", - tooltip="0 = no limit. Limit target resolution by one demension. 
Useful for batches where can be big images.") + if shared.opts.show_limit_target_resolution_in_extras_upscale: + limit_target_resolution = gr.Slider(minimum=0, maximum=10000, step=8, label="Limit target resolution", value=8000, elem_id="extras_upscale_limit_target_resolution", + tooltip="0 = no limit. Limit target resolution by one demension. Useful for batches where can be big images.") + else: + limit_target_resolution = gr.Number(0, visible=False) with gr.TabItem('Scale to', elem_id="extras_scale_to_tab") as tab_scale_to: with FormRow(): From fd83d4eec3852bacbf69dbfb0486017a0bc4342f Mon Sep 17 00:00:00 2001 From: Andray Date: Sun, 17 Mar 2024 18:19:13 +0400 Subject: [PATCH 04/65] add .needs_reload_ui() --- modules/shared_options.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index 73ec93c22..050ede182 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -102,7 +102,7 @@ options_templates.update(options_section(('upscaling', "Upscaling", "postprocess "DAT_tile_overlap": OptionInfo(8, "Tile overlap for DAT upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"), "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in shared.sd_upscalers]}), "set_scale_by_when_changing_upscaler": OptionInfo(False, "Automatically set the Scale by factor based on the name of the selected Upscaler."), - "show_limit_target_resolution_in_extras_upscale": OptionInfo(False, 'Show "Limit target resolution" slider in "Upscale" extras script. Useful for batches where can be big images.'), + "show_limit_target_resolution_in_extras_upscale": OptionInfo(False, 'Show "Limit target resolution" slider in "Upscale" extras script. Useful for batches where can be big images.').needs_reload_ui(), })) options_templates.update(options_section(('face-restoration', "Face restoration", "postprocessing"), { From 203afa39c4cf144e7f373800dbf866b2d74565cb Mon Sep 17 00:00:00 2001 From: Andray Date: Mon, 18 Mar 2024 06:52:46 +0400 Subject: [PATCH 05/65] update tooltip --- scripts/postprocessing_upscale.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/postprocessing_upscale.py b/scripts/postprocessing_upscale.py index 485ff7848..3132e4991 100644 --- a/scripts/postprocessing_upscale.py +++ b/scripts/postprocessing_upscale.py @@ -46,7 +46,7 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4, elem_id="extras_upscaling_resize") if shared.opts.show_limit_target_resolution_in_extras_upscale: limit_target_resolution = gr.Slider(minimum=0, maximum=10000, step=8, label="Limit target resolution", value=8000, elem_id="extras_upscale_limit_target_resolution", - tooltip="0 = no limit. Limit target resolution by one demension. Useful for batches where can be big images.") + tooltip="0 = no limit. Limit maximal target resolution by the biggest demension. 
Useful for batches where can be big images.") else: limit_target_resolution = gr.Number(0, visible=False) From dfbdb5a135b2170c0a2330e5cf052a00784dbf74 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 25 Mar 2024 18:00:58 +0300 Subject: [PATCH 06/65] put request: gr.Request at start of img2img function similar to txt2img --- modules/img2img.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/img2img.py b/modules/img2img.py index 9e316d451..a1d042c21 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -146,7 +146,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal return batch_results -def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, mask_blur: int, mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args): +def img2img(id_task: str, request: gr.Request, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, mask_blur: int, mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, *args): override_settings = create_override_settings_dict(override_settings_texts) is_batch = mode == 5 From f62217b65d9328e96d8c8f26d12df7f90a38a8b6 Mon Sep 17 00:00:00 2001 From: Boning Date: Thu, 21 Mar 2024 15:28:38 -0700 Subject: [PATCH 07/65] minor bug fix of sd model memory management --- modules/sd_models.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index 747fc39ee..d5cccd83a 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -787,6 +787,13 @@ def reuse_model_from_already_loaded(sd_model, checkpoint_info, timer): Additionaly deletes loaded models that are over the limit set in settings (sd_checkpoints_limit). 
""" + if sd_model is not None and sd_model.sd_checkpoint_info.filename == checkpoint_info.filename: + return sd_model + + if shared.opts.sd_checkpoints_keep_in_cpu: + send_model_to_cpu(sd_model) + timer.record("send model to cpu") + already_loaded = None for i in reversed(range(len(model_data.loaded_sd_models))): loaded_model = model_data.loaded_sd_models[i] @@ -800,10 +807,6 @@ def reuse_model_from_already_loaded(sd_model, checkpoint_info, timer): send_model_to_trash(loaded_model) timer.record("send model to trash") - if shared.opts.sd_checkpoints_keep_in_cpu: - send_model_to_cpu(sd_model) - timer.record("send model to cpu") - if already_loaded is not None: send_model_to_device(already_loaded) timer.record("send model to device") From f4633cb9c03c2fc91e6862bf9bc2acab4f6ca762 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E6=80=80=E5=AE=97?= Date: Tue, 26 Mar 2024 13:53:16 +0800 Subject: [PATCH 08/65] fix: when find already_loaded model, remove loaded by array index --- modules/sd_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index b35aecbca..ba5bbea4f 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -796,7 +796,7 @@ def reuse_model_from_already_loaded(sd_model, checkpoint_info, timer): if len(model_data.loaded_sd_models) > shared.opts.sd_checkpoints_limit > 0: print(f"Unloading model {len(model_data.loaded_sd_models)} over the limit of {shared.opts.sd_checkpoints_limit}: {loaded_model.sd_checkpoint_info.title}") - model_data.loaded_sd_models.pop() + del model_data.loaded_sd_models[i] send_model_to_trash(loaded_model) timer.record("send model to trash") From c321680b3d6ea13c12d5836a2803f04cd45dba83 Mon Sep 17 00:00:00 2001 From: Andray Date: Tue, 26 Mar 2024 14:53:38 +0400 Subject: [PATCH 09/65] interrupt upscale --- modules/upscaler.py | 3 +++ modules/upscaler_utils.py | 2 ++ 2 files changed, 5 insertions(+) diff --git a/modules/upscaler.py b/modules/upscaler.py index 4ffd428c6..59f8fbbf5 100644 --- a/modules/upscaler.py +++ b/modules/upscaler.py @@ -60,6 +60,9 @@ class Upscaler: if img.width >= dest_w and img.height >= dest_h: break + if shared.state.interrupted: + break + shape = (img.width, img.height) img = self.do_upscale(img, selected_model) diff --git a/modules/upscaler_utils.py b/modules/upscaler_utils.py index 17223ca0d..5ecbbed96 100644 --- a/modules/upscaler_utils.py +++ b/modules/upscaler_utils.py @@ -69,6 +69,8 @@ def upscale_with_model( for y, h, row in grid.tiles: newrow = [] for x, w, tile in row: + if shared.state.interrupted: + return img output = upscale_pil_patch(model, tile) scale_factor = output.width // tile.width newrow.append([x * scale_factor, w * scale_factor, output]) From 16522cb0e3457c3b9dbbe961c982ca4f8e20baf4 Mon Sep 17 00:00:00 2001 From: Ikko Eltociear Ashimine Date: Wed, 27 Mar 2024 03:01:06 +0900 Subject: [PATCH 10/65] fix typo in call_queue.py amout -> amount --- modules/call_queue.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/call_queue.py b/modules/call_queue.py index bcd7c5462..b50931bcd 100644 --- a/modules/call_queue.py +++ b/modules/call_queue.py @@ -100,8 +100,8 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False): sys_pct = sys_peak/max(sys_total, 1) * 100 toltip_a = "Active: peak amount of video memory used during generation (excluding cached data)" - toltip_r = "Reserved: total amout of video memory allocated by the Torch library " - toltip_sys = "System: peak amout of video memory allocated by all 
running programs, out of total capacity" + toltip_r = "Reserved: total amount of video memory allocated by the Torch library " + toltip_sys = "System: peak amount of video memory allocated by all running programs, out of total capacity" text_a = f"A: {active_peak/1024:.2f} GB" text_r = f"R: {reserved_peak/1024:.2f} GB" From 5461b00e89eb4a8e4b66befab4be5f4103e274fd Mon Sep 17 00:00:00 2001 From: ochen1 Date: Tue, 26 Mar 2024 21:22:09 -0600 Subject: [PATCH 11/65] fix: Python version check for PyTorch installation compatibility --- webui.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.sh b/webui.sh index f116376f7..5fc1eb2e4 100755 --- a/webui.sh +++ b/webui.sh @@ -129,7 +129,7 @@ case "$gpu_info" in export HSA_OVERRIDE_GFX_VERSION=10.3.0 if [[ -z "${TORCH_COMMAND}" ]] then - pyv="$(${python_cmd} -c 'import sys; print(".".join(map(str, sys.version_info[0:2])))')" + pyv="$(${python_cmd} -c 'import sys; print(f"{sys.version_info[0]}.{sys.version_info[1]:02d}")')" if [[ $(bc <<< "$pyv <= 3.10") -eq 1 ]] then # Navi users will still use torch 1.13 because 2.0 does not seem to work. From 4e2bb7250fc959b41f0575c3f2bc23e108b4096f Mon Sep 17 00:00:00 2001 From: Andray Date: Wed, 27 Mar 2024 15:35:06 +0400 Subject: [PATCH 12/65] fix_ui_config_for_hires_sampler_and_scheduler --- modules/ui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 9b138e0aa..403425f29 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -325,8 +325,8 @@ def create_ui(): hr_checkpoint_name = gr.Dropdown(label='Checkpoint', elem_id="hr_checkpoint", choices=["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True), value="Use same checkpoint") create_refresh_button(hr_checkpoint_name, modules.sd_models.list_models, lambda: {"choices": ["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True)}, "hr_checkpoint_refresh") - hr_sampler_name = gr.Dropdown(label='Sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + sd_samplers.visible_sampler_names(), value="Use same sampler") - hr_scheduler = gr.Dropdown(label='Schedule type', elem_id="hr_scheduler", choices=["Use same scheduler"] + [x.label for x in sd_schedulers.schedulers], value="Use same scheduler") + hr_sampler_name = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + sd_samplers.visible_sampler_names(), value="Use same sampler") + hr_scheduler = gr.Dropdown(label='Hires schedule type', elem_id="hr_scheduler", choices=["Use same scheduler"] + [x.label for x in sd_schedulers.schedulers], value="Use same scheduler") with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container: with gr.Column(scale=80): From c4c8a641115b3c6c19ad02409deb80f10f2bf232 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 30 Mar 2024 07:33:39 +0300 Subject: [PATCH 13/65] restore the line lost in the merge --- webui.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.sh b/webui.sh index b348c387e..d28c7c19b 100755 --- a/webui.sh +++ b/webui.sh @@ -129,7 +129,7 @@ case "$gpu_info" in export HSA_OVERRIDE_GFX_VERSION=10.3.0 if [[ -z "${TORCH_COMMAND}" ]] then - pyv="$(${python_cmd} -c 'import sys; print(".".join(map(str, sys.version_info[0:2])))')" + pyv="$(${python_cmd} -c 'import sys; print(f"{sys.version_info[0]}.{sys.version_info[1]:02d}")')" # Using an old nightly compiled against rocm 5.2 for Navi1, see 
https://github.com/pytorch/pytorch/issues/106728#issuecomment-1749511711 if [[ $pyv == "3.8" ]] then From dcd4f880a86e500ec88ddf7eafe65894a24b85a3 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 31 Mar 2024 08:17:22 +0300 Subject: [PATCH 14/65] rework code/UI for #15293 --- modules/shared_options.py | 1 - scripts/postprocessing_upscale.py | 75 +++++++++++++++---------------- 2 files changed, 35 insertions(+), 41 deletions(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index bb1d232e2..590ae6a69 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -102,7 +102,6 @@ options_templates.update(options_section(('upscaling', "Upscaling", "postprocess "DAT_tile_overlap": OptionInfo(8, "Tile overlap for DAT upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"), "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in shared.sd_upscalers]}), "set_scale_by_when_changing_upscaler": OptionInfo(False, "Automatically set the Scale by factor based on the name of the selected Upscaler."), - "show_limit_target_resolution_in_extras_upscale": OptionInfo(False, 'Show "Limit target resolution" slider in "Upscale" extras script. Useful for batches where can be big images.').needs_reload_ui(), })) options_templates.update(options_section(('face-restoration', "Face restoration", "postprocessing"), { diff --git a/scripts/postprocessing_upscale.py b/scripts/postprocessing_upscale.py index 3132e4991..b9573a515 100644 --- a/scripts/postprocessing_upscale.py +++ b/scripts/postprocessing_upscale.py @@ -12,17 +12,15 @@ from modules.ui import switch_values_symbol upscale_cache = {} -def limitSizeByOneDemention(size: tuple, limit: int): - w, h = size - if h > w: - if h > limit: - w = limit / h * w - h = limit - else: - if w > limit: - h = limit / w * h - w = limit - return (int(w), int(h)) +def limit_size_by_one_dimention(w, h, limit): + if h > w and h > limit: + w = limit * w // h + h = limit + elif w > h and w > limit: + h = limit * h // w + w = limit + + return int(w), int(h) class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): @@ -43,12 +41,11 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): with FormRow(): with gr.Tabs(elem_id="extras_resize_mode"): with gr.TabItem('Scale by', elem_id="extras_scale_by_tab") as tab_scale_by: - upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4, elem_id="extras_upscaling_resize") - if shared.opts.show_limit_target_resolution_in_extras_upscale: - limit_target_resolution = gr.Slider(minimum=0, maximum=10000, step=8, label="Limit target resolution", value=8000, elem_id="extras_upscale_limit_target_resolution", - tooltip="0 = no limit. Limit maximal target resolution by the biggest demension. Useful for batches where can be big images.") - else: - limit_target_resolution = gr.Number(0, visible=False) + with gr.Row(): + with gr.Column(scale=3): + upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4, elem_id="extras_upscaling_resize") + with gr.Column(scale=1): + max_side_length = gr.Number(label="Max side length", value=0, elem_id="extras_upscale_max_side_length", tooltip="If any of two sides of the image ends up larger than specified, will downscale it to fit. 
0 = no limit.") with gr.TabItem('Scale to', elem_id="extras_scale_to_tab") as tab_scale_to: with FormRow(): @@ -79,7 +76,7 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): "upscale_enabled": upscale_enabled, "upscale_mode": selected_tab, "upscale_by": upscaling_resize, - "limit_target_resolution": limit_target_resolution, + "max_side_length": max_side_length, "upscale_to_width": upscaling_resize_w, "upscale_to_height": upscaling_resize_h, "upscale_crop": upscaling_crop, @@ -88,18 +85,18 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): "upscaler_2_visibility": extras_upscaler_2_visibility, } - def upscale(self, image, info, upscaler, upscale_mode, upscale_by, limit_target_resolution, upscale_to_width, upscale_to_height, upscale_crop): + def upscale(self, image, info, upscaler, upscale_mode, upscale_by, max_side_length, upscale_to_width, upscale_to_height, upscale_crop): if upscale_mode == 1: upscale_by = max(upscale_to_width/image.width, upscale_to_height/image.height) info["Postprocess upscale to"] = f"{upscale_to_width}x{upscale_to_height}" else: info["Postprocess upscale by"] = upscale_by - if limit_target_resolution != 0 and max(*image.size)*upscale_by > limit_target_resolution: + if max_side_length != 0 and max(*image.size)*upscale_by > max_side_length: upscale_mode = 1 upscale_crop = False - upscale_to_width, upscale_to_height = limitSizeByOneDemention((image.width*upscale_by, image.height*upscale_by), limit_target_resolution) + upscale_to_width, upscale_to_height = limit_size_by_one_dimention(image.width*upscale_by, image.height*upscale_by, max_side_length) upscale_by = max(upscale_to_width/image.width, upscale_to_height/image.height) - info["Limit target resolution"] = limit_target_resolution + info["Max side length"] = max_side_length cache_key = (hash(np.array(image.getdata()).tobytes()), upscaler.name, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop) cached_image = upscale_cache.pop(cache_key, None) @@ -121,21 +118,21 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): return image - def process_firstpass(self, pp: scripts_postprocessing.PostprocessedImage, **args): - if args['upscale_mode'] == 1: - pp.shared.target_width = args['upscale_to_width'] - pp.shared.target_height = args['upscale_to_height'] + def process_firstpass(self, pp: scripts_postprocessing.PostprocessedImage, upscale_enabled=True, upscale_mode=1, upscale_by=2.0, max_side_length=0, upscale_to_width=None, upscale_to_height=None, upscale_crop=False, upscaler_1_name=None, upscaler_2_name=None, upscaler_2_visibility=0.0): + if upscale_mode == 1: + pp.shared.target_width = upscale_to_width + pp.shared.target_height = upscale_to_height else: - pp.shared.target_width = int(pp.image.width * args['upscale_by']) - pp.shared.target_height = int(pp.image.height * args['upscale_by']) - if args['limit_target_resolution'] != 0: - pp.shared.target_width, pp.shared.target_height = limitSizeByOneDemention((pp.shared.target_width, pp.shared.target_height), args['limit_target_resolution']) + pp.shared.target_width = int(pp.image.width * upscale_by) + pp.shared.target_height = int(pp.image.height * upscale_by) - def process(self, pp: scripts_postprocessing.PostprocessedImage, **args): - if not args['upscale_enabled']: + pp.shared.target_width, pp.shared.target_height = limit_size_by_one_dimention(pp.shared.target_width, pp.shared.target_height, max_side_length) + + def process(self, pp: 
scripts_postprocessing.PostprocessedImage, upscale_enabled=True, upscale_mode=1, upscale_by=2.0, max_side_length=0, upscale_to_width=None, upscale_to_height=None, upscale_crop=False, upscaler_1_name=None, upscaler_2_name=None, upscaler_2_visibility=0.0): + if not upscale_enabled: return - upscaler_1_name = args['upscaler_1_name'] + upscaler_1_name = upscaler_1_name if upscaler_1_name == "None": upscaler_1_name = None @@ -145,21 +142,19 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): if not upscaler1: return - upscaler_2_name = args['upscaler_2_name'] + upscaler_2_name = upscaler_2_name if upscaler_2_name == "None": upscaler_2_name = None upscaler2 = next(iter([x for x in shared.sd_upscalers if x.name == upscaler_2_name and x.name != "None"]), None) assert upscaler2 or (upscaler_2_name is None), f'could not find upscaler named {upscaler_2_name}' - upscaled_image = self.upscale(pp.image, pp.info, upscaler1, args['upscale_mode'], args['upscale_by'], args['limit_target_resolution'], args['upscale_to_width'], - args['upscale_to_height'], args['upscale_crop']) + upscaled_image = self.upscale(pp.image, pp.info, upscaler1, upscale_mode, upscale_by, max_side_length, upscale_to_width, upscale_to_height, upscale_crop) pp.info["Postprocess upscaler"] = upscaler1.name - if upscaler2 and args['upscaler_2_visibility'] > 0: - second_upscale = self.upscale(pp.image, pp.info, upscaler2, args['upscale_mode'], args['upscale_by'], args['upscale_to_width'], - args['upscale_to_height'], args['upscale_crop']) - upscaled_image = Image.blend(upscaled_image, second_upscale, args['upscaler_2_visibility']) + if upscaler2 and upscaler_2_visibility > 0: + second_upscale = self.upscale(pp.image, pp.info, upscaler2, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop) + upscaled_image = Image.blend(upscaled_image, second_upscale, upscaler_2_visibility) pp.info["Postprocess upscaler 2"] = upscaler2.name From bfa20d2758b654b8522cb4bcd6af08e1e36fd7cb Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 31 Mar 2024 08:20:19 +0300 Subject: [PATCH 15/65] resize Max side length field --- scripts/postprocessing_upscale.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/postprocessing_upscale.py b/scripts/postprocessing_upscale.py index b9573a515..9b5c8c5e5 100644 --- a/scripts/postprocessing_upscale.py +++ b/scripts/postprocessing_upscale.py @@ -42,10 +42,10 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): with gr.Tabs(elem_id="extras_resize_mode"): with gr.TabItem('Scale by', elem_id="extras_scale_by_tab") as tab_scale_by: with gr.Row(): - with gr.Column(scale=3): + with gr.Column(scale=4): upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4, elem_id="extras_upscaling_resize") - with gr.Column(scale=1): - max_side_length = gr.Number(label="Max side length", value=0, elem_id="extras_upscale_max_side_length", tooltip="If any of two sides of the image ends up larger than specified, will downscale it to fit. 0 = no limit.") + with gr.Column(scale=1, min_width=160): + max_side_length = gr.Number(label="Max side length", value=0, elem_id="extras_upscale_max_side_length", tooltip="If any of two sides of the image ends up larger than specified, will downscale it to fit. 
0 = no limit.", min_width=160) with gr.TabItem('Scale to', elem_id="extras_scale_to_tab") as tab_scale_to: with FormRow(): From f1a6c5fe17ce316b3617b6e23c0e81c623089ccf Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 31 Mar 2024 08:30:00 +0300 Subject: [PATCH 16/65] add an option to hide postprocessing options in Extras tab --- modules/scripts_postprocessing.py | 6 ++++-- modules/shared_options.py | 1 + 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/modules/scripts_postprocessing.py b/modules/scripts_postprocessing.py index 901cad080..4b3b7afda 100644 --- a/modules/scripts_postprocessing.py +++ b/modules/scripts_postprocessing.py @@ -143,6 +143,7 @@ class ScriptPostprocessingRunner: self.initialize_scripts(modules.scripts.postprocessing_scripts_data) scripts_order = shared.opts.postprocessing_operation_order + scripts_filter_out = set(shared.opts.postprocessing_disable_in_extras) def script_score(name): for i, possible_match in enumerate(scripts_order): @@ -151,9 +152,10 @@ class ScriptPostprocessingRunner: return len(self.scripts) - script_scores = {script.name: (script_score(script.name), script.order, script.name, original_index) for original_index, script in enumerate(self.scripts)} + filtered_scripts = [script for script in self.scripts if script.name not in scripts_filter_out] + script_scores = {script.name: (script_score(script.name), script.order, script.name, original_index) for original_index, script in enumerate(filtered_scripts)} - return sorted(self.scripts, key=lambda x: script_scores[x.name]) + return sorted(filtered_scripts, key=lambda x: script_scores[x.name]) def setup_ui(self): inputs = [] diff --git a/modules/shared_options.py b/modules/shared_options.py index 590ae6a69..a2b595ff3 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -383,6 +383,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" options_templates.update(options_section(('postprocessing', "Postprocessing", "postprocessing"), { 'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}), + 'postprocessing_disable_in_extras': OptionInfo([], "Disable postprocessing operations in extras tab", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}), 'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}), 'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), 'postprocessing_existing_caption_action': OptionInfo("Ignore", "Action for existing captions", gr.Radio, {"choices": ["Ignore", "Keep", "Prepend", "Append"]}).info("when generating captions using postprocessing; Ignore = use generated; Keep = use original; Prepend/Append = combine both"), From ea83180761d450690cf590d4fd9b582241a9846d Mon Sep 17 00:00:00 2001 From: DrBiggusDickus Date: Sun, 31 Mar 2024 14:41:06 +0200 Subject: [PATCH 17/65] fix CodeFormer weight --- modules/codeformer_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py index 44b84618e..0b353353b 100644 --- a/modules/codeformer_model.py +++ b/modules/codeformer_model.py @@ -50,7 
+50,7 @@ class FaceRestorerCodeFormer(face_restoration_utils.CommonFaceRestoration): def restore_face(cropped_face_t): assert self.net is not None - return self.net(cropped_face_t, w=w, adain=True)[0] + return self.net(cropped_face_t, weight=w, adain=True)[0] return self.restore_with_helper(np_image, restore_face) From 4ccbae320e0dddccd78edcb328f5ad160ec474af Mon Sep 17 00:00:00 2001 From: Andray Date: Sun, 31 Mar 2024 17:05:15 +0400 Subject: [PATCH 18/65] fix dcd4f880a86e500ec88ddf7eafe65894a24b85a3 --- scripts/postprocessing_upscale.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/postprocessing_upscale.py b/scripts/postprocessing_upscale.py index 9b5c8c5e5..0d7a19c62 100644 --- a/scripts/postprocessing_upscale.py +++ b/scripts/postprocessing_upscale.py @@ -16,7 +16,7 @@ def limit_size_by_one_dimention(w, h, limit): if h > w and h > limit: w = limit * w // h h = limit - elif w > h and w > limit: + elif w > limit: h = limit * h // w w = limit From 0a7d1e756f335199f3a42f6ce6a7e88093a56c96 Mon Sep 17 00:00:00 2001 From: Andray Date: Sun, 31 Mar 2024 19:34:58 +0400 Subject: [PATCH 19/65] fix upscaler 2 --- scripts/postprocessing_upscale.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/postprocessing_upscale.py b/scripts/postprocessing_upscale.py index 0d7a19c62..9628c4e9a 100644 --- a/scripts/postprocessing_upscale.py +++ b/scripts/postprocessing_upscale.py @@ -153,7 +153,7 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): pp.info["Postprocess upscaler"] = upscaler1.name if upscaler2 and upscaler_2_visibility > 0: - second_upscale = self.upscale(pp.image, pp.info, upscaler2, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop) + second_upscale = self.upscale(pp.image, pp.info, upscaler2, upscale_mode, upscale_by, max_side_length, upscale_to_width, upscale_to_height, upscale_crop) upscaled_image = Image.blend(upscaled_image, second_upscale, upscaler_2_visibility) pp.info["Postprocess upscaler 2"] = upscaler2.name From e73a7e40067c3beb9aacc9d27129354446098edb Mon Sep 17 00:00:00 2001 From: storyicon Date: Mon, 1 Apr 2024 09:13:07 +0000 Subject: [PATCH 20/65] feat: ensure the indexability of dynamically imported packages Signed-off-by: storyicon --- modules/script_loading.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/modules/script_loading.py b/modules/script_loading.py index 0d55f1932..2bd26f013 100644 --- a/modules/script_loading.py +++ b/modules/script_loading.py @@ -2,13 +2,18 @@ import os import importlib.util from modules import errors - +import sys def load_module(path): module_spec = importlib.util.spec_from_file_location(os.path.basename(path), path) module = importlib.util.module_from_spec(module_spec) module_spec.loader.exec_module(module) - + if os.path.isfile(path): + sp = os.path.splitext(path) + module_name = sp[0] + else: + module_name = os.path.basename(path) + sys.modules[module_name] = module return module From 86861f8379e92a778f886ecf49f0d28380df2933 Mon Sep 17 00:00:00 2001 From: Andray Date: Mon, 1 Apr 2024 13:58:45 +0400 Subject: [PATCH 21/65] fix upscaler 2 images do not match --- scripts/postprocessing_upscale.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/scripts/postprocessing_upscale.py b/scripts/postprocessing_upscale.py index 9628c4e9a..2409fd207 100644 --- a/scripts/postprocessing_upscale.py +++ b/scripts/postprocessing_upscale.py @@ -45,7 +45,7 @@ class 
ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): with gr.Column(scale=4): upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4, elem_id="extras_upscaling_resize") with gr.Column(scale=1, min_width=160): - max_side_length = gr.Number(label="Max side length", value=0, elem_id="extras_upscale_max_side_length", tooltip="If any of two sides of the image ends up larger than specified, will downscale it to fit. 0 = no limit.", min_width=160) + max_side_length = gr.Number(label="Max side length", value=0, elem_id="extras_upscale_max_side_length", tooltip="If any of two sides of the image ends up larger than specified, will downscale it to fit. 0 = no limit.", min_width=160, step=8, minimum=0) with gr.TabItem('Scale to', elem_id="extras_scale_to_tab") as tab_scale_to: with FormRow(): @@ -154,6 +154,8 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): if upscaler2 and upscaler_2_visibility > 0: second_upscale = self.upscale(pp.image, pp.info, upscaler2, upscale_mode, upscale_by, max_side_length, upscale_to_width, upscale_to_height, upscale_crop) + if upscaled_image.mode != second_upscale.mode: + second_upscale = second_upscale.convert(upscaled_image.mode) upscaled_image = Image.blend(upscaled_image, second_upscale, upscaler_2_visibility) pp.info["Postprocess upscaler 2"] = upscaler2.name From a669b8a6bcf0cf13e70ded68f8c35dbd8133c068 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Mon, 1 Apr 2024 12:51:09 -0700 Subject: [PATCH 22/65] fix: remove script callbacks in ordered_callbacks_map --- modules/script_callbacks.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index d5a97ecff..74f41f09d 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -439,6 +439,9 @@ def remove_current_script_callbacks(): for callback_list in callback_map.values(): for callback_to_remove in [cb for cb in callback_list if cb.script == filename]: callback_list.remove(callback_to_remove) + for ordered_callbacks_list in ordered_callbacks_map.values(): + for callback_to_remove in [cb for cb in ordered_callbacks_list if cb.script == filename]: + ordered_callbacks_list.remove(callback_to_remove) def remove_callbacks_for_function(callback_func): From b372fb6165c2b0f3990d9a3ff06245d25358c0b8 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 1 Apr 2024 23:33:45 +0300 Subject: [PATCH 23/65] fix API upscale --- modules/postprocessing.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/postprocessing.py b/modules/postprocessing.py index 299332320..ab3274df3 100644 --- a/modules/postprocessing.py +++ b/modules/postprocessing.py @@ -131,14 +131,14 @@ def run_postprocessing_webui(id_task, *args, **kwargs): return run_postprocessing(*args, **kwargs) -def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_dir, show_extras_results, gfpgan_visibility, codeformer_visibility, codeformer_weight, upscaling_resize, upscaling_resize_w, upscaling_resize_h, upscaling_crop, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility, upscale_first: bool, save_output: bool = True, limit_target_resolution = 0): +def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_dir, show_extras_results, gfpgan_visibility, codeformer_visibility, codeformer_weight, upscaling_resize, upscaling_resize_w, upscaling_resize_h, upscaling_crop, 
extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility, upscale_first: bool, save_output: bool = True, max_side_length: int = 0): """old handler for API""" args = scripts.scripts_postproc.create_args_for_run({ "Upscale": { "upscale_mode": resize_mode, "upscale_by": upscaling_resize, - "limit_target_resolution": limit_target_resolution, + "max_side_length": max_side_length, "upscale_to_width": upscaling_resize_w, "upscale_to_height": upscaling_resize_h, "upscale_crop": upscaling_crop, From 92e6aa36537120a4b27944181b4b1cb1c1b80a1d Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Fri, 5 Apr 2024 15:51:42 +0900 Subject: [PATCH 24/65] open_folder as util --- modules/ui_common.py | 31 ++----------------------------- modules/util.py | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 29 deletions(-) diff --git a/modules/ui_common.py b/modules/ui_common.py index cf1b8b32c..dcd2d3718 100644 --- a/modules/ui_common.py +++ b/modules/ui_common.py @@ -3,13 +3,10 @@ import dataclasses import json import html import os -import platform -import sys import gradio as gr -import subprocess as sp -from modules import call_queue, shared, ui_tempdir +from modules import call_queue, shared, ui_tempdir, util from modules.infotext_utils import image_from_url_text import modules.images from modules.ui_components import ToolButton @@ -176,31 +173,7 @@ def create_output_panel(tabname, outdir, toprow=None): except Exception: pass - if not os.path.exists(f): - msg = f'Folder "{f}" does not exist. After you create an image, the folder will be created.' - print(msg) - gr.Info(msg) - return - elif not os.path.isdir(f): - msg = f""" -WARNING -An open_folder request was made with an argument that is not a folder. -This could be an error or a malicious attempt to run code on your computer. -Requested path was: {f} -""" - print(msg, file=sys.stderr) - gr.Warning(msg) - return - - path = os.path.normpath(f) - if platform.system() == "Windows": - os.startfile(path) - elif platform.system() == "Darwin": - sp.Popen(["open", path]) - elif "microsoft-standard-WSL2" in platform.uname().release: - sp.Popen(["wsl-open", path]) - else: - sp.Popen(["xdg-open", path]) + util.open_folder(f) with gr.Column(elem_id=f"{tabname}_results"): if toprow: diff --git a/modules/util.py b/modules/util.py index 8d1aea44f..19685a8e8 100644 --- a/modules/util.py +++ b/modules/util.py @@ -136,3 +136,36 @@ class MassFileLister: def reset(self): """Clear the cache of all directories.""" self.cached_dirs.clear() + +def open_folder(path): + # import at function level to avoid potential issues + import gradio as gr + import platform + import sys + import subprocess + + if not os.path.exists(path): + msg = f'Folder "{path}" does not exist. after you save an image, the folder will be created.' + print(msg) + gr.Info(msg) + return + elif not os.path.isdir(path): + msg = f""" +WARNING +An open_folder request was made with an path that is not a folder. +This could be an error or a malicious attempt to run code on your computer. 
+Requested path was: {path} +""" + print(msg, file=sys.stderr) + gr.Warning(msg) + return + + path = os.path.normpath(path) + if platform.system() == "Windows": + os.startfile(path) + elif platform.system() == "Darwin": + subprocess.Popen(["open", path]) + elif "microsoft-standard-WSL2" in platform.uname().release: + subprocess.Popen(["wsl-open", path]) + else: + subprocess.Popen(["xdg-open", path]) From 20123d427b09901396133643be78f6b692393b0c Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Fri, 5 Apr 2024 16:19:20 +0900 Subject: [PATCH 25/65] open_folder docstring --- modules/util.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/util.py b/modules/util.py index b0da19905..b6e6010c9 100644 --- a/modules/util.py +++ b/modules/util.py @@ -174,6 +174,7 @@ def topological_sort(dependencies): def open_folder(path): + """Open a folder in the file manager of the respect OS.""" # import at function level to avoid potential issues import gradio as gr import platform From 989b89b12a8c4255f91254bb32da8cfa72f72122 Mon Sep 17 00:00:00 2001 From: Marsel Markhabulin Date: Fri, 5 Apr 2024 12:42:08 +0300 Subject: [PATCH 26/65] Use HF_ENDPOINT variable for HuggingFace domain with default Modified the list_models function to dynamically construct the model URL by using an environment variable for the HuggingFace domain. This allows for greater flexibility in specifying the domain and ensures that the modification is also compatible with the Hub client library. By supporting different environments or requirements without hardcoding the domain name, this change facilitates the use of custom HuggingFace domains not only within our code but also when interacting with the Hub client library. --- modules/sd_models.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index 747fc39ee..61f2b2ac0 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -8,6 +8,7 @@ import re import safetensors.torch from omegaconf import OmegaConf, ListConfig from os import mkdir +from os import getenv from urllib import request import ldm.modules.midas as midas @@ -151,7 +152,8 @@ def list_models(): if shared.cmd_opts.no_download_sd_model or cmd_ckpt != shared.sd_model_file or os.path.exists(cmd_ckpt): model_url = None else: - model_url = "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors" + hugging_host = getenv('HF_ENDPOINT', 'https://huggingface.co') + model_url = f"{hugging_host}/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors" model_list = modelloader.load_models(model_path=model_path, model_url=model_url, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.ckpt", ".vae.safetensors"]) From acb20338b1c7e93927c8456e04cb2842d5798aff Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 6 Apr 2024 08:53:21 +0300 Subject: [PATCH 27/65] put HF_ENDPOINT into shared for #15443 --- modules/sd_models.py | 9 +++------ modules/shared.py | 2 ++ 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index 1e0003ec0..ff245b7a6 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -1,5 +1,5 @@ import collections -import os.path +import os import sys import threading @@ -7,8 +7,6 @@ import torch import re import safetensors.torch from omegaconf import OmegaConf, ListConfig 
-from os import mkdir -from os import getenv from urllib import request import ldm.modules.midas as midas @@ -152,8 +150,7 @@ def list_models(): if shared.cmd_opts.no_download_sd_model or cmd_ckpt != shared.sd_model_file or os.path.exists(cmd_ckpt): model_url = None else: - hugging_host = getenv('HF_ENDPOINT', 'https://huggingface.co') - model_url = f"{hugging_host}/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors" + model_url = f"{shared.hf_endpoint}/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors" model_list = modelloader.load_models(model_path=model_path, model_url=model_url, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.ckpt", ".vae.safetensors"]) @@ -510,7 +507,7 @@ def enable_midas_autodownload(): path = midas.api.ISL_PATHS[model_type] if not os.path.exists(path): if not os.path.exists(midas_path): - mkdir(midas_path) + os.mkdir(midas_path) print(f"Downloading midas model weights for {model_type} to {path}") request.urlretrieve(midas_urls[model_type], path) diff --git a/modules/shared.py b/modules/shared.py index 4cf7f6a81..a41cd457c 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -90,3 +90,5 @@ list_checkpoint_tiles = shared_items.list_checkpoint_tiles refresh_checkpoints = shared_items.refresh_checkpoints list_samplers = shared_items.list_samplers reload_hypernetworks = shared_items.reload_hypernetworks + +hf_endpoint = os.getenv('HF_ENDPOINT', 'https://huggingface.co') From 23c06a51ccc54d76db465f8f66a5dcb0f7ca33e5 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 6 Apr 2024 09:05:04 +0300 Subject: [PATCH 28/65] use 'scripts.' prefix for names of dynamically loaded modules --- modules/script_loading.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/modules/script_loading.py b/modules/script_loading.py index 2bd26f013..17f658b15 100644 --- a/modules/script_loading.py +++ b/modules/script_loading.py @@ -4,16 +4,20 @@ import importlib.util from modules import errors import sys + +loaded_scripts = {} + + def load_module(path): module_spec = importlib.util.spec_from_file_location(os.path.basename(path), path) module = importlib.util.module_from_spec(module_spec) module_spec.loader.exec_module(module) - if os.path.isfile(path): - sp = os.path.splitext(path) - module_name = sp[0] - else: - module_name = os.path.basename(path) - sys.modules[module_name] = module + + loaded_scripts[path] = module + + module_name, _ = os.path.splitext(os.path.basename(path)) + sys.modules["scripts." 
+ module_name] = module + return module From 2ad17a6100be887616e91a84bd5ecb39d0771155 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 6 Apr 2024 15:56:57 +0900 Subject: [PATCH 29/65] re-add update_file_entry MassFileLister.update_file_entry was accidentally removed in https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15205/files#diff-c39b942d8f8620d46d314db8301189b8d6195fc97aedbeb124a33694b738d69cL151-R173 --- modules/util.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/modules/util.py b/modules/util.py index b6e6010c9..0db13736c 100644 --- a/modules/util.py +++ b/modules/util.py @@ -148,6 +148,11 @@ class MassFileLister: """Clear the cache of all directories.""" self.cached_dirs.clear() + def update_file_entry(self, path): + """Update the cache for a specific directory.""" + dirname, filename = os.path.split(path) + if cached_dir := self.cached_dirs.get(dirname): + cached_dir.update_entry(filename) def topological_sort(dependencies): """Accepts a dictionary mapping name to its dependencies, returns a list of names ordered according to dependencies. From e1640314df34b4cc8ed0422ab04a2886a3d9beb3 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 6 Apr 2024 21:46:56 +0300 Subject: [PATCH 30/65] 1.9.0 changelog --- CHANGELOG.md | 120 ++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 118 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0df47801b..362b4861f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,120 @@ -## 1.8.0-RC +## 1.9.0 + +### Features: +* Make refiner switchover based on model timesteps instead of sampling steps ([#14978](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14978)) +* add an option to have old-style directory view instead of tree view; stylistic changes for extra network sorting/search controls +* add UI for reordering callbacks, support for specifying callback order in extension metadata ([#15205](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15205)) +* Sgm uniform scheduler for SDXL-Lightning models ([#15325](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15325)) +* Scheduler selection in main UI ([#15333](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15333), [#15361](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15361), [#15394](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15394)) + +### Minor: +* "open images directory" button now opens the actual dir ([#14947](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14947)) +* Support inference with LyCORIS BOFT networks ([#14871](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14871), [#14973](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14973)) +* make extra network card description plaintext by default, with an option to re-enable HTML as it was +* resize handle for extra networks ([#15041](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15041)) +* cmd args: `--unix-filenames-sanitization` and `--filenames-max-length` ([#15031](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15031)) +* show extra networks parameters in HTML table rather than raw JSON ([#15131](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15131)) +* Add DoRA (weight-decompose) support for LoRA/LoHa/LoKr ([#15160](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15160), 
[#15283](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15283))
+* Add '--no-prompt-history' cmd arg to disable last generation prompt history ([#15189](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15189))
+* update preview on Replace Preview ([#15201](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15201))
+* only fetch updates for extensions' active git branches ([#15233](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15233))
+* put upscale postprocessing UI into an accordion ([#15223](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15223))
+* Support dragdrop for URLs to read infotext ([#15262](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15262))
+* use diskcache library for caching ([#15287](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15287), [#15299](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15299))
+* Allow PNG-RGBA for Extras Tab ([#15334](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15334))
+* Support cover images embedded in safetensors metadata ([#15319](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15319))
+* faster interrupt when using NN upscale ([#15380](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15380))
+* Extras upscaler: an input field to limit maximum side length for the output image ([#15293](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15293), [#15415](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15415), [#15417](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15417), [#15425](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15425))
+* add an option to hide postprocessing options in Extras tab
+
+### Extensions and API:
+* ResizeHandleRow - allow overridden column scale parameter ([#15004](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15004))
+* call script_callbacks.ui_settings_callback earlier; fix extra-options-section built-in extension killing the ui if using a setting that doesn't exist
+* make it possible to use zoom.js outside webui context ([#15286](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15286), [#15288](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15288))
+* allow variants for extension name in metadata.ini ([#15290](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15290))
+* make reloading UI scripts optional when doing Reload UI, and off by default
+* put request: gr.Request at start of img2img function similar to txt2img
+* open_folder as util ([#15442](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15442))
+* make it possible to import extensions' script files as `import scripts.` ([#15423](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15423))
+
+### Performance:
+* performance optimization for extra networks HTML pages
+* optimization for extra networks filtering
+* optimization for extra networks sorting
+
+### Bug Fixes:
+* prevent escape button causing an interrupt when no generation has been made yet
+* [bug] avoid double upscaling in inpaint ([#14966](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14966))
+* possible fix for reload button not appearing in some cases for extra networks.
+* fix: the `split_threshold` parameter does not work when running Split oversized images ([#15006](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15006))
+* Fix resize-handle visibility for vertical layout (mobile) ([#15010](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15010))
+* register_tmp_file also for mtime ([#15012](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15012))
+* Protect alphas_cumprod during refiner switchover ([#14979](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14979))
+* Fix EXIF orientation in API image loading ([#15062](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15062))
+* Only override emphasis if actually used in prompt ([#15141](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15141))
+* Fix emphasis infotext missing from `params.txt` ([#15142](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15142))
+* fix extract_style_text_from_prompt #15132 ([#15135](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15135))
+* Fix Soft Inpaint for AnimateDiff ([#15148](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15148))
+* edit-attention: deselect surrounding whitespace ([#15178](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15178))
+* chore: fix font not loaded ([#15183](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15183))
+* use natural sort in extra networks when ordering by path
+* Fix built-in lora system bugs caused by torch.nn.MultiheadAttention ([#15190](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15190))
+* Avoid error from None in get_learned_conditioning ([#15191](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15191))
+* Add entry to MassFileLister after writing metadata ([#15199](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15199))
+* fix issue with Styles when Hires prompt is used ([#15269](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15269), [#15276](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15276))
+* Strip comments from hires fix prompt ([#15263](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15263))
+* Make imageviewer event listeners browser consistent ([#15261](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15261))
+* Fix AttributeError in OFT when trying to get MultiheadAttention weight ([#15260](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15260))
+* Add missing .mean() back ([#15239](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15239))
+* fix "Restore progress" button ([#15221](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15221))
+* fix ui-config for InputAccordion [custom_script_source] ([#15231](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15231))
+* handle 0 wheel deltaY ([#15268](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15268))
+* prevent alt menu for firefox ([#15267](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15267))
+* fix: fix syntax errors ([#15179](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15179))
+* restore outputs path ([#15307](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15307))
+* Escape btn_copy_path filename ([#15316](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15316))
+* Fix extra networks buttons when filename contains an apostrophe ([#15331](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15331))
+* 
escape brackets in lora random prompt generator ([#15343](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15343)) +* fix: Python version check for PyTorch installation compatibility ([#15390](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15390)) +* fix typo in call_queue.py ([#15386](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15386)) +* fix: when find already_loaded model, remove loaded by array index ([#15382](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15382)) +* minor bug fix of sd model memory management ([#15350](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15350)) +* Fix CodeFormer weight ([#15414](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15414)) +* Fix: Remove script callbacks in ordered_callbacks_map ([#15428](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15428)) + +### Hardware: +* Add training support and change lspci for Ascend NPU ([#14981](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14981)) +* Update to ROCm5.7 and PyTorch ([#14820](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14820)) +* Better workaround for Navi1, removing --pre for Navi3 ([#15224](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15224)) +* Ascend NPU wiki page ([#15228](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15228)) + +### Other: +* Update comment for Pad prompt/negative prompt v0 to add a warning about truncation, make it override the v1 implementation +* support resizable columns for touch (tablets) ([#15002](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15002)) +* Fix #14591 using translated content to do categories mapping ([#14995](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14995)) +* Use `absolute` path for normalized filepath ([#15035](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15035)) +* resizeHandle handle double tap ([#15065](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15065)) +* --dat-models-path cmd flag ([#15039](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15039)) +* Add a direct link to the binary release ([#15059](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15059)) +* upscaler_utils: Reduce logging ([#15084](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15084)) +* Fix various typos with crate-ci/typos ([#15116](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15116)) +* fix_jpeg_live_preview ([#15102](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15102)) +* [alternative fix] can't load webui if selected wrong extra option in ui ([#15121](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15121)) +* Error handling for unsupported transparency ([#14958](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14958)) +* Add model description to searched terms ([#15198](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15198)) +* bump action version ([#15272](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15272)) +* PEP 604 annotations ([#15259](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15259)) +* Automatically Set the Scale by value when user selects an Upscale Model ([#15244](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15244)) +* move postprocessing-for-training into builtin extensions ([#15222](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15222)) +* type hinting in shared.py 
([#15211](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15211)) +* update ruff to 0.3.3 +* Update pytorch lightning utilities ([#15310](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15310)) +* Add Size as an XYZ Grid option ([#15354](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15354)) +* Use HF_ENDPOINT variable for HuggingFace domain with default ([#15443](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15443)) +* re-add update_file_entry ([#15446](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15446)) + + +## 1.8.0 ### Features: * Update torch to version 2.1.2 @@ -61,7 +177,7 @@ * add before_token_counter callback and use it for prompt comments * ResizeHandleRow - allow overridden column scale parameter ([#15004](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15004)) -### Performance +### Performance: * Massive performance improvement for extra networks directories with a huge number of files in them in an attempt to tackle #14507 ([#14528](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14528)) * Reduce unnecessary re-indexing extra networks directory ([#14512](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14512)) * Avoid unnecessary `isfile`/`exists` calls ([#14527](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14527)) From 6efdfe3234d0510a6fd522c31dc62366d363ae73 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 7 Apr 2024 22:58:12 +0900 Subject: [PATCH 31/65] if use use_main_prompt index = 0 --- modules/processing.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index 2baca4f5f..c1e689c37 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -704,7 +704,9 @@ def program_version(): def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False, index=None, all_negative_prompts=None, all_hr_prompts=None, all_hr_negative_prompts=None): - if index is None: + if use_main_prompt: + index = 0 + elif index is None: index = position_in_batch + iteration * p.batch_size if all_negative_prompts is None: From 47ed9b2d398287f14a6dcab6fa4fe8b78bccf1c8 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Mon, 8 Apr 2024 01:39:31 +0900 Subject: [PATCH 32/65] allow list or callables in generation_params --- modules/processing.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/modules/processing.py b/modules/processing.py index c1e689c37..50570c49b 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -756,6 +756,16 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "User": p.user if opts.add_user_name_to_info else None, } + for key, value in generation_params.items(): + try: + if isinstance(value, list): + generation_params[key] = value[index] + elif callable(value): + generation_params[key] = value(**locals()) + except Exception: + errors.report(f'Error creating infotext for key "{key}"', exc_info=True) + generation_params[key] = None + if all_hr_prompts := all_hr_prompts or getattr(p, 'all_hr_prompts', None): generation_params['Hires prompt'] = all_hr_prompts[index] if all_hr_prompts[index] != all_prompts[index] else None if all_hr_negative_prompts := all_hr_negative_prompts or getattr(p, 'all_hr_negative_prompts', None): From 219e64489c9c2a712dc31fe254d1a32ce3caf7c2 Mon Sep 17 00:00:00 2001 
From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Mon, 8 Apr 2024 01:41:52 +0900 Subject: [PATCH 33/65] re-work extra_generation_params for Hires prompt --- modules/processing.py | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 50570c49b..97739fcb3 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -703,7 +703,7 @@ def program_version(): return res -def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False, index=None, all_negative_prompts=None, all_hr_prompts=None, all_hr_negative_prompts=None): +def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False, index=None, all_negative_prompts=None): if use_main_prompt: index = 0 elif index is None: @@ -717,6 +717,9 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter token_merging_ratio = p.get_token_merging_ratio() token_merging_ratio_hr = p.get_token_merging_ratio(for_hr=True) + prompt_text = p.main_prompt if use_main_prompt else all_prompts[index] + negative_prompt = p.main_negative_prompt if use_main_prompt else all_negative_prompts[index] + uses_ensd = opts.eta_noise_seed_delta != 0 if uses_ensd: uses_ensd = sd_samplers_common.is_sampler_using_eta_noise_seed_delta(p) @@ -749,8 +752,6 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "RNG": opts.randn_source if opts.randn_source != "GPU" else None, "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, "Tiling": "True" if p.tiling else None, - "Hires prompt": None, # This is set later, insert here to keep order - "Hires negative prompt": None, # This is set later, insert here to keep order **p.extra_generation_params, "Version": program_version() if opts.add_version_to_infotext else None, "User": p.user if opts.add_user_name_to_info else None, @@ -766,15 +767,9 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter errors.report(f'Error creating infotext for key "{key}"', exc_info=True) generation_params[key] = None - if all_hr_prompts := all_hr_prompts or getattr(p, 'all_hr_prompts', None): - generation_params['Hires prompt'] = all_hr_prompts[index] if all_hr_prompts[index] != all_prompts[index] else None - if all_hr_negative_prompts := all_hr_negative_prompts or getattr(p, 'all_hr_negative_prompts', None): - generation_params['Hires negative prompt'] = all_hr_negative_prompts[index] if all_hr_negative_prompts[index] != all_negative_prompts[index] else None - generation_params_text = ", ".join([k if k == v else f'{k}: {infotext_utils.quote(v)}' for k, v in generation_params.items() if v is not None]) - prompt_text = p.main_prompt if use_main_prompt else all_prompts[index] - negative_prompt_text = f"\nNegative prompt: {p.main_negative_prompt if use_main_prompt else all_negative_prompts[index]}" if all_negative_prompts[index] else "" + negative_prompt_text = f"\nNegative prompt: {negative_prompt}" if negative_prompt else "" return f"{prompt_text}{negative_prompt_text}\n{generation_params_text}".strip() @@ -1216,6 +1211,17 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if self.hr_sampler_name is not None and self.hr_sampler_name != self.sampler_name: self.extra_generation_params["Hires sampler"] = self.hr_sampler_name + def get_hr_prompt(p, index, prompt_text, **kwargs): + hr_prompt = p.all_hr_prompts[index] + 
return hr_prompt if hr_prompt != prompt_text else None
+
+        def get_hr_negative_prompt(p, index, negative_prompt, **kwargs):
+            hr_negative_prompt = p.all_hr_negative_prompts[index]
+            return hr_negative_prompt if hr_negative_prompt != negative_prompt else None
+
+        self.extra_generation_params["Hires prompt"] = get_hr_prompt
+        self.extra_generation_params["Hires negative prompt"] = get_hr_negative_prompt
+
         self.extra_generation_params["Hires schedule type"] = None  # to be set in sd_samplers_kdiffusion.py
 
         if self.hr_scheduler is None:

From 1e1176b6eb4b0054ad44b29787cb8e9217da5daf Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Mon, 8 Apr 2024 18:18:33 +0900
Subject: [PATCH 34/65] non-serializable as None

---
 modules/processing.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/processing.py b/modules/processing.py
index 97739fcb3..bae00d74a 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -608,7 +608,7 @@ class Processed:
             "version": self.version,
         }
 
-        return json.dumps(obj)
+        return json.dumps(obj, default=lambda o: None)
 
     def infotext(self, p: StableDiffusionProcessing, index):
         return create_infotext(p, self.all_prompts, self.all_seeds, self.all_subseeds, comments=[], position_in_batch=index % self.batch_size, iteration=index // self.batch_size)

From e3aabe6959c2088f6b6e917b4f84185ae95a3af6 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Mon, 8 Apr 2024 19:48:38 +0900
Subject: [PATCH 35/65] add documentation for create_infotext

---
 modules/processing.py | 44 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 44 insertions(+)

diff --git a/modules/processing.py b/modules/processing.py
index bae00d74a..d8ba5ca4d 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -704,6 +704,50 @@ def program_version():
 
 
 def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False, index=None, all_negative_prompts=None):
+    """
+    This function is used to generate the infotext that is stored in the generated images; it contains the parameters that are required to generate the image.
+    Args:
+        p: StableDiffusionProcessing
+        all_prompts: list[str]
+        all_seeds: list[int]
+        all_subseeds: list[int]
+        comments: list[str]
+        iteration: int
+        position_in_batch: int
+        use_main_prompt: bool
+        index: int
+        all_negative_prompts: list[str]
+
+    Returns: str
+
+    Extra generation params
+    The p.extra_generation_params dictionary allows for additional parameters to be added to the infotext;
+    this can be used by the base webui or by extensions.
+    To add a new entry, add a new key-value pair; the dictionary key will be used as the key of the parameter in the infotext.
+    The value of the entry can be defined as:
+        - str | None
+        - List[str|None]
+        - callable func(**kwargs) -> str | None
+
+    When defined as a string, it will be used as-is without extra processing; this is the most common use case.
+
+    Defining it as a list allows for a parameter that changes across images in the job, for example, the 'Seed' parameter.
+    The list should have the same length as the total number of images in the entire job.
+
+    Defining it as a callable function allows for parameters that cannot be generated earlier or that require extra logic.
+    For example 'Hires prompt': the hr_prompt might be changed by processes in the pipeline or by extensions,
+    and may vary across different images, so defining it as a static string or list would not work.
+ + The function takes locals() as **kwargs, as such will have access to variables like 'p' and 'index'. + the base signature of the function should be: + func(**kwargs) -> str | None + optionally it can have additional arguments that will be used in the function: + func(p, index, **kwargs) -> str | None + note: for better future compatibility even though this function will have access to all variables in the locals(), + it is recommended to only use the arguments present in the function signature of create_infotext. + For actual implementation examples, see StableDiffusionProcessingTxt2Img.init > get_hr_prompt. + """ + if use_main_prompt: index = 0 elif index is None: From d9708c92b444894bce8070e4dcfaa093f8eb8d43 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 8 Apr 2024 16:15:25 +0300 Subject: [PATCH 36/65] fix limited file write (thanks, Sylwia) --- modules/ui_extensions.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index 913e1444e..d822c0b89 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -58,8 +58,9 @@ def apply_and_restart(disable_list, update_list, disable_all): def save_config_state(name): current_config_state = config_states.get_config() - if not name: - name = "Config" + + name = os.path.basename(name or "Config") + current_config_state["name"] = name timestamp = datetime.now().strftime('%Y_%m_%d-%H_%M_%S') filename = os.path.join(config_states_dir, f"{timestamp}_{name}.json") From 3786f3742fdada13de9adcaabd4c6150fdffc15c Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 8 Apr 2024 16:15:25 +0300 Subject: [PATCH 37/65] fix limited file write (thanks, Sylwia) --- modules/ui_extensions.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index 913e1444e..d822c0b89 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -58,8 +58,9 @@ def apply_and_restart(disable_list, update_list, disable_all): def save_config_state(name): current_config_state = config_states.get_config() - if not name: - name = "Config" + + name = os.path.basename(name or "Config") + current_config_state["name"] = name timestamp = datetime.now().strftime('%Y_%m_%d-%H_%M_%S') filename = os.path.join(config_states_dir, f"{timestamp}_{name}.json") From 2580235c72b0cde49519678756c26417248348f5 Mon Sep 17 00:00:00 2001 From: Jorden Tse Date: Tue, 9 Apr 2024 11:13:47 +0800 Subject: [PATCH 38/65] Fix extra-single-image API not doing upscale failed --- modules/postprocessing.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/postprocessing.py b/modules/postprocessing.py index ab3274df3..812cbccae 100644 --- a/modules/postprocessing.py +++ b/modules/postprocessing.py @@ -136,6 +136,7 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_ args = scripts.scripts_postproc.create_args_for_run({ "Upscale": { + "upscale_enabled": True, "upscale_mode": resize_mode, "upscale_by": upscaling_resize, "max_side_length": max_side_length, From 696d6813e06179b05aa835ca1959d570b28bacb0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 9 Apr 2024 11:00:30 +0300 Subject: [PATCH 39/65] Merge pull request #15465 from jordenyt/fix-extras-api-upscale-enabled Fix extra-single-image API not doing upscale failed --- modules/postprocessing.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/postprocessing.py 
b/modules/postprocessing.py
index ab3274df3..812cbccae 100644
--- a/modules/postprocessing.py
+++ b/modules/postprocessing.py
@@ -136,6 +136,7 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_
     args = scripts.scripts_postproc.create_args_for_run({
         "Upscale": {
+            "upscale_enabled": True,
             "upscale_mode": resize_mode,
             "upscale_by": upscaling_resize,
             "max_side_length": max_side_length,

From 7f691612caf65eec7dfccb12f813eac46293563e Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 9 Apr 2024 12:05:02 +0300
Subject: [PATCH 40/65] Merge pull request #15460 from AUTOMATIC1111/create_infotext-index-and-callable

create_infotext allow index and callable, re-work Hires prompt infotext

---
 modules/processing.py | 84 +++++++++++++++++++++++++++++++++++++------
 1 file changed, 73 insertions(+), 11 deletions(-)

diff --git a/modules/processing.py b/modules/processing.py
index 2baca4f5f..d8ba5ca4d 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -608,7 +608,7 @@ class Processed:
             "version": self.version,
         }
 
-        return json.dumps(obj)
+        return json.dumps(obj, default=lambda o: None)
 
     def infotext(self, p: StableDiffusionProcessing, index):
         return create_infotext(p, self.all_prompts, self.all_seeds, self.all_subseeds, comments=[], position_in_batch=index % self.batch_size, iteration=index // self.batch_size)
@@ -703,8 +703,54 @@ def program_version():
     return res
 
 
-def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False, index=None, all_negative_prompts=None, all_hr_prompts=None, all_hr_negative_prompts=None):
-    if index is None:
+def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False, index=None, all_negative_prompts=None):
+    """
+    This function is used to generate the infotext that is stored in the generated images; it contains the parameters that are required to generate the image.
+    Args:
+        p: StableDiffusionProcessing
+        all_prompts: list[str]
+        all_seeds: list[int]
+        all_subseeds: list[int]
+        comments: list[str]
+        iteration: int
+        position_in_batch: int
+        use_main_prompt: bool
+        index: int
+        all_negative_prompts: list[str]
+
+    Returns: str
+
+    Extra generation params
+    The p.extra_generation_params dictionary allows for additional parameters to be added to the infotext;
+    this can be used by the base webui or by extensions.
+    To add a new entry, add a new key-value pair; the dictionary key will be used as the key of the parameter in the infotext.
+    The value of the entry can be defined as:
+        - str | None
+        - List[str|None]
+        - callable func(**kwargs) -> str | None
+
+    When defined as a string, it will be used as-is without extra processing; this is the most common use case.
+
+    Defining it as a list allows for a parameter that changes across images in the job, for example, the 'Seed' parameter.
+    The list should have the same length as the total number of images in the entire job.
+
+    Defining it as a callable function allows for parameters that cannot be generated earlier or that require extra logic.
+    For example 'Hires prompt': the hr_prompt might be changed by processes in the pipeline or by extensions,
+    and may vary across different images, so defining it as a static string or list would not work.
+
+    The function takes locals() as **kwargs, as such will have access to variables like 'p' and 'index'. 
+ the base signature of the function should be: + func(**kwargs) -> str | None + optionally it can have additional arguments that will be used in the function: + func(p, index, **kwargs) -> str | None + note: for better future compatibility even though this function will have access to all variables in the locals(), + it is recommended to only use the arguments present in the function signature of create_infotext. + For actual implementation examples, see StableDiffusionProcessingTxt2Img.init > get_hr_prompt. + """ + + if use_main_prompt: + index = 0 + elif index is None: index = position_in_batch + iteration * p.batch_size if all_negative_prompts is None: @@ -715,6 +761,9 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter token_merging_ratio = p.get_token_merging_ratio() token_merging_ratio_hr = p.get_token_merging_ratio(for_hr=True) + prompt_text = p.main_prompt if use_main_prompt else all_prompts[index] + negative_prompt = p.main_negative_prompt if use_main_prompt else all_negative_prompts[index] + uses_ensd = opts.eta_noise_seed_delta != 0 if uses_ensd: uses_ensd = sd_samplers_common.is_sampler_using_eta_noise_seed_delta(p) @@ -747,22 +796,24 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "RNG": opts.randn_source if opts.randn_source != "GPU" else None, "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, "Tiling": "True" if p.tiling else None, - "Hires prompt": None, # This is set later, insert here to keep order - "Hires negative prompt": None, # This is set later, insert here to keep order **p.extra_generation_params, "Version": program_version() if opts.add_version_to_infotext else None, "User": p.user if opts.add_user_name_to_info else None, } - if all_hr_prompts := all_hr_prompts or getattr(p, 'all_hr_prompts', None): - generation_params['Hires prompt'] = all_hr_prompts[index] if all_hr_prompts[index] != all_prompts[index] else None - if all_hr_negative_prompts := all_hr_negative_prompts or getattr(p, 'all_hr_negative_prompts', None): - generation_params['Hires negative prompt'] = all_hr_negative_prompts[index] if all_hr_negative_prompts[index] != all_negative_prompts[index] else None + for key, value in generation_params.items(): + try: + if isinstance(value, list): + generation_params[key] = value[index] + elif callable(value): + generation_params[key] = value(**locals()) + except Exception: + errors.report(f'Error creating infotext for key "{key}"', exc_info=True) + generation_params[key] = None generation_params_text = ", ".join([k if k == v else f'{k}: {infotext_utils.quote(v)}' for k, v in generation_params.items() if v is not None]) - prompt_text = p.main_prompt if use_main_prompt else all_prompts[index] - negative_prompt_text = f"\nNegative prompt: {p.main_negative_prompt if use_main_prompt else all_negative_prompts[index]}" if all_negative_prompts[index] else "" + negative_prompt_text = f"\nNegative prompt: {negative_prompt}" if negative_prompt else "" return f"{prompt_text}{negative_prompt_text}\n{generation_params_text}".strip() @@ -1204,6 +1255,17 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if self.hr_sampler_name is not None and self.hr_sampler_name != self.sampler_name: self.extra_generation_params["Hires sampler"] = self.hr_sampler_name + def get_hr_prompt(p, index, prompt_text, **kwargs): + hr_prompt = p.all_hr_prompts[index] + return hr_prompt if hr_prompt != prompt_text else None + + def get_hr_negative_prompt(p, index, negative_prompt, **kwargs): + 
hr_negative_prompt = p.all_hr_negative_prompts[index] + return hr_negative_prompt if hr_negative_prompt != negative_prompt else None + + self.extra_generation_params["Hires prompt"] = get_hr_prompt + self.extra_generation_params["Hires negative prompt"] = get_hr_negative_prompt + self.extra_generation_params["Hires schedule type"] = None # to be set in sd_samplers_kdiffusion.py if self.hr_scheduler is None: From 600f339c4cf1829c4a52bf83a71544ba7fe7bb5b Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Tue, 9 Apr 2024 20:59:04 +0900 Subject: [PATCH 41/65] Warning when Script is not found --- modules/scripts.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/modules/scripts.py b/modules/scripts.py index 264503ca3..70ccfbe46 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -739,12 +739,17 @@ class ScriptRunner: def onload_script_visibility(params): title = params.get('Script', None) if title: - title_index = self.titles.index(title) - visibility = title_index == self.script_load_ctr - self.script_load_ctr = (self.script_load_ctr + 1) % len(self.titles) - return gr.update(visible=visibility) - else: - return gr.update(visible=False) + try: + title_index = self.titles.index(title) + visibility = title_index == self.script_load_ctr + self.script_load_ctr = (self.script_load_ctr + 1) % len(self.titles) + return gr.update(visible=visibility) + except ValueError: + params['Script'] = None + massage = f'Cannot find Script: "{title}"' + print(massage) + gr.Warning(massage) + return gr.update(visible=False) self.infotext_fields.append((dropdown, lambda x: gr.update(value=x.get('Script', 'None')))) self.infotext_fields.extend([(script.group, onload_script_visibility) for script in self.selectable_scripts]) From ef83f6831fd747fea4d0fa638fe5ffb71aaa5b0f Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Tue, 9 Apr 2024 21:28:44 +0900 Subject: [PATCH 42/65] catch exception for all paste_fields callable --- modules/infotext_utils.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/modules/infotext_utils.py b/modules/infotext_utils.py index 1c91d076d..f1e8f54ba 100644 --- a/modules/infotext_utils.py +++ b/modules/infotext_utils.py @@ -8,7 +8,7 @@ import sys import gradio as gr from modules.paths import data_path -from modules import shared, ui_tempdir, script_callbacks, processing, infotext_versions, images, prompt_parser +from modules import shared, ui_tempdir, script_callbacks, processing, infotext_versions, images, prompt_parser, errors from PIL import Image sys.modules['modules.generation_parameters_copypaste'] = sys.modules[__name__] # alias for old name @@ -488,7 +488,11 @@ def connect_paste(button, paste_fields, input_comp, override_settings_component, for output, key in paste_fields: if callable(key): - v = key(params) + try: + v = key(params) + except Exception: + errors.report(f"Error executing {key}", exc_info=True) + v = None else: v = params.get(key, None) From 88f70ce63cb9bfee1e0ff9ab7c409a03ac631396 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 9 Apr 2024 16:00:56 +0300 Subject: [PATCH 43/65] Merge pull request #15470 from AUTOMATIC1111/read-infotext-Script-not-found error handling paste_field callables --- modules/infotext_utils.py | 8 ++++++-- modules/scripts.py | 17 +++++++++++------ 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/modules/infotext_utils.py b/modules/infotext_utils.py index 
1c91d076d..f1e8f54ba 100644 --- a/modules/infotext_utils.py +++ b/modules/infotext_utils.py @@ -8,7 +8,7 @@ import sys import gradio as gr from modules.paths import data_path -from modules import shared, ui_tempdir, script_callbacks, processing, infotext_versions, images, prompt_parser +from modules import shared, ui_tempdir, script_callbacks, processing, infotext_versions, images, prompt_parser, errors from PIL import Image sys.modules['modules.generation_parameters_copypaste'] = sys.modules[__name__] # alias for old name @@ -488,7 +488,11 @@ def connect_paste(button, paste_fields, input_comp, override_settings_component, for output, key in paste_fields: if callable(key): - v = key(params) + try: + v = key(params) + except Exception: + errors.report(f"Error executing {key}", exc_info=True) + v = None else: v = params.get(key, None) diff --git a/modules/scripts.py b/modules/scripts.py index 264503ca3..70ccfbe46 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -739,12 +739,17 @@ class ScriptRunner: def onload_script_visibility(params): title = params.get('Script', None) if title: - title_index = self.titles.index(title) - visibility = title_index == self.script_load_ctr - self.script_load_ctr = (self.script_load_ctr + 1) % len(self.titles) - return gr.update(visible=visibility) - else: - return gr.update(visible=False) + try: + title_index = self.titles.index(title) + visibility = title_index == self.script_load_ctr + self.script_load_ctr = (self.script_load_ctr + 1) % len(self.titles) + return gr.update(visible=visibility) + except ValueError: + params['Script'] = None + massage = f'Cannot find Script: "{title}"' + print(massage) + gr.Warning(massage) + return gr.update(visible=False) self.infotext_fields.append((dropdown, lambda x: gr.update(value=x.get('Script', 'None')))) self.infotext_fields.extend([(script.group, onload_script_visibility) for script in self.selectable_scripts]) From 4068429ac72cd83e03abe779148bbe1d6a141a09 Mon Sep 17 00:00:00 2001 From: storyicon Date: Wed, 10 Apr 2024 10:53:25 +0000 Subject: [PATCH 44/65] fix: Coordinate 'right' is less than 'left' Signed-off-by: storyicon --- modules/masking.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/masking.py b/modules/masking.py index 29a394527..9f5b0cd03 100644 --- a/modules/masking.py +++ b/modules/masking.py @@ -9,8 +9,8 @@ def get_crop_region(mask, pad=0): if box: x1, y1, x2, y2 = box else: # when no box is found - x1, y1 = mask_img.size - x2 = y2 = 0 + x1 = y1 = 0 + x2, y2 = mask_img.size return max(x1 - pad, 0), max(y1 - pad, 0), min(x2 + pad, mask_img.size[0]), min(y2 + pad, mask_img.size[1]) From 592e40ebe937cea6325412fe3650f4b693e0ab95 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Thu, 11 Apr 2024 22:51:29 +0900 Subject: [PATCH 45/65] update restricted_opts --- modules/shared_options.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index a2b595ff3..326a317e0 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -19,7 +19,9 @@ restricted_opts = { "outdir_grids", "outdir_txt2img_grids", "outdir_save", - "outdir_init_images" + "outdir_init_images", + "temp_dir", + "clean_temp_dir_at_start", } categories.register_category("saving", "Saving images") From a196319edf55f5454bd0ad4f466bb19db757e0e0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 11 Apr 2024 19:33:55 +0300 Subject: [PATCH 46/65] Merge pull request 
#15492 from w-e-w/update-restricted_opts update restricted_opts --- modules/shared_options.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index a2b595ff3..326a317e0 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -19,7 +19,9 @@ restricted_opts = { "outdir_grids", "outdir_txt2img_grids", "outdir_save", - "outdir_init_images" + "outdir_init_images", + "temp_dir", + "clean_temp_dir_at_start", } categories.register_category("saving", "Saving images") From d282d248000a40611f1b40f55bb4b3a4af5fb17b Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 13 Apr 2024 06:37:03 +0300 Subject: [PATCH 47/65] update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 362b4861f..5bb816cc8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -81,6 +81,10 @@ * minor bug fix of sd model memory management ([#15350](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15350)) * Fix CodeFormer weight ([#15414](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15414)) * Fix: Remove script callbacks in ordered_callbacks_map ([#15428](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15428)) +* fix limited file write (thanks, Sylwia) +* Fix extra-single-image API not doing upscale failed ([#15465](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15465)) +* error handling paste_field callables ([#15470](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15470)) + ### Hardware: * Add training support and change lspci for Ascend NPU ([#14981](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14981)) @@ -112,6 +116,8 @@ * Add Size as an XYZ Grid option ([#15354](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15354)) * Use HF_ENDPOINT variable for HuggingFace domain with default ([#15443](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15443)) * re-add update_file_entry ([#15446](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15446)) +* create_infotext allow index and callable, re-work Hires prompt infotext ([#15460](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15460)) +* update restricted_opts to include more options for --hide-ui-dir-config ([#15492](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15492)) ## 1.8.0 From 8e1c3561beb33fa58e39c6fb2594ece02aca188a Mon Sep 17 00:00:00 2001 From: thatfuckingbird <67429906+thatfuckingbird@users.noreply.github.com> Date: Mon, 15 Apr 2024 21:17:24 +0200 Subject: [PATCH 48/65] fix typo in function call (eror -> error) --- javascript/extraNetworks.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index 358ecd36c..c5cced973 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -568,7 +568,7 @@ function extraNetworksShowMetadata(text) { return; } } catch (error) { - console.eror(error); + console.error(error); } var elem = document.createElement('pre'); From 0f82948e4f46ca27acbf3ffb817cabec402c6438 Mon Sep 17 00:00:00 2001 From: huchenlei Date: Mon, 15 Apr 2024 22:14:19 -0400 Subject: [PATCH 49/65] Fix cls.__module__ --- .../hypertile/scripts/hypertile_xyz.py | 2 +- modules/script_loading.py | 12 +++++------- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/extensions-builtin/hypertile/scripts/hypertile_xyz.py 
b/extensions-builtin/hypertile/scripts/hypertile_xyz.py index 9e96ae3c5..386c6b2d6 100644 --- a/extensions-builtin/hypertile/scripts/hypertile_xyz.py +++ b/extensions-builtin/hypertile/scripts/hypertile_xyz.py @@ -1,7 +1,7 @@ from modules import scripts from modules.shared import opts -xyz_grid = [x for x in scripts.scripts_data if x.script_class.__module__ == "xyz_grid.py"][0].module +xyz_grid = [x for x in scripts.scripts_data if x.script_class.__module__ == "scripts.xyz_grid"][0].module def int_applier(value_name:str, min_range:int = -1, max_range:int = -1): """ diff --git a/modules/script_loading.py b/modules/script_loading.py index 17f658b15..c505c0b84 100644 --- a/modules/script_loading.py +++ b/modules/script_loading.py @@ -9,15 +9,13 @@ loaded_scripts = {} def load_module(path): - module_spec = importlib.util.spec_from_file_location(os.path.basename(path), path) + module_name, _ = os.path.splitext(os.path.basename(path)) + full_module_name = "scripts." + module_name + module_spec = importlib.util.spec_from_file_location(full_module_name, path) module = importlib.util.module_from_spec(module_spec) module_spec.loader.exec_module(module) - - loaded_scripts[path] = module - - module_name, _ = os.path.splitext(os.path.basename(path)) - sys.modules["scripts." + module_name] = module - + loaded_scripts[full_module_name] = module + sys.modules[full_module_name] = module return module From a95326bec434da5d0a2aeb943d35cfded75e3afa Mon Sep 17 00:00:00 2001 From: huchenlei Date: Mon, 15 Apr 2024 22:34:01 -0400 Subject: [PATCH 50/65] nit --- modules/script_loading.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/script_loading.py b/modules/script_loading.py index c505c0b84..20c7998ac 100644 --- a/modules/script_loading.py +++ b/modules/script_loading.py @@ -14,7 +14,7 @@ def load_module(path): module_spec = importlib.util.spec_from_file_location(full_module_name, path) module = importlib.util.module_from_spec(module_spec) module_spec.loader.exec_module(module) - loaded_scripts[full_module_name] = module + loaded_scripts[path] = module sys.modules[full_module_name] = module return module From bba306d414f1e81c979ae53271c022df08ef7388 Mon Sep 17 00:00:00 2001 From: Travis Geiselbrecht Date: Mon, 15 Apr 2024 21:10:11 -0700 Subject: [PATCH 51/65] fix: remove callbacks properly in remove_callbacks_for_function() Like remove_current_script_callback just before, also remove from the ordered_callbacks_map to keep the callback map and ordered callback map in sync. 
--- modules/script_callbacks.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 74f41f09d..9059d4d93 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -448,6 +448,9 @@ def remove_callbacks_for_function(callback_func): for callback_list in callback_map.values(): for callback_to_remove in [cb for cb in callback_list if cb.callback == callback_func]: callback_list.remove(callback_to_remove) + for ordered_callback_list in ordered_callbacks_map.values(): + for callback_to_remove in [cb for cb in ordered_callback_list if cb.callback == callback_func]: + ordered_callback_list.remove(callback_to_remove) def on_app_started(callback, *, name=None): From 0980fdfe8c85bb4ff915bab100a383674a6a0171 Mon Sep 17 00:00:00 2001 From: storyicon Date: Tue, 16 Apr 2024 07:35:33 +0000 Subject: [PATCH 52/65] fix: images do not match Signed-off-by: storyicon --- modules/processing.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index 411c7c3f4..c14b68965 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -1537,6 +1537,9 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): if self.mask_blur_x > 0 or self.mask_blur_y > 0: self.extra_generation_params["Mask blur"] = self.mask_blur + if image_mask.size != (self.width, self.height): + image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height) + if self.inpaint_full_res: self.mask_for_overlay = image_mask mask = image_mask.convert('L') @@ -1551,7 +1554,6 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): self.extra_generation_params["Inpaint area"] = "Only masked" self.extra_generation_params["Masked area padding"] = self.inpaint_full_res_padding else: - image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height) np_mask = np.array(image_mask) np_mask = np.clip((np_mask.astype(np.float32)) * 2, 0, 255).astype(np.uint8) self.mask_for_overlay = Image.fromarray(np_mask) From 50190ca669fde082cd45a5030127813617a7f777 Mon Sep 17 00:00:00 2001 From: "Alessandro de Oliveira Faria (A.K.A. CABELO)" Date: Wed, 17 Apr 2024 00:01:56 -0300 Subject: [PATCH 53/65] Compatibility with Debian 11, Fedora 34+ and openSUSE 15.4+ --- webui.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.sh b/webui.sh index d28c7c19b..fee5e1182 100755 --- a/webui.sh +++ b/webui.sh @@ -243,7 +243,7 @@ prepare_tcmalloc() { for lib in "${TCMALLOC_LIBS[@]}" do # Determine which type of tcmalloc library the library supports - TCMALLOC="$(PATH=/usr/sbin:$PATH ldconfig -p | grep -P $lib | head -n 1)" + TCMALLOC="$(PATH=/sbin:/usr/sbin:$PATH ldconfig -p | grep -P $lib | head -n 1)" TC_INFO=(${TCMALLOC//=>/}) if [[ ! 
-z "${TC_INFO}" ]]; then echo "Check TCMalloc: ${TC_INFO}" From 63fd38a04f91ed97bf0f51c9b63f134a2ed81d59 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Wed, 17 Apr 2024 15:44:49 +0900 Subject: [PATCH 54/65] numpy DeprecationWarning product -> prod --- modules/textual_inversion/image_embedding.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/textual_inversion/image_embedding.py b/modules/textual_inversion/image_embedding.py index ea4b88333..0898d8b77 100644 --- a/modules/textual_inversion/image_embedding.py +++ b/modules/textual_inversion/image_embedding.py @@ -43,7 +43,7 @@ def lcg(m=2**32, a=1664525, c=1013904223, seed=0): def xor_block(block): g = lcg() - randblock = np.array([next(g) for _ in range(np.product(block.shape))]).astype(np.uint8).reshape(block.shape) + randblock = np.array([next(g) for _ in range(np.prod(block.shape))]).astype(np.uint8).reshape(block.shape) return np.bitwise_xor(block.astype(np.uint8), randblock & 0x0F) From 9d4fdc45d3c4b9431551fd53de64a67726dcbd64 Mon Sep 17 00:00:00 2001 From: Andray Date: Thu, 18 Apr 2024 01:53:23 +0400 Subject: [PATCH 55/65] fix x1 upscalers --- modules/upscaler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/upscaler.py b/modules/upscaler.py index 59f8fbbf5..28c60cdcd 100644 --- a/modules/upscaler.py +++ b/modules/upscaler.py @@ -57,7 +57,7 @@ class Upscaler: dest_h = int((img.height * scale) // 8 * 8) for _ in range(3): - if img.width >= dest_w and img.height >= dest_h: + if img.width >= dest_w and img.height >= dest_h and scale != 1: break if shared.state.interrupted: From 909c3dfe83ac8c55edebb8c23e265dbfe5532081 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Wed, 17 Apr 2024 21:20:03 -0600 Subject: [PATCH 56/65] Remove API upscaling factor limits --- modules/api/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/api/models.py b/modules/api/models.py index 16edf11cf..ff3777134 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -147,7 +147,7 @@ class ExtrasBaseRequest(BaseModel): gfpgan_visibility: float = Field(default=0, title="GFPGAN Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of GFPGAN, values should be between 0 and 1.") codeformer_visibility: float = Field(default=0, title="CodeFormer Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of CodeFormer, values should be between 0 and 1.") codeformer_weight: float = Field(default=0, title="CodeFormer Weight", ge=0, le=1, allow_inf_nan=False, description="Sets the weight of CodeFormer, values should be between 0 and 1.") - upscaling_resize: float = Field(default=2, title="Upscaling Factor", ge=1, le=8, description="By how much to upscale the image, only used when resize_mode=0.") + upscaling_resize: float = Field(default=2, title="Upscaling Factor", gt=0, description="By how much to upscale the image, only used when resize_mode=0.") upscaling_resize_w: int = Field(default=512, title="Target Width", ge=1, description="Target width for the upscaler to hit. Only used when resize_mode=1.") upscaling_resize_h: int = Field(default=512, title="Target Height", ge=1, description="Target height for the upscaler to hit. 
Only used when resize_mode=1.") upscaling_crop: bool = Field(default=True, title="Crop to fit", description="Should the upscaler crop the image to fit in the chosen size?") From ba2a737cceced2355f10e2f645e6794762858d12 Mon Sep 17 00:00:00 2001 From: Speculative Moonstone <167392122+speculativemoonstone@users.noreply.github.com> Date: Thu, 18 Apr 2024 04:25:32 +0000 Subject: [PATCH 57/65] Allow webui.sh to be runnable from directories containing a .git file --- webui.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/webui.sh b/webui.sh index d28c7c19b..c22a68227 100755 --- a/webui.sh +++ b/webui.sh @@ -113,13 +113,13 @@ then exit 1 fi -if [[ -d .git ]] +if [[ -d "$SCRIPT_DIR/.git" ]] then printf "\n%s\n" "${delimiter}" printf "Repo already cloned, using it as install directory" printf "\n%s\n" "${delimiter}" - install_dir="${PWD}/../" - clone_dir="${PWD##*/}" + install_dir="${SCRIPT_DIR}/../" + clone_dir="${SCRIPT_DIR##*/}" fi # Check prerequisites From 71314e47b1e505744f5ec8336fea0f7e45e0b4fb Mon Sep 17 00:00:00 2001 From: storyicon Date: Thu, 18 Apr 2024 11:59:25 +0000 Subject: [PATCH 58/65] feat:compatible with inconsistent/empty mask Signed-off-by: storyicon --- modules/masking.py | 4 ++-- modules/processing.py | 23 +++++++++++++---------- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/modules/masking.py b/modules/masking.py index 9f5b0cd03..29a394527 100644 --- a/modules/masking.py +++ b/modules/masking.py @@ -9,8 +9,8 @@ def get_crop_region(mask, pad=0): if box: x1, y1, x2, y2 = box else: # when no box is found - x1 = y1 = 0 - x2, y2 = mask_img.size + x1, y1 = mask_img.size + x2 = y2 = 0 return max(x1 - pad, 0), max(y1 - pad, 0), min(x2 + pad, mask_img.size[0]), min(y2 + pad, mask_img.size[1]) diff --git a/modules/processing.py b/modules/processing.py index c14b68965..1ee4c0477 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -1537,23 +1537,24 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): if self.mask_blur_x > 0 or self.mask_blur_y > 0: self.extra_generation_params["Mask blur"] = self.mask_blur - if image_mask.size != (self.width, self.height): - image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height) - if self.inpaint_full_res: self.mask_for_overlay = image_mask mask = image_mask.convert('L') crop_region = masking.get_crop_region(mask, self.inpaint_full_res_padding) - crop_region = masking.expand_crop_region(crop_region, self.width, self.height, mask.width, mask.height) - x1, y1, x2, y2 = crop_region - - mask = mask.crop(crop_region) - image_mask = images.resize_image(2, mask, self.width, self.height) - self.paste_to = (x1, y1, x2-x1, y2-y1) - + if crop_region[0] >= crop_region[2] and crop_region[1] >= crop_region[3]: + crop_region = None + image_mask = None + self.mask_for_overlay = None + else: + crop_region = masking.expand_crop_region(crop_region, self.width, self.height, mask.width, mask.height) + x1, y1, x2, y2 = crop_region + mask = mask.crop(crop_region) + image_mask = images.resize_image(2, mask, self.width, self.height) + self.paste_to = (x1, y1, x2-x1, y2-y1) self.extra_generation_params["Inpaint area"] = "Only masked" self.extra_generation_params["Masked area padding"] = self.inpaint_full_res_padding else: + image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height) np_mask = np.array(image_mask) np_mask = np.clip((np_mask.astype(np.float32)) * 2, 0, 255).astype(np.uint8) self.mask_for_overlay = Image.fromarray(np_mask) @@ -1579,6 
+1580,8 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): image = images.resize_image(self.resize_mode, image, self.width, self.height) if image_mask is not None: + if self.mask_for_overlay.size != (image.width, image.height): + self.mask_for_overlay = images.resize_image(self.resize_mode, self.mask_for_overlay, image.width, image.height) image_masked = Image.new('RGBa', (image.width, image.height)) image_masked.paste(image.convert("RGBA").convert("RGBa"), mask=ImageOps.invert(self.mask_for_overlay.convert('L'))) From d212fb59fe897327142c9b5049460689148be7a7 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Thu, 18 Apr 2024 20:56:51 -0600 Subject: [PATCH 59/65] Hide 'No Image data blocks found.' message --- modules/textual_inversion/image_embedding.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/modules/textual_inversion/image_embedding.py b/modules/textual_inversion/image_embedding.py index ea4b88333..d6a6a260b 100644 --- a/modules/textual_inversion/image_embedding.py +++ b/modules/textual_inversion/image_embedding.py @@ -1,12 +1,15 @@ import base64 import json import warnings +import logging import numpy as np import zlib from PIL import Image, ImageDraw import torch +logger = logging.getLogger(__name__) + class EmbeddingEncoder(json.JSONEncoder): def default(self, obj): @@ -114,7 +117,7 @@ def extract_image_data_embed(image): outarr = crop_black(np.array(image.convert('RGB').getdata()).reshape(image.size[1], image.size[0], d).astype(np.uint8)) & 0x0F black_cols = np.where(np.sum(outarr, axis=(0, 2)) == 0) if black_cols[0].shape[0] < 2: - print('No Image data blocks found.') + logger.debug('No Image data blocks found.') return None data_block_lower = outarr[:, :black_cols[0].min(), :].astype(np.uint8) From 5cb567c1384a019e5ba534b023b0f5d2dfff931f Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Fri, 19 Apr 2024 20:29:22 -0600 Subject: [PATCH 60/65] Add schedulers API endpoint --- modules/api/api.py | 14 +++++++++++++- modules/api/models.py | 7 +++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/modules/api/api.py b/modules/api/api.py index 29fa0011a..f468c3852 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -17,7 +17,7 @@ from fastapi.encoders import jsonable_encoder from secrets import compare_digest import modules.shared as shared -from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing, errors, restart, shared_items, script_callbacks, infotext_utils, sd_models +from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing, errors, restart, shared_items, script_callbacks, infotext_utils, sd_models, sd_schedulers from modules.api import models from modules.shared import opts from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images @@ -221,6 +221,7 @@ class Api: self.add_api_route("/sdapi/v1/options", self.set_config, methods=["POST"]) self.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=models.FlagsModel) self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=list[models.SamplerItem]) + self.add_api_route("/sdapi/v1/schedulers", self.get_schedulers, methods=["GET"], response_model=list[models.SchedulerItem]) self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=list[models.UpscalerItem]) self.add_api_route("/sdapi/v1/latent-upscale-modes", self.get_latent_upscale_modes, 
methods=["GET"], response_model=list[models.LatentUpscalerModeItem]) self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=list[models.SDModelItem]) @@ -683,6 +684,17 @@ class Api: def get_samplers(self): return [{"name": sampler[0], "aliases":sampler[2], "options":sampler[3]} for sampler in sd_samplers.all_samplers] + def get_schedulers(self): + return [ + { + "name": scheduler.name, + "label": scheduler.label, + "aliases": scheduler.aliases, + "default_rho": scheduler.default_rho, + "need_inner_model": scheduler.need_inner_model, + } + for scheduler in sd_schedulers.schedulers] + def get_upscalers(self): return [ { diff --git a/modules/api/models.py b/modules/api/models.py index 16edf11cf..758da6312 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -235,6 +235,13 @@ class SamplerItem(BaseModel): aliases: list[str] = Field(title="Aliases") options: dict[str, str] = Field(title="Options") +class SchedulerItem(BaseModel): + name: str = Field(title="Name") + label: str = Field(title="Label") + aliases: Optional[list[str]] = Field(title="Aliases") + default_rho: Optional[float] = Field(title="Default Rho") + need_inner_model: Optional[bool] = Field(title="Needs Inner Model") + class UpscalerItem(BaseModel): name: str = Field(title="Name") model_name: Optional[str] = Field(title="Model Name") From b5b1487f6a7ccc9c80251f100a92b004f727bee7 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 21 Apr 2024 02:26:50 +0900 Subject: [PATCH 61/65] FilenameGenerator Sampler Scheduler --- modules/images.py | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/modules/images.py b/modules/images.py index c50b2455d..eda02836e 100644 --- a/modules/images.py +++ b/modules/images.py @@ -1,7 +1,7 @@ from __future__ import annotations import datetime - +import functools import pytz import io import math @@ -347,6 +347,32 @@ def sanitize_filename_part(text, replace_spaces=True): return text +@functools.cache +def get_scheduler_str(sampler_name, scheduler_name): + """Returns {Scheduler} if the scheduler is applicable to the sampler""" + if scheduler_name == 'Automatic': + config = sd_samplers.find_sampler_config(sampler_name) + scheduler_name = config.options.get('scheduler', 'Automatic') + return scheduler_name.capitalize() + + +@functools.cache +def get_sampler_scheduler_str(sampler_name, scheduler_name): + """Returns the '{Sampler} {Scheduler}' if the scheduler is applicable to the sampler""" + return f'{sampler_name} {get_scheduler_str(sampler_name, scheduler_name)}' + + +def get_sampler_scheduler(p, sampler): + """Returns '{Sampler} {Scheduler}' / '{Scheduler}' / 'NOTHING_AND_SKIP_PREVIOUS_TEXT'""" + if hasattr(p, 'scheduler') and hasattr(p, 'sampler_name'): + if sampler: + sampler_scheduler = get_sampler_scheduler_str(p.sampler_name, p.scheduler) + else: + sampler_scheduler = get_scheduler_str(p.sampler_name, p.scheduler) + return sanitize_filename_part(sampler_scheduler, replace_spaces=False) + return NOTHING_AND_SKIP_PREVIOUS_TEXT + + class FilenameGenerator: replacements = { 'seed': lambda self: self.seed if self.seed is not None else '', @@ -358,6 +384,8 @@ class FilenameGenerator: 'height': lambda self: self.image.height, 'styles': lambda self: self.p and sanitize_filename_part(", ".join([style for style in self.p.styles if not style == "None"]) or "None", replace_spaces=False), 'sampler': lambda self: self.p and sanitize_filename_part(self.p.sampler_name, 
replace_spaces=False), + 'sampler_scheduler': lambda self: self.p and get_sampler_scheduler(self.p, True), + 'scheduler': lambda self: self.p and get_sampler_scheduler(self.p, False), 'model_hash': lambda self: getattr(self.p, "sd_model_hash", shared.sd_model.sd_model_hash), 'model_name': lambda self: sanitize_filename_part(shared.sd_model.sd_checkpoint_info.name_for_extra, replace_spaces=False), 'date': lambda self: datetime.datetime.now().strftime('%Y-%m-%d'), From 49fee7c8dbb7f17e3903c2a238c53ddd53bc503f Mon Sep 17 00:00:00 2001 From: kaanyalova <76952012+kaanyalova@users.noreply.github.com> Date: Sat, 20 Apr 2024 23:18:54 +0300 Subject: [PATCH 62/65] Add avif support --- modules/images.py | 13 ++++++++++++- requirements.txt | 1 + requirements_versions.txt | 1 + 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/modules/images.py b/modules/images.py index c50b2455d..09d3523e8 100644 --- a/modules/images.py +++ b/modules/images.py @@ -13,6 +13,8 @@ import numpy as np import piexif import piexif.helper from PIL import Image, ImageFont, ImageDraw, ImageColor, PngImagePlugin, ImageOps +# pillow_avif needs to be imported somewhere in code for it to work +import pillow_avif # noqa: F401 import string import json import hashlib @@ -569,6 +571,16 @@ def save_image_with_geninfo(image, geninfo, filename, extension=None, existing_p }) piexif.insert(exif_bytes, filename) + elif extension.lower() == '.avif': + if opts.enable_pnginfo and geninfo is not None: + exif_bytes = piexif.dump({ + "Exif": { + piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(geninfo or "", encoding="unicode") + }, + }) + + + image.save(filename,format=image_format, exif=exif_bytes) elif extension.lower() == ".gif": image.save(filename, format=image_format, comment=geninfo) else: @@ -747,7 +759,6 @@ def read_info_from_image(image: Image.Image) -> tuple[str | None, dict]: exif_comment = exif_comment.decode('utf8', errors="ignore") if exif_comment: - items['exif comment'] = exif_comment geninfo = exif_comment elif "comment" in items: # for gif geninfo = items["comment"].decode('utf8', errors="ignore") diff --git a/requirements.txt b/requirements.txt index 8699c02be..9e2ecfe4d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -30,3 +30,4 @@ torch torchdiffeq torchsde transformers==4.30.2 +pillow-avif-plugin==1.4.3 \ No newline at end of file diff --git a/requirements_versions.txt b/requirements_versions.txt index 87aae9136..3df74f3d6 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -29,3 +29,4 @@ torchdiffeq==0.2.3 torchsde==0.2.6 transformers==4.30.2 httpx==0.24.1 +pillow-avif-plugin==1.4.3 From 6f4f6bff6b4af4e65264618a7aebe8a6435d1350 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 21 Apr 2024 07:18:58 +0300 Subject: [PATCH 63/65] add more info to the error message for #15567 --- modules/textual_inversion/image_embedding.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/textual_inversion/image_embedding.py b/modules/textual_inversion/image_embedding.py index d6a6a260b..644e1152e 100644 --- a/modules/textual_inversion/image_embedding.py +++ b/modules/textual_inversion/image_embedding.py @@ -1,5 +1,6 @@ import base64 import json +import os.path import warnings import logging @@ -117,7 +118,7 @@ def extract_image_data_embed(image): outarr = crop_black(np.array(image.convert('RGB').getdata()).reshape(image.size[1], image.size[0], d).astype(np.uint8)) & 0x0F black_cols = np.where(np.sum(outarr, axis=(0, 2)) == 0) if 
black_cols[0].shape[0] < 2: - logger.debug('No Image data blocks found.') + logger.debug(f'{os.path.basename(getattr(image, "filename", "unknown image file"))}: no embedded information found.') return None data_block_lower = outarr[:, :black_cols[0].min(), :].astype(np.uint8) From 9bcfb92a00df2ff217359be68ea2b21b3260f341 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 21 Apr 2024 07:41:28 +0300 Subject: [PATCH 64/65] rename logging from textual inversion to not confuse it with global logging module --- modules/hypernetworks/hypernetwork.py | 4 ++-- modules/textual_inversion/{logging.py => saving_settings.py} | 0 modules/textual_inversion/textual_inversion.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) rename modules/textual_inversion/{logging.py => saving_settings.py} (100%) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 6082d9cb3..17454665f 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -11,7 +11,7 @@ import tqdm from einops import rearrange, repeat from ldm.util import default from modules import devices, sd_models, shared, sd_samplers, hashes, sd_hijack_checkpoint, errors -from modules.textual_inversion import textual_inversion, logging +from modules.textual_inversion import textual_inversion, saving_settings from modules.textual_inversion.learn_schedule import LearnRateScheduler from torch import einsum from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_ @@ -533,7 +533,7 @@ def train_hypernetwork(id_task, hypernetwork_name: str, learn_rate: float, batch model_name=checkpoint.model_name, model_hash=checkpoint.shorthash, num_of_dataset_images=len(ds), **{field: getattr(hypernetwork, field) for field in ['layer_structure', 'activation_func', 'weight_init', 'add_layer_norm', 'use_dropout', ]} ) - logging.save_settings_to_file(log_directory, {**saved_params, **locals()}) + saving_settings.save_settings_to_file(log_directory, {**saved_params, **locals()}) latent_sampling_method = ds.latent_sampling_method diff --git a/modules/textual_inversion/logging.py b/modules/textual_inversion/saving_settings.py similarity index 100% rename from modules/textual_inversion/logging.py rename to modules/textual_inversion/saving_settings.py diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index c206ef5fd..253f219c4 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -17,7 +17,7 @@ import modules.textual_inversion.dataset from modules.textual_inversion.learn_schedule import LearnRateScheduler from modules.textual_inversion.image_embedding import embedding_to_b64, embedding_from_b64, insert_image_data_embed, extract_image_data_embed, caption_image_overlay -from modules.textual_inversion.logging import save_settings_to_file +from modules.textual_inversion.saving_settings import save_settings_to_file TextualInversionTemplate = namedtuple("TextualInversionTemplate", ["name", "path"]) From db263df5d5c1ba6d56b277a155186b80d24ac5bd Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 21 Apr 2024 19:34:11 +0900 Subject: [PATCH 65/65] get_crop_region_v2 --- modules/masking.py | 42 ++++++++++++++++++++++++++++++++---------- modules/processing.py | 20 ++++++++++++-------- 2 files changed, 44 insertions(+), 18 deletions(-) diff --git a/modules/masking.py 
b/modules/masking.py index 29a394527..8e869d1b1 100644 --- a/modules/masking.py +++ b/modules/masking.py @@ -1,17 +1,39 @@ from PIL import Image, ImageFilter, ImageOps -def get_crop_region(mask, pad=0): - """finds a rectangular region that contains all masked ares in an image. Returns (x1, y1, x2, y2) coordinates of the rectangle. - For example, if a user has painted the top-right part of a 512x512 image, the result may be (256, 0, 512, 256)""" - mask_img = mask if isinstance(mask, Image.Image) else Image.fromarray(mask) - box = mask_img.getbbox() - if box: +def get_crop_region_v2(mask, pad=0): + """ + Finds a rectangular region that contains all masked areas in a mask. + Returns None if the mask is completely black (all 0) + + Parameters: + mask: PIL.Image.Image L mode or numpy array + pad: int, number of pixels by which the region will be extended on all sides + Returns: (x1, y1, x2, y2) | None + + Introduced post 1.9.0 + """ + mask = mask if isinstance(mask, Image.Image) else Image.fromarray(mask) + if box := mask.getbbox(): x1, y1, x2, y2 = box - else: # when no box is found - x1, y1 = mask_img.size - x2 = y2 = 0 - return max(x1 - pad, 0), max(y1 - pad, 0), min(x2 + pad, mask_img.size[0]), min(y2 + pad, mask_img.size[1]) + return (max(x1 - pad, 0), max(y1 - pad, 0), min(x2 + pad, mask.size[0]), min(y2 + pad, mask.size[1])) if pad else box + + +def get_crop_region(mask, pad=0): + """ + Same function as get_crop_region_v2 but handles a completely black mask (all 0) differently: + when the mask is all black it still returns coordinates, but they may be invalid, i.e. x2 < x1 or y2 < y1 + Note: it is possible for the coordinates to become "valid" again if the pad size is sufficiently large, + since the returned region is (mask_size.x-pad, mask_size.y-pad, pad, pad) + + Extension developers should use get_crop_region_v2 instead, unless compatibility with the old behavior is required.
+ """ + mask = mask if isinstance(mask, Image.Image) else Image.fromarray(mask) + if box := get_crop_region_v2(mask, pad): + return box + x1, y1 = mask.size + x2 = y2 = 0 + return max(x1 - pad, 0), max(y1 - pad, 0), min(x2 + pad, mask.size[0]), min(y2 + pad, mask.size[1]) def expand_crop_region(crop_region, processing_width, processing_height, image_width, image_height): diff --git a/modules/processing.py b/modules/processing.py index b5a04634a..f77123b9f 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -1611,19 +1611,23 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): if self.inpaint_full_res: self.mask_for_overlay = image_mask mask = image_mask.convert('L') - crop_region = masking.get_crop_region(mask, self.inpaint_full_res_padding) - if crop_region[0] >= crop_region[2] and crop_region[1] >= crop_region[3]: - crop_region = None - image_mask = None - self.mask_for_overlay = None - else: + crop_region = masking.get_crop_region_v2(mask, self.inpaint_full_res_padding) + if crop_region: crop_region = masking.expand_crop_region(crop_region, self.width, self.height, mask.width, mask.height) x1, y1, x2, y2 = crop_region mask = mask.crop(crop_region) image_mask = images.resize_image(2, mask, self.width, self.height) + self.inpaint_full_res = False self.paste_to = (x1, y1, x2-x1, y2-y1) - self.extra_generation_params["Inpaint area"] = "Only masked" - self.extra_generation_params["Masked area padding"] = self.inpaint_full_res_padding + self.extra_generation_params["Inpaint area"] = "Only masked" + self.extra_generation_params["Masked area padding"] = self.inpaint_full_res_padding + else: + crop_region = None + image_mask = None + self.mask_for_overlay = None + message = 'Unable to perform "Inpaint Only mask" because the mask is blank, switching to img2img mode instead.' + model_hijack.comments.append(message) + logging.info(message) else: image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height) np_mask = np.array(image_mask)
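As an illustrative aside (not part of the patches above), the minimal sketch below shows how the two helpers from PATCH 65/65 behave for a painted mask and for a blank one, assuming the repository's modules.masking is importable and using only Pillow to build the masks; the values in the comments follow from the code in the diff rather than from a captured run.

from PIL import Image, ImageDraw
from modules import masking  # module patched in PATCH 65/65

# A 512x512 mask with a white rectangle painted in the top-right corner.
mask = Image.new("L", (512, 512), 0)
ImageDraw.Draw(mask).rectangle((384, 0, 511, 127), fill=255)

# Both helpers agree on a non-empty mask; pad extends the box on all sides.
print(masking.get_crop_region_v2(mask, pad=32))  # (352, 0, 512, 160)
print(masking.get_crop_region(mask, pad=32))     # (352, 0, 512, 160)

# On a completely black mask the new helper returns None, while the legacy
# helper keeps returning "inverted" coordinates (x2 < x1, y2 < y1).
empty = Image.new("L", (512, 512), 0)
print(masking.get_crop_region_v2(empty))  # None
print(masking.get_crop_region(empty))     # (512, 512, 0, 0)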