Clipskip override from infotext (#2099)

* support more infotext overrides
clip skip, model, VAE/TE, and the old-style VAE parameter too
This commit is contained in:
DenOfEquity
2024-10-18 16:47:49 +01:00
committed by GitHub
parent 1ef5436833
commit 1fae20d94f
2 changed files with 47 additions and 7 deletions

View File

@@ -11,6 +11,8 @@ from modules.paths import data_path
from modules import shared, ui_tempdir, script_callbacks, processing, infotext_versions, images, prompt_parser, errors
from PIL import Image
from modules_forge import main_entry
sys.modules['modules.generation_parameters_copypaste'] = sys.modules[__name__] # alias for old name
re_param_code = r'\s*(\w[\w \-/]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)'
@@ -414,11 +416,44 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
for key in skip_fields:
res.pop(key, None)
# basic check for same checkpoint using short name
checkpoint = res.get('Model', None)
if checkpoint is not None:
if checkpoint in shared.opts.sd_model_checkpoint:
res.pop('Model')
# VAE / TE
modules = []
vae = res.pop('VAE', None) # old form
if vae:
modules = [vae]
else:
for key in res:
if key.startswith('Module '):
added = False
for knownmodule in main_entry.module_list.keys():
filename, _ = os.path.splitext(knownmodule)
if res[key] == filename:
added = True
modules.append(knownmodule)
break
if not added:
modules.append(res[key]) # so it shows in the override section (consistent with checkpoint and old vae)
if modules != []:
current_modules = shared.opts.forge_additional_modules
basename_modules = []
for m in current_modules:
basename_modules.append(os.path.basename(m))
if sorted(modules) != sorted(basename_modules):
res['VAE/TE'] = modules
return res
infotext_to_setting_name_mapping = [
('VAE/TE', 'forge_additional_modules'),
]
"""Mapping of infotext labels to setting names. Only left for backwards compatibility - use OptionInfo(..., infotext='...') instead.
Example content:
@@ -431,7 +466,7 @@ infotext_to_setting_name_mapping = [
]
"""
from ast import literal_eval
def create_override_settings_dict(text_pairs):
"""creates processing's override_settings parameters from gradio's multiselect
@@ -460,6 +495,10 @@ def create_override_settings_dict(text_pairs):
if value is None:
continue
if setting_name == "forge_additional_modules":
res[setting_name] = literal_eval(value)
continue
res[setting_name] = shared.opts.cast_value(setting_name, value)
return res
@@ -493,8 +532,9 @@ def get_override_settings(params, *, skip_fields=None):
if v is None:
continue
if setting_name == "sd_model_checkpoint" and shared.opts.disable_weights_auto_swap:
continue
if setting_name in ["sd_model_checkpoint", "forge_additional_modules"]:
if shared.opts.disable_weights_auto_swap:
continue
v = shared.opts.cast_value(setting_name, v)
current_value = getattr(shared.opts, setting_name, None)

View File

@@ -170,7 +170,7 @@ options_templates.update(options_section(('training', "Training", "training"), {
}))
options_templates.update(options_section(('sd', "Stable Diffusion", "sd"), {
"sd_model_checkpoint": OptionInfo(None, "(Managed by Forge)", gr.State),
"sd_model_checkpoint": OptionInfo(None, "(Managed by Forge)", gr.State, infotext="Model"),
"sd_checkpoints_limit": OptionInfo(1, "Maximum number of checkpoints loaded at the same time", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}),
"sd_checkpoints_keep_in_cpu": OptionInfo(True, "Only keep one model on device").info("will keep models other than the currently used one in RAM rather than VRAM"),
"sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}).info("obsolete; set to 0 and use the two settings above instead"),
@@ -180,7 +180,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion", "sd"), {
"enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
"comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"),
"sdxl_clip_l_skip": OptionInfo(False, "Clip skip SDXL", gr.Checkbox).info("Enable Clip skip for the secondary clip model in sdxl. Has no effect on SD 1.5 or SD 2.0/2.1."),
"CLIP_stop_at_last_layers": OptionInfo(1, "(Managed by Forge)", gr.State),
"CLIP_stop_at_last_layers": OptionInfo(1, "(Managed by Forge)", gr.State, infotext="Clip skip"),
"upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"),
"randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}, infotext="RNG").info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"),
"tiling": OptionInfo(False, "Tiling", infotext='Tiling').info("produce a tileable picture"),
@@ -206,7 +206,7 @@ image into latent space representation and back. Latent space representation is
For img2img, VAE is used to process user's input image before the sampling, and to create an image after sampling.
"""),
"sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
"sd_vae": OptionInfo("Automatic", "(Managed by Forge)", gr.State),
"sd_vae": OptionInfo("Automatic", "(Managed by Forge)", gr.State, infotext='VAE'),
"sd_vae_overrides_per_model_preferences": OptionInfo(True, "Selected VAE overrides per-model preferences").info("you can set per-model VAE either by editing user metadata for checkpoints, or by making the VAE have same name as checkpoint"),
"auto_vae_precision_bfloat16": OptionInfo(False, "Automatically convert VAE to bfloat16").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image; if enabled, overrides the option below"),
"auto_vae_precision": OptionInfo(True, "Automatically revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"),