diff --git a/modules/processing_scripts/comments.py b/modules/processing_scripts/comments.py
index cf81dfd8..f896428f 100644
--- a/modules/processing_scripts/comments.py
+++ b/modules/processing_scripts/comments.py
@@ -1,49 +1,13 @@
-from modules import scripts, shared, script_callbacks
+from modules import shared
 import re
 
-
 def strip_comments(text):
-    text = re.sub('(^|\n)#[^\n]*(\n|$)', '\n', text)  # while line comment
-    text = re.sub('#[^\n]*(\n|$)', '\n', text)  # in the middle of the line comment
+    if shared.opts.enable_prompt_comments:
+        text = re.sub('(^|\n)#[^\n]*(\n|$)', '\n', text)  # whole line comment
+        text = re.sub('#[^\n]*(\n|$)', '\n', text)  # in the middle of the line comment
 
     return text
 
-
-class ScriptStripComments(scripts.Script):
-    def title(self):
-        return "Comments"
-
-    def show(self, is_img2img):
-        return scripts.AlwaysVisible
-
-    def process(self, p, *args):
-        if not shared.opts.enable_prompt_comments:
-            return
-
-        p.all_prompts = [strip_comments(x) for x in p.all_prompts]
-        p.all_negative_prompts = [strip_comments(x) for x in p.all_negative_prompts]
-
-        p.main_prompt = strip_comments(p.main_prompt)
-        p.main_negative_prompt = strip_comments(p.main_negative_prompt)
-
-        if getattr(p, 'enable_hr', False):
-            p.all_hr_prompts = [strip_comments(x) for x in p.all_hr_prompts]
-            p.all_hr_negative_prompts = [strip_comments(x) for x in p.all_hr_negative_prompts]
-
-            p.hr_prompt = strip_comments(p.hr_prompt)
-            p.hr_negative_prompt = strip_comments(p.hr_negative_prompt)
-
-
-def before_token_counter(params: script_callbacks.BeforeTokenCounterParams):
-    if not shared.opts.enable_prompt_comments:
-        return
-
-    params.prompt = strip_comments(params.prompt)
-
-
-script_callbacks.on_before_token_counter(before_token_counter)
-
-
 shared.options_templates.update(shared.options_section(('sd', "Stable Diffusion", "sd"), {
     "enable_prompt_comments": shared.OptionInfo(True, "Enable comments").info("Use # anywhere in the prompt to hide the text between # and the end of the line from the generation."),
 }))
diff --git a/modules/styles.py b/modules/styles.py
index 25f22d3d..350ea830 100644
--- a/modules/styles.py
+++ b/modules/styles.py
@@ -1,10 +1,10 @@
-from __future__ import annotations
 from pathlib import Path
 from modules import errors
 import csv
 import os
 import typing
 import shutil
+import modules.processing_scripts.comments as comments
 
 
 class PromptStyle(typing.NamedTuple):
@@ -14,19 +14,18 @@ class PromptStyle(typing.NamedTuple):
     path: str | None = None
 
 
-def merge_prompts(style_prompt: str, prompt: str) -> str:
-    if "{prompt}" in style_prompt:
-        res = style_prompt.replace("{prompt}", prompt)
-    else:
-        parts = filter(None, (prompt.strip(), style_prompt.strip()))
-        res = ", ".join(parts)
-
-    return res
-
-
 def apply_styles_to_prompt(prompt, styles):
+    prompt = comments.strip_comments(prompt).strip()
+
     for style in styles:
-        prompt = merge_prompts(style, prompt)
+        style = comments.strip_comments(style).strip()
+
+        if "{prompt}" in style:
+            prompt = style.replace("{prompt}", prompt)
+        elif style != "":
+            if prompt != "":
+                prompt += ", "
+            prompt += style
 
     return prompt
 
@@ -40,7 +39,7 @@ def extract_style_text_from_prompt(style_text, prompt):
     """
 
     stripped_prompt = prompt.strip()
-    stripped_style_text = style_text.strip()
+    stripped_style_text = comments.strip_comments(style_text).strip()
 
     if "{prompt}" in stripped_style_text:
         left, _, right = stripped_style_text.partition("{prompt}")
@@ -207,28 +206,30 @@ class StyleDatabase:
             {k: v for k, v in style._asdict().items() if k != "path"}
         )
 
-    def extract_styles_from_prompt(self, prompt, negative_prompt):
+    def extract_styles_from_prompt(self, positive, negative):
         extracted = []
 
         applicable_styles = list(self.styles.values())
 
+        positive = comments.strip_comments(positive)
+        negative = comments.strip_comments(negative)
+
         while True:
             found_style = None
 
             for style in applicable_styles:
-                is_match, new_prompt, new_neg_prompt = extract_original_prompts(
-                    style, prompt, negative_prompt
+                is_match, new_positive, new_negative = extract_original_prompts(
+                    style, positive, negative
                 )
                 if is_match:
                     found_style = style
-                    prompt = new_prompt
-                    negative_prompt = new_neg_prompt
+                    positive = new_positive
+                    negative = new_negative
+                    applicable_styles.remove(found_style)
+                    extracted.append(found_style.name)
                     break
 
             if not found_style:
                 break
 
-            applicable_styles.remove(found_style)
-            extracted.append(found_style.name)
-
-        return list(reversed(extracted)), prompt, negative_prompt
+        return list(reversed(extracted)), positive, negative
\ No newline at end of file
diff --git a/modules/ui.py b/modules/ui.py
index f9c7f493..4f6b5807 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -27,6 +27,7 @@ from modules import prompt_parser
 from modules.infotext_utils import image_from_url_text, PasteField
 from modules_forge.forge_canvas.canvas import ForgeCanvas, canvas_head
 from modules_forge import main_entry, forge_space
+import modules.processing_scripts.comments as comments
 
 create_setting_component = ui_settings.create_setting_component
 
@@ -173,6 +174,8 @@ def update_token_counter(text, steps, styles, *, is_positive=True):
     if shared.opts.include_styles_into_token_counters:
         apply_styles = shared.prompt_styles.apply_styles_to_prompt if is_positive else shared.prompt_styles.apply_negative_styles_to_prompt
         text = apply_styles(text, styles)
+    else:
+        text = comments.strip_comments(text).strip()
 
     try:
         text, _ = extra_networks.parse_prompt(text)
diff --git a/modules/ui_prompt_styles.py b/modules/ui_prompt_styles.py
index f71b40c4..be678bbf 100644
--- a/modules/ui_prompt_styles.py
+++ b/modules/ui_prompt_styles.py
@@ -67,7 +67,7 @@ class UiPromptStyles:
             with gr.Row():
                 self.selection = gr.Dropdown(label="Styles", elem_id=f"{tabname}_styles_edit_select", choices=list(shared.prompt_styles.styles), value=[], allow_custom_value=True, info="Styles allow you to add custom text to prompt. Use the {prompt} token in style text, and it will be replaced with user's prompt when applying style. Otherwise, style's text will be added to the end of the prompt.")
                 ui_common.create_refresh_button([self.dropdown, self.selection], shared.prompt_styles.reload, lambda: {"choices": list(shared.prompt_styles.styles)}, f"refresh_{tabname}_styles")
-                self.materialize = ui_components.ToolButton(value=styles_materialize_symbol, elem_id=f"{tabname}_style_apply_dialog", tooltip="Apply all selected styles from the style selection dropdown in main UI to the prompt.")
+                self.materialize = ui_components.ToolButton(value=styles_materialize_symbol, elem_id=f"{tabname}_style_apply_dialog", tooltip="Apply all selected styles from the style selection dropdown in main UI to the prompt. Strips comments, if enabled.")
                 self.copy = ui_components.ToolButton(value=styles_copy_symbol, elem_id=f"{tabname}_style_copy", tooltip="Copy main UI prompt to style.")
 
             with gr.Row():
diff --git a/modules/ui_toprow.py b/modules/ui_toprow.py
index 622ed587..11cef1bf 100644
--- a/modules/ui_toprow.py
+++ b/modules/ui_toprow.py
@@ -119,7 +119,7 @@ class Toprow:
 
             self.paste = ToolButton(value=paste_symbol, elem_id="paste", tooltip="Read generation parameters from prompt or last generation if prompt is empty into user interface.")
             self.clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{self.id_part}_clear_prompt", tooltip="Clear prompt")
-            self.apply_styles = ToolButton(value=ui_prompt_styles.styles_materialize_symbol, elem_id=f"{self.id_part}_style_apply", tooltip="Apply all selected styles to prompts.")
+            self.apply_styles = ToolButton(value=ui_prompt_styles.styles_materialize_symbol, elem_id=f"{self.id_part}_style_apply", tooltip="Apply all selected styles to prompts. Strips comments, if enabled.")
 
             if self.is_img2img:
                 self.button_interrogate = ToolButton('📎', tooltip='Interrogate CLIP - use CLIP neural network to create a text describing the image, and put it into the prompt field', elem_id="interrogate")
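
For reference, a minimal standalone sketch of the behaviour the patch introduces, not part of the diff itself. It copies the two regular expressions and the style-merge logic from the `+` lines above; the `enabled` parameter stands in for `shared.opts.enable_prompt_comments` (unavailable outside the webui), and the example prompt and style strings are hypothetical.

```python
import re


def strip_comments(text, enabled=True):
    # Same substitutions as the patched strip_comments(); `enabled` replaces
    # the shared.opts.enable_prompt_comments option for this standalone sketch.
    if enabled:
        text = re.sub('(^|\n)#[^\n]*(\n|$)', '\n', text)  # whole line comment
        text = re.sub('#[^\n]*(\n|$)', '\n', text)  # in the middle of the line comment

    return text


def apply_styles_to_prompt(prompt, styles):
    # Same merge logic as the patched modules/styles.py: strip comments from the
    # prompt and from every style, then substitute {prompt} or append the style.
    prompt = strip_comments(prompt).strip()

    for style in styles:
        style = strip_comments(style).strip()

        if "{prompt}" in style:
            prompt = style.replace("{prompt}", prompt)
        elif style != "":
            if prompt != "":
                prompt += ", "
            prompt += style

    return prompt


if __name__ == "__main__":
    # Hypothetical example values, for illustration only.
    user_prompt = "a castle on a hill  # keep this out of generation\n# whole-line note\nsunset lighting"
    style = "masterpiece, {prompt}, highly detailed  # style-side comment"
    print(apply_styles_to_prompt(user_prompt, [style]))
    # Prints (a newline remains where the whole-line comment was stripped):
    # masterpiece, a castle on a hill
    # sunset lighting, highly detailed
```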