Merge branch 'dev' into main

This commit is contained in:
Bingsu
2023-08-31 21:26:27 +09:00
19 changed files with 136 additions and 562 deletions

View File

@@ -8,12 +8,12 @@ repos:
- id: mixed-line-ending
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: "v0.0.285"
rev: "v0.0.286"
hooks:
- id: ruff
args: [--fix, --exit-non-zero-on-fix]
- repo: https://github.com/psf/black
- repo: https://github.com/psf/black-pre-commit-mirror
rev: 23.7.0
hooks:
- id: black

View File

@@ -1,5 +1,14 @@
# Changelog
## 2023-08-31
- v23.9.0
- (실험적) 체크포인트 선택기능
- 버그가 있어 리프레시 버튼은 구현에서 빠짐
- 1.6.0 업데이트에 따라 img2img에서 사용불가능한 샘플러를 선택했을 때 더이상 Euler로 변경하지 않음
- 유효하지 않은 인자가 전달되었을 때, 에러를 일으키지 않고 대신 adetailer를 비활성화함
## 2023-08-25
- v23.8.1

View File

@@ -5,9 +5,6 @@ version: "3"
dotenv:
- .env
vars:
SHELL: '{{if eq .OS "Windows_NT"}}powershell{{end}}'
tasks:
default:
cmds:
@@ -18,7 +15,8 @@ tasks:
launch:
dir: "{{.WEBUI}}"
cmds:
- "{{.PYTHON}} launch.py --xformers --api --autolaunch"
- "{{.PYTHON}} launch.py --xformers --api"
silent: true
lint:
cmds:

View File

@@ -1 +1 @@
__version__ = "23.8.1"
__version__ = "23.9.0"

View File

@@ -59,6 +59,8 @@ class ADetailerArgs(BaseModel, extra=Extra.forbid):
ad_steps: PositiveInt = 28
ad_use_cfg_scale: bool = False
ad_cfg_scale: NonNegativeFloat = 7.0
ad_use_checkpoint: bool = False
ad_checkpoint: Optional[str] = None
ad_use_sampler: bool = False
ad_sampler: str = "DPM++ 2M Karras"
ad_use_noise_multiplier: bool = False
@@ -136,6 +138,10 @@ class ADetailerArgs(BaseModel, extra=Extra.forbid):
"ADetailer use separate CFG scale",
["ADetailer use separate CFG scale", "ADetailer CFG scale"],
)
ppop(
"ADetailer use separate checkpoint",
["ADetailer use separate checkpoint", "ADetailer checkpoint"],
)
ppop(
"ADetailer use separate sampler",
["ADetailer use separate sampler", "ADetailer sampler"],
@@ -208,6 +214,8 @@ _all_args = [
("ad_steps", "ADetailer steps"),
("ad_use_cfg_scale", "ADetailer use separate CFG scale"),
("ad_cfg_scale", "ADetailer CFG scale"),
("ad_use_checkpoint", "ADetailer use separate checkpoint"),
("ad_checkpoint", "ADetailer checkpoint"),
("ad_use_sampler", "ADetailer use separate sampler"),
("ad_sampler", "ADetailer sampler"),
("ad_use_noise_multiplier", "ADetailer use separate noise multiplier"),

View File

@@ -1,8 +1,9 @@
from __future__ import annotations
from dataclasses import dataclass
from functools import partial
from types import SimpleNamespace
from typing import Any
from typing import Any, Callable
import gradio as gr
@@ -22,6 +23,15 @@ class Widgets(SimpleNamespace):
return [getattr(self, attr) for attr in ALL_ARGS.attrs]
@dataclass
class WebuiInfo:
ad_model_list: list[str]
sampler_names: list[str]
t2i_button: gr.Button
i2i_button: gr.Button
checkpoints_list: Callable
def gr_interactive(value: bool = True):
return gr.update(interactive=value)
@@ -64,10 +74,7 @@ def elem_id(item_id: str, n: int, is_img2img: bool) -> str:
def adui(
num_models: int,
is_img2img: bool,
model_list: list[str],
samplers: list[str],
t2i_button: gr.Button,
i2i_button: gr.Button,
webui_info: WebuiInfo,
):
states = []
infotext_fields = []
@@ -97,10 +104,7 @@ def adui(
state, infofields = one_ui_group(
n=n,
is_img2img=is_img2img,
model_list=model_list,
samplers=samplers,
t2i_button=t2i_button,
i2i_button=i2i_button,
webui_info=webui_info,
)
states.append(state)
@@ -111,20 +115,17 @@ def adui(
return components, infotext_fields
def one_ui_group(
n: int,
is_img2img: bool,
model_list: list[str],
samplers: list[str],
t2i_button: gr.Button,
i2i_button: gr.Button,
):
def one_ui_group(n: int, is_img2img: bool, webui_info: WebuiInfo):
w = Widgets()
state = gr.State({})
eid = partial(elem_id, n=n, is_img2img=is_img2img)
with gr.Row():
model_choices = [*model_list, "None"] if n == 0 else ["None", *model_list]
model_choices = (
[*webui_info.ad_model_list, "None"]
if n == 0
else ["None", *webui_info.ad_model_list]
)
w.ad_model = gr.Dropdown(
label="ADetailer model" + suffix(n),
@@ -174,13 +175,13 @@ def one_ui_group(
with gr.Accordion(
"Inpainting", open=False, elem_id=eid("ad_inpainting_accordion")
):
inpainting(w, n, is_img2img, samplers)
inpainting(w, n, is_img2img, webui_info)
with gr.Group():
controlnet(w, n, is_img2img)
all_inputs = [state, *w.tolist()]
target_button = i2i_button if is_img2img else t2i_button
target_button = webui_info.i2i_button if is_img2img else webui_info.t2i_button
target_button.click(
fn=on_generate_click, inputs=all_inputs, outputs=state, queue=False
)
@@ -280,7 +281,7 @@ def mask_preprocessing(w: Widgets, n: int, is_img2img: bool):
)
def inpainting(w: Widgets, n: int, is_img2img: bool, samplers: list[str]):
def inpainting(w: Widgets, n: int, is_img2img: bool, webui_info: WebuiInfo):
eid = partial(elem_id, n=n, is_img2img=is_img2img)
with gr.Group():
@@ -417,6 +418,27 @@ def inpainting(w: Widgets, n: int, is_img2img: bool, samplers: list[str]):
)
with gr.Row():
with gr.Column(variant="compact"):
w.ad_use_checkpoint = gr.Checkbox(
label="Use separate checkpoint (experimental)" + suffix(n),
value=False,
visible=True,
elem_id=eid("ad_use_checkpoint"),
)
ckpts = [
"Use same checkpoint",
*webui_info.checkpoints_list(use_short=True),
]
w.ad_checkpoint = gr.Dropdown(
label="ADetailer checkpoint" + suffix(n),
choices=ckpts,
value=ckpts[0],
visible=True,
elem_id=eid("ad_checkpoint"),
)
with gr.Column(variant="compact"):
w.ad_use_sampler = gr.Checkbox(
label="Use separate sampler" + suffix(n),
@@ -427,8 +449,8 @@ def inpainting(w: Widgets, n: int, is_img2img: bool, samplers: list[str]):
w.ad_sampler = gr.Dropdown(
label="ADetailer sampler" + suffix(n),
choices=samplers,
value=samplers[0],
choices=webui_info.sampler_names,
value=webui_info.sampler_names[0],
visible=True,
elem_id=eid("ad_sampler"),
)
@@ -440,6 +462,7 @@ def inpainting(w: Widgets, n: int, is_img2img: bool, samplers: list[str]):
queue=False,
)
with gr.Row():
with gr.Column(variant="compact"):
w.ad_use_noise_multiplier = gr.Checkbox(
label="Use separate noise multiplier" + suffix(n),
@@ -465,7 +488,6 @@ def inpainting(w: Widgets, n: int, is_img2img: bool, samplers: list[str]):
queue=False,
)
with gr.Row():
with gr.Column(variant="compact"):
w.ad_use_clip_skip = gr.Checkbox(
label="Use separate CLIP skip" + suffix(n),
@@ -491,12 +513,12 @@ def inpainting(w: Widgets, n: int, is_img2img: bool, samplers: list[str]):
queue=False,
)
with gr.Column(variant="compact"):
w.ad_restore_face = gr.Checkbox(
label="Restore faces after ADetailer" + suffix(n),
value=False,
elem_id=eid("ad_restore_face"),
)
with gr.Row(), gr.Column(variant="compact"):
w.ad_restore_face = gr.Checkbox(
label="Restore faces after ADetailer" + suffix(n),
value=False,
elem_id=eid("ad_restore_face"),
)
def controlnet(w: Widgets, n: int, is_img2img: bool):

View File

@@ -44,7 +44,7 @@ def run_pip(*args):
def install():
deps = [
# requirements
("ultralytics", "8.0.162", None),
("ultralytics", "8.0.167", None),
("mediapipe", "0.10.3", None),
("rich", "13.0.0", None),
# mediapipe

View File

@@ -1,12 +1,10 @@
[project]
name = "adetailer"
description = "An object detection and auto-mask extension for stable diffusion webui."
authors = [
{name = "dowon", email = "ks2515@naver.com"},
]
authors = [{ name = "dowon", email = "ks2515@naver.com" }]
requires-python = ">=3.8,<3.12"
readme = "README.md"
license = {text = "AGPL-3.0"}
license = { text = "AGPL-3.0" }
[project.urls]
repository = "https://github.com/Bing-su/adetailer"
@@ -16,11 +14,27 @@ profile = "black"
known_first_party = ["launch", "modules"]
[tool.ruff]
select = ["A", "B", "C4", "C90", "E", "EM", "F", "FA", "I001", "ISC", "N", "PIE", "PT", "RET", "RUF", "SIM", "UP", "W"]
select = [
"A",
"B",
"C4",
"C90",
"E",
"EM",
"F",
"FA",
"I001",
"ISC",
"N",
"PIE",
"PT",
"RET",
"RUF",
"SIM",
"UP",
"W",
]
ignore = ["B008", "B905", "E501", "F401", "UP007"]
[tool.ruff.isort]
known-first-party = ["launch", "modules"]
[tool.ruff.per-file-ignores]
"sd_webui/*.py" = ["B027", "F403"]

View File

@@ -33,23 +33,23 @@ from adetailer.mask import (
sort_bboxes,
)
from adetailer.traceback import rich_traceback
from adetailer.ui import adui, ordinal, suffix
from adetailer.ui import WebuiInfo, adui, ordinal, suffix
from controlnet_ext import ControlNetExt, controlnet_exists, get_cn_models
from controlnet_ext.restore import (
CNHijackRestore,
cn_allow_script_control,
)
from sd_webui import images, safe, script_callbacks, scripts, shared
from sd_webui.devices import NansException
from sd_webui.paths import data_path, models_path
from sd_webui.processing import (
from modules import images, safe, script_callbacks, scripts, shared
from modules.devices import NansException
from modules.paths import data_path, models_path
from modules.processing import (
Processed,
StableDiffusionProcessingImg2Img,
create_infotext,
process_images,
)
from sd_webui.sd_samplers import all_samplers
from sd_webui.shared import cmd_opts, opts, state
from modules.sd_samplers import all_samplers
from modules.shared import cmd_opts, opts, state
no_huggingface = getattr(cmd_opts, "ad_no_huggingface", False)
adetailer_dir = Path(models_path, "adetailer")
@@ -118,18 +118,18 @@ class AfterDetailerScript(scripts.Script):
def ui(self, is_img2img):
num_models = opts.data.get("ad_max_models", 2)
model_list = list(model_mapping.keys())
samplers = [sampler.name for sampler in all_samplers]
components, infotext_fields = adui(
num_models,
is_img2img,
model_list,
samplers,
txt2img_submit_button,
img2img_submit_button,
ad_model_list = list(model_mapping.keys())
sampler_names = [sampler.name for sampler in all_samplers]
webui_info = WebuiInfo(
ad_model_list=ad_model_list,
sampler_names=sampler_names,
t2i_button=txt2img_submit_button,
i2i_button=img2img_submit_button,
checkpoints_list=modules.sd_models.checkpoint_tiles,
)
components, infotext_fields = adui(num_models, is_img2img, webui_info)
self.infotext_fields = infotext_fields
return components
@@ -172,8 +172,10 @@ class AfterDetailerScript(scripts.Script):
message = f"""
[-] ADetailer: Invalid arguments passed to ADetailer.
input: {args_!r}
ADetailer disabled.
"""
raise ValueError(dedent(message))
print(dedent(message), file=sys.stderr)
return False
enable = args_[0] if isinstance(args_[0], bool) else True
checker = EnableChecker(enable=enable, arg_list=arg_list)
return checker.is_enabled()
@@ -296,32 +298,30 @@ class AfterDetailerScript(scripts.Script):
return width, height
def get_steps(self, p, args: ADetailerArgs) -> int:
if args.ad_use_steps:
return args.ad_steps
return p.steps
return args.ad_steps if args.ad_use_steps else p.steps
def get_cfg_scale(self, p, args: ADetailerArgs) -> float:
if args.ad_use_cfg_scale:
return args.ad_cfg_scale
return p.cfg_scale
return args.ad_cfg_scale if args.ad_use_cfg_scale else p.cfg_scale
def get_sampler(self, p, args: ADetailerArgs) -> str:
sampler_name = args.ad_sampler if args.ad_use_sampler else p.sampler_name
if sampler_name in ["PLMS", "UniPC"]:
sampler_name = "Euler"
return sampler_name
return args.ad_sampler if args.ad_use_sampler else p.sampler_name
def get_override_settings(self, p, args: ADetailerArgs) -> dict[str, Any]:
d = {}
if args.ad_use_clip_skip:
d["CLIP_stop_at_last_layers"] = args.ad_clip_skip
if (
args.ad_use_checkpoint
and args.ad_checkpoint
and args.ad_checkpoint not in ("None", "Use same checkpoint")
):
d["sd_model_checkpoint"] = args.ad_checkpoint
return d
def get_initial_noise_multiplier(self, p, args: ADetailerArgs) -> float | None:
if args.ad_use_noise_multiplier:
return args.ad_noise_multiplier
return None
return args.ad_noise_multiplier if args.ad_use_noise_multiplier else None
@staticmethod
def infotext(p) -> str:
@@ -423,7 +423,7 @@ class AfterDetailerScript(scripts.Script):
i2i.cached_c = [None, None]
i2i.cached_uc = [None, None]
i2i.scripts, i2i.script_args = self.script_filter(p, args)
i2i._disable_adetailer = True
i2i._ad_disabled = True
if args.ad_controlnet_model != "None":
self.update_controlnet_args(i2i, args)
@@ -522,7 +522,7 @@ class AfterDetailerScript(scripts.Script):
@rich_traceback
def process(self, p, *args_):
if getattr(p, "_disable_adetailer", False):
if getattr(p, "_ad_disabled", False):
return
if self.is_ad_enabled(*args_):
@@ -530,7 +530,9 @@ class AfterDetailerScript(scripts.Script):
extra_params = self.extra_params(arg_list)
p.extra_generation_params.update(extra_params)
def _postprocess_image(self, p, pp, args: ADetailerArgs, *, n: int = 0) -> bool:
def _postprocess_image_inner(
self, p, pp, args: ADetailerArgs, *, n: int = 0
) -> bool:
"""
Returns
-------
@@ -616,7 +618,7 @@ class AfterDetailerScript(scripts.Script):
@rich_traceback
def postprocess_image(self, p, pp, *args_):
if getattr(p, "_disable_adetailer", False):
if getattr(p, "_ad_disabled", False):
return
if not self.is_ad_enabled(*args_):
@@ -636,7 +638,7 @@ class AfterDetailerScript(scripts.Script):
for n, args in enumerate(arg_list):
if args.ad_model == "None":
continue
is_processed |= self._postprocess_image(p, pp, args, n=n)
is_processed |= self._postprocess_image_inner(p, pp, args, n=n)
if is_processed:
self.save_image(

View File

View File

@@ -1,11 +0,0 @@
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
class NansException(Exception): # noqa: N818
pass
else:
from modules.devices import NansException

View File

@@ -1,62 +0,0 @@
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from PIL import Image, PngImagePlugin
from sd_webui.processing import StableDiffusionProcessing
def save_image(
image: Image.Image,
path: str,
basename: str,
seed: int | None = None,
prompt: str = "",
extension: str = "png",
info: str | PngImagePlugin.iTXt = "",
short_filename: bool = False,
no_prompt: bool = False,
grid: bool = False,
pnginfo_section_name: str = "parameters",
p: StableDiffusionProcessing | None = None,
existing_info: dict | None = None,
forced_filename: str | None = None,
suffix: str = "",
save_to_dirs: bool = False,
) -> tuple[str, str | None]:
"""Save an image.
Args:
image (`PIL.Image`):
The image to be saved.
path (`str`):
The directory to save the image. Note, the option `save_to_dirs` will make the image to be saved into a sub directory.
basename (`str`):
The base filename which will be applied to `filename pattern`.
seed, prompt, short_filename,
extension (`str`):
Image file extension, default is `png`.
pnginfo_section_name (`str`):
Specify the name of the section which `info` will be saved in.
info (`str` or `PngImagePlugin.iTXt`):
PNG info chunks.
existing_info (`dict`):
Additional PNG info. `existing_info == {pngsectionname: info, ...}`
no_prompt:
TODO I don't know its meaning.
p (`StableDiffusionProcessing`)
forced_filename (`str`):
If specified, `basename` and filename pattern will be ignored.
save_to_dirs (bool):
If true, the image will be saved into a subdirectory of `path`.
Returns: (fullfn, txt_fullfn)
fullfn (`str`):
The full path of the saved image.
txt_fullfn (`str` or None):
If a text file is saved for this image, this will be its full path. Otherwise None.
"""
else:
from modules.images import save_image

View File

@@ -1,14 +0,0 @@
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import os
models_path = os.path.join(os.path.dirname(__file__), "1")
script_path = os.path.join(os.path.dirname(__file__), "2")
data_path = os.path.join(os.path.dirname(__file__), "3")
extensions_dir = os.path.join(os.path.dirname(__file__), "4")
extensions_builtin_dir = os.path.join(os.path.dirname(__file__), "5")
else:
from modules.paths import data_path, models_path, script_path

View File

@@ -1,179 +0,0 @@
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from dataclasses import dataclass, field
from typing import Any, Callable
import numpy as np
import torch
from PIL import Image
def _image():
return Image.new("L", (512, 512))
@dataclass
class StableDiffusionProcessing:
sd_model: torch.nn.Module = field(default_factory=lambda: torch.nn.Linear(1, 1))
outpath_samples: str = ""
outpath_grids: str = ""
prompt: str = ""
prompt_for_display: str = ""
negative_prompt: str = ""
styles: list[str] = field(default_factory=list)
seed: int = -1
subseed: int = -1
subseed_strength: float = 0.0
seed_resize_from_h: int = -1
seed_resize_from_w: int = -1
sampler_name: str | None = None
batch_size: int = 1
n_iter: int = 1
steps: int = 50
cfg_scale: float = 7.0
width: int = 512
height: int = 512
restore_faces: bool = False
tiling: bool = False
do_not_save_samples: bool = False
do_not_save_grid: bool = False
extra_generation_params: dict[str, Any] = field(default_factory=dict)
overlay_images: list[Image.Image] = field(default_factory=list)
eta: float = 0.0
do_not_reload_embeddings: bool = False
paste_to: tuple[int | float, ...] = (0, 0, 0, 0)
color_corrections: list[np.ndarray] = field(default_factory=list)
denoising_strength: float = 0.0
sampler_noise_scheduler_override: Callable | None = None
ddim_discretize: str = ""
s_min_uncond: float = 0.0
s_churn: float = 0.0
s_tmin: float = 0.0
s_tmax: float = 0.0
s_noise: float = 0.0
override_settings: dict[str, Any] = field(default_factory=dict)
override_settings_restore_afterwards: bool = False
is_using_inpainting_conditioning: bool = False
disable_extra_networks: bool = False
scripts: Any = None
script_args: list[Any] = field(default_factory=list)
all_prompts: list[str] = field(default_factory=list)
all_negative_prompts: list[str] = field(default_factory=list)
all_seeds: list[int] = field(default_factory=list)
all_subseeds: list[int] = field(default_factory=list)
iteration: int = 1
is_hr_pass: bool = False
def close(self) -> None:
pass
@dataclass
class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
sampler: Callable | None = None
enable_hr: bool = False
denoising_strength: float = 0.75
hr_scale: float = 2.0
hr_upscaler: str = ""
hr_second_pass_steps: int = 0
hr_resize_x: int = 0
hr_resize_y: int = 0
hr_upscale_to_x: int = 0
hr_upscale_to_y: int = 0
width: int = 512
height: int = 512
truncate_x: int = 512
truncate_y: int = 512
applied_old_hires_behavior_to: tuple[int, int] = (512, 512)
@dataclass
class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
sampler: Callable | None = None
init_images: list[Image.Image] = field(default_factory=list)
resize_mode: int = 0
denoising_strength: float = 0.75
image_cfg_scale: float | None = None
init_latent: torch.Tensor | None = None
image_mask: Image.Image = field(default_factory=_image)
latent_mask: Image.Image = field(default_factory=_image)
mask_for_overlay: Image.Image = field(default_factory=_image)
mask_blur: int = 4
inpainting_fill: int = 0
inpaint_full_res: bool = True
inpaint_full_res_padding: int = 0
inpainting_mask_invert: int | bool = 0
initial_noise_multiplier: float = 1.0
mask: torch.Tensor | None = None
nmask: torch.Tensor | None = None
image_conditioning: torch.Tensor | None = None
@dataclass
class Processed:
images: list[Image.Image] = field(default_factory=list)
prompt: list[str] = field(default_factory=list)
negative_prompt: list[str] = field(default_factory=list)
seed: list[int] = field(default_factory=list)
subseed: list[int] = field(default_factory=list)
subseed_strength: float = 0.0
info: str = ""
comments: str = ""
width: int = 512
height: int = 512
sampler_name: str = ""
cfg_scale: float = 7.0
image_cfg_scale: float | None = None
steps: int = 50
batch_size: int = 1
restore_faces: bool = False
face_restoration_model: str | None = None
sd_model_hash: str = ""
seed_resize_from_w: int = -1
seed_resize_from_h: int = -1
denoising_strength: float = 0.0
extra_generation_params: dict[str, Any] = field(default_factory=dict)
index_of_first_image: int = 0
styles: list[str] = field(default_factory=list)
job_timestamp: str = ""
clip_skip: int = 1
eta: float = 0.0
ddim_discretize: str = ""
s_churn: float = 0.0
s_tmin: float = 0.0
s_tmax: float = 0.0
s_noise: float = 0.0
sampler_noise_scheduler_override: Callable | None = None
is_using_inpainting_conditioning: bool = False
all_prompts: list[str] = field(default_factory=list)
all_negative_prompts: list[str] = field(default_factory=list)
all_seeds: list[int] = field(default_factory=list)
all_subseeds: list[int] = field(default_factory=list)
infotexts: list[str] = field(default_factory=list)
def create_infotext(
p: StableDiffusionProcessingTxt2Img | StableDiffusionProcessingImg2Img,
all_prompts: list[str],
all_seeds: list[int],
all_subseeds: list[int],
comments: Any,
iteration: int = 0,
position_in_batch: int = 0,
use_main_prompt: bool = False,
index: int | None = None,
all_negative_prompts: list[str] | None = None,
) -> str:
pass
def process_images(
p: StableDiffusionProcessingTxt2Img | StableDiffusionProcessingImg2Img,
) -> Processed:
pass
else:
from modules.processing import (
Processed,
StableDiffusionProcessing,
StableDiffusionProcessingImg2Img,
StableDiffusionProcessingTxt2Img,
create_infotext,
process_images,
)

View File

@@ -1,10 +0,0 @@
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import torch
unsafe_torch_load = torch.load
else:
from modules.safe import unsafe_torch_load

View File

@@ -1,26 +0,0 @@
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Callable
def on_app_started(callback: Callable):
pass
def on_ui_settings(callback: Callable):
pass
def on_after_component(callback: Callable):
pass
def on_before_ui(callback: Callable):
pass
else:
from modules.script_callbacks import (
on_after_component,
on_app_started,
on_before_ui,
on_ui_settings,
)

View File

@@ -1,94 +0,0 @@
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from abc import ABC, abstractmethod
from collections import namedtuple
from dataclasses import dataclass
from typing import Any
import gradio as gr
from PIL import Image
from sd_webui.processing import (
Processed,
StableDiffusionProcessingImg2Img,
StableDiffusionProcessingTxt2Img,
)
SDPType = StableDiffusionProcessingImg2Img | StableDiffusionProcessingTxt2Img
AlwaysVisible = object()
@dataclass
class PostprocessImageArgs:
image: Image.Image
class Script(ABC):
filename: str
args_from: int
args_to: int
alwayson: bool
is_txt2img: bool
is_img2img: bool
group: gr.Group
infotext_fields: list[tuple[str, str]]
paste_field_names: list[str]
@abstractmethod
def title(self):
raise NotImplementedError
def ui(self, is_img2img: bool):
pass
def show(self, is_img2img: bool):
return True
def run(self, p: SDPType, *args):
pass
def process(self, p: SDPType, *args):
pass
def before_process_batch(self, p: SDPType, *args, **kwargs):
pass
def process_batch(self, p: SDPType, *args, **kwargs):
pass
def postprocess_batch(self, p: SDPType, *args, **kwargs):
pass
def postprocess_image(self, p: SDPType, pp: PostprocessImageArgs, *args):
pass
def postprocess(self, p: SDPType, processed: Processed, *args):
pass
def before_component(self, component, **kwargs):
pass
def after_component(self, component, **kwargs):
pass
def describe(self):
return ""
def elem_id(self, item_id: Any) -> str:
pass
ScriptClassData = namedtuple(
"ScriptClassData", ["script_class", "path", "basedir", "module"]
)
scripts_data: list[ScriptClassData] = []
else:
from modules.scripts import (
AlwaysVisible,
PostprocessImageArgs,
Script,
scripts_data,
)

View File

@@ -1,17 +0,0 @@
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any, Callable, NamedTuple
class SamplerData(NamedTuple):
name: str
constructor: Callable
aliases: list[str]
options: dict[str, Any]
all_samplers: list[SamplerData] = []
else:
from modules.sd_samplers import all_samplers

View File

@@ -1,66 +0,0 @@
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import argparse
from dataclasses import dataclass
from typing import Any, Callable
import torch
from PIL import Image
@dataclass
class State:
skipped: bool = False
interrupted: bool = False
job: str = ""
job_no: int = 0
job_count: int = 0
processing_has_refined_job_count: bool = False
job_timestamp: str = "0"
sampling_step: int = 0
sampling_steps: int = 0
current_latent: torch.Tensor | None = None
current_image: Image.Image | None = None
current_image_sampling_step: int = 0
id_live_preview: int = 0
textinfo: str | None = None
time_start: float | None = None
need_restart: bool = False
server_start: float | None = None
@dataclass
class OptionInfo:
default: Any = None
label: str = ""
component: Any = None
component_args: Callable[[], dict] | dict[str, Any] | None = None
onchange: Callable[[], None] | None = None
section: tuple[str, str] | None = None
refresh: Callable[[], None] | None = None
class Option:
data_labels: dict[str, OptionInfo]
def __init__(self):
self.data: dict[str, Any] = {}
def add_option(self, key: str, info: OptionInfo):
pass
def __getattr__(self, item: str):
if self.data is not None and item in self.data:
return self.data[item]
if item in self.data_labels:
return self.data_labels[item].default
return super().__getattribute__(item)
opts = Option()
cmd_opts = argparse.Namespace()
state = State()
else:
from modules.shared import OptionInfo, cmd_opts, opts, state