Merge branch 'dev' into main

This commit is contained in:
Bingsu
2023-05-17 10:05:51 +09:00
16 changed files with 532 additions and 23 deletions

View File

@@ -10,10 +10,9 @@ repos:
rev: 5.12.0
hooks:
- id: isort
args: [--profile=black]
- repo: https://github.com/charliermarsh/ruff-pre-commit
rev: "v0.0.265"
rev: "v0.0.267"
hooks:
- id: ruff
args: [--fix, --exit-non-zero-on-fix]

View File

@@ -1,5 +1,13 @@
# Changelog
### 2023-05-17
- v23.5.14
- `[SKIP]`으로 ad prompt 일부를 건너뛰는 기능 추가
- bbox 정렬 옵션 추가
- sd_webui 타입힌트를 만들어냄
- enable checker와 관련된 api 오류 수정?
### 2023-05-15
- v23.5.13

View File

@@ -14,6 +14,10 @@
6. Go to "Installed" tab, click "Check for updates", and then click "Apply and restart UI". (The next time you can also use this method to update extensions.)
7. Completely restart A1111 webui including your terminal. (If you do not know what a "terminal" is, you can reboot your computer: turn your computer off and turn it on again.)
You can now install it directly from the Extensions tab.
![image](https://i.imgur.com/g6GdRBT.png)
You **DON'T** need to download any model from huggingface.
## Usage
@@ -34,6 +38,8 @@ Other options:
| Mask erosion (-) / dilation (+) | Enlarge or reduce the detected mask. | [opencv example](https://docs.opencv.org/4.7.0/db/df6/tutorial_erosion_dilatation.html) |
| Mask x, y offset | Moves the mask horizontally and vertically by pixels. | |
See the [wiki](https://github.com/Bing-su/adetailer/wiki) for more options and other features.
## ControlNet Inpainting
You can use the ControlNet inpaint extension if you have ControlNet installed and a ControlNet inpaint model.

View File

@@ -1 +1 @@
__version__ = "23.5.13"
__version__ = "23.5.14"

View File

@@ -110,13 +110,13 @@ class ADetailerArgs(BaseModel, extra=Extra.forbid):
class EnableChecker(BaseModel):
    """Checks whether ADetailer is enabled from the raw script arguments.

    `a0` is either the legacy "enable" checkbox (bool) or the first
    argument dict; `a1` is whatever was passed next (may not be a dict
    when the UI sends unexpected values, hence `Any`).
    """

    a0: Union[bool, dict]
    a1: Any

    def is_enabled(self) -> bool:
        """Return True when an ad model other than "None" is selected."""
        ad_model = ALL_ARGS[0].attr
        if isinstance(self.a0, dict):
            return self.a0.get(ad_model, "None") != "None"
        # Guard against non-dict payloads instead of only None, so malformed
        # args disable ADetailer rather than raising.
        if not isinstance(self.a1, dict):
            return False
        return self.a0 and self.a1.get(ad_model, "None") != "None"
@@ -148,3 +148,9 @@ _all_args = [
AD_ENABLE = Arg(*_all_args[0])
_args = [Arg(*args) for args in _all_args[1:]]
ALL_ARGS = ArgsList(_args)
# Display choices for the "ad_bbox_sortby" setting; the index of the chosen
# label in this list is passed as the sort order to the bbox sorter.
BBOX_SORTBY = [
    "None",
    "Position (left to right)",
    "Position (center to edge)",
    "Area (large to small)",
]

View File

@@ -2,6 +2,9 @@ from __future__ import annotations
from collections import OrderedDict
from dataclasses import dataclass
from enum import IntEnum
from functools import partial
from math import dist
from pathlib import Path
from typing import Optional, Union
@@ -20,6 +23,13 @@ class PredictOutput:
preview: Optional[Image.Image] = None
class SortBy(IntEnum):
    """Bounding-box sort orders accepted by `sort_bboxes`.

    Integer values line up with the indices of the UI's sort-by choice
    list, so a raw index can be compared against these members directly.
    """

    NONE = 0
    LEFT_TO_RIGHT = 1
    CENTER_TO_EDGE = 2
    AREA = 3
def get_models(
model_dir: Union[str, Path], huggingface: bool = True
) -> OrderedDict[str, Optional[str]]:
@@ -190,3 +200,68 @@ def mask_preprocess(
masks = [offset(m, x_offset, y_offset) for m in masks]
return masks
# Bbox sorting
def _key_left_to_right(bbox: list[float]) -> float:
"""
Left to right
Parameters
----------
bbox: list[float]
list of [x1, y1, x2, y2]
"""
return bbox[0]
def _key_center_to_edge(bbox: list[float], *, center: tuple[float, float]) -> float:
"""
Center to edge
Parameters
----------
bbox: list[float]
list of [x1, y1, x2, y2]
image: Image.Image
the image
"""
bbox_center = ((bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2)
return dist(center, bbox_center)
def _key_area(bbox: list[float]) -> float:
"""
Large to small
Parameters
----------
bbox: list[float]
list of [x1, y1, x2, y2]
"""
area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
return -area
def sort_bboxes(
    pred: PredictOutput, order: int | SortBy = SortBy.NONE
) -> PredictOutput:
    """Sort ``pred.bboxes`` and ``pred.masks`` in lockstep by *order*.

    Mutates and returns *pred*. ``SortBy.NONE`` (or an empty bbox list)
    is a no-op.

    Raises
    ------
    ValueError
        When CENTER_TO_EDGE is requested but ``pred.preview`` is None
        (the preview image is needed to compute the center).
    RuntimeError
        When *order* is not a recognized SortBy value.
    """
    if order == SortBy.NONE or not pred.bboxes:
        return pred

    if order == SortBy.LEFT_TO_RIGHT:
        key = _key_left_to_right
    elif order == SortBy.CENTER_TO_EDGE:
        # preview is Optional; fail with a clear message instead of an
        # AttributeError on `.size`.
        if pred.preview is None:
            msg = "sort_bboxes: CENTER_TO_EDGE requires pred.preview to compute the image center."
            raise ValueError(msg)
        width, height = pred.preview.size
        center = (width / 2, height / 2)
        key = partial(_key_center_to_edge, center=center)
    elif order == SortBy.AREA:
        key = _key_area
    else:
        msg = f"sort_bboxes: unknown sort order {order!r}"
        raise RuntimeError(msg)

    items = len(pred.bboxes)
    idx = sorted(range(items), key=lambda i: key(pred.bboxes[i]))
    pred.bboxes = [pred.bboxes[i] for i in idx]
    pred.masks = [pred.masks[i] for i in idx]
    return pred

View File

@@ -21,3 +21,6 @@ ignore = ["B008", "B905", "E501", "F401", "UP007"]
[tool.ruff.isort]
known-first-party = ["launch", "modules"]
[tool.ruff.per-file-ignores]
"sd_webui/*.py" = ["B027", "F403"]

View File

@@ -16,25 +16,23 @@ import torch
import modules # noqa: F401
from adetailer import (
AFTER_DETAILER,
ALL_ARGS,
ADetailerArgs,
EnableChecker,
__version__,
get_models,
mediapipe_predict,
ultralytics_predict,
)
from adetailer.common import mask_preprocess
from adetailer.args import ALL_ARGS, BBOX_SORTBY, ADetailerArgs, EnableChecker
from adetailer.common import PredictOutput, mask_preprocess, sort_bboxes
from adetailer.ui import adui, ordinal, suffix
from controlnet_ext import ControlNetExt, controlnet_exists
from modules import images, safe, script_callbacks, scripts, shared
from modules.paths import data_path, models_path
from modules.processing import (
from sd_webui import images, safe, script_callbacks, scripts, shared
from sd_webui.paths import data_path, models_path
from sd_webui.processing import (
StableDiffusionProcessingImg2Img,
create_infotext,
process_images,
)
from modules.shared import cmd_opts, opts
from sd_webui.shared import cmd_opts, opts
try:
from rich import print
@@ -141,7 +139,11 @@ class AfterDetailerScript(scripts.Script):
"""
`args_` is at least 1 in length by `is_ad_enabled` immediately above
"""
args = args_[1:] if isinstance(args_[0], bool) else args_
args = [arg for arg in args_ if isinstance(arg, dict)]
if not args:
message = f"[-] ADetailer: Invalid arguments passed to ADetailer: {args_!r}"
raise ValueError(message)
all_inputs = []
@@ -149,18 +151,15 @@ class AfterDetailerScript(scripts.Script):
try:
inp = ADetailerArgs(**arg_dict)
except ValueError as e:
message = [
msgs = [
f"[-] ADetailer: ValidationError when validating {ordinal(n)} arguments: {e}\n"
]
for attr in ALL_ARGS.attrs:
arg = arg_dict.get(attr)
dtype = type(arg)
arg = "DEFAULT" if arg is None else repr(arg)
message.append(f" {attr}: {arg} ({dtype})")
raise ValueError("\n".join(message)) from e
except TypeError as e:
message = f"[-] ADetailer: {ordinal(n)} - Non-mapping arguments are sent: {arg_dict!r}\n{e}"
raise TypeError(message) from e
msgs.append(f" {attr}: {arg} ({dtype})")
raise ValueError("\n".join(msgs)) from e
all_inputs.append(inp)
@@ -281,6 +280,7 @@ class AfterDetailerScript(scripts.Script):
for script_name in ad_script_names.split(",")
for name in (script_name, script_name.strip())
}
if args.ad_controlnet_model != "None":
self.disable_controlnet_units(script_args)
script_names_set.add("controlnet")
@@ -376,6 +376,12 @@ class AfterDetailerScript(scripts.Script):
raise ValueError(msg)
return model_mapping[name]
def sort_bboxes(self, pred: PredictOutput) -> PredictOutput:
    """Reorder detections according to the "ad_bbox_sortby" option."""
    sortby_name = opts.data.get("ad_bbox_sortby", BBOX_SORTBY[0])
    return sort_bboxes(pred, BBOX_SORTBY.index(sortby_name))
def i2i_prompts_replace(
self, i2i, prompts: list[str], negative_prompts: list[str], j: int
):
@@ -423,6 +429,7 @@ class AfterDetailerScript(scripts.Script):
with ChangeTorchLoad():
pred = predictor(ad_model, pp.image, args.ad_conf, **kwargs)
pred = self.sort_bboxes(pred)
masks = mask_preprocess(
pred.masks,
kernel=args.ad_dilate_erode,
@@ -453,10 +460,12 @@ class AfterDetailerScript(scripts.Script):
for j in range(steps):
p2.image_mask = masks[j]
self.i2i_prompts_replace(p2, ad_prompts, ad_negatives, j)
processed = process_images(p2)
p2 = copy(i2i)
p2.init_images = [processed.images[0]]
if not re.match(r"^\s*\[SKIP\]\s*$", p2.prompt):
processed = process_images(p2)
p2 = copy(i2i)
p2.init_images = [processed.images[0]]
p2.seed = seed + j + 1
p2.subseed = subseed + j + 1
@@ -552,6 +561,17 @@ def on_ui_settings():
),
)
shared.opts.add_option(
"ad_bbox_sortby",
shared.OptionInfo(
default="None",
label="Sort bounding boxes by",
component=gr.Radio,
component_args={"choices": BBOX_SORTBY},
section=section,
),
)
script_callbacks.on_ui_settings(on_ui_settings)
script_callbacks.on_after_component(on_after_component)

4
sd_webui/__init__.py Normal file
View File

@@ -0,0 +1,4 @@
from typing import TYPE_CHECKING

# At runtime this package is a thin alias for the webui's `modules` package;
# the star re-export is skipped during static type checking so the stub
# submodules in this package (images, paths, processing, ...) are used instead.
if not TYPE_CHECKING:
    from modules import *

62
sd_webui/images.py Normal file
View File

@@ -0,0 +1,62 @@
from __future__ import annotations

from typing import TYPE_CHECKING

# Type-checking stub for `modules.images`. Only the signature and docstring of
# `save_image` are declared here; at runtime everything is re-exported from the
# webui's real module (see the `else` branch).
if TYPE_CHECKING:
    from PIL import Image, PngImagePlugin

    from sd_webui.processing import StableDiffusionProcessing

    def save_image(
        image: Image.Image,
        path: str,
        basename: str,
        seed: int | None = None,
        prompt: str = "",
        extension: str = "png",
        info: str | PngImagePlugin.iTXt = "",
        short_filename: bool = False,
        no_prompt: bool = False,
        grid: bool = False,
        pnginfo_section_name: str = "parameters",
        p: StableDiffusionProcessing | None = None,
        existing_info: dict | None = None,
        forced_filename: str | None = None,
        suffix: str = "",
        save_to_dirs: bool = False,
    ) -> tuple[str, str | None]:
        """Save an image.

        Args:
            image (`PIL.Image`):
                The image to be saved.
            path (`str`):
                The directory to save the image. Note, the option `save_to_dirs` will make the image to be saved into a sub directory.
            basename (`str`):
                The base filename which will be applied to `filename pattern`.
            seed, prompt, short_filename:
                Inputs to the filename pattern — NOTE(review): exact semantics
                live in the real webui implementation, not visible here.
            extension (`str`):
                Image file extension, default is `png`.
            pnginfo_section_name (`str`):
                Specify the name of the section which `info` will be saved in.
            info (`str` or `PngImagePlugin.iTXt`):
                PNG info chunks.
            existing_info (`dict`):
                Additional PNG info. `existing_info == {pnginfo_section_name: info, ...}`
            no_prompt:
                NOTE(review): meaning unclear from this stub — confirm against
                the webui source before relying on it.
            p (`StableDiffusionProcessing`)
            forced_filename (`str`):
                If specified, `basename` and filename pattern will be ignored.
            save_to_dirs (bool):
                If true, the image will be saved into a subdirectory of `path`.

        Returns: (fullfn, txt_fullfn)
            fullfn (`str`):
                The full path of the saved image.
            txt_fullfn (`str` or None):
                If a text file is saved for this image, this will be its full path. Otherwise None.
        """

else:
    from modules.images import *

14
sd_webui/paths.py Normal file
View File

@@ -0,0 +1,14 @@
from __future__ import annotations

from typing import TYPE_CHECKING

# Type-checking stub for `modules.paths`. The joined "1".."5" suffixes are
# dummy placeholders that merely give each constant a distinct `str` value for
# static analysis; they are never used at runtime (the real paths come from
# the star re-export below).
if TYPE_CHECKING:
    import os

    models_path = os.path.join(os.path.dirname(__file__), "1")
    script_path = os.path.join(os.path.dirname(__file__), "2")
    data_path = os.path.join(os.path.dirname(__file__), "3")
    extensions_dir = os.path.join(os.path.dirname(__file__), "4")
    extensions_builtin_dir = os.path.join(os.path.dirname(__file__), "5")
else:
    from modules.paths import *

166
sd_webui/processing.py Normal file
View File

@@ -0,0 +1,166 @@
from __future__ import annotations

from typing import TYPE_CHECKING

# Type-checking stubs for `modules.processing`. The dataclasses below only
# declare the attributes (with plausible defaults) of the webui's real classes
# so static analysis and autocompletion work; at runtime everything comes from
# the star re-export at the bottom.
if TYPE_CHECKING:
    from dataclasses import dataclass, field
    from typing import Any, Callable

    import numpy as np
    import torch
    from PIL import Image

    def _image():
        # Default factory for the image-valued fields below.
        return Image.new("L", (512, 512))

    @dataclass
    class StableDiffusionProcessing:
        """Stub of the webui base processing class (fields common to txt2img/img2img)."""

        sd_model: torch.nn.Module = field(default_factory=lambda: torch.nn.Linear(1, 1))
        outpath_samples: str = ""
        outpath_grids: str = ""
        prompt: str = ""
        prompt_for_display: str = ""
        negative_prompt: str = ""
        styles: list[str] = field(default_factory=list)
        seed: int = -1
        subseed: int = -1
        subseed_strength: float = 0.0
        seed_resize_from_h: int = -1
        seed_resize_from_w: int = -1
        sampler_name: str | None = None
        batch_size: int = 1
        n_iter: int = 1
        steps: int = 50
        cfg_scale: float = 7.0
        width: int = 512
        height: int = 512
        restore_faces: bool = False
        tiling: bool = False
        do_not_save_samples: bool = False
        do_not_save_grid: bool = False
        extra_generation_params: dict[str, Any] = field(default_factory=dict)
        overlay_images: list[Image.Image] = field(default_factory=list)
        eta: float = 0.0
        do_not_reload_embeddings: bool = False
        paste_to: tuple[int | float, ...] = (0, 0, 0, 0)
        color_corrections: list[np.ndarray] = field(default_factory=list)
        denoising_strength: float = 0.0
        sampler_noise_scheduler_override: Callable | None = None
        ddim_discretize: str = ""
        s_min_uncond: float = 0.0
        s_churn: float = 0.0
        s_tmin: float = 0.0
        s_tmax: float = 0.0
        s_noise: float = 0.0
        override_settings: dict[str, Any] = field(default_factory=dict)
        override_settings_restore_afterwards: bool = False
        is_using_inpainting_conditioning: bool = False
        disable_extra_networks: bool = False
        scripts: Any = None
        script_args: list[Any] = field(default_factory=list)
        all_prompts: list[str] = field(default_factory=list)
        all_negative_prompts: list[str] = field(default_factory=list)
        all_seeds: list[int] = field(default_factory=list)
        all_subseeds: list[int] = field(default_factory=list)
        iteration: int = 1
        is_hr_pass: bool = False

    @dataclass
    class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
        """Stub of the txt2img processing class (adds highres-fix fields)."""

        sampler: Callable | None = None
        enable_hr: bool = False
        denoising_strength: float = 0.75
        hr_scale: float = 2.0
        hr_upscaler: str = ""
        hr_second_pass_steps: int = 0
        hr_resize_x: int = 0
        hr_resize_y: int = 0
        hr_upscale_to_x: int = 0
        hr_upscale_to_y: int = 0
        width: int = 512
        height: int = 512
        truncate_x: int = 512
        truncate_y: int = 512
        applied_old_hires_behavior_to: tuple[int, int] = (512, 512)

    @dataclass
    class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
        """Stub of the img2img processing class (adds init image / mask fields)."""

        sampler: Callable | None = None
        init_images: list[Image.Image] = field(default_factory=list)
        resize_mode: int = 0
        denoising_strength: float = 0.75
        image_cfg_scale: float | None = None
        init_latent: torch.Tensor | None = None
        image_mask: Image.Image = field(default_factory=_image)
        latent_mask: Image.Image = field(default_factory=_image)
        mask_for_overlay: Image.Image = field(default_factory=_image)
        mask_blur: int = 4
        inpainting_fill: int = 0
        inpaint_full_res: bool = True
        inpaint_full_res_padding: int = 0
        inpainting_mask_invert: int | bool = 0
        initial_noise_multiplier: float = 1.0
        mask: torch.Tensor | None = None
        nmask: torch.Tensor | None = None
        image_conditioning: torch.Tensor | None = None

    @dataclass
    class Processed:
        """Stub of the result object returned by `process_images`."""

        images: list[Image.Image] = field(default_factory=list)
        prompt: list[str] = field(default_factory=list)
        negative_prompt: list[str] = field(default_factory=list)
        seed: list[int] = field(default_factory=list)
        subseed: list[int] = field(default_factory=list)
        subseed_strength: float = 0.0
        info: str = ""
        comments: str = ""
        width: int = 512
        height: int = 512
        sampler_name: str = ""
        cfg_scale: float = 7.0
        image_cfg_scale: float | None = None
        steps: int = 50
        batch_size: int = 1
        restore_faces: bool = False
        face_restoration_model: str | None = None
        sd_model_hash: str = ""
        seed_resize_from_w: int = -1
        seed_resize_from_h: int = -1
        denoising_strength: float = 0.0
        extra_generation_params: dict[str, Any] = field(default_factory=dict)
        index_of_first_image: int = 0
        styles: list[str] = field(default_factory=list)
        job_timestamp: str = ""
        clip_skip: int = 1
        eta: float = 0.0
        ddim_discretize: str = ""
        s_churn: float = 0.0
        s_tmin: float = 0.0
        s_tmax: float = 0.0
        s_noise: float = 0.0
        sampler_noise_scheduler_override: Callable | None = None
        is_using_inpainting_conditioning: bool = False
        all_prompts: list[str] = field(default_factory=list)
        all_negative_prompts: list[str] = field(default_factory=list)
        all_seeds: list[int] = field(default_factory=list)
        all_subseeds: list[int] = field(default_factory=list)
        infotexts: list[str] = field(default_factory=list)

    def create_infotext(
        p: StableDiffusionProcessingTxt2Img | StableDiffusionProcessingImg2Img,
        all_prompts: list[str],
        all_seeds: list[int],
        all_subseeds: list[int],
        comments: Any,
        iteration: int = 0,
        position_in_batch: int = 0,
    ) -> str:
        """Signature-only stub; the real implementation builds the PNG infotext string."""
        pass

    def process_images(
        p: StableDiffusionProcessingTxt2Img | StableDiffusionProcessingImg2Img,
    ) -> Processed:
        """Signature-only stub; the real implementation runs the diffusion pipeline."""
        pass

else:
    from modules.processing import *

10
sd_webui/safe.py Normal file
View File

@@ -0,0 +1,10 @@
from __future__ import annotations

from typing import TYPE_CHECKING

# Type-checking stub for `modules.safe`: declares `unsafe_torch_load` as an
# alias of `torch.load` so its signature is known to type checkers. At runtime
# the real symbol comes from the star re-export below.
if TYPE_CHECKING:
    import torch

    unsafe_torch_load = torch.load
else:
    from modules.safe import *

View File

@@ -0,0 +1,13 @@
from typing import TYPE_CHECKING

# Type-checking stubs for `modules.script_callbacks`; only the callback
# registration signatures are declared. Runtime symbols come from the star
# re-export below.
if TYPE_CHECKING:
    from typing import Callable

    def on_ui_settings(callback: Callable):
        """Signature-only stub: register a callback run when UI settings are built."""
        pass

    def on_after_component(callback: Callable):
        """Signature-only stub: register a callback run after each UI component is created."""
        pass

else:
    from modules.script_callbacks import *

81
sd_webui/scripts.py Normal file
View File

@@ -0,0 +1,81 @@
from typing import TYPE_CHECKING

# Type-checking stubs for `modules.scripts`. Everything here exists only for
# static analysis; runtime symbols come from the star re-export at the bottom.
if TYPE_CHECKING:
    from abc import ABC, abstractmethod
    from dataclasses import dataclass
    from typing import Any

    import gradio as gr
    from PIL import Image

    from sd_webui.processing import (
        Processed,
        StableDiffusionProcessingImg2Img,
        StableDiffusionProcessingTxt2Img,
    )

    # Either of the two processing types a script hook may receive.
    SDPType = StableDiffusionProcessingImg2Img | StableDiffusionProcessingTxt2Img

    # Opaque sentinel — NOTE(review): in the real webui, returning this from
    # `show()` marks a script as always-on; confirm against the webui source.
    AlwaysVisible = object()

    @dataclass
    class PostprocessImageArgs:
        # Image handed to `Script.postprocess_image`.
        image: Image.Image

    class Script(ABC):
        """Stub of the webui script base class; subclasses override the hooks below."""

        filename: str
        args_from: int
        args_to: int
        alwayson: bool
        is_txt2img: bool
        is_img2img: bool
        group: gr.Group
        infotext_fields: list[tuple[str, str]]
        paste_field_names: list[str]

        @abstractmethod
        def title(self):
            raise NotImplementedError

        def ui(self, is_img2img: bool):
            pass

        def show(self, is_img2img: bool):
            return True

        def run(self, p: SDPType, *args):
            pass

        def process(self, p: SDPType, *args):
            pass

        def before_process_batch(self, p: SDPType, *args, **kwargs):
            pass

        def process_batch(self, p: SDPType, *args, **kwargs):
            pass

        def postprocess_batch(self, p: SDPType, *args, **kwargs):
            pass

        def postprocess_image(self, p: SDPType, pp: PostprocessImageArgs, *args):
            pass

        def postprocess(self, p: SDPType, processed: Processed, *args):
            pass

        def before_component(self, component, **kwargs):
            pass

        def after_component(self, component, **kwargs):
            pass

        def describe(self):
            return ""

        def elem_id(self, item_id: Any) -> str:
            # NOTE(review): stub body returns None despite the `-> str`
            # annotation; the real implementation returns an element id.
            pass

else:
    from modules.scripts import *

42
sd_webui/shared.py Normal file
View File

@@ -0,0 +1,42 @@
from __future__ import annotations

from typing import TYPE_CHECKING

# Type-checking stubs for `modules.shared` (options registry and parsed
# command-line flags). Runtime symbols come from the star re-export below.
if TYPE_CHECKING:
    import argparse
    from dataclasses import dataclass
    from typing import Any, Callable

    @dataclass
    class OptionInfo:
        """Stub describing a single settings entry registered via `Option.add_option`."""

        default: Any = None
        label: str = ""
        component: Any = None
        component_args: Callable[[], dict] | dict[str, Any] | None = None
        onchange: Callable[[], None] | None = None
        section: tuple[str, str] | None = None
        refresh: Callable[[], None] | None = None

    class Option:
        """Stub of the webui options container accessed as `opts`."""

        # Registered option metadata, keyed by option name.
        data_labels: dict[str, OptionInfo]

        def __init__(self):
            # Current option values, keyed by option name.
            self.data: dict[str, Any] = {}

        def add_option(self, key: str, info: OptionInfo):
            """Signature-only stub: register an option."""
            pass

        def __getattr__(self, item: str):
            # Lookup order: current value, then the registered default,
            # then normal attribute access (which raises AttributeError).
            if self.data is not None and item in self.data:
                return self.data[item]
            if item in self.data_labels:
                return self.data_labels[item].default
            return super().__getattribute__(item)

    opts = Option()
    cmd_opts = argparse.Namespace()
else:
    from modules.shared import *