Merge branch 'dev' into main

This commit is contained in:
Bingsu
2023-05-19 09:45:49 +09:00
13 changed files with 481 additions and 316 deletions

View File

@@ -1,5 +1,14 @@
# Changelog
### 2023-05-19
- v23.5.16
- 추가한 옵션
- Mask min/max ratio
- Mask merge mode
- Restore faces after ADetailer
- 옵션들을 Accordion으로 묶음
### 2023-05-18
- v23.5.15

View File

@@ -20,25 +20,31 @@ You can now install it directly from the Extensions tab.
You **DON'T** need to download any model from huggingface.
## Usage
## Options
It's auto detecting, masking, and inpainting tool.
| Model, Prompts | | |
| --------------------------------- | ------------------------------------- | ------------------------------------------------- |
| ADetailer model | Determine what to detect. | `None` = disable |
| ADetailer prompt, negative prompt | Prompts and negative prompts to apply | If left blank, it will use the same as the input. |
So some options correspond to options on the inpaint tab.
| Detection | | |
| ------------------------------------ | -------------------------------------------------------------------------------------------- | --- |
| Detection model confidence threshold | Only objects with a detection model confidence above this threshold are used for inpainting. | |
| Mask min/max ratio | Only use masks whose area is between those ratios for the area of the entire image. | |
![image](https://i.imgur.com/Bm7YLEA.png)
If you want to exclude objects in the background, try setting the min ratio to around `0.01`.
Other options:
| Mask Preprocessing | | |
| ------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- |
| Mask x, y offset | Moves the mask horizontally and vertically by the given number of pixels. | |
| Mask erosion (-) / dilation (+) | Enlarge or reduce the detected mask. | [opencv example](https://docs.opencv.org/4.7.0/db/df6/tutorial_erosion_dilatation.html) |
| Mask merge mode | `None`: Inpaint each mask<br/>`Merge`: Merge all masks and inpaint<br/>`Merge and Invert`: Merge all masks and Invert, then inpaint | |
| Option | | |
| -------------------------------------- | -------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- |
| ADetailer model | Determine what to detect. | `None` = disable |
| ADetailer prompt, negative prompt | Prompts and negative prompts to apply | If left blank, it will use the same as the input. |
| Detection model confidence threshold % | Only objects with a detection model confidence above this threshold are used for inpainting. | |
| Mask erosion (-) / dilation (+) | Enlarge or reduce the detected mask. | [opencv example](https://docs.opencv.org/4.7.0/db/df6/tutorial_erosion_dilatation.html) |
| Mask x, y offset | Moves the mask horizontally and vertically by pixels. | |
#### Inpainting
See the [wiki](https://github.com/Bing-su/adetailer/wiki) for more options and other features.
![image](https://i.imgur.com/KbAeWar.png)
Each option corresponds to a corresponding option on the inpaint tab.
## ControlNet Inpainting

View File

@@ -1 +1 @@
__version__ = "23.5.15"
__version__ = "23.5.16"

View File

@@ -1,8 +1,8 @@
from __future__ import annotations
from collections import UserList
from functools import cached_property
from typing import Any, NamedTuple, Optional, Union
from functools import cached_property, partial
from typing import Any, Literal, NamedTuple, Union
import pydantic
from pydantic import (
@@ -36,9 +36,12 @@ class ADetailerArgs(BaseModel, extra=Extra.forbid):
ad_prompt: str = ""
ad_negative_prompt: str = ""
ad_conf: confloat(ge=0.0, le=1.0) = 0.3
ad_mask_min_ratio: confloat(ge=0.0, le=1.0) = 0.0
ad_mask_max_ratio: confloat(ge=0.0, le=1.0) = 1.0
ad_dilate_erode: int = 32
ad_x_offset: int = 0
ad_y_offset: int = 0
ad_mask_merge_invert: Literal["None", "Merge", "Merge and Invert"] = "None"
ad_mask_blur: NonNegativeInt = 4
ad_denoising_strength: confloat(ge=0.0, le=1.0) = 0.4
ad_inpaint_full_res: bool = True
@@ -50,12 +53,12 @@ class ADetailerArgs(BaseModel, extra=Extra.forbid):
ad_steps: PositiveInt = 28
ad_use_cfg_scale: bool = False
ad_cfg_scale: NonNegativeFloat = 7.0
ad_restore_face: bool = False
ad_controlnet_model: str = "None"
ad_controlnet_weight: confloat(ge=0.0, le=1.0) = 1.0
@validator("ad_conf", pre=True)
def check_ad_conf(cls, v: Any): # noqa: N805
"ad_conf가 문자열로 들어올 경우를 대비"
if not isinstance(v, (int, float)):
try:
v = int(v)
@@ -65,47 +68,65 @@ class ADetailerArgs(BaseModel, extra=Extra.forbid):
v /= 100.0
return v
@staticmethod
def ppop(
p: dict[str, Any],
key: str,
pops: list[str] | None = None,
cond: Any = None,
):
if pops is None:
pops = [key]
value = p[key]
cond = (not bool(value)) if cond is None else value == cond
if cond:
for k in pops:
p.pop(k)
def extra_params(self, suffix: str = ""):
if self.ad_model == "None":
return {}
params = {name: getattr(self, attr) for attr, name in ALL_ARGS}
params["ADetailer conf"] = int(params["ADetailer conf"] * 100)
p = {name: getattr(self, attr) for attr, name in ALL_ARGS}
p["ADetailer conf"] = int(p["ADetailer conf"] * 100)
ppop = partial(self.ppop, p)
if not params["ADetailer prompt"]:
params.pop("ADetailer prompt")
if not params["ADetailer negative prompt"]:
params.pop("ADetailer negative prompt")
if params["ADetailer x offset"] == 0:
params.pop("ADetailer x offset")
if params["ADetailer y offset"] == 0:
params.pop("ADetailer y offset")
if not params["ADetailer inpaint full"]:
params.pop("ADetailer inpaint padding")
if not params["ADetailer use inpaint width/height"]:
params.pop("ADetailer use inpaint width/height")
params.pop("ADetailer inpaint width")
params.pop("ADetailer inpaint height")
if not params["ADetailer use separate steps"]:
params.pop("ADetailer use separate steps")
params.pop("ADetailer steps")
if not params["ADetailer use separate CFG scale"]:
params.pop("ADetailer use separate CFG scale")
params.pop("ADetailer CFG scale")
if params["ADetailer ControlNet model"] == "None":
params.pop("ADetailer ControlNet model")
params.pop("ADetailer ControlNet weight")
ppop("ADetailer prompt")
ppop("ADetailer negative prompt")
ppop("ADetailer mask min ratio", cond=0.0)
ppop("ADetailer mask max ratio", cond=1.0)
ppop("ADetailer x offset", cond=0)
ppop("ADetailer y offset", cond=0)
ppop("ADetailer mask merge/invert", cond="None")
ppop("ADetailer inpaint full", ["ADetailer inpaint padding"])
ppop(
"ADetailer use inpaint width/height",
[
"ADetailer use inpaint width/height",
"ADetailer inpaint width",
"ADetailer inpaint height",
],
)
ppop(
"ADetailer use separate steps",
["ADetailer use separate steps", "ADetailer steps"],
)
ppop(
"ADetailer use separate CFG scale",
["ADetailer use separate CFG scale", "ADetailer CFG scale"],
)
ppop("ADetailer restore face")
ppop(
"ADetailer ControlNet model",
["ADetailer ControlNet model", "ADetailer ControlNet weight"],
cond="None",
)
if suffix:
params = {k + suffix: v for k, v in params.items()}
p = {k + suffix: v for k, v in p.items()}
return params
return p
class EnableChecker(BaseModel):
@@ -127,9 +148,12 @@ _all_args = [
("ad_prompt", "ADetailer prompt"),
("ad_negative_prompt", "ADetailer negative prompt"),
("ad_conf", "ADetailer conf"),
("ad_dilate_erode", "ADetailer dilate/erode"),
("ad_mask_min_ratio", "ADetailer mask min ratio"),
("ad_mask_max_ratio", "ADetailer mask max ratio"),
("ad_x_offset", "ADetailer x offset"),
("ad_y_offset", "ADetailer y offset"),
("ad_dilate_erode", "ADetailer dilate/erode"),
("ad_mask_merge_invert", "ADetailer mask merge/invert"),
("ad_mask_blur", "ADetailer mask blur"),
("ad_denoising_strength", "ADetailer denoising strength"),
("ad_inpaint_full_res", "ADetailer inpaint full"),
@@ -141,6 +165,7 @@ _all_args = [
("ad_steps", "ADetailer steps"),
("ad_use_cfg_scale", "ADetailer use separate CFG scale"),
("ad_cfg_scale", "ADetailer CFG scale"),
("ad_restore_face", "ADetailer restore face"),
("ad_controlnet_model", "ADetailer ControlNet model"),
("ad_controlnet_weight", "ADetailer ControlNet weight"),
]
@@ -148,9 +173,11 @@ _all_args = [
AD_ENABLE = Arg(*_all_args[0])
_args = [Arg(*args) for args in _all_args[1:]]
ALL_ARGS = ArgsList(_args)
BBOX_SORTBY = [
"None",
"Position (left to right)",
"Position (center to edge)",
"Area (large to small)",
]
MASK_MERGE_INVERT = ["None", "Merge", "Merge and Invert"]

View File

@@ -1,35 +1,23 @@
from __future__ import annotations
from collections import OrderedDict
from dataclasses import dataclass
from enum import IntEnum
from functools import partial
from math import dist
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
import cv2
import numpy as np
from huggingface_hub import hf_hub_download
from PIL import Image, ImageChops, ImageDraw
from PIL import Image, ImageDraw
repo_id = "Bingsu/adetailer"
@dataclass
class PredictOutput:
bboxes: Optional[list[list[int]]] = None
masks: Optional[list[Image.Image]] = None
bboxes: list[list[float]] = field(default_factory=list)
masks: list[Image.Image] = field(default_factory=list)
preview: Optional[Image.Image] = None
class SortBy(IntEnum):
NONE = 0
LEFT_TO_RIGHT = 1
CENTER_TO_EDGE = 2
AREA = 3
def get_models(
model_dir: Union[str, Path], huggingface: bool = True
) -> OrderedDict[str, Optional[str]]:
@@ -100,168 +88,3 @@ def create_mask_from_bbox(
mask_draw.rectangle(bbox, fill=255)
masks.append(mask)
return masks
def _dilate(arr: np.ndarray, value: int) -> np.ndarray:
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (value, value))
return cv2.dilate(arr, kernel, iterations=1)
def _erode(arr: np.ndarray, value: int) -> np.ndarray:
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (value, value))
return cv2.erode(arr, kernel, iterations=1)
def dilate_erode(img: Image.Image, value: int) -> Image.Image:
"""
The dilate_erode function takes an image and a value.
If the value is positive, it dilates the image by that amount.
If the value is negative, it erodes the image by that amount.
Parameters
----------
img: PIL.Image.Image
the image to be processed
value: int
kernel size of dilation or erosion
Returns
-------
PIL.Image.Image
The image that has been dilated or eroded
"""
if value == 0:
return img
arr = np.array(img)
arr = _dilate(arr, value) if value > 0 else _erode(arr, -value)
return Image.fromarray(arr)
def offset(img: Image.Image, x: int = 0, y: int = 0) -> Image.Image:
"""
The offset function takes an image and offsets it by a given x(→) and y(↑) value.
Parameters
----------
mask: Image.Image
Pass the mask image to the function
x: int
y: int
Returns
-------
PIL.Image.Image
A new image that is offset by x and y
"""
return ImageChops.offset(img, x, -y)
def is_all_black(img: Image.Image) -> bool:
arr = np.array(img)
return cv2.countNonZero(arr) == 0
def mask_preprocess(
masks: list[Image.Image] | None,
kernel: int = 0,
x_offset: int = 0,
y_offset: int = 0,
) -> list[Image.Image]:
"""
The mask_preprocess function takes a list of masks and preprocesses them.
It dilates and erodes the masks, and offsets them by x_offset and y_offset.
Parameters
----------
masks: list[Image.Image] | None
A list of masks
kernel: int
kernel size of dilation or erosion
x_offset: int
y_offset: int
Returns
-------
list[Image.Image]
A list of processed masks
"""
if masks is None:
return []
masks = [dilate_erode(m, kernel) for m in masks]
masks = [m for m in masks if not is_all_black(m)]
if x_offset != 0 or y_offset != 0:
masks = [offset(m, x_offset, y_offset) for m in masks]
return masks
# Bbox sorting
def _key_left_to_right(bbox: list[float]) -> float:
"""
Left to right
Parameters
----------
bbox: list[float]
list of [x1, y1, x2, y2]
"""
return bbox[0]
def _key_center_to_edge(bbox: list[float], *, center: tuple[float, float]) -> float:
"""
Center to edge
Parameters
----------
bbox: list[float]
list of [x1, y1, x2, y2]
image: Image.Image
the image
"""
bbox_center = ((bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2)
return dist(center, bbox_center)
def _key_area(bbox: list[float]) -> float:
"""
Large to small
Parameters
----------
bbox: list[float]
list of [x1, y1, x2, y2]
"""
area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
return -area
def sort_bboxes(
pred: PredictOutput, order: int | SortBy = SortBy.NONE
) -> PredictOutput:
if order == SortBy.NONE or not pred.bboxes:
return pred
if order == SortBy.LEFT_TO_RIGHT:
key = _key_left_to_right
elif order == SortBy.CENTER_TO_EDGE:
width, height = pred.preview.size
center = (width / 2, height / 2)
key = partial(_key_center_to_edge, center=center)
elif order == SortBy.AREA:
key = _key_area
else:
raise RuntimeError
items = len(pred.bboxes)
idx = sorted(range(items), key=lambda i: key(pred.bboxes[i]))
pred.bboxes = [pred.bboxes[i] for i in idx]
pred.masks = [pred.masks[i] for i in idx]
return pred

247
adetailer/mask.py Normal file
View File

@@ -0,0 +1,247 @@
from __future__ import annotations
from enum import IntEnum
from functools import partial, reduce
from math import dist
import cv2
import numpy as np
from PIL import Image, ImageChops
from adetailer.args import MASK_MERGE_INVERT
from adetailer.common import PredictOutput
class SortBy(IntEnum):
    # Bbox sort order used by sort_bboxes; integer values are used as
    # indices, so they presumably match a UI dropdown order — see the
    # BBOX_SORTBY string list elsewhere in the package.
    NONE = 0
    LEFT_TO_RIGHT = 1
    CENTER_TO_EDGE = 2
    AREA = 3
class MergeInvert(IntEnum):
    # Mask merge/invert mode. Values are list indices into
    # MASK_MERGE_INVERT = ["None", "Merge", "Merge and Invert"],
    # which is how mask_merge_invert converts a string mode to an int.
    NONE = 0
    MERGE = 1
    MERGE_INVERT = 2
def _dilate(arr: np.ndarray, value: int) -> np.ndarray:
    """Dilate ``arr`` once with a ``value`` x ``value`` rectangular kernel."""
    structure = cv2.getStructuringElement(cv2.MORPH_RECT, (value, value))
    return cv2.dilate(arr, structure, iterations=1)
def _erode(arr: np.ndarray, value: int) -> np.ndarray:
    """Erode ``arr`` once with a ``value`` x ``value`` rectangular kernel."""
    structure = cv2.getStructuringElement(cv2.MORPH_RECT, (value, value))
    return cv2.erode(arr, structure, iterations=1)
def dilate_erode(img: Image.Image, value: int) -> Image.Image:
    """
    Dilate or erode a mask image.

    A positive value dilates the image by that kernel size, a negative
    value erodes it, and zero returns the image unchanged.

    Parameters
    ----------
    img: PIL.Image.Image
        the image to be processed
    value: int
        kernel size of dilation (if > 0) or erosion (if < 0)

    Returns
    -------
    PIL.Image.Image
        The image that has been dilated or eroded
    """
    if value == 0:
        return img

    arr = np.array(img)
    if value > 0:
        arr = _dilate(arr, value)
    else:
        arr = _erode(arr, -value)
    return Image.fromarray(arr)
def offset(img: Image.Image, x: int = 0, y: int = 0) -> Image.Image:
    """
    Offset an image by a given x(→) and y(↑) value.

    Parameters
    ----------
    img: Image.Image
        the image to offset
    x: int
        horizontal offset in pixels (positive moves right)
    y: int
        vertical offset in pixels (positive moves up)

    Returns
    -------
    PIL.Image.Image
        A new image that is offset by x and y
    """
    # NOTE: ImageChops.offset wraps pixels around the image edges.
    # y is negated so that positive y moves the content up, since image
    # coordinates grow downward.
    return ImageChops.offset(img, x, -y)
def is_all_black(img: Image.Image) -> bool:
    """Return True if the image contains no non-zero pixels at all."""
    return cv2.countNonZero(np.array(img)) == 0
def bbox_area(bbox: list[float]) -> float:
    """Return the area (width * height) of an [x1, y1, x2, y2] bbox."""
    x1, y1, x2, y2 = bbox[:4]
    return (x2 - x1) * (y2 - y1)
def mask_preprocess(
    masks: list[Image.Image],
    kernel: int = 0,
    x_offset: int = 0,
    y_offset: int = 0,
    merge_invert: int | MergeInvert | str = MergeInvert.NONE,
) -> list[Image.Image]:
    """
    Preprocess a list of detection masks.

    Offsets each mask by (x_offset, y_offset), dilates/erodes it by
    ``kernel``, drops masks that became entirely black, then applies the
    merge/invert mode.

    Parameters
    ----------
    masks: list[Image.Image]
        A list of masks
    kernel: int
        kernel size of dilation (if > 0) or erosion (if < 0)
    x_offset: int
        horizontal offset in pixels (positive moves right)
    y_offset: int
        vertical offset in pixels (positive moves up)
    merge_invert: int | MergeInvert | str
        merge mode — "None", "Merge", or "Merge and Invert"
        (see mask_merge_invert)

    Returns
    -------
    list[Image.Image]
        A list of processed masks
    """
    if not masks:
        return []

    if x_offset != 0 or y_offset != 0:
        masks = [offset(m, x_offset, y_offset) for m in masks]

    if kernel != 0:
        masks = [dilate_erode(m, kernel) for m in masks]
        # Erosion can wipe a mask out entirely; drop those.
        masks = [m for m in masks if not is_all_black(m)]

    masks = mask_merge_invert(masks, mode=merge_invert)
    return masks
# Bbox sorting
def _key_left_to_right(bbox: list[float]) -> float:
"""
Left to right
Parameters
----------
bbox: list[float]
list of [x1, y1, x2, y2]
"""
return bbox[0]
def _key_center_to_edge(bbox: list[float], *, center: tuple[float, float]) -> float:
"""
Center to edge
Parameters
----------
bbox: list[float]
list of [x1, y1, x2, y2]
image: Image.Image
the image
"""
bbox_center = ((bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2)
return dist(center, bbox_center)
def _key_area(bbox: list[float]) -> float:
    """
    Sort key: negated bbox area, so larger boxes sort first.

    Parameters
    ----------
    bbox: list[float]
        list of [x1, y1, x2, y2]
    """
    return -1 * bbox_area(bbox)
def sort_bboxes(
    pred: PredictOutput, order: int | SortBy = SortBy.NONE
) -> PredictOutput:
    """
    Sort ``pred.bboxes`` and ``pred.masks`` (in place) by the given order.

    Parameters
    ----------
    pred: PredictOutput
        prediction whose bboxes/masks are reordered together
    order: int | SortBy
        one of SortBy.NONE / LEFT_TO_RIGHT / CENTER_TO_EDGE / AREA

    Raises
    ------
    RuntimeError
        if ``order`` is not a recognized SortBy value
    """
    # Nothing to do for NONE or when there is at most one box.
    if order == SortBy.NONE or len(pred.bboxes) <= 1:
        return pred

    if order == SortBy.LEFT_TO_RIGHT:
        key = _key_left_to_right
    elif order == SortBy.CENTER_TO_EDGE:
        # CENTER_TO_EDGE needs the image size; pred.preview must be set.
        width, height = pred.preview.size
        center = (width / 2, height / 2)
        key = partial(_key_center_to_edge, center=center)
    elif order == SortBy.AREA:
        key = _key_area
    else:
        raise RuntimeError(f"Invalid sort order: {order!r}")

    items = len(pred.bboxes)
    idx = sorted(range(items), key=lambda i: key(pred.bboxes[i]))
    pred.bboxes = [pred.bboxes[i] for i in idx]
    pred.masks = [pred.masks[i] for i in idx]
    return pred
# Filter by ratio
def is_in_ratio(bbox: list[float], low: float, high: float, orig_area: int) -> bool:
    """Return True if bbox area / orig_area lies within [low, high]."""
    ratio = bbox_area(bbox) / orig_area
    return low <= ratio <= high
def filter_by_ratio(pred: PredictOutput, low: float, high: float) -> PredictOutput:
    """
    Keep only detections (in place) whose bbox area, relative to the whole
    image area, lies within [low, high]. The image size is read from
    ``pred.preview``.
    """
    if not pred.bboxes:
        return pred

    w, h = pred.preview.size
    orig_area = w * h
    keep = [
        i
        for i, bbox in enumerate(pred.bboxes)
        if is_in_ratio(bbox, low, high, orig_area)
    ]
    pred.bboxes = [pred.bboxes[i] for i in keep]
    pred.masks = [pred.masks[i] for i in keep]
    return pred
# Merge / Invert
def mask_merge(masks: list[Image.Image]) -> list[Image.Image]:
    """Bitwise-OR all masks into a single mask; returns a one-element list."""
    merged = reduce(cv2.bitwise_or, [np.array(m) for m in masks])
    return [Image.fromarray(merged)]
def mask_invert(masks: list[Image.Image]) -> list[Image.Image]:
    """Invert every mask (black ↔ white)."""
    inverted = []
    for mask in masks:
        inverted.append(ImageChops.invert(mask))
    return inverted
def mask_merge_invert(
    masks: list[Image.Image], mode: int | MergeInvert | str
) -> list[Image.Image]:
    """
    Apply the mask merge/invert mode.

    Parameters
    ----------
    masks: list[Image.Image]
        masks to process
    mode: int | MergeInvert | str
        MergeInvert value, its int, or one of the MASK_MERGE_INVERT
        strings ("None", "Merge", "Merge and Invert")

    Raises
    ------
    RuntimeError
        if ``mode`` is not a recognized merge mode
    """
    if isinstance(mode, str):
        # Map the UI string to its MergeInvert index
        # (raises ValueError for unknown strings).
        mode = MASK_MERGE_INVERT.index(mode)

    if mode == MergeInvert.NONE or not masks:
        return masks

    if mode == MergeInvert.MERGE:
        return mask_merge(masks)

    if mode == MergeInvert.MERGE_INVERT:
        merged = mask_merge(masks)
        return mask_invert(merged)

    raise RuntimeError(f"Invalid mask merge mode: {mode!r}")

View File

@@ -1,4 +1,4 @@
from typing import Union
from __future__ import annotations
import numpy as np
from PIL import Image
@@ -8,7 +8,7 @@ from adetailer.common import create_mask_from_bbox
def mediapipe_predict(
model_type: Union[int, str], image: Image.Image, confidence: float = 0.3
model_type: int | str, image: Image.Image, confidence: float = 0.3
) -> PredictOutput:
import mediapipe as mp

View File

@@ -6,7 +6,7 @@ from typing import Any
import gradio as gr
from adetailer import AFTER_DETAILER
from adetailer.args import AD_ENABLE, ALL_ARGS
from adetailer.args import AD_ENABLE, ALL_ARGS, MASK_MERGE_INVERT
from controlnet_ext import controlnet_exists, get_cn_inpaint_models
@@ -46,7 +46,6 @@ def adui(
t2i_button: gr.Button,
i2i_button: gr.Button,
):
widgets = []
states = []
infotext_fields = []
@@ -63,7 +62,7 @@ def adui(
with gr.Group(), gr.Tabs():
for n in range(num_models):
with gr.Tab(ordinal(n + 1)):
w, state, infofields = one_ui_group(
state, infofields = one_ui_group(
n=n,
is_img2img=is_img2img,
model_list=model_list,
@@ -71,7 +70,6 @@ def adui(
i2i_button=i2i_button,
)
widgets.append(w)
states.append(state)
infotext_fields.extend(infofields)
@@ -121,7 +119,56 @@ def one_ui_group(
)
with gr.Group():
with gr.Row():
with gr.Accordion("Detection", open=False):
detection(w, n)
with gr.Accordion("Mask Preprocessing", open=False):
mask_preprocessing(w, n)
with gr.Accordion("Inpainting", open=False):
inpainting(w, n)
with gr.Group(), gr.Row(variant="panel"):
cn_inpaint_models = ["None"] + get_cn_inpaint_models()
w.ad_controlnet_model = gr.Dropdown(
label="ControlNet model" + suffix(n),
choices=cn_inpaint_models,
value="None",
visible=True,
type="value",
interactive=controlnet_exists,
)
w.ad_controlnet_weight = gr.Slider(
label="ControlNet weight" + suffix(n),
minimum=0.0,
maximum=1.0,
step=0.05,
value=1.0,
visible=True,
interactive=controlnet_exists,
)
for attr in ALL_ARGS.attrs:
widget = getattr(w, attr)
on_change = partial(on_widget_change, attr=attr)
widget.change(
fn=on_change, inputs=[state, widget], outputs=[state], queue=False
)
all_inputs = [state] + w.tolist()
target_button = i2i_button if is_img2img else t2i_button
target_button.click(
fn=on_generate_click, inputs=all_inputs, outputs=state, queue=False
)
infotext_fields = [(getattr(w, attr), name + suffix(n)) for attr, name in ALL_ARGS]
return state, infotext_fields
def detection(w: Widgets, n: int):
with gr.Row():
with gr.Column():
w.ad_conf = gr.Slider(
label="Detection model confidence threshold %" + suffix(n),
minimum=0,
@@ -130,33 +177,67 @@ def one_ui_group(
value=30,
visible=True,
)
w.ad_dilate_erode = gr.Slider(
label="Mask erosion (-) / dilation (+)" + suffix(n),
minimum=-128,
maximum=128,
step=4,
value=32,
with gr.Column(variant="compact"):
w.ad_mask_min_ratio = gr.Slider(
label="Mask min area ratio" + suffix(n),
minimum=0.0,
maximum=1.0,
step=0.001,
value=0.0,
visible=True,
)
w.ad_mask_max_ratio = gr.Slider(
label="Mask max area ratio" + suffix(n),
minimum=0.0,
maximum=1.0,
step=0.001,
value=1.0,
visible=True,
)
def mask_preprocessing(w: Widgets, n: int):
with gr.Group():
with gr.Row():
with gr.Column(variant="compact"):
w.ad_x_offset = gr.Slider(
label="Mask x(→) offset" + suffix(n),
minimum=-200,
maximum=200,
step=1,
value=0,
visible=True,
)
w.ad_y_offset = gr.Slider(
label="Mask y(↑) offset" + suffix(n),
minimum=-200,
maximum=200,
step=1,
value=0,
visible=True,
)
with gr.Column(variant="compact"):
w.ad_dilate_erode = gr.Slider(
label="Mask erosion (-) / dilation (+)" + suffix(n),
minimum=-128,
maximum=128,
step=4,
value=32,
visible=True,
)
with gr.Row():
w.ad_x_offset = gr.Slider(
label="Mask x(→) offset" + suffix(n),
minimum=-200,
maximum=200,
step=1,
value=0,
visible=True,
)
w.ad_y_offset = gr.Slider(
label="Mask y(↑) offset" + suffix(n),
minimum=-200,
maximum=200,
step=1,
value=0,
visible=True,
w.ad_mask_merge_invert = gr.Radio(
label="Mask merge mode" + suffix(n),
choices=MASK_MERGE_INVERT,
value="None",
)
def inpainting(w: Widgets, n: int):
with gr.Group():
with gr.Row():
w.ad_mask_blur = gr.Slider(
label="Inpaint mask blur" + suffix(n),
@@ -176,7 +257,6 @@ def one_ui_group(
visible=True,
)
with gr.Group():
with gr.Row():
with gr.Column(variant="compact"):
w.ad_inpaint_full_res = gr.Checkbox(
@@ -279,41 +359,8 @@ def one_ui_group(
queue=False,
)
with gr.Group(), gr.Row(variant="panel"):
cn_inpaint_models = ["None"] + get_cn_inpaint_models()
w.ad_controlnet_model = gr.Dropdown(
label="ControlNet model" + suffix(n),
choices=cn_inpaint_models,
value="None",
visible=True,
type="value",
interactive=controlnet_exists,
)
w.ad_controlnet_weight = gr.Slider(
label="ControlNet weight" + suffix(n),
minimum=0.0,
maximum=1.0,
step=0.05,
value=1.0,
visible=True,
interactive=controlnet_exists,
)
for attr in ALL_ARGS.attrs:
widget = getattr(w, attr)
on_change = partial(on_widget_change, attr=attr)
widget.change(
fn=on_change, inputs=[state, widget], outputs=[state], queue=False
)
all_inputs = [state] + w.tolist()
target_button = i2i_button if is_img2img else t2i_button
target_button.click(
fn=on_generate_click, inputs=all_inputs, outputs=state, queue=False
)
infotext_fields = [(getattr(w, attr), name + suffix(n)) for attr, name in ALL_ARGS]
return w, state, infotext_fields
with gr.Row():
w.ad_restore_face = gr.Checkbox(
label="Restore faces after ADetailer" + suffix(n),
value=False,
)

View File

@@ -1,7 +1,6 @@
from __future__ import annotations
from pathlib import Path
from typing import Union
import cv2
from PIL import Image
@@ -11,7 +10,7 @@ from adetailer.common import create_mask_from_bbox
def ultralytics_predict(
model_path: Union[str, Path],
model_path: str | Path,
image: Image.Image,
confidence: float = 0.3,
device: str = "",

View File

@@ -22,7 +22,8 @@ from adetailer import (
ultralytics_predict,
)
from adetailer.args import ALL_ARGS, BBOX_SORTBY, ADetailerArgs, EnableChecker
from adetailer.common import PredictOutput, mask_preprocess, sort_bboxes
from adetailer.common import PredictOutput
from adetailer.mask import filter_by_ratio, mask_preprocess, sort_bboxes
from adetailer.ui import adui, ordinal, suffix
from controlnet_ext import ControlNetExt, controlnet_exists
from sd_webui import images, safe, script_callbacks, scripts, shared
@@ -340,6 +341,7 @@ class AfterDetailerScript(scripts.Script):
cfg_scale=cfg_scale,
width=width,
height=height,
restore_faces=args.ad_restore_face,
tiling=p.tiling,
extra_generation_params=p.extra_generation_params,
do_not_save_samples=True,
@@ -382,6 +384,19 @@ class AfterDetailerScript(scripts.Script):
pred = sort_bboxes(pred, sortby_idx)
return pred
def pred_preprocessing(self, pred: PredictOutput, args: ADetailerArgs):
pred = filter_by_ratio(
pred, low=args.ad_mask_min_ratio, high=args.ad_mask_max_ratio
)
pred = self.sort_bboxes(pred)
return mask_preprocess(
pred.masks,
kernel=args.ad_dilate_erode,
x_offset=args.ad_x_offset,
y_offset=args.ad_y_offset,
merge_invert=args.ad_mask_merge_invert,
)
def i2i_prompts_replace(
self, i2i, prompts: list[str], negative_prompts: list[str], j: int
):
@@ -429,13 +444,7 @@ class AfterDetailerScript(scripts.Script):
with ChangeTorchLoad():
pred = predictor(ad_model, pp.image, args.ad_conf, **kwargs)
pred = self.sort_bboxes(pred)
masks = mask_preprocess(
pred.masks,
kernel=args.ad_dilate_erode,
x_offset=args.ad_x_offset,
y_offset=args.ad_y_offset,
)
masks = self.pred_preprocessing(pred, args)
if not masks:
print(

View File

@@ -11,10 +11,4 @@ if TYPE_CHECKING:
extensions_dir = os.path.join(os.path.dirname(__file__), "4")
extensions_builtin_dir = os.path.join(os.path.dirname(__file__), "5")
else:
from modules.paths import (
data_path,
extensions_builtin_dir,
extensions_dir,
models_path,
script_path,
)
from modules.paths import data_path, models_path, script_path

View File

@@ -1,3 +1,5 @@
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:

View File

@@ -1,3 +1,5 @@
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING: