diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 902ce42..d718f9b 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -12,14 +12,13 @@ repos:
- id: isort
args: [--profile=black]
- - repo: https://github.com/psf/black
- rev: 23.3.0
- hooks:
- - id: black
-
- repo: https://github.com/charliermarsh/ruff-pre-commit
- # Ruff version.
rev: "v0.0.265"
hooks:
- id: ruff
args: [--fix, --exit-non-zero-on-fix]
+
+ - repo: https://github.com/psf/black
+ rev: 23.3.0
+ hooks:
+ - id: black
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5ec9dd8..ff06be9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,12 @@
# Changelog
+### 2023-05-09
+
+- v23.5.10
+- 선택한 스크립트만 ADetailer에 적용하는 옵션 추가, 기본값 `True`. 설정 탭에서 지정가능.
+ - 기본값: `dynamic_prompting,dynamic_thresholding,wildcards,wildcard_recursive`
+- `person_yolov8s-seg.pt` 모델 추가
+
### 2023-05-08
- v23.5.9
diff --git a/README.md b/README.md
index 3b40d8a..36117f7 100644
--- a/README.md
+++ b/README.md
@@ -36,6 +36,7 @@ On the ControlNet tab, select a ControlNet inpaint model and set the model weigh
| mediapipe_face_short | realistic face | - | - |
| hand_yolov8n.pt | 2D / realistic hand | 0.767 | 0.505 |
| person_yolov8n-seg.pt | 2D / realistic person | 0.782 (bbox)<br>0.761 (mask) | 0.555 (bbox)<br>0.460 (mask) |
+| person_yolov8s-seg.pt | 2D / realistic person | 0.824 (bbox)<br>0.809 (mask) | 0.605 (bbox)<br>0.508 (mask) |
The yolo models can be found on huggingface [Bingsu/adetailer](https://huggingface.co/Bingsu/adetailer).
diff --git a/adetailer/__version__.py b/adetailer/__version__.py
index 8d20f28..182fb40 100644
--- a/adetailer/__version__.py
+++ b/adetailer/__version__.py
@@ -1 +1 @@
-__version__ = "23.5.9"
+__version__ = "23.5.10"
diff --git a/adetailer/common.py b/adetailer/common.py
index f12b682..5b03da5 100644
--- a/adetailer/common.py
+++ b/adetailer/common.py
@@ -39,6 +39,7 @@ def get_models(model_dir: Union[str, Path]) -> OrderedDict[str, Optional[str]]:
"mediapipe_face_short": None,
"hand_yolov8n.pt": hf_hub_download(repo_id, "hand_yolov8n.pt"),
"person_yolov8n-seg.pt": hf_hub_download(repo_id, "person_yolov8n-seg.pt"),
+ "person_yolov8s-seg.pt": hf_hub_download(repo_id, "person_yolov8s-seg.pt"),
}
)
@@ -51,16 +52,16 @@ def get_models(model_dir: Union[str, Path]) -> OrderedDict[str, Optional[str]]:
def create_mask_from_bbox(
- image: Image.Image, bboxes: list[list[float]]
+ bboxes: list[list[float]], shape: tuple[int, int]
) -> list[Image.Image]:
"""
Parameters
----------
- image: Image.Image
- The image to create the mask from
bboxes: list[list[float]]
list of [x1, y1, x2, y2]
bounding boxes
+ shape: tuple[int, int]
+ shape of the image (width, height)
Returns
-------
@@ -70,7 +71,7 @@ def create_mask_from_bbox(
"""
masks = []
for bbox in bboxes:
- mask = Image.new("L", image.size, 0)
+ mask = Image.new("L", shape, 0)
mask_draw = ImageDraw.Draw(mask)
mask_draw.rectangle(bbox, fill=255)
masks.append(mask)
diff --git a/adetailer/mediapipe.py b/adetailer/mediapipe.py
index 2e1ea43..7a3e775 100644
--- a/adetailer/mediapipe.py
+++ b/adetailer/mediapipe.py
@@ -45,7 +45,7 @@ def mediapipe_predict(
bboxes.append([x1, y1, x2, y2])
- masks = create_mask_from_bbox(image, bboxes)
+ masks = create_mask_from_bbox(bboxes, image.size)
preview = Image.fromarray(preview_array)
return PredictOutput(bboxes=bboxes, masks=masks, preview=preview)
diff --git a/adetailer/ultralytics.py b/adetailer/ultralytics.py
index 90bc68f..8d378f1 100644
--- a/adetailer/ultralytics.py
+++ b/adetailer/ultralytics.py
@@ -33,7 +33,7 @@ def ultralytics_predict(
bboxes = bboxes.tolist()
if pred[0].masks is None:
- masks = create_mask_from_bbox(image, bboxes)
+ masks = create_mask_from_bbox(bboxes, image.size)
else:
masks = mask_to_pil(pred[0].masks.data, image.size)
preview = pred[0].plot()
@@ -56,17 +56,17 @@ def ultralytics_check():
print(message)
-def mask_to_pil(masks, orig_shape: tuple[int, int]) -> list[Image.Image]:
+def mask_to_pil(masks, shape: tuple[int, int]) -> list[Image.Image]:
"""
Parameters
----------
masks: torch.Tensor, dtype=torch.float32, shape=(N, H, W).
The device can be CUDA, but `to_pil_image` takes care of that.
- orig_shape: tuple[int, int]
+ shape: tuple[int, int]
(width, height) of the original image
"""
from torchvision.transforms.functional import to_pil_image
n = masks.shape[0]
- return [to_pil_image(masks[i], mode="L").resize(orig_shape) for i in range(n)]
+ return [to_pil_image(masks[i], mode="L").resize(shape) for i in range(n)]
diff --git a/scripts/!adetailer.py b/scripts/!adetailer.py
index fbacb8b..d37f64f 100644
--- a/scripts/!adetailer.py
+++ b/scripts/!adetailer.py
@@ -448,6 +448,33 @@ class AfterDetailerScript(scripts.Script):
params_txt = Path(data_path, "params.txt")
params_txt.write_text(infotext, encoding="utf-8")
+ def script_filter(self, p, args: ADetailerArgs):
+ script_runner = copy(p.scripts)
+
+ ad_only_seleted_scripts = opts.data.get("ad_only_seleted_scripts", True)
+ if not ad_only_seleted_scripts:
+ return script_runner
+
+ default = "dynamic_prompting,dynamic_thresholding,wildcards,wildcard_recursive"
+ ad_script_names = opts.data.get("ad_script_names", default)
+ script_names_set = {
+ name
+ for script_name in ad_script_names.split(",")
+ for name in (script_name, script_name.strip())
+ }
+ if args.ad_controlnet_model != "None":
+ script_names_set.add("controlnet")
+
+ filtered_alwayson = []
+ for script_object in script_runner.alwayson_scripts:
+ filepath = script_object.filename
+ filename = Path(filepath).stem
+ if filename in script_names_set:
+ filtered_alwayson.append(script_object)
+
+ script_runner.alwayson_scripts = filtered_alwayson
+ return script_runner
+
def get_i2i_p(self, p, args: ADetailerArgs, image):
prompt, negative_prompt = self.get_prompt(p, args)
seed, subseed = self.get_seed(p)
@@ -492,7 +519,7 @@ class AfterDetailerScript(scripts.Script):
do_not_save_grid=True,
)
- i2i.scripts = copy(p.scripts)
+ i2i.scripts = self.script_filter(p, args)
i2i.script_args = deepcopy(p.script_args)
i2i._disable_adetailer = True
@@ -631,10 +658,10 @@ def on_ui_settings():
shared.opts.add_option(
"ad_max_models",
shared.OptionInfo(
- 2,
- "Max models",
- gr.Slider,
- {"minimum": 1, "maximum": 5, "step": 1},
+ default=2,
+ label="Max models",
+ component=gr.Slider,
+ component_args={"minimum": 1, "maximum": 5, "step": 1},
section=section,
),
)
@@ -649,5 +676,28 @@ def on_ui_settings():
shared.OptionInfo(False, "Save images before ADetailer", section=section),
)
+ shared.opts.add_option(
+ "ad_only_seleted_scripts",
+ shared.OptionInfo(
+ True, "Apply only selected scripts to ADetailer", section=section
+ ),
+ )
+
+ textbox_args = {
+ "placeholder": "comma-separated list of script names",
+ "interactive": True,
+ }
+
+ shared.opts.add_option(
+ "ad_script_names",
+ shared.OptionInfo(
+ default="dynamic_prompting,dynamic_thresholding,wildcards,wildcard_recursive",
+ label="Script names to apply to ADetailer (separated by comma)",
+ component=gr.Textbox,
+ component_args=textbox_args,
+ section=section,
+ ),
+ )
+
script_callbacks.on_ui_settings(on_ui_settings)