mirror of https://github.com/Bing-su/adetailer.git
synced 2026-02-20 07:04:05 +00:00

Merge branch 'dev' into main

CHANGELOG.md (10 changed lines)

@@ -1,10 +1,20 @@
 # Changelog

+## 2023-07-28
+
+- v23.7.10
+  - Tidy up the ultralytics and mediapipe import statements
+  - Remove color from tracebacks (because of the API) and show library versions as well
+  - Drop huggingface_hub and pydantic from install.py
+  - Delete unused ControlNet-related code
+
 ## 2023-07-23

 - v23.7.9
   - Fix the `ultralytics.utils` ModuleNotFoundError (https://github.com/ultralytics/ultralytics/issues/3856)
   - Prevent `pydantic` 2.0 or later from being installed
   - Fix the `controlnet_dir` cmd args issue (PR #107)

 ## 2023-07-20

README.md (45 changed lines)

@@ -44,15 +44,26 @@ Applied in this order: x, y offset → erosion/dilation → merge/invert.
 #### Inpainting

 

-Each option corresponds to a corresponding option on the inpaint tab.
+Each option corresponds to an option on the inpaint tab, so refer to the inpaint tab for details on how to use each one.

 ## ControlNet Inpainting

 You can use ControlNet inpainting if you have the ControlNet extension and ControlNet models installed.

-Support `inpaint, scribble, lineart, openpose, tile` controlnet models. Once you choose a model, the preprocessor is set automatically.
+The `inpaint, scribble, lineart, openpose, tile` ControlNet models are supported. Once you choose a model, the preprocessor is set automatically. This works separately from the model set by the ControlNet extension.

 ## Advanced Options

 API request example: [wiki/API](https://github.com/Bing-su/adetailer/wiki/API)

 `ui-config.json` entries: [wiki/ui-config.json](https://github.com/Bing-su/adetailer/wiki/ui-config.json)

 `[SEP], [SKIP]` tokens: [wiki/Advanced](https://github.com/Bing-su/adetailer/wiki/Advanced)
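
For instance, a minimal sketch of enabling ADetailer through the webui API via `alwayson_scripts`; argument names follow the wiki/API page, while the prompt, model, and host values are placeholders:

```python
import requests

payload = {
    "prompt": "a portrait photo",  # placeholder prompt
    "steps": 20,
    "alwayson_scripts": {
        "ADetailer": {
            "args": [
                {
                    "ad_model": "face_yolov8n.pt",  # any detection model name
                    "ad_confidence": 0.3,
                }
            ]
        }
    },
}

# assumes a local webui started with --api
r = requests.post("http://127.0.0.1:7860/sdapi/v1/txt2img", json=payload)
r.raise_for_status()
```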

 ## Media

 - 🎥 [How to use After Detailer (adetailer), in more detail than anywhere else, Part 1【Stable Diffusion】](https://youtu.be/sF3POwPUWCE)
 - 🎥 [How to use After Detailer (adetailer), in more detail than anywhere else, Part 2【Stable Diffusion】](https://youtu.be/urNISRdbIEg)

 ## Model

@@ -69,34 +80,12 @@ Support `inpaint, scribble, lineart, openpose, tile` controlnet models. Once you
 The yolo models can be found on huggingface [Bingsu/adetailer](https://huggingface.co/Bingsu/adetailer).

-### User Model
+### Additional Model

-Put your [ultralytics](https://github.com/ultralytics/ultralytics) model in `webui/models/adetailer`. The model name should end with `.pt` or `.pth`.
+Put your [ultralytics](https://github.com/ultralytics/ultralytics) yolo model in `webui/models/adetailer`. The model name should end with `.pt` or `.pth`.

 It must be a bbox detection or segmentation model and use all labels.
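
Before dropping a custom model into the folder, a quick local check can catch the wrong task type; a minimal sketch, where the file names are placeholders:

```python
from ultralytics import YOLO

# load the candidate model; the path is a placeholder
model = YOLO("my_face_detector.pt")
print(model.task)  # should print "detect" (bbox) or "segment"

# run a test prediction on any sample image
results = model("sample.jpg", conf=0.3)
print(len(results[0].boxes), "detections")
```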

 ### Dataset

 Datasets used for training the yolo models are:

 #### Face

 - [Anime Face CreateML](https://universe.roboflow.com/my-workspace-mph8o/anime-face-createml)
 - [xml2txt](https://universe.roboflow.com/0oooooo0/xml2txt-njqx1)
 - [AN](https://universe.roboflow.com/sed-b8vkf/an-lfg5i)
 - [wider face](http://shuoyang1213.me/WIDERFACE/index.html)

 #### Hand

 - [AnHDet](https://universe.roboflow.com/1-yshhi/anhdet)
 - [hand-detection-fuao9](https://universe.roboflow.com/catwithawand/hand-detection-fuao9)

 #### Person

 - [coco2017](https://cocodataset.org/#home) (only person)
 - [AniSeg](https://github.com/jerryli27/AniSeg)
 - [skytnt/anime-segmentation](https://huggingface.co/datasets/skytnt/anime-segmentation)

 ## Example

 

@@ -1 +1 @@

-__version__ = "23.7.9"
+__version__ = "23.7.10"

@@ -2,6 +2,7 @@ from __future__ import annotations

 from functools import partial

+import mediapipe as mp
 import numpy as np
 from PIL import Image, ImageDraw

@@ -28,8 +29,6 @@ def mediapipe_predict(
 def mediapipe_face_detection(
     model_type: int, image: Image.Image, confidence: float = 0.3
 ) -> PredictOutput:
-    import mediapipe as mp
-
     img_width, img_height = image.size

     mp_face_detection = mp.solutions.face_detection
@@ -85,8 +84,6 @@ def get_convexhull(points: np.ndarray) -> list[tuple[int, int]]:

 def mediapipe_face_mesh(image: Image.Image, confidence: float = 0.3) -> PredictOutput:
-    import mediapipe as mp
-
     mp_face_mesh = mp.solutions.face_mesh
     draw_util = mp.solutions.drawing_utils
     drawing_styles = mp.solutions.drawing_styles
@@ -130,8 +127,6 @@ def mediapipe_face_mesh(image: Image.Image, confidence: float = 0.3) -> PredictO
 def mediapipe_face_mesh_eyes_only(
     image: Image.Image, confidence: float = 0.3
 ) -> PredictOutput:
-    import mediapipe as mp
-
     mp_face_mesh = mp.solutions.face_mesh

     left_idx = np.array(list(mp_face_mesh.FACEMESH_LEFT_EYE)).flatten()

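For context, a minimal standalone sketch of the mediapipe face-detection call these functions wrap; the input path is a placeholder:

```python
import mediapipe as mp
import numpy as np
from PIL import Image

image = Image.open("face.jpg").convert("RGB")  # placeholder input

# the same solution object the functions above use, now via the module-level import
mp_face_detection = mp.solutions.face_detection
with mp_face_detection.FaceDetection(
    model_selection=1, min_detection_confidence=0.3
) as detector:
    pred = detector.process(np.array(image))  # expects an RGB numpy array

if pred.detections:
    print(f"found {len(pred.detections)} face(s)")
```
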
@@ -3,6 +3,7 @@ from __future__ import annotations
 import io
 import platform
 import sys
+from importlib.metadata import version
 from typing import Any, Callable

 from rich.console import Console, Group
@@ -85,6 +86,17 @@ def ad_args(*args: Any) -> dict[str, Any]:
     }


+def library_version():
+    libraries = ["torch", "torchvision", "ultralytics", "mediapipe"]
+    d = {}
+    for lib in libraries:
+        try:
+            d[lib] = version(lib)
+        except Exception:
+            d[lib] = "Unknown"
+    return d
+
+
 def sys_info() -> dict[str, Any]:
     try:
         import launch
@@ -93,7 +105,7 @@ def sys_info() -> dict[str, Any]:
         commit = launch.commit_hash()
     except Exception:
         version = "Unknown (too old or vladmandic)"
-        commit = "-------"
+        commit = "Unknown"

     return {
         "Platform": platform.platform(),
@@ -101,6 +113,7 @@ def sys_info() -> dict[str, Any]:
         "Version": version,
         "Commit": commit,
         "Commandline": sys.argv,
+        "Libraries": library_version(),
     }

@@ -116,23 +129,12 @@ def get_table(title: str, data: dict[str, Any]) -> Table:
     return table


-def force_terminal_value():
-    try:
-        from modules.shared import cmd_opts
-
-        return True if hasattr(cmd_opts, "skip_torch_cuda_test") else None
-    except Exception:
-        return None
-
-
 def rich_traceback(func: Callable) -> Callable:
-    force_terminal = force_terminal_value()
-
     def wrapper(*args, **kwargs):
         string = io.StringIO()
         width = Console().width
         width = width - 4 if width > 4 else None
-        console = Console(file=string, force_terminal=force_terminal, width=width)
+        console = Console(file=string, width=width)
         try:
             return func(*args, **kwargs)
         except Exception as e:

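Dropping `force_terminal` means rich treats the `StringIO` target as a non-terminal and emits no ANSI color codes, so the captured traceback stays plain text when returned through the API. A minimal standalone sketch of the pattern:

```python
import io

from rich.console import Console

string = io.StringIO()
# without force_terminal=True, a non-terminal file gets no ANSI escape codes
console = Console(file=string, width=80)
try:
    1 / 0
except ZeroDivisionError:
    console.print_exception()

print(string.getvalue())  # plain-text traceback, safe for API responses
```
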
@@ -4,14 +4,14 @@ from pathlib import Path

 import cv2
 from PIL import Image
+from torchvision.transforms.functional import to_pil_image
+from ultralytics import YOLO

 from adetailer import PredictOutput
 from adetailer.common import create_mask_from_bbox


 def load_yolo(model_path: str | Path):
-    from ultralytics import YOLO
-
     try:
         return YOLO(model_path)
     except ModuleNotFoundError:
@@ -57,7 +57,5 @@ def mask_to_pil(masks, shape: tuple[int, int]) -> list[Image.Image]:
     shape: tuple[int, int]
         (width, height) of the original image
     """
-    from torchvision.transforms.functional import to_pil_image
-
     n = masks.shape[0]
     return [to_pil_image(masks[i], mode="L").resize(shape) for i in range(n)]

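As a rough illustration of the tensors `mask_to_pil` receives, a sketch using an ultralytics segmentation model; the checkpoint and image names are placeholders:

```python
from PIL import Image
from ultralytics import YOLO

model = YOLO("yolov8n-seg.pt")  # any segmentation checkpoint
image = Image.open("photo.jpg")

result = model(image)[0]
if result.masks is not None:
    masks = result.masks.data  # float tensor of shape (n, H, W), one mask per instance
    print(masks.shape)  # mask_to_pil resizes each back to the image's (width, height)
```
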
@@ -5,12 +5,6 @@ from contextlib import contextmanager
 from modules import img2img, processing, shared


-def cn_restore_unet_hook(p, cn_latest_network):
-    if cn_latest_network is not None:
-        unet = p.sd_model.model.diffusion_model
-        cn_latest_network.restore(unet)
-
-
 class CNHijackRestore:
     def __init__(self):
         self.process = hasattr(processing, "__controlnet_original_process_images_inner")
@@ -46,8 +46,6 @@ def install():
     # requirements
     ("ultralytics", "8.0.97", None),
     ("mediapipe", "0.10.0", None),
-    ("huggingface_hub", None, None),
-    ("pydantic", "1.10.8", "1.999999.999999"),
     ("rich", "13.4.2", None),
     # ultralytics
     ("py-cpuinfo", None, None),

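Each tuple above reads as (package, minimum version, maximum version), with None meaning unconstrained; the dropped `pydantic` entry is how v23.7.9 pinned it below 2.0. A hedged sketch of how such a constraint can be checked, assuming the `packaging` library is available:

```python
from importlib.metadata import PackageNotFoundError, version

from packaging.version import parse


def needs_install(pkg: str, low: str | None, high: str | None) -> bool:
    # install if the package is missing or falls outside [low, high]
    try:
        v = parse(version(pkg))
    except PackageNotFoundError:
        return True
    if low is not None and v < parse(low):
        return True
    return high is not None and v > parse(high)


print(needs_install("rich", "13.4.2", None))
```
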
@@ -101,8 +101,6 @@ class AfterDetailerScript(scripts.Script):
         self.ultralytics_device = self.get_ultralytics_device()

         self.controlnet_ext = None
-        self.cn_script = None
-        self.cn_latest_network = None

     def __repr__(self):
         return f"{self.__class__.__name__}(version={__version__})"
@@ -347,9 +345,6 @@ class AfterDetailerScript(scripts.Script):
             filename = Path(filepath).stem
             if filename in script_names_set:
                 filtered_alwayson.append(script_object)
-                if filename == "controlnet":
-                    self.cn_script = script_object
-                    self.cn_latest_network = script_object.latest_network

         script_runner.alwayson_scripts = filtered_alwayson
         return script_runner, script_args
@@ -157,6 +157,9 @@ if TYPE_CHECKING:
         comments: Any,
         iteration: int = 0,
         position_in_batch: int = 0,
+        use_main_prompt: bool = False,
+        index: int | None = None,
+        all_negative_prompts: list[str] | None = None,
     ) -> str:
         pass