feat: type hints

This commit is contained in:
Bingsu
2023-05-16 17:22:24 +09:00
parent f0ad1473e3
commit 62e77a55b2
9 changed files with 395 additions and 0 deletions

View File

@@ -21,3 +21,6 @@ ignore = ["B008", "B905", "E501", "F401", "UP007"]
[tool.ruff.isort]
known-first-party = ["launch", "modules"]
[tool.ruff.per-file-ignores]
"sd_webui/*.py" = ["B027", "F403"]

4
sd_webui/__init__.py Normal file
View File

@@ -0,0 +1,4 @@
from typing import TYPE_CHECKING

# At runtime (TYPE_CHECKING is False) the sd_webui package is a thin alias
# that re-exports everything from the real `modules` package; during static
# type checking the stub modules in this package supply annotations instead.
if not TYPE_CHECKING:
    from modules import *

62
sd_webui/images.py Normal file
View File

@@ -0,0 +1,62 @@
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from PIL import Image, PngImagePlugin
from sd_webui.processing import StableDiffusionProcessing
def save_image(
    image: Image.Image,
    path: str,
    basename: str,
    seed: int | None = None,
    prompt: str = "",
    extension: str = "png",
    info: str | PngImagePlugin.iTXt = "",
    short_filename: bool = False,
    no_prompt: bool = False,
    grid: bool = False,
    pnginfo_section_name: str = "parameters",
    p: StableDiffusionProcessing | None = None,
    existing_info: dict | None = None,
    forced_filename: str | None = None,
    suffix: str = "",
    save_to_dirs: bool = False,
) -> tuple[str, str | None]:
    """Save an image (typing stub; the real code is modules.images.save_image).

    Args:
        image (`PIL.Image`):
            The image to be saved.
        path (`str`):
            The directory to save the image. Note, the option `save_to_dirs`
            will make the image to be saved into a sub directory.
        basename (`str`):
            The base filename which will be applied to `filename pattern`.
        seed (`int`, optional):
            Generation seed; presumably substituted into the filename
            pattern — confirm against modules.images.
        prompt (`str`):
            Prompt text; presumably substituted into the filename pattern.
        extension (`str`):
            Image file extension, default is `png`.
        info (`str` or `PngImagePlugin.iTXt`):
            PNG info chunks.
        short_filename (`bool`):
            NOTE(review): semantics not visible here — confirm.
        no_prompt:
            TODO I don't know its meaning.
        grid (`bool`):
            NOTE(review): presumably marks the image as a grid — confirm.
        pnginfo_section_name (`str`):
            Specify the name of the section which `info` will be saved in.
        p (`StableDiffusionProcessing`, optional):
            The processing object the image originated from.
        existing_info (`dict`):
            Additional PNG info. `existing_info == {pnginfo_section_name: info, ...}`
        forced_filename (`str`):
            If specified, `basename` and filename pattern will be ignored.
        suffix (`str`):
            NOTE(review): presumably appended to the filename — confirm.
        save_to_dirs (`bool`):
            If true, the image will be saved into a subdirectory of `path`.

    Returns: (fullfn, txt_fullfn)
        fullfn (`str`):
            The full path of the saved image.
        txt_fullfn (`str` or `None`):
            If a text file is saved for this image, this will be its full
            path. Otherwise None.
    """
else:
from modules.images import *

14
sd_webui/paths.py Normal file
View File

@@ -0,0 +1,14 @@
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import os

# Placeholder paths used only by the type checker; at runtime the real
# values are re-exported from modules.paths.  Each dummy path is a numbered
# entry next to this file.
(
    models_path,
    script_path,
    data_path,
    extensions_dir,
    extensions_builtin_dir,
) = (os.path.join(os.path.dirname(__file__), str(n)) for n in range(1, 6))
else:
from modules.paths import *

166
sd_webui/processing.py Normal file
View File

@@ -0,0 +1,166 @@
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from dataclasses import dataclass, field
from typing import Any, Callable
import numpy as np
import torch
from PIL import Image
def _image():
    # Default factory for image-typed dataclass fields below: a 512x512
    # single-channel ("L", grayscale) placeholder image.
    return Image.new("L", (512, 512))
@dataclass
class StableDiffusionProcessing:
    """Typing stub for modules.processing.StableDiffusionProcessing.

    Used only under TYPE_CHECKING; the real class is re-exported from
    modules.processing at runtime.  Defaults here are placeholders for the
    type checker and are not guaranteed to match the real implementation.
    """

    # Placeholder model object; the real field holds the loaded SD model.
    sd_model: torch.nn.Module = field(default_factory=lambda: torch.nn.Linear(1, 1))
    # Output directories for individual samples and image grids.
    outpath_samples: str = ""
    outpath_grids: str = ""
    # Prompt inputs.
    prompt: str = ""
    prompt_for_display: str = ""
    negative_prompt: str = ""
    styles: list[str] = field(default_factory=list)
    # Seed controls.  NOTE(review): -1 presumably means "randomize" —
    # confirm against modules.processing.
    seed: int = -1
    subseed: int = -1
    subseed_strength: float = 0.0
    seed_resize_from_h: int = -1
    seed_resize_from_w: int = -1
    sampler_name: str | None = None
    # Batch / sampling parameters.
    batch_size: int = 1
    n_iter: int = 1
    steps: int = 50
    cfg_scale: float = 7.0
    width: int = 512
    height: int = 512
    restore_faces: bool = False
    tiling: bool = False
    do_not_save_samples: bool = False
    do_not_save_grid: bool = False
    extra_generation_params: dict[str, Any] = field(default_factory=dict)
    overlay_images: list[Image.Image] = field(default_factory=list)
    eta: float = 0.0
    do_not_reload_embeddings: bool = False
    paste_to: tuple[int | float, ...] = (0, 0, 0, 0)
    color_corrections: list[np.ndarray] = field(default_factory=list)
    denoising_strength: float = 0.0
    sampler_noise_scheduler_override: Callable | None = None
    ddim_discretize: str = ""
    # Sampler sigma-schedule tuning knobs.
    s_min_uncond: float = 0.0
    s_churn: float = 0.0
    s_tmin: float = 0.0
    s_tmax: float = 0.0
    s_noise: float = 0.0
    # Per-run settings overrides.
    override_settings: dict[str, Any] = field(default_factory=dict)
    override_settings_restore_afterwards: bool = False
    is_using_inpainting_conditioning: bool = False
    disable_extra_networks: bool = False
    # Script integration.
    scripts: Any = None
    script_args: list[Any] = field(default_factory=list)
    # Expanded per-image values.
    all_prompts: list[str] = field(default_factory=list)
    all_negative_prompts: list[str] = field(default_factory=list)
    all_seeds: list[int] = field(default_factory=list)
    all_subseeds: list[int] = field(default_factory=list)
    iteration: int = 1
    is_hr_pass: bool = False
@dataclass
class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
    """Typing stub for the txt2img processing class.

    Adds the hires-fix (second-pass upscale) fields to the base stub.
    """

    sampler: Callable | None = None
    # Hires-fix parameters.
    enable_hr: bool = False
    denoising_strength: float = 0.75
    hr_scale: float = 2.0
    hr_upscaler: str = ""
    hr_second_pass_steps: int = 0
    hr_resize_x: int = 0
    hr_resize_y: int = 0
    hr_upscale_to_x: int = 0
    hr_upscale_to_y: int = 0
    width: int = 512
    height: int = 512
    truncate_x: int = 512
    truncate_y: int = 512
    applied_old_hires_behavior_to: tuple[int, int] = (512, 512)
@dataclass
class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
    """Typing stub for the img2img processing class.

    Adds the init-image / inpainting fields to the base stub.
    """

    sampler: Callable | None = None
    init_images: list[Image.Image] = field(default_factory=list)
    resize_mode: int = 0
    denoising_strength: float = 0.75
    image_cfg_scale: float | None = None
    init_latent: torch.Tensor | None = None
    # Mask fields default to a blank 512x512 grayscale placeholder (_image).
    image_mask: Image.Image = field(default_factory=_image)
    latent_mask: Image.Image = field(default_factory=_image)
    mask_for_overlay: Image.Image = field(default_factory=_image)
    mask_blur: int = 4
    inpainting_fill: int = 0
    inpaint_full_res: bool = True
    inpaint_full_res_padding: int = 0
    inpainting_mask_invert: int | bool = 0
    initial_noise_multiplier: float = 1.0
    mask: torch.Tensor | None = None
    nmask: torch.Tensor | None = None
    image_conditioning: torch.Tensor | None = None
@dataclass
class Processed:
    """Typing stub for modules.processing.Processed.

    The result object returned by process_images(); defaults are
    placeholders for the type checker.
    """

    images: list[Image.Image] = field(default_factory=list)
    prompt: list[str] = field(default_factory=list)
    negative_prompt: list[str] = field(default_factory=list)
    seed: list[int] = field(default_factory=list)
    subseed: list[int] = field(default_factory=list)
    subseed_strength: float = 0.0
    info: str = ""
    comments: str = ""
    width: int = 512
    height: int = 512
    sampler_name: str = ""
    cfg_scale: float = 7.0
    image_cfg_scale: float | None = None
    steps: int = 50
    batch_size: int = 1
    restore_faces: bool = False
    face_restoration_model: str | None = None
    sd_model_hash: str = ""
    seed_resize_from_w: int = -1
    seed_resize_from_h: int = -1
    denoising_strength: float = 0.0
    extra_generation_params: dict[str, Any] = field(default_factory=dict)
    index_of_first_image: int = 0
    styles: list[str] = field(default_factory=list)
    job_timestamp: str = ""
    clip_skip: int = 1
    eta: float = 0.0
    ddim_discretize: str = ""
    s_churn: float = 0.0
    s_tmin: float = 0.0
    s_tmax: float = 0.0
    s_noise: float = 0.0
    sampler_noise_scheduler_override: Callable | None = None
    is_using_inpainting_conditioning: bool = False
    all_prompts: list[str] = field(default_factory=list)
    all_negative_prompts: list[str] = field(default_factory=list)
    all_seeds: list[int] = field(default_factory=list)
    all_subseeds: list[int] = field(default_factory=list)
    infotexts: list[str] = field(default_factory=list)
def create_infotext(
    p: StableDiffusionProcessingTxt2Img | StableDiffusionProcessingImg2Img,
    all_prompts: list[str],
    all_seeds: list[int],
    all_subseeds: list[int],
    comments: Any,
    iteration: int = 0,
    position_in_batch: int = 0,
) -> str:
    """Build the generation-parameters infotext string (typing stub).

    The body is intentionally empty: this module exists only under
    TYPE_CHECKING, and the real implementation is re-exported from
    modules.processing at runtime.
    """
    pass
def process_images(
    p: StableDiffusionProcessingTxt2Img | StableDiffusionProcessingImg2Img,
) -> Processed:
    """Run the full txt2img/img2img pipeline for `p` (typing stub).

    Intentionally empty; the real implementation is re-exported from
    modules.processing at runtime.
    """
    pass
else:
from modules.processing import *

10
sd_webui/safe.py Normal file
View File

@@ -0,0 +1,10 @@
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    import torch

    # Alias exposed for type checking.  NOTE(review): presumed to mirror
    # modules.safe, which keeps the original torch.load under this name —
    # confirm against that module.
    unsafe_torch_load = torch.load
else:
    # BUG FIX: was `from module.safe import *` — the package is `modules`,
    # as every sibling stub in this commit imports (modules.images,
    # modules.paths, modules.processing, ...).
    from modules.safe import *

View File

@@ -0,0 +1,13 @@
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from typing import Callable

    def on_ui_settings(callback: Callable):
        """Register *callback* for the settings-UI event (typing stub).

        NOTE(review): semantics inferred from the name; the real
        implementation is modules.script_callbacks.on_ui_settings.
        """
        pass

    def on_after_component(callback: Callable):
        """Register *callback* for the after-component event (typing stub).

        NOTE(review): semantics inferred from the name; the real
        implementation is modules.script_callbacks.on_after_component.
        """
        pass
else:
    from modules.script_callbacks import *

81
sd_webui/scripts.py Normal file
View File

@@ -0,0 +1,81 @@
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Any
import gradio as gr
from PIL import Image
from sd_webui.processing import (
Processed,
StableDiffusionProcessingImg2Img,
StableDiffusionProcessingTxt2Img,
)
# Union of the two processing types accepted by the Script hook methods.
SDPType = StableDiffusionProcessingImg2Img | StableDiffusionProcessingTxt2Img
# Sentinel object.  NOTE(review): presumed to mirror
# modules.scripts.AlwaysVisible (returned from Script.show) — confirm.
AlwaysVisible = object()
@dataclass
class PostprocessImageArgs:
    """Argument container passed to Script.postprocess_image (typing stub)."""

    image: Image.Image
class Script(ABC):
    """Typing stub of modules.scripts.Script, the base class for extension
    scripts.

    All hook bodies here are empty stubs for the type checker; hook
    descriptions below are inferred from the method names — confirm against
    modules.scripts.
    """

    # Attributes declared for typing.  NOTE(review): presumably populated
    # by the webui script loader — confirm.
    filename: str
    args_from: int
    args_to: int
    alwayson: bool
    is_txt2img: bool
    is_img2img: bool
    group: gr.Group
    infotext_fields: list[tuple[str, str]]
    paste_field_names: list[str]

    @abstractmethod
    def title(self):
        """Return the script's display title; subclasses must override."""
        raise NotImplementedError

    def ui(self, is_img2img: bool):
        """Build the script's UI components (stub)."""
        pass

    def show(self, is_img2img: bool):
        """Whether the script is shown for the given tab (stub: True)."""
        return True

    def run(self, p: SDPType, *args):
        """Hook: script selected and run explicitly (stub)."""
        pass

    def process(self, p: SDPType, *args):
        """Hook: before processing starts (stub)."""
        pass

    def before_process_batch(self, p: SDPType, *args, **kwargs):
        """Hook: before a batch is processed (stub)."""
        pass

    def process_batch(self, p: SDPType, *args, **kwargs):
        """Hook: per batch during processing (stub)."""
        pass

    def postprocess_batch(self, p: SDPType, *args, **kwargs):
        """Hook: per batch after processing (stub)."""
        pass

    def postprocess_image(self, p: SDPType, pp: PostprocessImageArgs, *args):
        """Hook: per produced image (stub)."""
        pass

    def postprocess(self, p: SDPType, processed: Processed, *args):
        """Hook: after all processing completes (stub)."""
        pass

    def before_component(self, component, **kwargs):
        """Hook: before a UI component is created (stub)."""
        pass

    def after_component(self, component, **kwargs):
        """Hook: after a UI component is created (stub)."""
        pass

    def describe(self):
        """Return a description of the script (stub: empty string)."""
        return ""

    def elem_id(self, item_id: Any) -> str:
        """Return an HTML element id for *item_id* (stub)."""
        pass
else:
from modules.scripts import *

42
sd_webui/shared.py Normal file
View File

@@ -0,0 +1,42 @@
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import argparse
from dataclasses import dataclass
from typing import Any, Callable
@dataclass
class OptionInfo:
    """Typing stub describing one settings entry (mirrors modules.shared).

    Field meanings below are inferred from the names — confirm against
    modules.shared.OptionInfo.
    """

    # Default value; Option.__getattr__ falls back to this when the option
    # has no stored value.
    default: Any
    # Label shown for the option.
    label: str
    # UI component used to render the option.
    component: Any
    # Keyword arguments for the component.
    component_args: dict[str, Any]
    # Callback invoked when the value changes.
    onchange: Callable[[], None]
    # (section id, section title) pair grouping the option.
    section: tuple[str, str]
    # Callback to refresh the option.
    refresh: Callable[[], None]
class Option:
    """Typing stub of the options container from modules.shared.

    Values stored in ``data`` are exposed as attributes; names missing from
    ``data`` fall back to the default recorded in ``data_labels``.
    """

    # Maps option name -> its OptionInfo descriptor.
    data_labels: dict[str, OptionInfo]

    def __init__(self):
        # Current option values, keyed by option name.
        self.data: dict[str, Any] = {}

    def add_option(self, key: str, info: OptionInfo):
        """Register an option (no-op in this stub)."""

    def __getattr__(self, item: str):
        """Resolve unknown attributes: stored value, then declared default,
        then normal attribute lookup."""
        values = self.data
        if values is not None and item in values:
            return values[item]
        labels = self.data_labels
        if item in labels:
            return labels[item].default
        return super().__getattribute__(item)
# Module-level singletons mirroring modules.shared: `opts` is the options
# store, `cmd_opts` the parsed command-line argument namespace.
opts = Option()
cmd_opts = argparse.Namespace()
else:
from module.shared import *