Mirror of https://github.com/lllyasviel/stable-diffusion-webui-forge.git, synced 2026-01-31 13:29:46 +00:00

Commit: update
README.md
@@ -357,7 +357,7 @@ import numpy as np
 from modules import scripts
 from modules.shared_cmd_options import cmd_opts
 from modules.paths import models_path
 from modules_forge.shared import shared_preprocessors
-from basicsr.utils.download_util import load_file_from_url
+from modules.modelloader import load_file_from_url
 from ldm_patched.modules.controlnet import load_controlnet
 from modules_forge.controlnet import apply_controlnet_advanced
@@ -425,21 +425,23 @@ class ControlNetExampleForge(scripts.Script):
         width = W * 8
         batch_size = p.batch_size

         input_image = cv2.resize(input_image, (width, height))
-        canny_image = cv2.cvtColor(cv2.Canny(input_image, 100, 200), cv2.COLOR_GRAY2RGB)
-
-        # # Or you can get a list of preprocessors in this way
-        # from modules_forge.shared import shared_preprocessors
-        # canny_preprocessor = shared_preprocessors['canny']
-        # canny_image = canny_preprocessor(
-        #     input_image, resolution=512, slider_1=100, slider_2=200, slider_3=None)
+        preprocessor = shared_preprocessors['canny']
+
+        # detect control at certain resolution
+        control_image = preprocessor(
+            input_image, resolution=512, slider_1=100, slider_2=200, slider_3=None)
+
+        # here we just use nearest neighbour to align input shape.
+        # You may want crop and resize, or crop and fill, or others.
+        control_image = cv2.resize(
+            control_image, (width, height), interpolation=cv2.INTER_NEAREST)

         # Output preprocessor result. Now called every sampling. Cache in your own way.
-        p.extra_result_images.append(canny_image)
+        p.extra_result_images.append(control_image)

         print('Preprocessor Canny finished.')

-        control_image = numpy_to_pytorch(canny_image)
+        control_image_bchw = numpy_to_pytorch(control_image).movedim(-1, 1)

         unet = p.sd_model.forge_objects.unet
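The rename to `control_image_bchw` makes the tensor layout explicit: `numpy_to_pytorch` returns a batched channels-last (BHWC) tensor, while `apply_controlnet_advanced` wants BCHW, hence the `movedim(-1, 1)`. A minimal standalone sketch of that conversion; `numpy_to_pytorch_sketch` is a simplified stand-in for forge's helper, assuming the usual uint8 -> [0, 1] convention:

```python
import numpy as np
import torch


# Simplified stand-in for forge's numpy_to_pytorch:
# HWC uint8 image -> BHWC float tensor in [0, 1].
def numpy_to_pytorch_sketch(x: np.ndarray) -> torch.Tensor:
    return torch.from_numpy(x.astype(np.float32) / 255.0).unsqueeze(0)


control_image = np.zeros((512, 512, 3), dtype=np.uint8)   # H, W, C
bhwc = numpy_to_pytorch_sketch(control_image)             # (1, 512, 512, 3)
control_image_bchw = bhwc.movedim(-1, 1)                  # (1, 3, 512, 512)
assert control_image_bchw.shape == (1, 3, 512, 512)
```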
@@ -478,7 +480,7 @@ class ControlNetExampleForge(scripts.Script):
         advanced_frame_weighting = None
         advanced_sigma_weighting = None

-        unet = apply_controlnet_advanced(unet=unet, controlnet=self.model, image_bchw=control_image.movedim(-1, 1),
+        unet = apply_controlnet_advanced(unet=unet, controlnet=self.model, image_bchw=control_image_bchw,
                                          strength=0.6, start_percent=0.0, end_percent=0.8,
                                          positive_advanced_weighting=positive_advanced_weighting,
                                          negative_advanced_weighting=negative_advanced_weighting,
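In this call, `strength=0.6` scales the injected control signal and `start_percent`/`end_percent` confine it to a window of the sampling schedule (here the first 80% of progress). A sketch of that gating under the assumed interpretation — forge resolves the window against sampler timesteps internally, so this is illustrative, not forge's code:

```python
# Assumed semantics of start_percent / end_percent: the control signal is only
# injected while normalized sampling progress lies inside [start, end].
def control_active(step: int, total_steps: int,
                   start_percent: float = 0.0, end_percent: float = 0.8) -> bool:
    progress = step / max(total_steps - 1, 1)
    return start_percent <= progress <= end_percent


print([s for s in range(20) if control_active(s, 20)])  # steps 0..15 of 20
```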
@@ -138,7 +138,7 @@ class AnimeFaceSegment:
         remote_model_path = "https://huggingface.co/bdsqlsz/qinglong_controlnet-lllite/resolve/main/Annotators/UNet.pth"
         modelpath = os.path.join(self.model_dir, "UNet.pth")
         if not os.path.exists(modelpath):
-            from basicsr.utils.download_util import load_file_from_url
+            from modules.modelloader import load_file_from_url
             load_file_from_url(remote_model_path, model_dir=self.model_dir)
         net = UNet()
         ckpt = torch.load(modelpath, map_location=self.device)
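This hunk is the first of many with the same one-line change: every annotator now downloads weights through webui's own `modules.modelloader.load_file_from_url` instead of basicsr's helper, dropping the basicsr dependency from the download path. A simplified stand-in showing the download-on-first-use contract these call sites rely on (illustrative only, not the webui implementation):

```python
import os
import urllib.request
from typing import Optional


# Illustrative stand-in for modules.modelloader.load_file_from_url: fetch url
# into model_dir unless the target file already exists; return the local path.
def load_file_from_url(url: str, *, model_dir: str,
                       file_name: Optional[str] = None) -> str:
    os.makedirs(model_dir, exist_ok=True)
    local_path = os.path.join(model_dir, file_name or url.split('/')[-1])
    if not os.path.exists(local_path):
        urllib.request.urlretrieve(url, local_path)
    return local_path
```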
@@ -24,7 +24,7 @@ def apply_densepose(input_image, cmap="viridis"):
     if torchscript_model is None:
         model_path = os.path.join(model_dir, "densepose_r50_fpn_dl.torchscript")
         if not os.path.exists(model_path):
-            from basicsr.utils.download_util import load_file_from_url
+            from modules.modelloader import load_file_from_url
             load_file_from_url(remote_torchscript_path, model_dir=model_dir)
         torchscript_model = torch.jit.load(model_path, map_location="cpu").to(devices.get_device_for("controlnet")).eval()
     H, W = input_image.shape[:2]
@@ -70,7 +70,7 @@ def apply_hed(input_image, is_safe=False):
     if os.path.exists(old_modelpath):
         modelpath = old_modelpath
     elif not os.path.exists(modelpath):
-        from basicsr.utils.download_util import load_file_from_url
+        from modules.modelloader import load_file_from_url
         load_file_from_url(remote_model_path, model_dir=modeldir)
     netNetwork = ControlNetHED_Apache2().to(devices.get_device_for("controlnet"))
     netNetwork.load_state_dict(torch.load(modelpath, map_location='cpu'))
@@ -147,7 +147,7 @@ def find_download_model(checkpoint, remote_path):
     if os.path.exists(old_modelpath):
         modelpath = old_modelpath
     elif not os.path.exists(modelpath):
-        from basicsr.utils.download_util import load_file_from_url
+        from modules.modelloader import load_file_from_url
         load_file_from_url(remote_path, model_dir=modeldir)

     return modelpath
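`find_download_model` wraps the same pattern behind one call: prefer a checkpoint already present in the legacy location, otherwise download it, then hand back a usable path. A hedged usage sketch (the file name and URL are placeholders, not values from this commit):

```python
# Hypothetical call, assuming find_download_model from the hunk above is in scope:
modelpath = find_download_model(
    "some_annotator.pth",
    "https://huggingface.co/lllyasviel/Annotators/resolve/main/some_annotator.pth")
```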
@@ -23,7 +23,7 @@ class LamaInpainting:
         remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/ControlNetLama.pth"
         modelpath = os.path.join(self.model_dir, "ControlNetLama.pth")
         if not os.path.exists(modelpath):
-            from basicsr.utils.download_util import load_file_from_url
+            from modules.modelloader import load_file_from_url
             load_file_from_url(remote_model_path, model_dir=self.model_dir)
         config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config.yaml')
         cfg = yaml.safe_load(open(config_path, 'rt'))
@@ -41,7 +41,7 @@ def apply_leres(input_image, thr_a, thr_b, boost=False):
     if os.path.exists(old_model_path):
         model_path = old_model_path
     elif not os.path.exists(model_path):
-        from basicsr.utils.download_util import load_file_from_url
+        from modules.modelloader import load_file_from_url
         load_file_from_url(remote_model_path_leres, model_dir=base_model_path)

     if torch.cuda.is_available():
@@ -56,7 +56,7 @@ def apply_leres(input_image, thr_a, thr_b, boost=False):
     if boost and pix2pixmodel is None:
         pix2pixmodel_path = os.path.join(base_model_path, "latest_net_G.pth")
         if not os.path.exists(pix2pixmodel_path):
-            from basicsr.utils.download_util import load_file_from_url
+            from modules.modelloader import load_file_from_url
             load_file_from_url(remote_model_path_pix2pix, model_dir=base_model_path)

         opt = TestOptions().parse()
@@ -103,7 +103,7 @@ class LineartDetector:
         remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/" + name
         model_path = os.path.join(self.model_dir, name)
         if not os.path.exists(model_path):
-            from basicsr.utils.download_util import load_file_from_url
+            from modules.modelloader import load_file_from_url
             load_file_from_url(remote_model_path, model_dir=self.model_dir)
         model = Generator(3, 1, 3)
         model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
@@ -121,7 +121,7 @@ class LineartAnimeDetector:
         remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/netG.pth"
         modelpath = os.path.join(self.model_dir, "netG.pth")
         if not os.path.exists(modelpath):
-            from basicsr.utils.download_util import load_file_from_url
+            from modules.modelloader import load_file_from_url
             load_file_from_url(remote_model_path, model_dir=self.model_dir)
         norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
         net = UnetGenerator(3, 1, 8, 64, norm_layer=norm_layer, use_dropout=False)
@@ -215,7 +215,7 @@ class MangaLineExtration:
         remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/erika.pth"
         modelpath = os.path.join(self.model_dir, "erika.pth")
         if not os.path.exists(modelpath):
-            from basicsr.utils.download_util import load_file_from_url
+            from modules.modelloader import load_file_from_url
             load_file_from_url(remote_model_path, model_dir=self.model_dir)
         #norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
         net = res_skip()
@@ -102,7 +102,7 @@ def load_model(model_type):
     if os.path.exists(old_model_path):
         model_path = old_model_path
     elif not os.path.exists(model_path):
-        from basicsr.utils.download_util import load_file_from_url
+        from modules.modelloader import load_file_from_url
         load_file_from_url(remote_model_path, model_dir=base_model_path)

     model = DPTDepthModel(
@@ -28,7 +28,7 @@ def apply_mlsd(input_image, thr_v, thr_d):
     if os.path.exists(old_modelpath):
         modelpath = old_modelpath
     elif not os.path.exists(modelpath):
-        from basicsr.utils.download_util import load_file_from_url
+        from modules.modelloader import load_file_from_url
         load_file_from_url(remote_model_path, model_dir=modeldir)
     mlsdmodel = MobileV2_MLSD_Large()
     mlsdmodel.load_state_dict(torch.load(modelpath), strict=True)
@@ -27,7 +27,7 @@ class OneformerDetector:
         remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/" + self.config["name"]
         modelpath = os.path.join(self.model_dir, self.config["name"])
         if not os.path.exists(modelpath):
-            from basicsr.utils.download_util import load_file_from_url
+            from modules.modelloader import load_file_from_url
             load_file_from_url(remote_model_path, model_dir=self.model_dir)
         config = os.path.join(os.path.dirname(__file__), self.config["config"])
         model, self.metadata = make_detectron2_model(config, modelpath)
@@ -203,17 +203,17 @@ class OpenposeDetector:
         face_modelpath = os.path.join(self.model_dir, "facenet.pth")

         if not os.path.exists(body_modelpath):
-            from basicsr.utils.download_util import load_file_from_url
+            from modules.modelloader import load_file_from_url
             load_file_from_url(body_model_path, model_dir=self.model_dir)

         if not os.path.exists(hand_modelpath):
-            from basicsr.utils.download_util import load_file_from_url
+            from modules.modelloader import load_file_from_url
             load_file_from_url(hand_model_path, model_dir=self.model_dir)

         if not os.path.exists(face_modelpath):
-            from basicsr.utils.download_util import load_file_from_url
+            from modules.modelloader import load_file_from_url
             load_file_from_url(face_model_path, model_dir=self.model_dir)
@@ -227,7 +227,7 @@ class OpenposeDetector:
         def load_model(filename: str, remote_url: str):
             local_path = os.path.join(self.model_dir, filename)
             if not os.path.exists(local_path):
-                from basicsr.utils.download_util import load_file_from_url
+                from modules.modelloader import load_file_from_url
                 load_file_from_url(remote_url, model_dir=self.model_dir)
             return local_path
@@ -249,7 +249,7 @@ class OpenposeDetector:
         """
         local_path = os.path.join(self.model_dir, filename)
         if not os.path.exists(local_path):
-            from basicsr.utils.download_util import load_file_from_url
+            from modules.modelloader import load_file_from_url
             load_file_from_url(remote_url, model_dir=self.model_dir)
         return local_path
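OpenposeDetector already funnels its body/hand/face downloads through a small `load_model(filename, remote_url)` helper, so the import swap lands in a single place per call site. A hedged usage sketch of the nested helper above (the URL is a placeholder, not taken from this commit):

```python
# Hypothetical call to the load_model helper defined in the hunk above:
body_modelpath = load_model(
    "body_pose_model.pth",
    "https://huggingface.co/lllyasviel/Annotators/resolve/main/body_pose_model.pth")
```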
@@ -21,7 +21,7 @@ def apply_pidinet(input_image, is_safe=False, apply_fliter=False):
     if os.path.exists(old_modelpath):
         modelpath = old_modelpath
     elif not os.path.exists(modelpath):
-        from basicsr.utils.download_util import load_file_from_url
+        from modules.modelloader import load_file_from_url
         load_file_from_url(remote_model_path, model_dir=modeldir)
     netNetwork = pidinet()
     ckp = load_state_dict(modelpath)
@@ -27,7 +27,7 @@ def apply_uniformer(img):
     if os.path.exists(old_modelpath):
         modelpath = old_modelpath
     elif not os.path.exists(modelpath):
-        from basicsr.utils.download_util import load_file_from_url
+        from modules.modelloader import load_file_from_url
         load_file_from_url(checkpoint_file, model_dir=modeldir)

     model = init_segmentor(config_file, modelpath, device=devices.get_device_for("controlnet"))
@@ -13,7 +13,7 @@ def load_model(filename: str, remote_url: str, model_dir: str) -> str:
     """
     local_path = os.path.join(model_dir, filename)
     if not os.path.exists(local_path):
-        from basicsr.utils.download_util import load_file_from_url
+        from modules.modelloader import load_file_from_url
         load_file_from_url(remote_url, model_dir=model_dir)
     return local_path
@@ -21,7 +21,7 @@ class ZoeDetector:
         remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/ZoeD_M12_N.pt"
         modelpath = os.path.join(self.model_dir, "ZoeD_M12_N.pt")
         if not os.path.exists(modelpath):
-            from basicsr.utils.download_util import load_file_from_url
+            from modules.modelloader import load_file_from_url
             load_file_from_url(remote_model_path, model_dir=self.model_dir)
         conf = get_config("zoedepth", "infer")
         model = ZoeDepth.build_from_config(conf)
@@ -710,7 +710,7 @@ class InsightFaceModel:
     def install_antelopev2(self):
         """insightface's github release on antelopev2 model is down. Downloading
         from huggingface mirror."""
-        from basicsr.utils.download_util import load_file_from_url
+        from modules.modelloader import load_file_from_url
         from annotator.annotator_path import models_path
         model_root = os.path.join(models_path, "insightface", "models", "antelopev2")
         if not model_root:
@@ -11,6 +11,8 @@


+import contextlib

+from annotator.util import HWC3
 from modules_forge.ops import automatic_memory_management
 from legacy_preprocessors.preprocessor_compiled import legacy_preprocessors
 from modules_forge.shared import Preprocessor, PreprocessorParameter, add_preprocessor
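`HWC3` (from the original ControlNet annotator utilities) normalizes whatever a preprocessor returns into a 3-channel HWC uint8 image, which is why the caller below can unconditionally run `result = HWC3(result)`. A simplified sketch of its behavior — the real `annotator.util` implementation also alpha-blends 4-channel input against white; this version is illustrative:

```python
import numpy as np


# Simplified HWC3: promote grayscale to HWC and tile 1-channel input to 3 channels.
def hwc3_sketch(x: np.ndarray) -> np.ndarray:
    assert x.dtype == np.uint8
    if x.ndim == 2:
        x = x[:, :, None]
    if x.shape[2] == 1:
        x = np.concatenate([x, x, x], axis=2)
    return x


assert hwc3_sketch(np.zeros((4, 4), dtype=np.uint8)).shape == (4, 4, 3)
```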
@@ -55,12 +57,15 @@ class LegacyPreprocessor(Preprocessor):
         del slider_3

+        if self.unload_function is not None or self.managed_model is not None:
+            context = contextlib.nullcontext()
+        else:
+            context = automatic_memory_management()

         with context:
-            result = self.call_function(img=input_image, res=resolution, thr_a=slider_1, thr_b=slider_2, **kwargs)
+            result, is_image = self.call_function(img=input_image, res=resolution, thr_a=slider_1, thr_b=slider_2, **kwargs)

+        del is_image  # Not used anymore
+        result = HWC3(result)

         if self.unload_function is not None:
             self.unload_function()
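The new guard picks a context manager up front: preprocessors that manage their own model or expose an `unload_function` run under a no-op `contextlib.nullcontext()`, everything else is wrapped in forge's `automatic_memory_management()`. A minimal sketch of that select-a-context pattern with a stand-in manager (illustrative, not forge's implementation):

```python
import contextlib


# Stand-in for forge's automatic_memory_management(): any context manager
# that acquires and releases a resource around the wrapped call.
@contextlib.contextmanager
def automatic_memory_management_sketch():
    print("move model to GPU")
    try:
        yield
    finally:
        print("offload model")


def run(call_function, self_managed: bool):
    # Self-managed models skip the wrapper via a no-op context.
    context = contextlib.nullcontext() if self_managed else automatic_memory_management_sketch()
    with context:
        return call_function()


run(lambda: 1, self_managed=False)  # prints the move/offload messages
run(lambda: 2, self_managed=True)   # runs without the wrapper
```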
@@ -7,7 +7,7 @@ import numpy as np
 from modules import scripts
 from modules.shared_cmd_options import cmd_opts
 from modules.paths import models_path
 from modules_forge.shared import shared_preprocessors
-from basicsr.utils.download_util import load_file_from_url
+from modules.modelloader import load_file_from_url
 from ldm_patched.modules.controlnet import load_controlnet
 from modules_forge.controlnet import apply_controlnet_advanced
@@ -75,24 +75,23 @@ class ControlNetExampleForge(scripts.Script):
         width = W * 8
         batch_size = p.batch_size

         input_image = cv2.resize(input_image, (width, height))
-        canny_image = cv2.cvtColor(cv2.Canny(input_image, 100, 200), cv2.COLOR_GRAY2RGB)
+        preprocessor = shared_preprocessors['canny']

-        from modules_forge.shared import shared_preprocessors
-        a = 0
+        # detect control at certain resolution
+        control_image = preprocessor(
+            input_image, resolution=512, slider_1=100, slider_2=200, slider_3=None)

-        # # Or you can get a list of preprocessors in this way
-        # from modules_forge.shared import shared_preprocessors
-        # canny_preprocessor = shared_preprocessors['canny']
-        # canny_image = canny_preprocessor(
-        #     input_image, resolution=512, slider_1=100, slider_2=200, slider_3=None)
+        # here we just use nearest neighbour to align input shape.
+        # You may want crop and resize, or crop and fill, or others.
+        control_image = cv2.resize(
+            control_image, (width, height), interpolation=cv2.INTER_NEAREST)

         # Output preprocessor result. Now called every sampling. Cache in your own way.
-        p.extra_result_images.append(canny_image)
+        p.extra_result_images.append(control_image)

         print('Preprocessor Canny finished.')

-        control_image = numpy_to_pytorch(canny_image)
+        control_image_bchw = numpy_to_pytorch(control_image).movedim(-1, 1)

         unet = p.sd_model.forge_objects.unet
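The comment "Now called every sampling. Cache in your own way." is an invitation to memoize: the preprocessor reruns on every sampling pass even when its input has not changed. One possible caching scheme, keyed on the input pixels and the preprocessor settings — a sketch under that assumption, not part of the commit:

```python
import hashlib

import numpy as np

_control_cache = {}


def cached_preprocess(preprocessor, input_image: np.ndarray, **preprocessor_args):
    # Key on the raw pixel bytes plus the preprocessor settings.
    key = (hashlib.sha1(input_image.tobytes()).hexdigest(),
           tuple(sorted(preprocessor_args.items())))
    if key not in _control_cache:
        _control_cache[key] = preprocessor(input_image, **preprocessor_args)
    return _control_cache[key]
```

With this in place, the example's `control_image = preprocessor(...)` call could become `control_image = cached_preprocess(preprocessor, input_image, resolution=512, slider_1=100, slider_2=200, slider_3=None)`.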
@@ -131,7 +130,7 @@ class ControlNetExampleForge(scripts.Script):
         advanced_frame_weighting = None
         advanced_sigma_weighting = None

-        unet = apply_controlnet_advanced(unet=unet, controlnet=self.model, image_bchw=control_image.movedim(-1, 1),
+        unet = apply_controlnet_advanced(unet=unet, controlnet=self.model, image_bchw=control_image_bchw,
                                          strength=0.6, start_percent=0.0, end_percent=0.8,
                                          positive_advanced_weighting=positive_advanced_weighting,
                                          negative_advanced_weighting=negative_advanced_weighting,