mirror of
https://github.com/lllyasviel/stable-diffusion-webui-forge.git
synced 2026-03-02 19:49:48 +00:00
Cnet (#22)
* ini * remove shit * Create control_model.py * i * i * Update controlnet_supported.py * Update controlnet_supported.py * Update controlnet_supported.py * i * i * Update controlnet_supported.py * i * Update controlnet_supported.py * remove shits * remove shit * Update global_state.py * i * i * Update legacy_preprocessors.py * Update legacy_preprocessors.py * remove shit * Update batch_hijack.py * remove shit * remove shit * i * i * i * Update external_code.py * Update global_state.py * Update infotext.py * Update utils.py * Update external_code.py * i * i * i * Update controlnet_ui_group.py * remove shit * remove shit * i * Update controlnet.py * Update controlnet.py * Update controlnet.py * Update controlnet.py * Update controlnet.py * i * Update global_state.py * Update global_state.py * i * Update global_state.py * Update global_state.py * Update global_state.py * Update global_state.py * Update controlnet_ui_group.py * i * Update global_state.py * Update controlnet_ui_group.py * Update controlnet_ui_group.py * i * Update controlnet_ui_group.py * Update controlnet_ui_group.py * Update controlnet_ui_group.py * Update controlnet_ui_group.py
This commit is contained in:
@@ -1,11 +1,7 @@
|
||||
import cv2
|
||||
import os
|
||||
import torch
|
||||
import ldm_patched.modules.utils
|
||||
|
||||
from modules.paths import models_path
|
||||
from ldm_patched.modules import model_management
|
||||
from ldm_patched.modules.model_patcher import ModelPatcher
|
||||
from modules_forge.forge_util import resize_image_with_pad
|
||||
|
||||
|
||||
# Model storage locations under the webui "models" directory; created eagerly
# so later scans/downloads can assume they exist.
controlnet_dir = os.path.join(models_path, 'ControlNet')
os.makedirs(controlnet_dir, exist_ok=True)

preprocessor_dir = os.path.join(models_path, 'ControlNetPreprocessor')
os.makedirs(preprocessor_dir, exist_ok=True)

# Global registries, populated at import time by the add_* helpers below.
# shared_preprocessors / supported_preprocessors map preprocessor name -> instance;
# supported_control_models holds control-model types probed via
# try_build_from_state_dict (see try_load_supported_control_model).
shared_preprocessors = {}
supported_preprocessors = {}
supported_control_models = []
|
||||
|
||||
|
||||
def add_preprocessor(preprocessor):
    """Register *preprocessor* in the legacy ``shared_preprocessors`` registry.

    The registry is keyed by the preprocessor's ``name`` attribute; a later
    registration with the same name silently replaces the earlier one.
    """
    # NOTE(review): the diff overlay fused this function with
    # add_supported_preprocessor below; the two are disentangled here so each
    # registry has one coherent registration helper.
    global shared_preprocessors
    shared_preprocessors[preprocessor.name] = preprocessor
    return


def add_supported_preprocessor(preprocessor):
    """Register *preprocessor* in the ``supported_preprocessors`` registry,
    keyed by its ``name`` attribute."""
    global supported_preprocessors
    supported_preprocessors[preprocessor.name] = preprocessor
    return
|
||||
|
||||
|
||||
class PreprocessorParameter:
    """Description of one UI slider belonging to a preprocessor.

    Nothing is computed here: the constructor simply captures the keyword
    arguments needed to build or update the matching gradio slider component
    and stores them in ``gradio_update_kwargs``, ready to be splatted into a
    ``gr.update(...)`` / slider call.
    """

    def __init__(self, minimum=0.0, maximum=1.0, step=0.01, label='Parameter 1', value=0.5, visible=False, **kwargs):
        # Sliders are hidden by default (visible=False); preprocessors that use
        # a slider construct it with visible=True and a meaningful label.
        self.gradio_update_kwargs = dict(
            minimum=minimum,
            maximum=maximum,
            step=step,
            label=label,
            value=value,
            visible=visible,
            **kwargs,
        )
|
||||
def add_supported_control_model(control_model):
    """Append *control_model* to the global list of control-model types.

    Registered types are probed in order by
    ``try_load_supported_control_model`` when a checkpoint is loaded.
    """
    global supported_control_models
    supported_control_models.append(control_model)
    return
|
||||
|
||||
|
||||
class Preprocessor:
    """Base class for ControlNet preprocessors (annotators).

    Subclasses override ``__call__`` to turn an input image into a control
    image, and may wrap an annotator network with ``setup_model_patcher`` so
    that device placement is delegated to ``ModelPatcher``.
    """

    def __init__(self):
        self.name = 'PreprocessorBase'
        self.tags = []
        # UI sliders; only the resolution slider is visible by default.
        self.slider_resolution = PreprocessorParameter(
            label='Resolution', minimum=128, maximum=2048, value=512, step=8, visible=True)
        self.slider_1 = PreprocessorParameter()
        self.slider_2 = PreprocessorParameter()
        self.slider_3 = PreprocessorParameter()
        # Set by setup_model_patcher for preprocessors that carry a network.
        self.model_patcher: ModelPatcher = None
        self.show_control_mode = True
        self.do_not_need_model = False
        self.sorting_priority = 0.0  # higher goes to top in the list

    def setup_model_patcher(self, model, load_device=None, offload_device=None, dtype=torch.float32, **kwargs):
        """Wrap *model* in a ``ModelPatcher`` and return the patcher.

        Defaults: load on the main torch device, offload to CPU. The dtype is
        forced back to fp32 when the load device cannot use fp16.
        """
        if load_device is None:
            load_device = model_management.get_torch_device()
        if offload_device is None:
            offload_device = torch.device('cpu')
        if not model_management.should_use_fp16(load_device):
            dtype = torch.float32

        model.eval()
        model = model.to(device=offload_device, dtype=dtype)

        patcher = ModelPatcher(model=model, load_device=load_device, offload_device=offload_device, **kwargs)
        patcher.dtype = dtype
        self.model_patcher = patcher
        return patcher

    def move_all_model_patchers_to_gpu(self):
        """Ask model management to load this preprocessor's patcher onto GPU."""
        model_management.load_models_gpu([self.model_patcher])
        return

    def send_tensor_to_model_device(self, x):
        """Return *x* moved to the patcher's current device and dtype."""
        patcher = self.model_patcher
        return x.to(device=patcher.current_device, dtype=patcher.dtype)

    def lazy_memory_management(self, model):
        """Free enough memory on the main device to run *model*.

        This is a stop-gap: it frees (model size + minimum inference memory)
        so legacy code that manages memory itself keeps working. Ideally all
        of this would be handled by the model patcher instead, but migrating
        everything is a large job, so this quick-and-dirty path remains.
        """
        required_memory = model_management.module_size(model) + model_management.minimum_inference_memory()
        model_management.free_memory(required_memory, device=model_management.get_torch_device())
        return

    def process_before_every_sampling(self, process, cnet):
        """Hook called before each sampling run; no-op in the base class."""
        return

    def __call__(self, input_image, resolution, slider_1=None, slider_2=None, slider_3=None, **kwargs):
        """Identity transform; subclasses return the actual control image."""
        return input_image
|
||||
|
||||
|
||||
class PreprocessorNone(Preprocessor):
    """Identity preprocessor shown as 'None' in the UI.

    Inherits the base ``__call__`` (returns the input image unchanged); the
    raised sorting priority keeps it near the top of the dropdown.
    """

    def __init__(self):
        super().__init__()
        self.name = 'None'
        self.sorting_priority = 10
|
||||
|
||||
|
||||
class PreprocessorCanny(Preprocessor):
    """Canny edge-detection preprocessor.

    ``slider_1`` / ``slider_2`` are the low / high hysteresis thresholds
    forwarded to ``cv2.Canny``.
    """

    def __init__(self):
        super().__init__()
        self.name = 'canny'
        self.tags = ['Canny']
        self.slider_1 = PreprocessorParameter(
            minimum=0, maximum=256, step=1, value=100, label='Low Threshold', visible=True)
        self.slider_2 = PreprocessorParameter(
            minimum=0, maximum=256, step=1, value=200, label='High Threshold', visible=True)
        self.sorting_priority = 100

    def __call__(self, input_image, resolution, slider_1=None, slider_2=None, slider_3=None, **kwargs):
        # Pad the image up to the processing resolution, run edge detection,
        # then crop the padding back off the result.
        padded_image, remove_pad = resize_image_with_pad(input_image, resolution)
        edges = cv2.Canny(padded_image, int(slider_1), int(slider_2))
        return remove_pad(cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB))
|
||||
|
||||
|
||||
# Register the built-in preprocessors at import time.
add_preprocessor(PreprocessorNone())
add_preprocessor(PreprocessorCanny())
|
||||
def try_load_supported_control_model(ckpt_path):
    """Load the checkpoint at *ckpt_path* and build a control model from it.

    The checkpoint's state dict is offered to every registered control-model
    type in order; the first type whose ``try_build_from_state_dict`` returns
    a model wins.

    Returns the built model, or ``None`` when no registered type recognises
    the checkpoint.
    """
    # No `global` needed: supported_control_models is only read here.
    state_dict = ldm_patched.modules.utils.load_torch_file(ckpt_path, safe_load=True)
    for supported_type in supported_control_models:
        # Each candidate gets its own shallow copy — the original code copied
        # per iteration, presumably because try_build_from_state_dict may
        # mutate the dict while probing.
        model = supported_type.try_build_from_state_dict(dict(state_dict), ckpt_path)
        if model is not None:
            return model
    return None
|
||||
|
||||
Reference in New Issue
Block a user