mirror of
https://github.com/lllyasviel/stable-diffusion-webui-forge.git
synced 2026-03-10 07:29:50 +00:00
i
This commit is contained in:
16
README.md
16
README.md
@@ -351,6 +351,7 @@ Note that this extension is hidden because it is only for developers. To see it
|
||||
import os
|
||||
import cv2
|
||||
import gradio as gr
|
||||
import numpy as np
|
||||
|
||||
from modules import scripts
|
||||
from modules.shared_cmd_options import cmd_opts
|
||||
@@ -423,7 +424,18 @@ class ControlNetExampleForge(scripts.Script):
|
||||
width = W * 8
|
||||
|
||||
input_image = cv2.resize(input_image, (width, height))
|
||||
canny_image = cv2.cvtColor(cv2.Canny(input_image, 100, 200), cv2.COLOR_GRAY2RGB)
|
||||
|
||||
# Below are two methods to preprocess images.
|
||||
# Method 1: do it in your own way
|
||||
canny_image_1 = cv2.cvtColor(cv2.Canny(input_image, 100, 200), cv2.COLOR_GRAY2RGB)
|
||||
|
||||
# Method 2: use built-in preprocessor
|
||||
from modules_forge.shared import shared_preprocessors
|
||||
canny_image_2 = shared_preprocessors['canny'](input_image, 100, 200)
|
||||
|
||||
# The two methods will give your same result
|
||||
assert np.allclose(canny_image_1, canny_image_2)
|
||||
canny_image = canny_image_1
|
||||
|
||||
# Output preprocessor result. Now called every sampling. Cache in your own way.
|
||||
p.extra_result_images.append(canny_image)
|
||||
@@ -437,7 +449,7 @@ class ControlNetExampleForge(scripts.Script):
|
||||
unet = apply_controlnet_advanced(unet=unet, controlnet=self.model, image_bhwc=control_image,
|
||||
strength=0.6, start_percent=0.0, end_percent=0.8,
|
||||
positive_advanced_weighting=None, negative_advanced_weighting=None,
|
||||
frame_advanced_weighting=None)
|
||||
advanced_frame_weighting=None)
|
||||
|
||||
p.sd_model.forge_objects.unet = unet
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
import os
|
||||
import cv2
|
||||
import gradio as gr
|
||||
import numpy as np
|
||||
|
||||
from modules import scripts
|
||||
from modules.shared_cmd_options import cmd_opts
|
||||
@@ -75,7 +76,18 @@ class ControlNetExampleForge(scripts.Script):
|
||||
width = W * 8
|
||||
|
||||
input_image = cv2.resize(input_image, (width, height))
|
||||
canny_image = cv2.cvtColor(cv2.Canny(input_image, 100, 200), cv2.COLOR_GRAY2RGB)
|
||||
|
||||
# Below are two methods to preprocess images.
|
||||
# Method 1: do it in your own way
|
||||
canny_image_1 = cv2.cvtColor(cv2.Canny(input_image, 100, 200), cv2.COLOR_GRAY2RGB)
|
||||
|
||||
# Method 2: use built-in preprocessor
|
||||
from modules_forge.shared import shared_preprocessors
|
||||
canny_image_2 = shared_preprocessors['canny'](input_image, 100, 200)
|
||||
|
||||
# The two methods will give your same result
|
||||
assert np.allclose(canny_image_1, canny_image_2)
|
||||
canny_image = canny_image_1
|
||||
|
||||
# Output preprocessor result. Now called every sampling. Cache in your own way.
|
||||
p.extra_result_images.append(canny_image)
|
||||
|
||||
48
modules_forge/shared.py
Normal file
48
modules_forge/shared.py
Normal file
@@ -0,0 +1,48 @@
|
||||
import cv2
|
||||
|
||||
# Global registry mapping a preprocessor's key (e.g. 'none', 'canny')
# to its singleton preprocessor instance. Populated below at import time.
shared_preprocessors = dict()
|
||||
|
||||
|
||||
class PreprocessorParameter:
    """Configuration for one UI slider of a preprocessor.

    The constructor arguments are stored verbatim in
    ``gradio_update_kwargs`` — presumably so the UI layer can splat them
    into a Gradio component update (the naming suggests this; confirm
    against the consuming UI code).
    """

    def __init__(self, minimum=0.0, maximum=1.0, step=0.01, label='Parameter 1', value=0.5, visible=False):
        # Keep the raw slider configuration unmodified for later forwarding.
        self.gradio_update_kwargs = {
            'minimum': minimum,
            'maximum': maximum,
            'step': step,
            'label': label,
            'value': value,
            'visible': visible,
        }
|
||||
|
||||
|
||||
class PreprocessorBase:
    """Base class for image preprocessors.

    Subclasses set ``name``, reconfigure the three slider slots with
    visible, labelled parameters as needed, and override ``__call__`` to
    transform the incoming image. The base implementation is a no-op.
    """

    def __init__(self):
        self.name = 'PreprocessorBase'
        # Three generic, hidden slider slots; subclasses replace the ones
        # they actually use with visible PreprocessorParameter instances.
        for slot in ('slider_1', 'slider_2', 'slider_3'):
            setattr(self, slot, PreprocessorParameter())

    def __call__(self, input_image, slider_1=None, slider_2=None, slider_3=None, **kwargs):
        # Identity transform: return the image untouched.
        return input_image
|
||||
|
||||
|
||||
class PreprocessorNone(PreprocessorBase):
    """Pass-through preprocessor (registered under the key 'none')."""

    def __init__(self):
        super().__init__()
        self.name = 'None'

    def __call__(self, input_image, slider_1=None, slider_2=None, slider_3=None, **kwargs):
        # Deliberately identical to the base class: apply no preprocessing.
        return input_image
|
||||
|
||||
|
||||
# Register the pass-through preprocessor under the key 'none'.
shared_preprocessors.update(none=PreprocessorNone())
|
||||
|
||||
|
||||
class PreprocessorCanny(PreprocessorBase):
    """Canny edge-detection preprocessor.

    Sliders 1 and 2 are the low/high hysteresis thresholds passed to
    ``cv2.Canny``; slider 3 keeps its hidden default from the base class.
    """

    def __init__(self):
        super().__init__()
        self.name = 'canny'
        # Both thresholds share the same slider range; only the label and
        # default value differ.
        threshold_range = dict(minimum=0, maximum=256, step=1, visible=True)
        self.slider_1 = PreprocessorParameter(value=100, label='Low Threshold', **threshold_range)
        self.slider_2 = PreprocessorParameter(value=200, label='High Threshold', **threshold_range)

    def __call__(self, input_image, slider_1=100, slider_2=200, slider_3=None, **kwargs):
        # cv2.Canny yields a single-channel edge map; convert it back to
        # 3-channel RGB so every preprocessor output has a uniform layout.
        edge_map = cv2.Canny(input_image, int(slider_1), int(slider_2))
        return cv2.cvtColor(edge_map, cv2.COLOR_GRAY2RGB)
|
||||
|
||||
|
||||
# Register the Canny edge detector under the key 'canny'.
shared_preprocessors.update(canny=PreprocessorCanny())
|
||||
Reference in New Issue
Block a user