initial commit

This commit is contained in:
2025-10-21 23:06:12 +07:00
commit 384a42b08e
1550 changed files with 2675522 additions and 0 deletions

22
modules_forge/alter_samplers.py Executable file
View File

@@ -0,0 +1,22 @@
from modules import sd_samplers_kdiffusion, sd_samplers_common
from backend.modules import k_diffusion_extra
class AlterSampler(sd_samplers_kdiffusion.KDiffusionSampler):
    """KDiffusionSampler whose sampling function is looked up in k_diffusion_extra.

    The function is resolved by name: sampler_name 'foo' -> k_diffusion_extra.sample_foo.
    """

    def __init__(self, sd_model, sampler_name):
        self.sampler_name = sampler_name
        self.unet = sd_model.forge_objects.unet
        fn = getattr(k_diffusion_extra, f'sample_{sampler_name}')
        super().__init__(fn, sd_model, None)
def build_constructor(sampler_name):
    """Return a one-argument factory that builds an AlterSampler for *sampler_name*."""
    return lambda model: AlterSampler(model, sampler_name)
# Extra sampler entries contributed by Forge; appended to the stock sampler list.
samplers_data_alter = [
    sd_samplers_common.SamplerData('DDPM', build_constructor(sampler_name='ddpm'), ['ddpm'], {}),
]

21
modules_forge/bnb_installer.py Executable file
View File

@@ -0,0 +1,21 @@
import pkg_resources
from modules.launch_utils import run_pip
# bitsandbytes version Forge is pinned to.
target_bitsandbytes_version = '0.45.3'


def try_install_bnb():
    """Install/upgrade bitsandbytes to the pinned version, best-effort.

    Failures are reported but never raised so startup continues without bnb.
    """
    try:
        bitsandbytes_version = pkg_resources.get_distribution('bitsandbytes').version
    except Exception:
        # Package missing or metadata unreadable -> treat as not installed.
        bitsandbytes_version = None

    try:
        if bitsandbytes_version != target_bitsandbytes_version:
            run_pip(
                f"install -U bitsandbytes=={target_bitsandbytes_version}",
                f"bitsandbytes=={target_bitsandbytes_version}",
            )
    except Exception as e:
        # Report why (the original discarded `e` and printed a placeholder-less f-string).
        print(f'Cannot install bitsandbytes ({e}). Skipped.')

5
modules_forge/config.py Executable file
View File

@@ -0,0 +1,5 @@
# Extensions force-disabled by Forge even if present in the user's extensions
# folder (Forge ships its own replacements / they conflict with built-ins).
always_disabled_extensions = [
    'sd-webui-controlnet',
    'multidiffusion-upscaler-for-automatic1111',
    'forge_space_example'
]

92
modules_forge/cuda_malloc.py Executable file
View File

@@ -0,0 +1,92 @@
import os
import importlib.util
# https://github.com/comfyanonymous/ComfyUI/blob/master/cuda_malloc.py
def get_gpu_names():
    """Return the set of display adapter name strings on Windows; empty set elsewhere."""
    if os.name != 'nt':
        return set()

    import ctypes

    # Mirror of the Win32 DISPLAY_DEVICEA structure consumed by EnumDisplayDevicesA.
    class DISPLAY_DEVICEA(ctypes.Structure):
        _fields_ = [
            ('cb', ctypes.c_ulong),
            ('DeviceName', ctypes.c_char * 32),
            ('DeviceString', ctypes.c_char * 128),
            ('StateFlags', ctypes.c_ulong),
            ('DeviceID', ctypes.c_char * 128),
            ('DeviceKey', ctypes.c_char * 128)
        ]

    user32 = ctypes.windll.user32

    info = DISPLAY_DEVICEA()
    info.cb = ctypes.sizeof(info)

    names = set()
    index = 0
    # Walk adapters until EnumDisplayDevicesA reports no more entries.
    while user32.EnumDisplayDevicesA(None, index, ctypes.byref(info), 0):
        index += 1
        names.add(info.DeviceString.decode('utf-8'))
    return names
# GPU model names checked by cuda_malloc_supported(): any NVIDIA device whose
# name contains one of these disables the cudaMallocAsync backend.
blacklist = {"GeForce GTX TITAN X", "GeForce GTX 980", "GeForce GTX 970", "GeForce GTX 960", "GeForce GTX 950", "GeForce 945M",
             "GeForce 940M", "GeForce 930M", "GeForce 920M", "GeForce 910M", "GeForce GTX 750", "GeForce GTX 745", "Quadro K620",
             "Quadro K1200", "Quadro K2200", "Quadro M500", "Quadro M520", "Quadro M600", "Quadro M620", "Quadro M1000",
             "Quadro M1200", "Quadro M2000", "Quadro M2200", "Quadro M3000", "Quadro M4000", "Quadro M5000", "Quadro M5500", "Quadro M6000",
             "GeForce MX110", "GeForce MX130", "GeForce 830M", "GeForce 840M", "GeForce GTX 850M", "GeForce GTX 860M",
             "GeForce GTX 1650", "GeForce GTX 1630"
             }
def cuda_malloc_supported():
    """Return False when any NVIDIA GPU on this machine matches the blacklist."""
    try:
        names = get_gpu_names()
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.
        names = set()
    for x in names:
        if "NVIDIA" in x:
            for b in blacklist:
                if b in x:
                    return False
    return True
def try_cuda_malloc():
    """Enable PyTorch's cudaMallocAsync allocator when torch >= 2 and the GPU supports it.

    Appends `backend:cudaMallocAsync` to PYTORCH_CUDA_ALLOC_CONF, which must
    happen before the first CUDA allocation to take effect.
    """
    do_cuda_malloc = False
    try:
        version = ""
        torch_spec = importlib.util.find_spec("torch")
        # Read torch's version.py directly to avoid importing (and initializing) torch.
        for folder in torch_spec.submodule_search_locations:
            ver_file = os.path.join(folder, "version.py")
            if os.path.isfile(ver_file):
                spec = importlib.util.spec_from_file_location("torch_version_import", ver_file)
                module = importlib.util.module_from_spec(spec)
                spec.loader.exec_module(module)
                version = module.__version__
        # Compare the whole major component; `version[0]` would read only the first
        # character and misparse a future "10.x" (or crash on "", caught below).
        if int(version.split('.')[0]) >= 2:
            do_cuda_malloc = cuda_malloc_supported()
    except Exception:
        # Was a bare `except:`; keep best-effort behavior but do not trap exits.
        pass

    if do_cuda_malloc:
        env_var = os.environ.get('PYTORCH_CUDA_ALLOC_CONF', None)
        if env_var is None:
            env_var = "backend:cudaMallocAsync"
        else:
            # Preserve any user-provided allocator settings.
            env_var += ",backend:cudaMallocAsync"
        os.environ['PYTORCH_CUDA_ALLOC_CONF'] = env_var
        print('Using cudaMallocAsync backend.')
    else:
        print('Failed to use cudaMallocAsync backend.')
    return

View File

@@ -0,0 +1,49 @@
import torch
from backend import operations, memory_management
from backend.patcher.base import ModelPatcher
from transformers import modeling_utils
class DiffusersModelPatcher:
    """Wraps a diffusers pipeline in a Forge ModelPatcher for automatic memory management.

    The pipeline is built without weight init, optionally switched to the SDP
    attention processor, parked on CPU (the offload device), then registered
    with a ModelPatcher so memory_management can shuttle it to/from the GPU.
    """

    def __init__(self, pipeline_class, dtype=torch.float16, *args, **kwargs):
        # Compute device, and CPU as the resting place between samplings.
        load_device = memory_management.get_torch_device()
        offload_device = torch.device("cpu")

        # Fall back to fp32 when the device cannot run fp16 reliably.
        if not memory_management.should_use_fp16(device=load_device):
            dtype = torch.float32

        self.dtype = dtype

        # Construct with Forge's ops and skip weight init — weights come from
        # the pretrained checkpoint anyway.
        with operations.using_forge_operations():
            with modeling_utils.no_init_weights():
                self.pipeline = pipeline_class.from_pretrained(*args, **kwargs)

        # Prefer torch 2.0 scaled-dot-product attention when the unet supports it.
        if hasattr(self.pipeline, 'unet'):
            if hasattr(self.pipeline.unet, 'set_attn_processor'):
                from diffusers.models.attention_processor import AttnProcessor2_0
                self.pipeline.unet.set_attn_processor(AttnProcessor2_0())
                print('Attention optimization applied to DiffusersModelPatcher')

        self.pipeline = self.pipeline.to(device=offload_device)

        if self.dtype == torch.float16:
            self.pipeline = self.pipeline.half()

        self.pipeline.eval()

        self.patcher = ModelPatcher(
            model=self.pipeline,
            load_device=load_device,
            offload_device=offload_device)

    def prepare_memory_before_sampling(self, batchsize, latent_width, latent_height):
        """Load the pipeline onto the GPU, reserving an inference memory budget.

        The budget is estimated from the latent area (empirical formula:
        scaled area plus a fixed overhead, converted to bytes).
        """
        area = 2 * batchsize * latent_width * latent_height
        inference_memory = (((area * 0.6) / 0.9) + 1024) * (1024 * 1024)
        memory_management.load_models_gpu(
            models=[self.patcher],
            memory_required=inference_memory
        )

    def move_tensor_to_current_device(self, x):
        """Move *x* to wherever the patcher currently holds the model, cast to self.dtype."""
        return x.to(device=self.patcher.current_device, dtype=self.dtype)

View File

@@ -0,0 +1,179 @@
/* Root wrapper of the Forge canvas widget. */
.forge-container {
    width: 100%;
    height: 512px;
    position: relative;
    overflow: hidden;
}

/* Flat dark variant of the image area (used when the "plain" option is on). */
.forge-image-container-plain {
    width: 100%;
    height: calc(100% - 6px); /* leave 6px for the resize handle below */
    position: relative;
    overflow: hidden;
    background-color: #202020;
    background-size: 20px 20px;
    background-position: 0 0, 10px 10px;
}

/* Default image area: two offset diagonal gradients form a checkerboard backdrop. */
.forge-image-container {
    width: 100%;
    height: calc(100% - 6px);
    position: relative;
    overflow: hidden;
    background-color: #cccccc;
    background-image: linear-gradient(45deg, #eee 25%, transparent 25%, transparent 75%, #eee 75%, #eee),
    linear-gradient(45deg, #eee 25%, transparent 25%, transparent 75%, #eee 75%, #eee);
    background-size: 20px 20px;
    background-position: 0 0, 10px 10px;
}

/* The displayed image; JS positions/scales it, so gradio's max sizes are overridden. */
.forge-image {
    position: absolute;
    top: 0;
    left: 0;
    background-size: contain;
    background-repeat: no-repeat;
    cursor: grab;
    max-width: unset !important;
    max-height: unset !important;
}

.forge-image:active {
    cursor: grabbing;
}

/* Hidden native file input; uploads are triggered from the toolbar button. */
.forge-file-upload {
    display: none;
}

/* Draggable handle for resizing the canvas vertically. */
.forge-resize-line {
    width: 100%;
    height: 6px;
    background-image: linear-gradient(to bottom, grey 50%, darkgrey 50%);
    background-size: 4px 4px;
    background-repeat: repeat;
    cursor: ns-resize;
    position: absolute;
    bottom: 0;
    left: 0;
}

/* Always-visible toolbar variant (opts that pin the toolbar). */
.forge-toolbar-static {
    position: absolute;
    top: 0px;
    left: 0px;
    z-index: 10 !important;
    background: rgba(47, 47, 47, 0.8);
    padding: 6px 10px;
    opacity: 1.0 !important;
}

/* Default toolbar: hidden until revealed (opacity animated). */
.forge-toolbar {
    position: absolute;
    top: 0px;
    left: 0px;
    z-index: 10;
    background: rgba(47, 47, 47, 0.8);
    padding: 6px 10px;
    opacity: 0;
    transition: opacity 0.3s ease;
}
/* Toolbar buttons (shared by the auto-hide and static toolbars). */
.forge-toolbar .forge-btn, .forge-toolbar-static .forge-btn {
    padding: 2px 6px;
    border: none;
    background-color: #4a4a4a;
    color: white;
    font-size: 14px;
    cursor: pointer;
    transition: background-color 0.3s ease;
}

/* Bug fix: the first selector in each list below was missing its pseudo-class,
   so the rules applied unconditionally and the later :active rule permanently
   overrode the base color for `.forge-toolbar .forge-btn`. */
.forge-toolbar .forge-btn:hover, .forge-toolbar-static .forge-btn:hover {
    background-color: #5e5e5e;
}

.forge-toolbar .forge-btn:active, .forge-toolbar-static .forge-btn:active {
    background-color: #3e3e3e;
}
/* First toolbar row: action buttons. */
.forge-toolbar-box-a {
    flex-wrap: wrap;
}

/* Second toolbar row: brush controls, spread across the row. */
.forge-toolbar-box-b {
    display: flex;
    flex-wrap: wrap;
    align-items: center;
    justify-content: space-between;
    gap: 4px;
}

.forge-color-picker-block {
    display: flex;
    align-items: center;
}

/* Label stacked above its slider. */
.forge-range-row {
    display: flex;
    flex-direction: column;
    align-items: center;
    justify-content: center;
}

/* Round color swatch: strip the native input chrome. */
.forge-toolbar-color {
    border: none;
    background: none;
    padding: 3px;
    border-radius: 50%;
    width: 20px;
    height: 20px;
    -webkit-appearance: none;
    appearance: none;
    cursor: pointer;
}

.forge-toolbar-color::-webkit-color-swatch-wrapper {
    padding: 0;
    border-radius: 50%;
}

.forge-toolbar-color::-webkit-color-swatch {
    border: none;
    border-radius: 50%;
    background: none;
}

.forge-toolbar-label {
    color: white !important;
    padding: 0 4px;
    display: flex;
    align-items: center;
    margin-bottom: 4px; /* Adjust margin as needed */
}

/* Intentionally empty: hook for future slider styling. */
.forge-toolbar-range {
}

/* Circular brush-size preview that follows the cursor; JS toggles display. */
.forge-scribble-indicator {
    position: relative;
    border-radius: 50%;
    border: 1px solid;
    pointer-events: none;
    display: none;
    width: 80px;
    height: 80px;
}

.forge-no-select {
    user-select: none;
}

/* Centered upload arrow shown while the canvas is empty. */
.forge-upload-hint {
    position: absolute;
    top: 50%;
    left: 50%;
    width: 30%;
    height: 30%;
    transform: translate(-50%, -50%);
}

View File

@@ -0,0 +1,63 @@
<!-- Forge canvas widget template. Every "forge_mixin" id suffix is replaced with
     a per-instance uuid at runtime by the Python side before rendering, so many
     canvases can coexist on one page. -->
<div class="forge-container" id="container_forge_mixin">
    <input type="file" id="imageInput_forge_mixin" class="forge-file-upload">
    <div id="imageContainer_forge_mixin" class="forge-image-container">
        <!-- Two stacked upload-arrow SVGs (white outline over grey) shown while empty. -->
        <div id="uploadHint_forge_mixin">
            <svg xmlns="http://www.w3.org/2000/svg"
                 viewBox="0 0 24 24" fill="none"
                 stroke="white"
                 stroke-width="4"
                 stroke-linecap="round"
                 stroke-linejoin="round"
                 class="forge-upload-hint">
                <path d="M21 15v4a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2v-4"></path>
                <polyline points="17 8 12 3 7 8"></polyline>
                <line x1="12" y1="3" x2="12" y2="15"></line>
            </svg>
            <svg xmlns="http://www.w3.org/2000/svg"
                 viewBox="0 0 24 24" fill="none"
                 stroke="grey"
                 stroke-width="2"
                 stroke-linecap="round"
                 stroke-linejoin="round"
                 class="forge-upload-hint">
                <path d="M21 15v4a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2v-4"></path>
                <polyline points="17 8 12 3 7 8"></polyline>
                <line x1="12" y1="3" x2="12" y2="15"></line>
            </svg>
        </div>
        <!-- Background image plus the scribble canvas layered on top of it. -->
        <img id="image_forge_mixin" class="forge-image forge-no-select">
        <canvas id="drawingCanvas_forge_mixin" class="forge-drawing-canvas"
                style="position:absolute;top:0;left:0;" width="1" height="1"></canvas>
        <div class="forge-toolbar" id="toolbar_forge_mixin">
            <div class="forge-toolbar-box-a">
                <button id="maxButton_forge_mixin" class="forge-btn forge-no-select" title="Maximize"></button>
                <button id="minButton_forge_mixin" class="forge-btn forge-no-select" title="Minimize" style="display: none;"></button>
                <button id="uploadButton_forge_mixin" class="forge-btn forge-no-select" title="Upload">📂</button>
                <button id="removeButton_forge_mixin" class="forge-btn forge-no-select" title="Remove">🗑️</button>
                <button id="centerButton_forge_mixin" class="forge-btn forge-no-select" title="Center Position"></button>
                <button id="resetButton_forge_mixin" class="forge-btn forge-no-select" title="Reset">🔄</button>
                <button id="undoButton_forge_mixin" class="forge-btn forge-no-select" title="Undo">↩️</button>
                <button id="redoButton_forge_mixin" class="forge-btn forge-no-select" title="Redo">↪️</button>
            </div>
            <!-- Brush controls: color, width, opacity, softness. -->
            <div class="forge-toolbar-box-b">
                <div class="forge-color-picker-block" id="scribbleColorBlock_forge_mixin">
                    <input type="color" id="scribbleColor_forge_mixin" class="forge-toolbar-color" value="#000000">
                </div>
                <div class="forge-range-row" id="scribbleWidthBlock_forge_mixin">
                    <div id="widthLabel_forge_mixin" class="forge-toolbar-label">brush width</div>
                    <input type="range" id="scribbleWidth_forge_mixin" class="forge-toolbar-range" min="1" max="20" value="4">
                </div>
                <div class="forge-range-row" id="scribbleAlphaBlock_forge_mixin">
                    <div id="alphaLabel_forge_mixin" class="forge-toolbar-label">brush opacity</div>
                    <input type="range" id="scribbleAlpha_forge_mixin" class="forge-toolbar-range" min="0" max="100" value="100">
                </div>
                <div class="forge-range-row" id="scribbleSoftnessBlock_forge_mixin">
                    <div id="softnessLabel_forge_mixin" class="forge-toolbar-label">brush softness</div>
                    <input type="range" id="scribbleSoftness_forge_mixin" class="forge-toolbar-range" min="0" max="100" value="0">
                </div>
            </div>
        </div>
        <div id="scribbleIndicator_forge_mixin" class="forge-scribble-indicator"></div>
    </div>
    <div class="forge-resize-line" id="resizeLine_forge_mixin"></div>
</div>

1
modules_forge/forge_canvas/canvas.min.js vendored Executable file

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,153 @@
# Forge Canvas
# AGPL V3
# by lllyasviel
# Commercial Use is not allowed. (Contact us for commercial use.)
import gradio.component_meta
create_or_modify_pyi_org = gradio.component_meta.create_or_modify_pyi
def create_or_modify_pyi_org_patched(component_class, class_name, events):
    """Wrapper around gradio's create_or_modify_pyi that skips Forge's LogicalImage.

    gradio's .pyi stub generation is skipped for the LogicalImage shim, and any
    other failure is ignored — stub files are cosmetic and must never break startup.
    """
    try:
        if component_class.__name__ == 'LogicalImage':
            return
        return create_or_modify_pyi_org(component_class, class_name, events)
    except Exception:
        # Was a bare `except:`, which also trapped SystemExit/KeyboardInterrupt.
        return
gradio.component_meta.create_or_modify_pyi = create_or_modify_pyi_org_patched
import os
import uuid
import base64
import gradio as gr
import numpy as np
from PIL import Image
from io import BytesIO
from gradio.context import Context
from functools import wraps
from modules.shared import opts
canvas_js_root_path = os.path.dirname(__file__)
def web_js(file_name):
    """Return a <script> tag for *file_name*, cache-busted with the file's mtime."""
    path = os.path.join(canvas_js_root_path, file_name)
    stamp = os.path.getmtime(path)
    return f'<script src="file={path}?{stamp}"></script>\n'
def web_css(file_name):
    """Return a stylesheet <link> tag for *file_name*, cache-busted with the file's mtime."""
    path = os.path.join(canvas_js_root_path, file_name)
    stamp = os.path.getmtime(path)
    return f'<link rel="stylesheet" href="file={path}?{stamp}">\n'
# When True, the hidden LogicalImage textboxes are rendered for inspection.
DEBUG_MODE = False

# Load the widget template once at import time; use a context manager so the
# file handle is closed promptly (bare open().read() leaked it before).
with open(os.path.join(canvas_js_root_path, 'canvas.html'), encoding='utf-8') as _canvas_html_file:
    canvas_html = _canvas_html_file.read()

# Head snippet injected into the page: canvas stylesheet + minified JS.
canvas_head = ''
canvas_head += web_css('canvas.css')
canvas_head += web_js('canvas.min.js')
def image_to_base64(image_array, numpy=True):
    """Encode an image (numpy array when *numpy*, else PIL image) as a PNG data URL."""
    pil_image = Image.fromarray(image_array) if numpy else image_array
    pil_image = pil_image.convert("RGBA")
    buffer = BytesIO()
    pil_image.save(buffer, format="PNG")
    encoded = base64.b64encode(buffer.getvalue()).decode('utf-8')
    return "data:image/png;base64," + encoded
def base64_to_image(base64_str, numpy=True):
    """Decode a PNG data URL (or raw base64) into an RGBA image (numpy array or PIL)."""
    prefix = "data:image/png;base64,"
    if base64_str.startswith(prefix):
        base64_str = base64_str.replace(prefix, "")
    decoded = base64.b64decode(base64_str)
    pil_image = Image.open(BytesIO(decoded)).convert("RGBA")
    return np.array(pil_image) if numpy else pil_image
class LogicalImage(gr.Textbox):
    """A hidden Textbox that transports images as base64 PNG data URLs.

    The custom canvas syncs image data through a plain textbox; this class
    (de)serializes between images and data-URL strings on the Python side.
    """

    @wraps(gr.Textbox.__init__)
    def __init__(self, *args, numpy=True, **kwargs):
        # numpy=True: preprocess/postprocess use numpy arrays; False: PIL images.
        self.numpy = numpy
        # Last known PIL metadata (infotext), restored onto decoded images.
        self.infotext = dict()

        if 'value' in kwargs:
            initial_value = kwargs['value']
            if initial_value is not None:
                # Bug fix: image_to_base64 is a module-level function, not a method;
                # `self.image_to_base64(...)` raised AttributeError whenever a
                # value was supplied. Also honor the instance's numpy mode.
                kwargs['value'] = image_to_base64(initial_value, numpy=self.numpy)
            else:
                # gr.Textbox must not receive value=None; drop the key instead.
                del kwargs['value']

        super().__init__(*args, **kwargs)

    def preprocess(self, payload):
        """Turn the textbox payload (PNG data URL) into an image; None if empty/invalid."""
        if not isinstance(payload, str):
            return None

        if not payload.startswith("data:image/png;base64,"):
            return None

        image = base64_to_image(payload, numpy=self.numpy)

        # PIL images carry metadata in .info; restore the remembered infotext.
        if hasattr(image, 'info'):
            image.info = self.infotext

        return image

    def postprocess(self, value):
        """Serialize an image back into a data URL, remembering its infotext."""
        if value is None:
            return None

        if hasattr(value, 'info'):
            self.infotext = value.info

        return image_to_base64(value, numpy=self.numpy)

    def get_block_name(self):
        # Render with gradio's stock textbox frontend component.
        return "textbox"
class ForgeCanvas:
    """Python handle for the HTML/JS Forge canvas widget.

    Image data flows through two hidden LogicalImage textboxes (foreground
    scribbles and background image); the JS side is bound to them via a
    per-instance uuid substituted into the HTML template.
    """

    def __init__(
            self,
            no_upload=False,                # disable image uploading
            no_scribbles=False,             # disable drawing tools
            contrast_scribbles=False,       # high-contrast scribble rendering
            height=512,                     # canvas height in px
            scribble_color='#000000',       # initial brush color
            scribble_color_fixed=False,     # lock the brush color control
            scribble_width=4,               # initial brush width
            scribble_width_fixed=False,     # lock the brush width control
            scribble_alpha=100,             # initial brush opacity (%)
            scribble_alpha_fixed=False,     # lock the opacity control
            scribble_softness=0,            # initial brush softness (%)
            scribble_softness_fixed=False,  # lock the softness control
            visible=True,
            numpy=False,                    # True: images as numpy arrays instead of PIL
            initial_image=None,             # optional starting background image
            elem_id=None,
            elem_classes=None
    ):
        # Unique id tying this Python instance to its HTML/JS counterpart.
        self.uuid = 'uuid_' + uuid.uuid4().hex
        canvas_html_uuid = canvas_html.replace('forge_mixin', self.uuid)

        # Apply user preferences by rewriting class names in the template.
        if opts.forge_canvas_plain:
            canvas_html_uuid = canvas_html_uuid.replace('class="forge-image-container"', 'class="forge-image-container-plain"').replace('stroke="white"', 'stroke=#444')

        if opts.forge_canvas_toolbar_always:
            canvas_html_uuid = canvas_html_uuid.replace('class="forge-toolbar"', 'class="forge-toolbar-static"')

        self.block = gr.HTML(canvas_html_uuid, visible=visible, elem_id=elem_id, elem_classes=elem_classes)
        # Hidden textboxes carrying the canvas images as base64 data URLs.
        self.foreground = LogicalImage(visible=DEBUG_MODE, label='foreground', numpy=numpy, elem_id=self.uuid, elem_classes=['logical_image_foreground'])
        self.background = LogicalImage(visible=DEBUG_MODE, label='background', numpy=numpy, value=initial_image, elem_id=self.uuid, elem_classes=['logical_image_background'])
        # Instantiate the JS-side ForgeCanvas once the page has loaded.
        Context.root_block.load(None, js=f'async ()=>{{new ForgeCanvas("{self.uuid}", {no_upload}, {no_scribbles}, {contrast_scribbles}, {height}, '
                                         f"'{scribble_color}', {scribble_color_fixed}, {scribble_width}, {scribble_width_fixed}, "
                                         f'{scribble_alpha}, {scribble_alpha_fixed}, {scribble_softness}, {scribble_softness_fixed});}}')

253
modules_forge/forge_space.py Executable file
View File

@@ -0,0 +1,253 @@
import os
import sys
import uuid
import time
import socket
import gradio as gr
import importlib.util
from gradio.context import Context
from threading import Thread
from huggingface_hub import snapshot_download
from backend import memory_management
spaces = []
def build_html(title, installed=False, url=None):
    """Render the status line for a space: not installed / running at url / ready."""
    if not installed:
        status = '<div style="color: grey;">Not Installed</div>'
    elif isinstance(url, str):
        status = f'<div style="color: green;">Currently Running: <a href="{url}" style="color: blue;" target="_blank">{url}</a></div>'
    else:
        status = '<div style="color: grey;">Installed, Ready to Launch</div>'
    return f'<div>{title}</div>{status}'
def find_free_port(server_name, start_port=None):
    """Return the first TCP port >= *start_port* (default 7860) bindable on *server_name*."""
    host = '127.0.0.1' if server_name is None else server_name
    port = 7860 if start_port is None else start_port

    # Probe ports upward until one binds successfully.
    while True:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as probe:
            try:
                probe.bind((host, port))
            except OSError:
                port += 1
            else:
                return port
def long_path_prefix(path):
    r"""On Windows, prefix \\?\ so nonexistent long paths can be created; no-op elsewhere."""
    needs_prefix = (
        os.name == 'nt'
        and not path.startswith("\\\\?\\")
        and not os.path.exists(path)
    )
    return f"\\\\?\\{path}" if needs_prefix else path
def remove_dir(dir_path):
    """Recursively delete *dir_path*, logging (never raising) per-entry failures."""
    dir_path = long_path_prefix(dir_path)

    # Bottom-up walk: files first, then the now-empty directories.
    for root, dirs, files in os.walk(dir_path, topdown=False):
        for name in files:
            file_path = long_path_prefix(os.path.join(root, name))
            try:
                os.remove(file_path)
            except Exception as e:
                print(f"Error removing file {file_path}: {e}")
        for name in dirs:
            dir_to_remove = long_path_prefix(os.path.join(root, name))
            try:
                os.rmdir(dir_to_remove)
            except Exception as e:
                print(f"Error removing directory {dir_to_remove}: {e}")

    try:
        os.rmdir(dir_path)
        print(f"Deleted: {dir_path}")
    except Exception as e:
        print(f"Error removing directory {dir_path}: {e}. You may try to manually delete the folder.")
    return
class ForgeSpace:
    """UI row + lifecycle manager for one Hugging Face space extension.

    States: not installed -> installed -> running. Installation mirrors the HF
    repo into root_path/huggingface_space_mirror; running imports the space's
    forge_app.py and launches its gradio demo in a worker thread.
    """

    def __init__(self, root_path, title, repo_id=None, repo_type='space', revision=None, allow_patterns=None, ignore_patterns=None, **kwargs):
        self.title = title
        self.root_path = root_path
        # Existence of this folder is what marks the space as "installed".
        self.hf_path = os.path.join(root_path, 'huggingface_space_mirror')
        self.repo_id = repo_id
        self.repo_type = repo_type
        self.revision = revision
        self.is_running = False
        # Tuple returned by demo.launch() while running; None when stopped.
        self.gradio_metas = None
        self.allow_patterns = allow_patterns
        self.ignore_patterns = ignore_patterns

        self.label = gr.HTML(build_html(title=title, url=None), elem_classes=['forge_space_label'])
        self.btn_launch = gr.Button('Launch', elem_classes=['forge_space_btn'])
        self.btn_terminate = gr.Button('Terminate', elem_classes=['forge_space_btn'])
        self.btn_install = gr.Button('Install', elem_classes=['forge_space_btn'])
        self.btn_uninstall = gr.Button('Uninstall', elem_classes=['forge_space_btn'])

        # Components refreshed together after every action (order matters: it
        # must match the appends in refresh_gradio()).
        comps = [
            self.label,
            self.btn_install,
            self.btn_uninstall,
            self.btn_launch,
            self.btn_terminate
        ]

        self.btn_launch.click(self.run, outputs=comps)
        self.btn_terminate.click(self.terminate, outputs=comps)
        self.btn_install.click(self.install, outputs=comps)
        self.btn_uninstall.click(self.uninstall, outputs=comps)
        # Show current status as soon as the page loads.
        Context.root_block.load(self.refresh_gradio, outputs=comps, queue=False, show_progress=False)
        return

    def refresh_gradio(self):
        """Recompute label text and button enabled-states from the current status."""
        results = []

        installed = os.path.exists(self.hf_path)
        requirements_filename = os.path.abspath(os.path.realpath(os.path.join(self.root_path, 'requirements.txt')))
        has_requirement = os.path.exists(requirements_filename)

        if isinstance(self.gradio_metas, tuple):
            # gradio_metas[1] is the URL of the running app.
            results.append(build_html(title=self.title, installed=installed, url=self.gradio_metas[1]))
        else:
            results.append(build_html(title=self.title, installed=installed, url=None))

        # Install / Uninstall / Launch / Terminate updates, same order as comps.
        results.append(gr.update(interactive=not self.is_running and not (installed and not has_requirement), value=("Reinstall" if (installed and has_requirement) else "Install")))
        results.append(gr.update(interactive=not self.is_running and installed))
        results.append(gr.update(interactive=installed and not self.is_running))
        results.append(gr.update(interactive=installed and self.is_running))
        return results

    def install(self):
        """Mirror the HF repo locally and install the space's pip requirements."""
        os.makedirs(self.hf_path, exist_ok=True)

        if self.repo_id is not None:
            downloaded = snapshot_download(
                repo_id=self.repo_id,
                repo_type=self.repo_type,
                revision=self.revision,
                local_dir=self.hf_path,
                force_download=False,
                allow_patterns=self.allow_patterns,
                ignore_patterns=self.ignore_patterns
            )
            print(f'Downloaded: {downloaded}')

        requirements_filename = os.path.abspath(os.path.realpath(os.path.join(self.root_path, 'requirements.txt')))

        if os.path.exists(requirements_filename):
            from modules.launch_utils import run_pip
            run_pip(f'install -r "{requirements_filename}"', desc=f"space requirements for [{self.title}]")

        print(f'Install finished: {self.title}')
        return self.refresh_gradio()

    def uninstall(self):
        """Delete the local repo mirror (shared diffusers models are kept)."""
        remove_dir(self.hf_path)
        print('Uninstall finished. You can also manually delete some diffusers models in "/models/diffusers" to release more spaces, but those diffusers models may be reused by other spaces or extensions. ')
        return self.refresh_gradio()

    def terminate(self):
        """Signal the worker to stop, then wait until the demo has shut down."""
        self.is_running = False
        # gradio_worker() clears gradio_metas after closing the demo.
        while self.gradio_metas is not None:
            time.sleep(0.1)
        return self.refresh_gradio()

    def run(self):
        """Start the space in a background thread and block until it is up."""
        self.is_running = True
        Thread(target=self.gradio_worker).start()
        # gradio_worker() sets gradio_metas once demo.launch() returns.
        while self.gradio_metas is None:
            time.sleep(0.1)
        return self.refresh_gradio()

    def gradio_worker(self):
        """Worker thread: import the space's forge_app.py, run it, clean up on stop."""
        import spaces
        spaces.unload_module()

        original_cwd = os.getcwd()
        os.chdir(self.hf_path)

        # Temporarily evict module trees the space may shadow with its own copies.
        unsafe_module_prefixes = ['models', 'annotator']
        modules_backup = {}

        for module_name in list(sys.modules.keys()):
            if any(module_name.startswith(prefix + '.') or module_name == prefix for prefix in unsafe_module_prefixes):
                modules_backup[module_name] = sys.modules[module_name]
                del sys.modules[module_name]

        # Free VRAM before the space loads its own models.
        memory_management.unload_all_models()

        sys.path.insert(0, self.hf_path)
        sys.path.insert(0, self.root_path)

        # Import the space entry point under a unique module name.
        file_path = os.path.join(self.root_path, 'forge_app.py')
        module_name = 'forge_space_' + str(uuid.uuid4()).replace('-', '_')
        spec = importlib.util.spec_from_file_location(module_name, file_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        demo = getattr(module, 'demo')

        from modules import initialize_util
        from modules.shared import cmd_opts

        server_name = initialize_util.gradio_server_name()
        port = find_free_port(server_name=server_name, start_port=cmd_opts.port)

        self.gradio_metas = demo.launch(
            inbrowser=True,
            prevent_thread_lock=True,
            server_name=server_name,
            server_port=port
        )

        # Restore evicted modules and undo path/cwd changes.
        sys.modules.update(modules_backup)

        if 'models' in sys.modules:
            del sys.modules['models']

        sys.path.remove(self.hf_path)
        sys.path.remove(self.root_path)
        os.chdir(original_cwd)

        # Serve until terminate() flips is_running.
        while self.is_running:
            time.sleep(0.1)

        demo.close()
        self.gradio_metas = None
        return
def main_entry():
    """Build the Spaces tab: one accordion per tag, one row per enabled space extension."""
    global spaces
    from modules.extensions import extensions

    # Group enabled forge-space extensions by their declared tag.
    tagged_extensions = {}
    for ex in extensions:
        if not (ex.enabled and ex.is_forge_space):
            continue
        tag = ex.space_meta['tag']
        if tag not in tagged_extensions:
            tagged_extensions[tag] = []
        tagged_extensions[tag].append(ex)

    for tag, exs in tagged_extensions.items():
        with gr.Accordion(tag, open=True):
            for ex in exs:
                with gr.Row(equal_height=True):
                    spaces.append(ForgeSpace(root_path=ex.path, **ex.space_meta))
    return

1
modules_forge/forge_version.py Executable file
View File

@@ -0,0 +1 @@
version = '2.0.1v1.10.1'

64
modules_forge/gradio_compile.py Executable file
View File

@@ -0,0 +1,64 @@
# ComfyUI node input types that map to wiring (not widgets) and need no UI here.
_PASSTHROUGH_TYPES = {'MODEL', 'CONDITIONING', 'LATENT', 'CLIP_VISION', 'IMAGE', 'VAE'}


def gradio_compile(items, prefix):
    """Print gradio component declarations for a ComfyUI INPUT_TYPES dict.

    Dev helper: the generated lines are printed so they can be pasted into UI
    code. Returns the generated parameter names, prefixed with 'enabled'.

    items  -- a dict with a "required" mapping of name -> (type, spec).
    prefix -- prepended to each generated name ('' for none).
    """
    names = []
    for k, v in items["required"].items():
        t = v[0]
        d = v[1] if len(v) > 1 else None

        if prefix != '':
            name = (prefix + '_' + k).replace(' ', '_').lower()
        else:
            name = k.replace(' ', '_').lower()

        title = name.replace('_', ' ').title()

        if t == 'INT':
            default = int(d['default'])
            # lo/hi avoid shadowing the builtins `min`/`max` as the original did.
            lo = int(d['min'])
            hi = int(d['max'])
            step = int(d.get('step', 1))
            print(f'{name} = gr.Slider(label=\'{title}\', minimum={lo}, maximum={hi}, step={step}, value={default})')
            names.append(name)
        elif t == 'FLOAT':
            default = float(d['default'])
            lo = float(d['min'])
            hi = float(d['max'])
            step = float(d.get('step', 0.001))
            print(f'{name} = gr.Slider(label=\'{title}\', minimum={lo}, maximum={hi}, step={step}, value={default})')
            names.append(name)
        elif isinstance(t, list):
            # A list of strings becomes a radio choice set.
            print(f'{name} = gr.Radio(label=\'{title}\', choices={str(t)}, value=\'{t[0]}\')')
            names.append(name)
        elif t not in _PASSTHROUGH_TYPES:
            # Unknown type: flag it instead of silently dropping the input.
            print('error ' + str(t))

    return ['enabled'] + names
def print_info_text(name_list, prefix):
    """Print a pasteable `p.extra_generation_params.update(...)` snippet.

    Dev helper companion to gradio_compile(); *name_list* is its return value
    and *prefix* namespaces the generated infotext keys.
    """
    print(', '.join(name_list))
    print('p.extra_generation_params.update(dict(')
    for n in name_list:
        print(prefix + '_' + n + ' = ' + n + ', ')
    # "update(dict(" opened two parens; the original printed only ')' which
    # produced a syntactically unbalanced snippet.
    print('))')
    return
# from modules_forge.gradio_compile import gradio_compile
# ps = []
# ps += gradio_compile(SVD_img2vid_Conditioning.INPUT_TYPES(), prefix='')
# ps += gradio_compile(KSampler.INPUT_TYPES(), prefix='sampling')
# ps += gradio_compile(VideoLinearCFGGuidance.INPUT_TYPES(), prefix='guidance')
# print(', '.join(ps))
# print_info_text(ps, '123')

97
modules_forge/initialization.py Executable file
View File

@@ -0,0 +1,97 @@
import os
import sys
INITIALIZED = False
MONITOR_MODEL_MOVING = False
def monitor_module_moving():
    """Debug aid: when MONITOR_MODEL_MOVING is set, print a stack trace on every Module.to()."""
    if not MONITOR_MODEL_MOVING:
        return

    import torch
    import traceback

    original_to = torch.nn.Module.to

    def traced_to(*args, **kwargs):
        # Show where the movement was triggered from, then delegate.
        traceback.print_stack()
        print('Model Movement')
        return original_to(*args, **kwargs)

    torch.nn.Module.to = traced_to
    return
def initialize_forge():
    """One-time process setup: paths, CLI args, CUDA device, HF caches, patches.

    Ordering matters: device selection and allocator config must happen before
    torch initializes CUDA, and cache env vars before anything touches HF hub.
    """
    global INITIALIZED
    if INITIALIZED:
        return

    INITIALIZED = True

    # Make the bundled third-party packages importable.
    sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), 'packages_3rdparty'))

    # Legacy A1111 memory flags are obsolete in Forge; warn but continue.
    bad_list = ['--lowvram', '--medvram', '--medvram-sdxl']

    for bad in bad_list:
        if bad in sys.argv:
            print(f'Arg {bad} is removed in Forge.')
            print(f'Now memory management is fully automatic and you do not need any command flags.')
            print(f'Please just remove this flag.')
            print(f'In extreme cases, if you want to force previous lowvram/medvram behaviors, '
                  f'please use --always-offload-from-vram')

    from backend.args import args

    # Pin the process to one GPU before torch creates its CUDA context.
    if args.gpu_device_id is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_device_id)
        print("Set device to:", args.gpu_device_id)

    if args.cuda_malloc:
        # Allocator backend must be set before the first CUDA allocation.
        from modules_forge.cuda_malloc import try_cuda_malloc
        try_cuda_malloc()

    from backend import memory_management
    import torch

    monitor_module_moving()

    # Touch the device once to force CUDA context creation, then drop the cache.
    device = memory_management.get_torch_device()
    torch.zeros((1, 1)).to(device, torch.float32)
    memory_management.soft_empty_cache()

    if memory_management.can_install_bnb():
        from modules_forge.bnb_installer import try_install_bnb
        try_install_bnb()

    from backend import stream
    print('CUDA Using Stream:', stream.should_use_stream())

    from modules_forge.shared import diffusers_dir

    # Redirect every HuggingFace cache location into Forge's diffusers folder,
    # unless the user has already configured one.
    # if 'TRANSFORMERS_CACHE' not in os.environ:
    #     os.environ['TRANSFORMERS_CACHE'] = diffusers_dir

    if 'HF_HOME' not in os.environ:
        os.environ['HF_HOME'] = diffusers_dir

    if 'HF_DATASETS_CACHE' not in os.environ:
        os.environ['HF_DATASETS_CACHE'] = diffusers_dir

    if 'HUGGINGFACE_HUB_CACHE' not in os.environ:
        os.environ['HUGGINGFACE_HUB_CACHE'] = diffusers_dir

    if 'HUGGINGFACE_ASSETS_CACHE' not in os.environ:
        os.environ['HUGGINGFACE_ASSETS_CACHE'] = diffusers_dir

    if 'HF_HUB_CACHE' not in os.environ:
        os.environ['HF_HUB_CACHE'] = diffusers_dir

    import modules_forge.patch_basic
    modules_forge.patch_basic.patch_all_basics()
    return

475
modules_forge/main_entry.py Executable file
View File

@@ -0,0 +1,475 @@
import os
import torch
import gradio as gr
from gradio.context import Context
from modules import shared_items, shared, ui_common, sd_models, processing, infotext_utils, paths, ui_loadsave
from backend import memory_management, stream
from backend.args import dynamic_args
from modules.shared import cmd_opts
# Total VRAM reported by the backend, in MB; sizes the "GPU Weights" slider.
total_vram = int(memory_management.total_vram)

# Singleton UI components, populated by make_checkpoint_manager_ui().
ui_forge_preset: gr.Radio = None
ui_checkpoint: gr.Dropdown = None
ui_vae: gr.Dropdown = None
ui_clip_skip: gr.Slider = None
ui_forge_unet_storage_dtype_options: gr.Radio = None
ui_forge_async_loading: gr.Radio = None
ui_forge_pin_shared_memory: gr.Radio = None
ui_forge_inference_memory: gr.Slider = None

# UI label -> (storage dtype, use fp16 LoRA) for "Diffusion in Low Bits".
# String dtypes ('nf4', 'fp4') select bitsandbytes quantization; torch float8
# dtypes are used directly. None means keep the checkpoint's native dtype.
forge_unet_storage_dtype_options = {
    'Automatic': (None, False),
    'Automatic (fp16 LoRA)': (None, True),
    'bnb-nf4': ('nf4', False),
    'bnb-nf4 (fp16 LoRA)': ('nf4', True),
    'float8-e4m3fn': (torch.float8_e4m3fn, False),
    'float8-e4m3fn (fp16 LoRA)': (torch.float8_e4m3fn, True),
    'bnb-fp4': ('fp4', False),
    'bnb-fp4 (fp16 LoRA)': ('fp4', True),
    'float8-e5m2': (torch.float8_e5m2, False),
    'float8-e5m2 (fp16 LoRA)': (torch.float8_e5m2, True),
}

# file name -> full path of every known VAE / text-encoder module
# (rebuilt in place by refresh_models()).
module_list = {}
def bind_to_opts(comp, k, save=False, callback=None):
    """Wire a gradio component so its changes write through to shared.opts[k].

    save     -- also persist opts to the config file on every change.
    callback -- optional hook invoked after the option is updated.
    """
    def _write_through(v):
        shared.opts.set(k, v)
        if save:
            shared.opts.save(shared.config_filename)
        if callback is not None:
            callback()
        return

    comp.change(_write_through, inputs=[comp], queue=False, show_progress=False)
    return
def make_checkpoint_manager_ui():
    """Build the top-bar model manager UI (preset, checkpoint, VAE/TE, memory controls).

    Populates the module-level ui_* singletons and wires their change handlers.
    """
    global ui_checkpoint, ui_vae, ui_clip_skip, ui_forge_unet_storage_dtype_options, ui_forge_async_loading, ui_forge_pin_shared_memory, ui_forge_inference_memory, ui_forge_preset

    # Ensure a valid checkpoint is selected before building the dropdown.
    if shared.opts.sd_model_checkpoint in [None, 'None', 'none', '']:
        if len(sd_models.checkpoints_list) == 0:
            sd_models.list_models()
        if len(sd_models.checkpoints_list) > 0:
            shared.opts.set('sd_model_checkpoint', next(iter(sd_models.checkpoints_list.values())).name)

    ui_forge_preset = gr.Radio(label="UI", value=lambda: shared.opts.forge_preset, choices=['sd', 'xl', 'flux', 'all'], elem_id="forge_ui_preset")

    ckpt_list, vae_list = refresh_models()

    ui_checkpoint = gr.Dropdown(
        value=lambda: shared.opts.sd_model_checkpoint,
        label="Checkpoint",
        elem_classes=['model_selection'],
        choices=ckpt_list
    )

    # render=False: placed explicitly after the refresh button (see ui_vae.render()).
    ui_vae = gr.Dropdown(
        value=lambda: [os.path.basename(x) for x in shared.opts.forge_additional_modules],
        multiselect=True,
        label="VAE / Text Encoder",
        render=False,
        choices=vae_list
    )

    def gr_refresh_models():
        # Rescan disk and push fresh choices into both dropdowns.
        a, b = refresh_models()
        return gr.update(choices=a), gr.update(choices=b)

    refresh_button = ui_common.ToolButton(value=ui_common.refresh_symbol, elem_id=f"forge_refresh_checkpoint", tooltip="Refresh")
    refresh_button.click(
        fn=gr_refresh_models,
        inputs=[],
        outputs=[ui_checkpoint, ui_vae],
        show_progress=False,
        queue=False
    )
    # Also refresh once when the page loads.
    Context.root_block.load(
        fn=gr_refresh_models,
        inputs=[],
        outputs=[ui_checkpoint, ui_vae],
        show_progress=False,
        queue=False
    )

    ui_vae.render()

    ui_forge_unet_storage_dtype_options = gr.Dropdown(label="Diffusion in Low Bits", value=lambda: shared.opts.forge_unet_storage_dtype, choices=list(forge_unet_storage_dtype_options.keys()))
    bind_to_opts(ui_forge_unet_storage_dtype_options, 'forge_unet_storage_dtype', save=True, callback=refresh_model_loading_parameters)

    ui_forge_async_loading = gr.Radio(label="Swap Method", value=lambda: shared.opts.forge_async_loading, choices=['Queue', 'Async'])
    ui_forge_pin_shared_memory = gr.Radio(label="Swap Location", value=lambda: shared.opts.forge_pin_shared_memory, choices=['CPU', 'Shared'])
    # Slider shows GPU weight budget = total VRAM minus reserved inference memory.
    ui_forge_inference_memory = gr.Slider(label="GPU Weights (MB)", value=lambda: total_vram - shared.opts.forge_inference_memory, minimum=0, maximum=int(memory_management.total_vram), step=1)

    # Any change to the three memory controls reapplies the settings together.
    mem_comps = [ui_forge_inference_memory, ui_forge_async_loading, ui_forge_pin_shared_memory]

    ui_forge_inference_memory.change(ui_refresh_memory_management_settings, inputs=mem_comps, queue=False, show_progress=False)
    ui_forge_async_loading.change(ui_refresh_memory_management_settings, inputs=mem_comps, queue=False, show_progress=False)
    ui_forge_pin_shared_memory.change(ui_refresh_memory_management_settings, inputs=mem_comps, queue=False, show_progress=False)
    # Apply the current memory settings once at startup.
    Context.root_block.load(ui_refresh_memory_management_settings, inputs=mem_comps, queue=False, show_progress=False)

    ui_clip_skip = gr.Slider(label="Clip skip", value=lambda: shared.opts.CLIP_stop_at_last_layers, **{"minimum": 1, "maximum": 12, "step": 1})
    bind_to_opts(ui_clip_skip, 'CLIP_stop_at_last_layers', save=True)

    ui_checkpoint.change(checkpoint_change, inputs=[ui_checkpoint], show_progress=False)
    ui_vae.change(modules_change, inputs=[ui_vae], queue=False, show_progress=False)
    return
def find_files_with_extensions(base_path, extensions):
    """Recursively collect files under *base_path* whose names end with any of *extensions*.

    Returns a dict keyed by bare filename mapping to the file's full path.
    When the same filename appears in several folders, the one visited last
    by os.walk wins, since the dict is keyed by name only.
    """
    suffixes = tuple(extensions)
    found_files = {}
    for root, _, names in os.walk(base_path):
        for name in names:
            if name.endswith(suffixes):
                found_files[name] = os.path.join(root, name)
    return found_files
def refresh_models():
    """Rescan checkpoints and VAE / text-encoder modules from disk.

    Rebuilds the global ``module_list`` in place and returns a pair of
    (checkpoint tile list, module filename key view) for the UI dropdowns.
    """
    global module_list

    shared_items.refresh_checkpoints()
    ckpt_list = shared_items.list_checkpoint_tiles(shared.opts.sd_checkpoint_dropdown_use_short)

    file_extensions = ['ckpt', 'pt', 'bin', 'safetensors', 'gguf']

    module_list.clear()

    search_roots = [
        os.path.abspath(os.path.join(paths.models_path, "VAE")),
        os.path.abspath(os.path.join(paths.models_path, "text_encoder")),
    ]
    # Optional extra folders from command line flags, in the same order as before.
    for extra_dir in (shared.cmd_opts.vae_dir, shared.cmd_opts.text_encoder_dir):
        if isinstance(extra_dir, str):
            search_roots.append(os.path.abspath(extra_dir))

    for root in search_roots:
        module_list.update(find_files_with_extensions(root, file_extensions))

    return ckpt_list, module_list.keys()
def ui_refresh_memory_management_settings(model_memory, async_loading, pin_shared_memory):
    """UI event handler: push memory settings using the slider value directly.

    The "GPU Weights" slider already holds the model-memory figure, so it is
    forwarded as ``model_memory`` to skip the redundant recalculation inside
    refresh_memory_management_settings.
    """
    refresh_memory_management_settings(
        model_memory=model_memory,
        async_loading=async_loading,
        pin_shared_memory=pin_shared_memory,
    )
def refresh_memory_management_settings(async_loading=None, inference_memory=None, pin_shared_memory=None, model_memory=None):
    """Persist swap/memory options and propagate them to the backend.

    Any argument left as None falls back to the stored option value.
    ``model_memory`` (weights) and ``inference_memory`` (computation) are
    complementary partitions of total VRAM: whichever one was supplied
    determines the other.
    """
    if async_loading is None:
        async_loading = shared.opts.forge_async_loading
    if inference_memory is None:
        inference_memory = shared.opts.forge_inference_memory
    if pin_shared_memory is None:
        pin_shared_memory = shared.opts.forge_pin_shared_memory

    if model_memory is None:
        model_memory = total_vram - inference_memory
    else:
        inference_memory = total_vram - model_memory

    shared.opts.set('forge_async_loading', async_loading)
    shared.opts.set('forge_inference_memory', inference_memory)
    shared.opts.set('forge_pin_shared_memory', pin_shared_memory)

    stream.stream_activated = async_loading == 'Async'
    memory_management.current_inference_memory = inference_memory * 1024 * 1024  # MB -> bytes
    memory_management.PIN_SHARED_MEMORY = pin_shared_memory == 'Shared'

    log_dict = dict(
        stream=stream.should_use_stream(),
        inference_memory=memory_management.minimum_inference_memory() / (1024 * 1024),
        pin_shared_memory=memory_management.PIN_SHARED_MEMORY
    )
    print(f'Environment vars changed: {log_dict}')

    # Warn when less than 512 MB (or 5% of VRAM, whichever is smaller) is left for computation.
    if inference_memory < min(512, total_vram * 0.05):
        print('------------------')
        print(f'[Low VRAM Warning] You just set Forge to use 100% GPU memory ({model_memory:.2f} MB) to load model weights.')
        print('[Low VRAM Warning] This means you will have 0% GPU memory (0.00 MB) to do matrix computation. Computations may fallback to CPU or go Out of Memory.')
        print('[Low VRAM Warning] In many cases, image generation will be 10x slower.')
        print("[Low VRAM Warning] To solve the problem, you can set the 'GPU Weights' (on the top of page) to a lower value.")
        print("[Low VRAM Warning] If you cannot find 'GPU Weights', you can click the 'all' option in the 'UI' area on the left-top corner of the webpage.")
        print('[Low VRAM Warning] Make sure that you know what you are testing.')
        print('------------------')
    else:
        compute_percentage = (inference_memory / total_vram) * 100.0
        print(f'[GPU Setting] You will use {(100 - compute_percentage):.2f}% GPU memory ({model_memory:.2f} MB) to load weights, and use {compute_percentage:.2f}% GPU memory ({inference_memory:.2f} MB) to do matrix computation.')

    processing.need_global_unload = True
    return
def refresh_model_loading_parameters():
    """Record the current checkpoint / additional modules / storage dtype for the loader.

    Also flags that the currently loaded model must be unloaded before the
    next sampling run.
    """
    from modules.sd_models import select_checkpoint, model_data

    unet_storage_dtype, lora_fp16 = forge_unet_storage_dtype_options.get(
        shared.opts.forge_unet_storage_dtype, (None, False))

    dynamic_args['online_lora'] = lora_fp16

    model_data.forge_loading_parameters = dict(
        checkpoint_info=select_checkpoint(),
        additional_modules=shared.opts.forge_additional_modules,
        unet_storage_dtype=unet_storage_dtype
    )

    print(f'Model selected: {model_data.forge_loading_parameters}')
    print(f'Using online LoRAs in FP16: {lora_fp16}')

    processing.need_global_unload = True
    return
def checkpoint_change(ckpt_name: str, save=True, refresh=True):
    """Select a checkpoint by name (any valid alias). Returns True if it changed."""
    target = sd_models.get_closet_checkpoint_match(ckpt_name)
    current = sd_models.get_closet_checkpoint_match(shared.opts.data.get('sd_model_checkpoint', ''))
    if target == current:
        # Already selected; nothing to persist or reload.
        return False

    shared.opts.set('sd_model_checkpoint', ckpt_name)
    if save:
        shared.opts.save(shared.config_filename)
    if refresh:
        refresh_model_loading_parameters()
    return True
def modules_change(module_values: list, save=True, refresh=True) -> bool:
    """Update the selected additional modules (VAE / text encoders).

    Values may arrive as file paths or bare module names; unknown names are
    silently dropped. Returns True if the stored selection changed.
    """
    selected = [
        module_list[name]
        for name in (os.path.basename(v) for v in module_values)  # path -> filename
        if name in module_list
    ]

    # Skip persisting / reloading when the selection is unchanged (order-insensitive).
    if sorted(selected) == sorted(shared.opts.data.get('forge_additional_modules', [])):
        return False

    shared.opts.set('forge_additional_modules', selected)
    if save:
        shared.opts.save(shared.config_filename)
    if refresh:
        refresh_model_loading_parameters()
    return True
def get_a1111_ui_component(tab, label):
    """Look up a gradio component registered for infotext pasting.

    Matches *label* against either the field's UI label or its API name;
    returns None when no field matches.
    """
    for field in infotext_utils.paste_fields[tab]['fields']:
        if label in (field.label, field.api):
            return field.component
def forge_main_entry():
    """Wire the Forge preset radio to every preset-dependent UI component.

    Collects the txt2img/img2img generation controls from the a1111 paste
    fields, binds the preset change/load events, and seeds the model loading
    parameters once at startup.
    """
    comp = get_a1111_ui_component
    shared_labels = ('Size-1', 'Size-2', 'CFG scale', 'Distilled CFG Scale', 'sampler_name', 'scheduler')

    (ui_txt2img_width, ui_txt2img_height, ui_txt2img_cfg,
     ui_txt2img_distilled_cfg, ui_txt2img_sampler, ui_txt2img_scheduler) = (comp('txt2img', x) for x in shared_labels)

    (ui_img2img_width, ui_img2img_height, ui_img2img_cfg,
     ui_img2img_distilled_cfg, ui_img2img_sampler, ui_img2img_scheduler) = (comp('img2img', x) for x in shared_labels)

    ui_txt2img_hr_cfg = comp('txt2img', 'Hires CFG Scale')
    ui_txt2img_hr_distilled_cfg = comp('txt2img', 'Hires Distilled CFG Scale')

    # Order must match the list of gr.update() objects returned by on_preset_change.
    output_targets = [
        ui_vae,
        ui_clip_skip,
        ui_forge_unet_storage_dtype_options,
        ui_forge_async_loading,
        ui_forge_pin_shared_memory,
        ui_forge_inference_memory,
        ui_txt2img_width,
        ui_img2img_width,
        ui_txt2img_height,
        ui_img2img_height,
        ui_txt2img_cfg,
        ui_img2img_cfg,
        ui_txt2img_distilled_cfg,
        ui_img2img_distilled_cfg,
        ui_txt2img_sampler,
        ui_img2img_sampler,
        ui_txt2img_scheduler,
        ui_img2img_scheduler,
        ui_txt2img_hr_cfg,
        ui_txt2img_hr_distilled_cfg,
    ]

    ui_forge_preset.change(on_preset_change, inputs=[ui_forge_preset], outputs=output_targets, queue=False, show_progress=False)
    ui_forge_preset.change(js="clickLoraRefresh", fn=None, queue=False, show_progress=False)
    Context.root_block.load(on_preset_change, inputs=None, outputs=output_targets, queue=False, show_progress=False)

    refresh_model_loading_parameters()
    return
def on_preset_change(preset=None):
    """Apply a Forge UI preset ('sd', 'xl', 'flux', or fallback 'all').

    When invoked from the preset radio's change event, *preset* carries the
    new value and is persisted to opts; on page load it is None and the
    stored shared.opts.forge_preset is used instead.

    Returns a list of 20 gr.update() objects; the order must match the
    output_targets list assembled in forge_main_entry.
    """
    if preset is not None:
        shared.opts.set('forge_preset', preset)
        shared.opts.save(shared.config_filename)

    if shared.opts.forge_preset == 'sd':
        # SD 1.x defaults: clip-skip visible, all memory-swap controls hidden.
        return [
            gr.update(visible=True),  # ui_vae
            gr.update(visible=True, value=1),  # ui_clip_skip
            gr.update(visible=False, value='Automatic'),  # ui_forge_unet_storage_dtype_options
            gr.update(visible=False, value='Queue'),  # ui_forge_async_loading
            gr.update(visible=False, value='CPU'),  # ui_forge_pin_shared_memory
            gr.update(visible=False, value=total_vram - 1024),  # ui_forge_inference_memory
            gr.update(value=getattr(shared.opts, "sd_t2i_width", 512)),  # ui_txt2img_width
            gr.update(value=getattr(shared.opts, "sd_i2i_width", 512)),  # ui_img2img_width
            gr.update(value=getattr(shared.opts, "sd_t2i_height", 640)),  # ui_txt2img_height
            gr.update(value=getattr(shared.opts, "sd_i2i_height", 512)),  # ui_img2img_height
            gr.update(value=getattr(shared.opts, "sd_t2i_cfg", 7)),  # ui_txt2img_cfg
            gr.update(value=getattr(shared.opts, "sd_i2i_cfg", 7)),  # ui_img2img_cfg
            gr.update(visible=False, value=3.5),  # ui_txt2img_distilled_cfg
            gr.update(visible=False, value=3.5),  # ui_img2img_distilled_cfg
            gr.update(value=getattr(shared.opts, "sd_t2i_sampler", 'Euler a')),  # ui_txt2img_sampler
            gr.update(value=getattr(shared.opts, "sd_i2i_sampler", 'Euler a')),  # ui_img2img_sampler
            gr.update(value=getattr(shared.opts, "sd_t2i_scheduler", 'Automatic')),  # ui_txt2img_scheduler
            gr.update(value=getattr(shared.opts, "sd_i2i_scheduler", 'Automatic')),  # ui_img2img_scheduler
            gr.update(visible=True, value=getattr(shared.opts, "sd_t2i_hr_cfg", 7.0)),  # ui_txt2img_hr_cfg
            gr.update(visible=False, value=3.5),  # ui_txt2img_hr_distilled_cfg
        ]

    if shared.opts.forge_preset == 'xl':
        # Clamp the saved GPU-weights value to the VRAM actually installed.
        model_mem = getattr(shared.opts, "xl_GPU_MB", total_vram - 1024)
        if model_mem < 0 or model_mem > total_vram:
            model_mem = total_vram - 1024
        return [
            gr.update(visible=True),  # ui_vae
            gr.update(visible=False, value=1),  # ui_clip_skip
            gr.update(visible=True, value='Automatic'),  # ui_forge_unet_storage_dtype_options
            gr.update(visible=False, value='Queue'),  # ui_forge_async_loading
            gr.update(visible=False, value='CPU'),  # ui_forge_pin_shared_memory
            gr.update(visible=True, value=model_mem),  # ui_forge_inference_memory
            gr.update(value=getattr(shared.opts, "xl_t2i_width", 896)),  # ui_txt2img_width
            gr.update(value=getattr(shared.opts, "xl_i2i_width", 1024)),  # ui_img2img_width
            gr.update(value=getattr(shared.opts, "xl_t2i_height", 1152)),  # ui_txt2img_height
            gr.update(value=getattr(shared.opts, "xl_i2i_height", 1024)),  # ui_img2img_height
            gr.update(value=getattr(shared.opts, "xl_t2i_cfg", 5)),  # ui_txt2img_cfg
            gr.update(value=getattr(shared.opts, "xl_i2i_cfg", 5)),  # ui_img2img_cfg
            gr.update(visible=False, value=3.5),  # ui_txt2img_distilled_cfg
            gr.update(visible=False, value=3.5),  # ui_img2img_distilled_cfg
            gr.update(value=getattr(shared.opts, "xl_t2i_sampler", 'Euler a')),  # ui_txt2img_sampler
            gr.update(value=getattr(shared.opts, "xl_i2i_sampler", 'Euler a')),  # ui_img2img_sampler
            gr.update(value=getattr(shared.opts, "xl_t2i_scheduler", 'Automatic')),  # ui_txt2img_scheduler
            gr.update(value=getattr(shared.opts, "xl_i2i_scheduler", 'Automatic')),  # ui_img2img_scheduler
            gr.update(visible=True, value=getattr(shared.opts, "xl_t2i_hr_cfg", 5.0)),  # ui_txt2img_hr_cfg
            gr.update(visible=False, value=3.5),  # ui_txt2img_hr_distilled_cfg
        ]

    if shared.opts.forge_preset == 'flux':
        # Flux exposes distilled CFG and the full set of swap controls.
        model_mem = getattr(shared.opts, "flux_GPU_MB", total_vram - 1024)
        if model_mem < 0 or model_mem > total_vram:
            model_mem = total_vram - 1024
        return [
            gr.update(visible=True),  # ui_vae
            gr.update(visible=False, value=1),  # ui_clip_skip
            gr.update(visible=True, value='Automatic'),  # ui_forge_unet_storage_dtype_options
            gr.update(visible=True, value='Queue'),  # ui_forge_async_loading
            gr.update(visible=True, value='CPU'),  # ui_forge_pin_shared_memory
            gr.update(visible=True, value=model_mem),  # ui_forge_inference_memory
            gr.update(value=getattr(shared.opts, "flux_t2i_width", 896)),  # ui_txt2img_width
            gr.update(value=getattr(shared.opts, "flux_i2i_width", 1024)),  # ui_img2img_width
            gr.update(value=getattr(shared.opts, "flux_t2i_height", 1152)),  # ui_txt2img_height
            gr.update(value=getattr(shared.opts, "flux_i2i_height", 1024)),  # ui_img2img_height
            gr.update(value=getattr(shared.opts, "flux_t2i_cfg", 1)),  # ui_txt2img_cfg
            gr.update(value=getattr(shared.opts, "flux_i2i_cfg", 1)),  # ui_img2img_cfg
            gr.update(visible=True, value=getattr(shared.opts, "flux_t2i_d_cfg", 3.5)),  # ui_txt2img_distilled_cfg
            gr.update(visible=True, value=getattr(shared.opts, "flux_i2i_d_cfg", 3.5)),  # ui_img2img_distilled_cfg
            gr.update(value=getattr(shared.opts, "flux_t2i_sampler", 'Euler')),  # ui_txt2img_sampler
            gr.update(value=getattr(shared.opts, "flux_i2i_sampler", 'Euler')),  # ui_img2img_sampler
            gr.update(value=getattr(shared.opts, "flux_t2i_scheduler", 'Simple')),  # ui_txt2img_scheduler
            gr.update(value=getattr(shared.opts, "flux_i2i_scheduler", 'Simple')),  # ui_img2img_scheduler
            gr.update(visible=True, value=getattr(shared.opts, "flux_t2i_hr_cfg", 1.0)),  # ui_txt2img_hr_cfg
            gr.update(visible=True, value=getattr(shared.opts, "flux_t2i_hr_d_cfg", 3.5)),  # ui_txt2img_hr_distilled_cfg
        ]

    # Fallback ('all'): restore whatever the user last saved in the ui-config file.
    # NOTE(review): direct key access will KeyError if the ui-config file lacks
    # any of these entries — presumably they always exist after first launch; verify.
    loadsave = ui_loadsave.UiLoadsave(cmd_opts.ui_config_file)
    ui_settings_from_file = loadsave.ui_settings.copy()
    return [
        gr.update(visible=True),  # ui_vae
        gr.update(visible=True, value=1),  # ui_clip_skip
        gr.update(visible=True, value='Automatic'),  # ui_forge_unet_storage_dtype_options
        gr.update(visible=True, value='Queue'),  # ui_forge_async_loading
        gr.update(visible=True, value='CPU'),  # ui_forge_pin_shared_memory
        gr.update(visible=True, value=total_vram - 1024),  # ui_forge_inference_memory
        gr.update(value=ui_settings_from_file['txt2img/Width/value']),  # ui_txt2img_width
        gr.update(value=ui_settings_from_file['img2img/Width/value']),  # ui_img2img_width
        gr.update(value=ui_settings_from_file['txt2img/Height/value']),  # ui_txt2img_height
        gr.update(value=ui_settings_from_file['img2img/Height/value']),  # ui_img2img_height
        gr.update(value=ui_settings_from_file['txt2img/CFG Scale/value']),  # ui_txt2img_cfg
        gr.update(value=ui_settings_from_file['img2img/CFG Scale/value']),  # ui_img2img_cfg
        gr.update(visible=True, value=ui_settings_from_file['txt2img/Distilled CFG Scale/value']),  # ui_txt2img_distilled_cfg
        gr.update(visible=True, value=ui_settings_from_file['img2img/Distilled CFG Scale/value']),  # ui_img2img_distilled_cfg
        gr.update(value=ui_settings_from_file['customscript/sampler.py/txt2img/Sampling method/value']),  # ui_txt2img_sampler
        gr.update(value=ui_settings_from_file['customscript/sampler.py/img2img/Sampling method/value']),  # ui_img2img_sampler
        gr.update(value=ui_settings_from_file['customscript/sampler.py/txt2img/Schedule type/value']),  # ui_txt2img_scheduler
        gr.update(value=ui_settings_from_file['customscript/sampler.py/img2img/Schedule type/value']),  # ui_img2img_scheduler
        gr.update(visible=True, value=ui_settings_from_file['txt2img/Hires CFG Scale/value']),  # ui_txt2img_hr_cfg
        gr.update(visible=True, value=ui_settings_from_file['txt2img/Hires Distilled CFG Scale/value']),  # ui_txt2img_hr_distilled_cfg
    ]
# Per-preset UI defaults, editable from the Settings page. These feed the
# getattr(shared.opts, ...) lookups in on_preset_change.
shared.options_templates.update(shared.options_section(('ui_sd', "UI defaults 'sd'", "ui"), {
    "sd_t2i_width": shared.OptionInfo(512, "txt2img width", gr.Slider, {"minimum": 64, "maximum": 2048, "step": 8}),
    "sd_t2i_height": shared.OptionInfo(640, "txt2img height", gr.Slider, {"minimum": 64, "maximum": 2048, "step": 8}),
    "sd_t2i_cfg": shared.OptionInfo(7, "txt2img CFG", gr.Slider, {"minimum": 1, "maximum": 30, "step": 0.1}),
    "sd_t2i_hr_cfg": shared.OptionInfo(7, "txt2img HiRes CFG", gr.Slider, {"minimum": 1, "maximum": 30, "step": 0.1}),
    "sd_i2i_width": shared.OptionInfo(512, "img2img width", gr.Slider, {"minimum": 64, "maximum": 2048, "step": 8}),
    "sd_i2i_height": shared.OptionInfo(512, "img2img height", gr.Slider, {"minimum": 64, "maximum": 2048, "step": 8}),
    "sd_i2i_cfg": shared.OptionInfo(7, "img2img CFG", gr.Slider, {"minimum": 1, "maximum": 30, "step": 0.1}),
}))

# SDXL preset defaults; also stores the preferred GPU weights budget.
shared.options_templates.update(shared.options_section(('ui_xl', "UI defaults 'xl'", "ui"), {
    "xl_t2i_width": shared.OptionInfo(896, "txt2img width", gr.Slider, {"minimum": 64, "maximum": 2048, "step": 8}),
    "xl_t2i_height": shared.OptionInfo(1152, "txt2img height", gr.Slider, {"minimum": 64, "maximum": 2048, "step": 8}),
    "xl_t2i_cfg": shared.OptionInfo(5, "txt2img CFG", gr.Slider, {"minimum": 1, "maximum": 30, "step": 0.1}),
    "xl_t2i_hr_cfg": shared.OptionInfo(5, "txt2img HiRes CFG", gr.Slider, {"minimum": 1, "maximum": 30, "step": 0.1}),
    "xl_i2i_width": shared.OptionInfo(1024, "img2img width", gr.Slider, {"minimum": 64, "maximum": 2048, "step": 8}),
    "xl_i2i_height": shared.OptionInfo(1024, "img2img height", gr.Slider, {"minimum": 64, "maximum": 2048, "step": 8}),
    "xl_i2i_cfg": shared.OptionInfo(5, "img2img CFG", gr.Slider, {"minimum": 1, "maximum": 30, "step": 0.1}),
    "xl_GPU_MB": shared.OptionInfo(total_vram - 1024, "GPU Weights (MB)", gr.Slider, {"minimum": 0, "maximum": total_vram, "step": 1}),
}))

# Flux preset defaults; additionally exposes distilled-CFG defaults.
shared.options_templates.update(shared.options_section(('ui_flux', "UI defaults 'flux'", "ui"), {
    "flux_t2i_width": shared.OptionInfo(896, "txt2img width", gr.Slider, {"minimum": 64, "maximum": 2048, "step": 8}),
    "flux_t2i_height": shared.OptionInfo(1152, "txt2img height", gr.Slider, {"minimum": 64, "maximum": 2048, "step": 8}),
    "flux_t2i_cfg": shared.OptionInfo(1, "txt2img CFG", gr.Slider, {"minimum": 1, "maximum": 30, "step": 0.1}),
    "flux_t2i_hr_cfg": shared.OptionInfo(1, "txt2img HiRes CFG", gr.Slider, {"minimum": 1, "maximum": 30, "step": 0.1}),
    "flux_t2i_d_cfg": shared.OptionInfo(3.5, "txt2img Distilled CFG", gr.Slider, {"minimum": 0, "maximum": 30, "step": 0.1}),
    "flux_t2i_hr_d_cfg": shared.OptionInfo(3.5, "txt2img Distilled HiRes CFG", gr.Slider, {"minimum": 0, "maximum": 30, "step": 0.1}),
    "flux_i2i_width": shared.OptionInfo(1024, "img2img width", gr.Slider, {"minimum": 64, "maximum": 2048, "step": 8}),
    "flux_i2i_height": shared.OptionInfo(1024, "img2img height", gr.Slider, {"minimum": 64, "maximum": 2048, "step": 8}),
    "flux_i2i_cfg": shared.OptionInfo(1, "img2img CFG", gr.Slider, {"minimum": 1, "maximum": 30, "step": 0.1}),
    "flux_i2i_d_cfg": shared.OptionInfo(3.5, "img2img Distilled CFG", gr.Slider, {"minimum": 0, "maximum": 30, "step": 0.1}),
    "flux_GPU_MB": shared.OptionInfo(total_vram - 1024, "GPU Weights (MB)",gr.Slider, {"minimum": 0, "maximum": total_vram, "step": 1}),
}))

77
modules_forge/main_thread.py Executable file
View File

@@ -0,0 +1,77 @@
# This file is the main thread that handles all gradio calls for major t2i or i2i processing.
# Other gradio calls (like those from extensions) are not influenced.
# By using one single thread to process all major calls, model moving is significantly faster.
import time
import traceback
import threading
# Lock guarding the two task queues below.
lock = threading.Lock()
# Monotonically increasing id handed out to each submitted Task.
last_id = 0
# Tasks submitted but not yet executed (FIFO order).
waiting_list = []
# Tasks whose work() has completed; drained by run_and_wait_result().
finished_list = []
# Exception raised by the most recently executed task, or None if it succeeded.
last_exception = None
class Task:
    """One unit of work queued for execution on the main processing thread."""

    def __init__(self, task_id, func, args, kwargs):
        self.task_id = task_id
        self.func = func
        self.args = args
        self.kwargs = kwargs
        self.result = None
        self.exception = None

    def work(self):
        """Run the wrapped callable, recording its result or its exception.

        Also mirrors the outcome into the module-level ``last_exception`` so
        callers polling for completion can inspect the latest failure.
        """
        global last_exception
        try:
            outcome = self.func(*self.args, **self.kwargs)
        except Exception as error:
            traceback.print_exc()
            print(error)
            self.exception = error
            last_exception = error
        else:
            self.result = outcome
            self.exception = None
            last_exception = None
def loop():
    """Main-thread dispatcher: poll the waiting queue forever and run tasks.

    Queue mutations happen under the lock; the (potentially long) task work
    itself deliberately runs outside it.
    """
    global lock, last_id, waiting_list, finished_list
    while True:
        time.sleep(0.01)
        if not waiting_list:
            continue
        with lock:
            task = waiting_list.pop(0)
        task.work()
        with lock:
            finished_list.append(task)
def async_run(func, *args, **kwargs):
    """Queue *func* for execution on the main thread; returns the new task id."""
    global lock, last_id, waiting_list, finished_list
    with lock:
        last_id += 1
        task = Task(task_id=last_id, func=func, args=args, kwargs=kwargs)
        waiting_list.append(task)
    return task.task_id
def run_and_wait_result(func, *args, **kwargs):
    """Submit *func* via async_run and block (polling) until it completes.

    Returns the task's result; if the task raised, the result is None and the
    exception is available through the module-level ``last_exception``.
    """
    global lock, last_id, waiting_list, finished_list
    target_id = async_run(func, *args, **kwargs)
    while True:
        time.sleep(0.01)
        # .copy() gives a thread-safe snapshot without holding the lock.
        done = next((t for t in finished_list.copy() if t.task_id == target_id), None)
        if done is None:
            continue
        with lock:
            finished_list.remove(done)
        return done.result

93
modules_forge/patch_basic.py Executable file
View File

@@ -0,0 +1,93 @@
import torch
import os
import time
import httpx
import warnings
import gradio.networking
import safetensors.torch
from pathlib import Path
from tqdm import tqdm
def gradio_url_ok_fix(url: str) -> bool:
    """Replacement for gradio's startup URL check.

    Probes *url* with an HTTP HEAD up to five times (0.5 s apart) and returns
    True on 200/401/302; connection failures or exhausted retries give False.
    TLS verification is disabled since the target is the local server.
    """
    acceptable_status = (200, 401, 302)
    try:
        for _ in range(5):
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                response = httpx.head(url, timeout=999, verify=False)
            if response.status_code in acceptable_status:
                return True
            time.sleep(0.500)
    except (ConnectionError, httpx.ConnectError):
        return False
    return False
def build_loaded(module, loader_name):
    """Wrap ``module.<loader_name>`` so a failed load quarantines the bad file.

    The untouched callable is kept as ``<loader_name>_origin`` (installed only
    once, so repeated calls do not stack wrappers). When the wrapped loader
    raises, every string argument that is an existing path is renamed to
    ``<path>.corrupted`` and a ValueError describing the recovery is raised.
    """
    backup_attr = loader_name + '_origin'
    if not hasattr(module, backup_attr):
        setattr(module, backup_attr, getattr(module, loader_name))
    original_loader = getattr(module, backup_attr)

    def loader(*args, **kwargs):
        try:
            return original_loader(*args, **kwargs)
        except Exception as e:
            exp = str(e) + '\n'
            for path in list(args) + list(kwargs.values()):
                if not isinstance(path, str):
                    continue
                if not os.path.exists(path):
                    continue
                exp += f'File corrupted: {path} \n'
                corrupted_backup_file = path + '.corrupted'
                # Drop any stale backup so the rename below cannot fail.
                if os.path.exists(corrupted_backup_file):
                    os.remove(corrupted_backup_file)
                os.replace(path, corrupted_backup_file)
                if os.path.exists(path):
                    os.remove(path)
                exp += f'Forge has tried to move the corrupted file to {corrupted_backup_file} \n'
                exp += f'You may try again now and Forge will download models again. \n'
            raise ValueError(exp)

    setattr(module, loader_name, loader)
    return
def always_show_tqdm(*args, **kwargs):
    """Forward to tqdm with progress display forcibly enabled.

    huggingface_hub passes an extra ``name`` kwarg its own tqdm subclass
    accepts; plain tqdm does not, so it is stripped here.
    """
    kwargs.pop('name', None)
    kwargs['disable'] = False
    return tqdm(*args, **kwargs)
def long_path_prefix(path: Path) -> Path:
    """Return *path* with the Windows extended-length prefix applied when needed.

    Only on Windows, and only for paths that are not already prefixed and do
    not exist yet (i.e. download targets that may exceed MAX_PATH); in every
    other case the path is returned unchanged.
    """
    needs_prefix = (
        os.name == 'nt'
        and not str(path).startswith("\\\\?\\")
        and not path.exists()
    )
    return Path("\\\\?\\" + str(path)) if needs_prefix else path
def patch_all_basics():
    """Apply Forge's global monkey-patches to third-party libraries.

    - huggingface_hub: always show download progress bars; apply the Windows
      long-path prefix around downloads.
    - transformers: silence dynamic-module logging noise.
    - gradio: replace the flaky startup URL check.
    - safetensors / torch: quarantine corrupted files when loading fails.
    """
    import logging
    from huggingface_hub import file_download
    # Force-enable tqdm bars (HF disables them when stdout is not a TTY).
    file_download.tqdm = always_show_tqdm
    from transformers.dynamic_module_utils import logger
    logger.setLevel(logging.ERROR)
    # NOTE(review): _download_to_tmp_and_move is a private huggingface_hub API —
    # this wrapper's signature must track the installed version.
    from huggingface_hub.file_download import _download_to_tmp_and_move as original_download_to_tmp_and_move

    def patched_download_to_tmp_and_move(incomplete_path, destination_path, url_to_download, proxies, headers, expected_size, filename, force_download):
        # Prefix both paths so downloads into deep model folders work on Windows (MAX_PATH).
        incomplete_path = long_path_prefix(incomplete_path)
        destination_path = long_path_prefix(destination_path)
        return original_download_to_tmp_and_move(incomplete_path, destination_path, url_to_download, proxies, headers, expected_size, filename, force_download)

    file_download._download_to_tmp_and_move = patched_download_to_tmp_and_move
    gradio.networking.url_ok = gradio_url_ok_fix
    # Corrupted-file quarantine for checkpoint loaders.
    build_loaded(safetensors.torch, 'load_file')
    build_loaded(torch, 'load')
    return

66
modules_forge/shared.py Executable file
View File

@@ -0,0 +1,66 @@
import os
import argparse
from backend import utils
from modules.paths_internal import models_path
from pathlib import Path
# Forge-specific command line flags, parsed with a private parser so this
# module can resolve its directories independently of the main webui parser.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--controlnet-dir",
    type=Path,
    help="Path to directory with ControlNet models",
    default=None,
)
parser.add_argument(
    "--controlnet-preprocessor-models-dir",
    type=Path,
    help="Path to directory with annotator model directories",
    default=None,
)
# parse_known_args ignores all other flags that belong to the main webui.
cmd_opts = parser.parse_known_args()[0]

# Resolve (and create) the ControlNet model directory.
if cmd_opts.controlnet_dir:
    controlnet_dir = str(cmd_opts.controlnet_dir)
else:
    controlnet_dir = os.path.join(models_path, 'ControlNet')
os.makedirs(controlnet_dir, exist_ok=True)

# Resolve (and create) the preprocessor/annotator model directory.
if cmd_opts.controlnet_preprocessor_models_dir:
    preprocessor_dir = str(cmd_opts.controlnet_preprocessor_models_dir)
else:
    preprocessor_dir = os.path.join(models_path, 'ControlNetPreprocessor')
os.makedirs(preprocessor_dir, exist_ok=True)

diffusers_dir = os.path.join(models_path, 'diffusers')
os.makedirs(diffusers_dir, exist_ok=True)

# Registries populated at startup via the add_supported_* helpers below.
supported_preprocessors = {}
supported_control_models = []
def add_supported_preprocessor(preprocessor):
    """Register *preprocessor* in the global registry, keyed by its name."""
    global supported_preprocessors
    supported_preprocessors[preprocessor.name] = preprocessor
    return
def add_supported_control_model(control_model):
    """Append a control-model patcher type to the global candidate list."""
    global supported_control_models
    supported_control_models.append(control_model)
    return
def try_load_supported_control_model(ckpt_path):
    """Load *ckpt_path* and let each registered control-model type try to claim it.

    Every candidate receives its own shallow copy of the state dict, since
    builders may consume (pop) keys while probing. Returns the first model
    successfully built, or None when no type recognizes the checkpoint.
    """
    global supported_control_models
    state_dict = utils.load_torch_file(ckpt_path, safe_load=True)
    for candidate_type in supported_control_models:
        model = candidate_type.try_build_from_state_dict(dict(state_dict), ckpt_path)
        if model is not None:
            return model
    return None

13
modules_forge/shared_options.py Executable file
View File

@@ -0,0 +1,13 @@
def register(options_templates, options_section, OptionInfo):
    """Install Forge's options into the host webui's settings templates.

    The first group is hidden (section key None); the second appears under
    the "UI alternatives" settings section.
    """
    hidden_options = {
        "forge_unet_storage_dtype": OptionInfo('Automatic'),
        "forge_inference_memory": OptionInfo(1024),
        "forge_async_loading": OptionInfo('Queue'),
        "forge_pin_shared_memory": OptionInfo('CPU'),
        "forge_preset": OptionInfo('sd'),
        "forge_additional_modules": OptionInfo([]),
    }
    options_templates.update(options_section((None, "Forge Hidden options"), hidden_options))

    canvas_options = {
        "forge_canvas_plain": OptionInfo(False, "ForgeCanvas: use plain background").needs_reload_ui(),
        "forge_canvas_toolbar_always": OptionInfo(False, "ForgeCanvas: toolbar always visible").needs_reload_ui(),
    }
    options_templates.update(options_section(('ui_alternatives', "UI alternatives", "ui"), canvas_options))

View File

@@ -0,0 +1,169 @@
import os
import torch
from huggingface_guess.detection import unet_config_from_diffusers_unet, model_config_from_unet
from huggingface_guess.utils import unet_to_diffusers
from backend import memory_management
from backend.operations import using_forge_operations
from backend.nn.cnets import cldm
from backend.patcher.controlnet import ControlLora, ControlNet, load_t2i_adapter, apply_controlnet_advanced
from modules_forge.shared import add_supported_control_model
class ControlModelPatcher:
    """Base class for control models (ControlNet, T2I adapters, ...).

    Subclasses implement try_build_from_state_dict to claim checkpoints they
    recognize, and override the process_* hooks to act on the pipeline.
    """

    @staticmethod
    def try_build_from_state_dict(state_dict, ckpt_path):
        """Return a patcher when the state dict is recognized; base always declines."""
        return None

    def __init__(self, model_patcher=None):
        self.model_patcher = model_patcher
        # Control strength and the sampling-percentage window it applies in.
        self.strength = 1.0
        self.start_percent = 0.0
        self.end_percent = 1.0
        # Optional advanced weighting hooks, populated later by callers.
        for hook_name in ('positive_advanced_weighting', 'negative_advanced_weighting',
                          'advanced_frame_weighting', 'advanced_sigma_weighting',
                          'advanced_mask_weighting'):
            setattr(self, hook_name, None)

    def process_after_running_preprocessors(self, process, params, *args, **kwargs):
        """Hook: called once after preprocessors ran. No-op by default."""
        return

    def process_before_every_sampling(self, process, cond, mask, *args, **kwargs):
        """Hook: called before each sampling pass. No-op by default."""
        return

    def process_after_every_sampling(self, process, params, *args, **kwargs):
        """Hook: called after each sampling pass. No-op by default."""
        return
class ControlNetPatcher(ControlModelPatcher):
    """Loads standard ControlNet checkpoints and applies them to the UNet."""

    @staticmethod
    def try_build_from_state_dict(controlnet_data, ckpt_path):
        """Build a ControlNetPatcher from a raw state dict, or return None.

        Handles, in order: ControlLora checkpoints, diffusers-format
        ControlNets (keys are remapped to the ldm layout first), pth/plain
        ldm-format ControlNets, and finally T2I adapters as a fallback.
        """
        if "lora_controlnet" in controlnet_data:
            return ControlNetPatcher(ControlLora(controlnet_data))

        controlnet_config = None
        if "controlnet_cond_embedding.conv_in.weight" in controlnet_data:  # diffusers format
            unet_dtype = memory_management.unet_dtype()
            controlnet_config = unet_config_from_diffusers_unet(controlnet_data, unet_dtype)
            diffusers_keys = unet_to_diffusers(controlnet_config)
            diffusers_keys["controlnet_mid_block.weight"] = "middle_block_out.0.weight"
            diffusers_keys["controlnet_mid_block.bias"] = "middle_block_out.0.bias"

            # Map controlnet_down_blocks.N -> zero_convs.N.0 until keys run out.
            count = 0
            loop = True
            while loop:
                suffix = [".weight", ".bias"]
                for s in suffix:
                    k_in = "controlnet_down_blocks.{}{}".format(count, s)
                    k_out = "zero_convs.{}.0{}".format(count, s)
                    if k_in not in controlnet_data:
                        loop = False
                        break
                    diffusers_keys[k_in] = k_out
                count += 1

            # Map the cond-embedding conv stack (conv_in / blocks.N / conv_out)
            # onto input_hint_block.{2N}; conv_out terminates the loop.
            count = 0
            loop = True
            while loop:
                suffix = [".weight", ".bias"]
                for s in suffix:
                    if count == 0:
                        k_in = "controlnet_cond_embedding.conv_in{}".format(s)
                    else:
                        k_in = "controlnet_cond_embedding.blocks.{}{}".format(count - 1, s)
                    k_out = "input_hint_block.{}{}".format(count * 2, s)
                    if k_in not in controlnet_data:
                        k_in = "controlnet_cond_embedding.conv_out{}".format(s)
                        loop = False
                    diffusers_keys[k_in] = k_out
                count += 1

            # Rebuild the state dict under ldm key names; warn on unmapped keys.
            new_sd = {}
            for k in diffusers_keys:
                if k in controlnet_data:
                    new_sd[diffusers_keys[k]] = controlnet_data.pop(k)

            leftover_keys = controlnet_data.keys()
            if len(leftover_keys) > 0:
                print("leftover keys:", leftover_keys)
            controlnet_data = new_sd

        # Detect layout: 'control_model.'-prefixed (pth) vs plain ldm keys.
        pth_key = 'control_model.zero_convs.0.0.weight'
        pth = False
        key = 'zero_convs.0.0.weight'
        if pth_key in controlnet_data:
            pth = True
            key = pth_key
            prefix = "control_model."
        elif key in controlnet_data:
            prefix = ""
        else:
            # Not a ControlNet at all — try loading it as a T2I adapter.
            net = load_t2i_adapter(controlnet_data)
            if net is None:
                return None
            return ControlNetPatcher(net)

        if controlnet_config is None:
            unet_dtype = memory_management.unet_dtype()
            controlnet_config = model_config_from_unet(controlnet_data, prefix, True).unet_config
            controlnet_config['dtype'] = unet_dtype

        load_device = memory_management.get_torch_device()
        computation_dtype = memory_management.get_computation_dtype(load_device)

        # ControlNet has no output head; hint channels come from the checkpoint.
        controlnet_config.pop("out_channels")
        controlnet_config["hint_channels"] = controlnet_data["{}input_hint_block.0.weight".format(prefix)].shape[1]

        with using_forge_operations(dtype=unet_dtype, manual_cast_enabled=computation_dtype != unet_dtype):
            control_model = cldm.ControlNet(**controlnet_config).to(dtype=unet_dtype)

        if pth:
            if 'difference' in controlnet_data:
                print("WARNING: Your controlnet model is diff version rather than official float16 model. "
                      "Please use an official float16/float32 model for robust performance.")

            # Shim module so the 'control_model.' prefix in the checkpoint
            # lines up with a module attribute during load_state_dict.
            class WeightsLoader(torch.nn.Module):
                pass

            w = WeightsLoader()
            w.control_model = control_model
            missing, unexpected = w.load_state_dict(controlnet_data, strict=False)
        else:
            missing, unexpected = control_model.load_state_dict(controlnet_data, strict=False)
        print(missing, unexpected)

        global_average_pooling = False
        filename = os.path.splitext(ckpt_path)[0]
        if filename.endswith("_shuffle") or filename.endswith("_shuffle_fp16"):
            # TODO: smarter way of enabling global_average_pooling
            global_average_pooling = True

        control = ControlNet(control_model, global_average_pooling=global_average_pooling, load_device=load_device, manual_cast_dtype=computation_dtype)
        return ControlNetPatcher(control)

    def __init__(self, model_patcher):
        super().__init__(model_patcher)

    def process_before_every_sampling(self, process, cond, mask, *args, **kwargs):
        """Attach this ControlNet (with current strength/window/weights) to the UNet."""
        unet = process.sd_model.forge_objects.unet

        unet = apply_controlnet_advanced(
            unet=unet,
            controlnet=self.model_patcher,
            image_bchw=cond,
            strength=self.strength,
            start_percent=self.start_percent,
            end_percent=self.end_percent,
            positive_advanced_weighting=self.positive_advanced_weighting,
            negative_advanced_weighting=self.negative_advanced_weighting,
            advanced_frame_weighting=self.advanced_frame_weighting,
            advanced_sigma_weighting=self.advanced_sigma_weighting,
            advanced_mask_weighting=self.advanced_mask_weighting
        )

        process.sd_model.forge_objects.unet = unet
        return
# Register the standard ControlNet loader with Forge's control-model registry.
add_supported_control_model(ControlNetPatcher)

View File

@@ -0,0 +1,138 @@
import cv2
import torch
from modules_forge.shared import add_supported_preprocessor, preprocessor_dir
from backend import memory_management
from backend.patcher.base import ModelPatcher
from backend.patcher import clipvision
from modules_forge.utils import resize_image_with_pad
from modules.modelloader import load_file_from_url
from modules_forge.utils import numpy_to_pytorch
class PreprocessorParameter:
    """Bundle of gr.update() kwargs describing one preprocessor UI slider.

    Extra keyword arguments are forwarded verbatim into the update dict.
    """

    def __init__(self, minimum=0.0, maximum=1.0, step=0.01, label='Parameter 1', value=0.5, visible=False, **kwargs):
        params = dict(minimum=minimum, maximum=maximum, step=step, label=label, value=value, visible=visible)
        params.update(kwargs)
        self.gradio_update_kwargs = params
class Preprocessor:
    """Base class for ControlNet preprocessors (annotators).

    Subclasses override __call__ to transform the input image, describe their
    UI via the slider parameters, and may use setup_model_patcher to get
    automatic device placement / offloading for a backing model.
    """

    def __init__(self):
        self.name = 'PreprocessorBase'
        self.tags = []                       # UI filter tags (e.g. ['Canny'])
        self.model_filename_filters = []     # substrings used to match control model files
        # UI sliders: resolution is always visible; the three generic sliders
        # stay hidden unless a subclass configures them.
        self.slider_resolution = PreprocessorParameter(label='Resolution', minimum=128, maximum=2048, value=512, step=8, visible=True)
        self.slider_1 = PreprocessorParameter()
        self.slider_2 = PreprocessorParameter()
        self.slider_3 = PreprocessorParameter()
        self.model_patcher: ModelPatcher = None
        self.show_control_mode = True
        self.do_not_need_model = False
        self.sorting_priority = 0  # higher goes to top in the list
        # Behavior flags consumed by the ControlNet pipeline.
        self.corp_image_with_a1111_mask_when_in_img2img_inpaint_tab = True
        self.fill_mask_with_one_when_resize_and_fill = False
        self.use_soft_projection_in_hr_fix = False
        self.expand_mask_when_resize_and_fill = False

    def setup_model_patcher(self, model, load_device=None, offload_device=None, dtype=torch.float32, **kwargs):
        """Wrap *model* in a ModelPatcher (eval mode, on the offload device).

        Defaults: GPU for load_device, CPU for offload_device; falls back to
        fp32 when the load device cannot use fp16. Returns the patcher.
        """
        if load_device is None:
            load_device = memory_management.get_torch_device()
        if offload_device is None:
            offload_device = torch.device('cpu')
        if not memory_management.should_use_fp16(load_device):
            dtype = torch.float32
        model.eval()
        model = model.to(device=offload_device, dtype=dtype)
        self.model_patcher = ModelPatcher(model=model, load_device=load_device, offload_device=offload_device, **kwargs)
        self.model_patcher.dtype = dtype
        return self.model_patcher

    def move_all_model_patchers_to_gpu(self):
        """Load this preprocessor's model onto its GPU load device."""
        memory_management.load_models_gpu([self.model_patcher])
        return

    def send_tensor_to_model_device(self, x):
        """Move tensor *x* to the patcher's current device and dtype."""
        return x.to(device=self.model_patcher.current_device, dtype=self.model_patcher.dtype)

    def process_after_running_preprocessors(self, process, params, *args, **kwargs):
        """Hook: called once after all preprocessors ran. No-op by default."""
        return

    def process_before_every_sampling(self, process, cond, mask, *args, **kwargs):
        """Hook: may adjust (cond, mask) before each sampling pass."""
        return cond, mask

    def process_after_every_sampling(self, process, params, *args, **kwargs):
        """Hook: called after each sampling pass. No-op by default."""
        return

    def __call__(self, input_image, resolution, slider_1=None, slider_2=None, slider_3=None, input_mask=None, **kwargs):
        """Run the preprocessor; base implementation passes the image through."""
        return input_image
class PreprocessorNone(Preprocessor):
    """Passthrough entry shown as 'None' in the preprocessor dropdown."""

    def __init__(self):
        super().__init__()
        # Keep it near the top of the sorted list.
        self.sorting_priority = 10
        self.name = 'None'
class PreprocessorCanny(Preprocessor):
    """Canny edge detector preprocessor for ControlNet 'Canny' models."""

    def __init__(self):
        super().__init__()
        self.name = 'canny'
        self.tags = ['Canny']
        self.model_filename_filters = ['canny']
        # slider_1 / slider_2 map to the low/high hysteresis thresholds of cv2.Canny.
        self.slider_1 = PreprocessorParameter(
            minimum=0, maximum=256, step=1, value=100, label='Low Threshold', visible=True)
        self.slider_2 = PreprocessorParameter(
            minimum=0, maximum=256, step=1, value=200, label='High Threshold', visible=True)
        self.sorting_priority = 100
        self.use_soft_projection_in_hr_fix = True

    def __call__(self, input_image, resolution, slider_1=None, slider_2=None, slider_3=None, **kwargs):
        img, remove_pad = resize_image_with_pad(input_image, resolution)
        edges = cv2.Canny(img, int(slider_1), int(slider_2))
        # Canny yields a single-channel map; expand to RGB for the pipeline.
        return remove_pad(cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB))
# Register the built-in preprocessors with the global registry.
add_supported_preprocessor(PreprocessorNone())
add_supported_preprocessor(PreprocessorCanny())
class PreprocessorClipVision(Preprocessor):
    """Preprocessor that encodes the input image with a CLIP vision model."""

    # Shared across all instances so each checkpoint is loaded at most once.
    global_cache = {}

    def __init__(self, name, url, filename):
        super().__init__()
        self.name = name
        self.url = url
        self.filename = filename
        # No resolution slider: the CLIP encoder handles sizing itself.
        self.slider_resolution = PreprocessorParameter(visible=False)
        self.corp_image_with_a1111_mask_when_in_img2img_inpaint_tab = False
        self.show_control_mode = False
        self.sorting_priority = 1
        self.clipvision = None

    def load_clipvision(self):
        """Download (if needed) and load the CLIP vision checkpoint, cached."""
        if self.clipvision is None:
            ckpt_path = load_file_from_url(
                url=self.url,
                model_dir=preprocessor_dir,
                file_name=self.filename
            )
            cache = PreprocessorClipVision.global_cache
            if ckpt_path not in cache:
                cache[ckpt_path] = clipvision.load(ckpt_path)
            self.clipvision = cache[ckpt_path]
        return self.clipvision

    @torch.no_grad()
    def __call__(self, input_image, resolution, slider_1=None, slider_2=None, slider_3=None, **kwargs):
        model = self.load_clipvision()
        return model.encode_image(numpy_to_pytorch(input_image))

159
modules_forge/utils.py Executable file
View File

@@ -0,0 +1,159 @@
import torch
import numpy as np
import os
import time
import random
import string
import cv2
from backend import memory_management
def prepare_free_memory(aggressive=False):
    """Free GPU memory before inference.

    With ``aggressive=True`` every loaded model is unloaded; otherwise only
    enough memory for a minimal inference is reclaimed.
    """
    if aggressive:
        memory_management.unload_all_models()
        print('Cleanup all memory.')
    else:
        memory_management.free_memory(
            memory_required=memory_management.minimum_inference_memory(),
            device=memory_management.get_torch_device(),
        )
        print('Cleanup minimal inference memory.')
def apply_circular_forge(model, tiling_enabled=False):
    """Record the seamless-tiling flag on *model* (feature currently disabled).

    Only ``model.tiling_enabled`` is updated; the implementation that switched
    Conv layers to circular padding is disabled upstream, so a maintenance
    notice is printed instead.
    """
    if model.tiling_enabled == tiling_enabled:
        # Nothing to do; avoid repeating the log messages.
        return
    print(f'Tiling: {tiling_enabled}')
    model.tiling_enabled = tiling_enabled
    print(f'Tiling is currently under maintenance and unavailable. Sorry for the inconvenience.')
    return
def HWC3(x):
    """Return *x* as an H x W x 3 uint8 image.

    Grayscale inputs are replicated across three channels; RGBA inputs are
    alpha-composited onto a white background. A 3-channel input is returned
    unchanged.
    """
    assert x.dtype == np.uint8
    if x.ndim == 2:
        x = x[:, :, None]
    assert x.ndim == 3
    channels = x.shape[2]
    assert channels in (1, 3, 4)
    if channels == 3:
        return x
    if channels == 1:
        return np.concatenate([x, x, x], axis=2)
    # channels == 4: blend RGB over white using the alpha channel.
    rgb = x[:, :, 0:3].astype(np.float32)
    alpha = x[:, :, 3:4].astype(np.float32) / 255.0
    blended = rgb * alpha + 255.0 * (1.0 - alpha)
    return blended.clip(0, 255).astype(np.uint8)
def generate_random_filename(extension=".txt"):
    """Return a unique filename: ``<YYYYmmdd-HHMMSS>-<5 random chars><ext>``."""
    stamp = time.strftime("%Y%m%d-%H%M%S")
    alphabet = string.ascii_lowercase + string.digits
    suffix = ''.join(random.choice(alphabet) for _ in range(5))
    return f"{stamp}-{suffix}{extension}"
@torch.no_grad()
@torch.inference_mode()
def pytorch_to_numpy(x):
    """Convert a batch of float tensors in [0, 1] to uint8 numpy images."""
    images = []
    for tensor in x:
        scaled = 255. * tensor.cpu().numpy()
        images.append(np.clip(scaled, 0, 255).astype(np.uint8))
    return images
@torch.no_grad()
@torch.inference_mode()
def numpy_to_pytorch(x):
    """Convert a uint8 numpy image to a batched float tensor in [0, 1].

    A leading batch axis is added, so an (H, W, C) image becomes (1, H, W, C).
    """
    scaled = x.astype(np.float32) / 255.0
    batched = np.ascontiguousarray(scaled[None].copy())
    return torch.from_numpy(batched).float()
def write_images_to_mp4(frame_list: list, filename=None, fps=6):
    """Encode *frame_list* into an H.264 mp4 under ``<default_output_dir>/svd``.

    Parameters:
        frame_list: list of HWC uint8 numpy frames, all the same size.
            NOTE(review): av.VideoFrame.from_ndarray is called without an
            explicit format, so frames are presumably RGB24 — confirm callers.
        filename: output file name; a timestamped random name when None.
        fps: frame rate passed to the encoder stream.

    Returns:
        The full path of the written video file.
    """
    from modules.paths_internal import default_output_dir
    video_folder = os.path.join(default_output_dir, 'svd')
    os.makedirs(video_folder, exist_ok=True)
    if filename is None:
        filename = generate_random_filename('.mp4')
    full_path = os.path.join(video_folder, filename)
    # PyAV is an optional dependency; install it on demand the first time.
    try:
        import av
    except ImportError:
        from launch import run_pip
        run_pip(
            "install imageio[pyav]",
            "imageio[pyav]",
        )
        import av
    options = {
        "crf": str(23)  # x264 constant-rate-factor: default-ish quality/size trade-off
    }
    output = av.open(full_path, "w")
    stream = output.add_stream('libx264', fps, options=options)
    stream.width = frame_list[0].shape[1]
    stream.height = frame_list[0].shape[0]
    for img in frame_list:
        frame = av.VideoFrame.from_ndarray(img)
        packet = stream.encode(frame)
        output.mux(packet)
    # Flush frames still buffered inside the encoder before closing.
    packet = stream.encode(None)
    output.mux(packet)
    output.close()
    return full_path
def pad64(x):
    """Number of pixels needed to pad *x* up to the next multiple of 64."""
    return int(-x % 64)
def safer_memory(x):
    """Return a fresh C-contiguous copy of *x* (fixes many MAC/AMD problems)."""
    contiguous = np.ascontiguousarray(x.copy())
    return contiguous.copy()
def resize_image_with_pad(img, resolution):
    """Resize *img* so its short side equals *resolution*, then edge-pad to
    multiples of 64.

    Returns ``(padded_image, remove_pad)`` where ``remove_pad`` crops an array
    of the padded size back down to the resized (unpadded) dimensions.
    """
    h_raw, w_raw, _ = img.shape
    scale = float(resolution) / float(min(h_raw, w_raw))
    # INTER_CUBIC when upscaling, INTER_AREA when downscaling.
    interp = cv2.INTER_CUBIC if scale > 1 else cv2.INTER_AREA
    h_target = int(np.round(float(h_raw) * scale))
    w_target = int(np.round(float(w_raw) * scale))
    resized = cv2.resize(img, (w_target, h_target), interpolation=interp)
    padded = np.pad(
        resized,
        [[0, pad64(h_target)], [0, pad64(w_target)], [0, 0]],
        mode='edge',
    )

    def remove_pad(x):
        return safer_memory(x[:h_target, :w_target])

    return safer_memory(padded), remove_pad
def lazy_memory_management(model):
    """Free enough device memory to hold *model* plus a minimal inference
    margin; the model itself is not loaded here."""
    budget = memory_management.module_size(model) + memory_management.minimum_inference_memory()
    memory_management.free_memory(budget, device=memory_management.get_torch_device())