Mirror of https://github.com/lllyasviel/stable-diffusion-webui-forge.git (synced 2026-03-12 00:19:50 +00:00).
Release: forge 2.0.0 — see also the project discussions.
This commit is contained in:
21
modules_forge/bnb_installer.py
Normal file
21
modules_forge/bnb_installer.py
Normal file
@@ -0,0 +1,21 @@
|
||||
import pkg_resources
|
||||
|
||||
from modules.launch_utils import run_pip
|
||||
|
||||
target_bitsandbytes_version = '0.43.3'
|
||||
|
||||
|
||||
def try_install_bnb():
    """Ensure the pinned bitsandbytes version is installed.

    Queries the installed distribution via ``pkg_resources``; if bitsandbytes
    is missing or its version differs from ``target_bitsandbytes_version``,
    upgrades it through ``run_pip``.  Installation failures are reported but
    never raised, so startup continues without bitsandbytes support.
    """
    try:
        bitsandbytes_version = pkg_resources.get_distribution('bitsandbytes').version
    except Exception:
        # Not installed (or metadata unreadable) — treat as missing.
        bitsandbytes_version = None

    try:
        if bitsandbytes_version != target_bitsandbytes_version:
            run_pip(
                f"install -U bitsandbytes=={target_bitsandbytes_version}",
                f"bitsandbytes=={target_bitsandbytes_version}",
            )
    except Exception as e:
        # Best-effort install: report the actual failure instead of a bare,
        # placeholder-less f-string that silently dropped the exception.
        print(f'Cannot install bitsandbytes: {e}. Skipped.')
|
||||
@@ -1 +1 @@
|
||||
version = '1.0.2v1.10.1'
|
||||
version = '2.0.0v1.10.1'
|
||||
|
||||
@@ -54,6 +54,10 @@ def initialize_forge():
|
||||
torch.zeros((1, 1)).to(device, torch.float32)
|
||||
memory_management.soft_empty_cache()
|
||||
|
||||
if memory_management.can_install_bnb():
|
||||
from modules_forge.bnb_installer import try_install_bnb
|
||||
try_install_bnb()
|
||||
|
||||
import modules_forge.patch_basic
|
||||
modules_forge.patch_basic.patch_all_basics()
|
||||
|
||||
|
||||
@@ -1,21 +1,32 @@
|
||||
import torch
|
||||
import gradio as gr
|
||||
|
||||
from modules import shared_items, shared, ui_common, sd_models, processing
|
||||
from gradio.context import Context
|
||||
from modules import shared_items, shared, ui_common, sd_models, processing, infotext_utils
|
||||
from modules import sd_vae as sd_vae_module
|
||||
from backend import memory_management, stream
|
||||
|
||||
|
||||
total_vram = int(memory_management.total_vram)
|
||||
|
||||
ui_forge_preset: gr.Radio = None
|
||||
|
||||
ui_checkpoint: gr.Dropdown = None
|
||||
ui_vae: gr.Dropdown = None
|
||||
ui_vae_refresh_button: gr.Button = None
|
||||
ui_clip_skip: gr.Slider = None
|
||||
|
||||
ui_forge_unet_storage_dtype_options: gr.Radio = None
|
||||
ui_forge_async_loading: gr.Radio = None
|
||||
ui_forge_pin_shared_memory: gr.Radio = None
|
||||
ui_forge_inference_memory: gr.Slider = None
|
||||
|
||||
forge_unet_storage_dtype_options = {
|
||||
'None': None,
|
||||
'fp8e4m3': torch.float8_e4m3fn,
|
||||
'fp8e5m2': torch.float8_e5m2,
|
||||
'Auto': None,
|
||||
'nf4': 'nf4',
|
||||
'fp8e4': torch.float8_e4m3fn,
|
||||
'fp4': 'fp4',
|
||||
'fp8e5': torch.float8_e5m2,
|
||||
}
|
||||
|
||||
|
||||
@@ -28,22 +39,24 @@ def bind_to_opts(comp, k, save=False, callback=None):
|
||||
callback()
|
||||
return
|
||||
|
||||
comp.change(on_change, inputs=[comp], show_progress=False)
|
||||
comp.change(on_change, inputs=[comp], queue=False, show_progress=False)
|
||||
return
|
||||
|
||||
|
||||
def make_checkpoint_manager_ui():
|
||||
global ui_checkpoint, ui_vae, ui_clip_skip
|
||||
global ui_checkpoint, ui_vae, ui_clip_skip, ui_forge_unet_storage_dtype_options, ui_forge_async_loading, ui_forge_pin_shared_memory, ui_forge_inference_memory, ui_forge_preset, ui_vae_refresh_button
|
||||
|
||||
if shared.opts.sd_model_checkpoint in [None, 'None', 'none', '']:
|
||||
if len(sd_models.checkpoints_list) == 0:
|
||||
sd_models.list_models()
|
||||
if len(sd_models.checkpoints_list) > 0:
|
||||
shared.opts.set('sd_model_checkpoint', next(iter(sd_models.checkpoints_list.keys())))
|
||||
shared.opts.set('sd_model_checkpoint', next(iter(sd_models.checkpoints_list.values())).name)
|
||||
|
||||
ui_forge_preset = gr.Radio(label="UI", value=lambda: shared.opts.forge_preset, choices=['sd', 'xl', 'flux', 'all'])
|
||||
|
||||
sd_model_checkpoint_args = lambda: {"choices": shared_items.list_checkpoint_tiles(shared.opts.sd_checkpoint_dropdown_use_short)}
|
||||
ui_checkpoint = gr.Dropdown(
|
||||
value=shared.opts.sd_model_checkpoint,
|
||||
value=lambda: shared.opts.sd_model_checkpoint,
|
||||
label="Checkpoint",
|
||||
elem_classes=['model_selection'],
|
||||
**sd_model_checkpoint_args()
|
||||
@@ -52,30 +65,32 @@ def make_checkpoint_manager_ui():
|
||||
|
||||
sd_vae_args = lambda: {"choices": shared_items.sd_vae_items()}
|
||||
ui_vae = gr.Dropdown(
|
||||
value=shared.opts.sd_vae,
|
||||
value=lambda: shared.opts.sd_vae,
|
||||
label="VAE",
|
||||
**sd_vae_args()
|
||||
)
|
||||
ui_common.create_refresh_button(ui_vae, shared_items.refresh_vae_list, sd_vae_args, f"forge_refresh_vae")
|
||||
ui_vae_refresh_button = ui_common.create_refresh_button(ui_vae, shared_items.refresh_vae_list, sd_vae_args, f"forge_refresh_vae")
|
||||
|
||||
ui_forge_unet_storage_dtype_options = gr.Radio(label="Diffusion in FP8", value=shared.opts.forge_unet_storage_dtype, choices=list(forge_unet_storage_dtype_options.keys()))
|
||||
ui_forge_unet_storage_dtype_options = gr.Radio(label="Diffusion with Low Bits", value=lambda: shared.opts.forge_unet_storage_dtype, choices=list(forge_unet_storage_dtype_options.keys()))
|
||||
bind_to_opts(ui_forge_unet_storage_dtype_options, 'forge_unet_storage_dtype', save=True, callback=refresh_model_loading_parameters)
|
||||
|
||||
from backend.args import args as backend_args
|
||||
|
||||
ui_forge_inference_memory = gr.Slider(label="Model Memory (MB)", value=total_vram - shared.opts.forge_inference_memory, minimum=0, maximum=int(memory_management.total_vram), step=1, visible=backend_args.i_am_lllyasviel)
|
||||
ui_forge_async_loading = gr.Checkbox(label="Async Loader", value=shared.opts.forge_async_loading, visible=backend_args.i_am_lllyasviel)
|
||||
ui_forge_pin_shared_memory = gr.Checkbox(label="Offload to Shared Memory", value=shared.opts.forge_pin_shared_memory, visible=backend_args.i_am_lllyasviel)
|
||||
ui_forge_async_loading = gr.Radio(label="Swap Method", value=lambda: shared.opts.forge_async_loading, choices=['Queue', 'Async'])
|
||||
ui_forge_pin_shared_memory = gr.Radio(label="Swap Location", value=lambda: shared.opts.forge_pin_shared_memory, choices=['CPU', 'Shared'])
|
||||
ui_forge_inference_memory = gr.Slider(label="GPU Weights (MB)", value=lambda: total_vram - shared.opts.forge_inference_memory, minimum=0, maximum=int(memory_management.total_vram), step=1)
|
||||
|
||||
mem_comps = [ui_forge_inference_memory, ui_forge_async_loading, ui_forge_pin_shared_memory]
|
||||
|
||||
ui_forge_inference_memory.change(refresh_memory_management_settings, inputs=mem_comps)
|
||||
ui_forge_async_loading.change(refresh_memory_management_settings, inputs=mem_comps)
|
||||
ui_forge_pin_shared_memory.change(refresh_memory_management_settings, inputs=mem_comps)
|
||||
ui_forge_inference_memory.change(refresh_memory_management_settings, inputs=mem_comps, queue=False, show_progress=False)
|
||||
ui_forge_async_loading.change(refresh_memory_management_settings, inputs=mem_comps, queue=False, show_progress=False)
|
||||
ui_forge_pin_shared_memory.change(refresh_memory_management_settings, inputs=mem_comps, queue=False, show_progress=False)
|
||||
Context.root_block.load(refresh_memory_management_settings, inputs=mem_comps, queue=False, show_progress=False)
|
||||
|
||||
ui_clip_skip = gr.Slider(label="Clip skip", value=shared.opts.CLIP_stop_at_last_layers, **{"minimum": 1, "maximum": 12, "step": 1})
|
||||
ui_clip_skip = gr.Slider(label="Clip skip", value=lambda: shared.opts.CLIP_stop_at_last_layers, **{"minimum": 1, "maximum": 12, "step": 1})
|
||||
bind_to_opts(ui_clip_skip, 'CLIP_stop_at_last_layers', save=False)
|
||||
|
||||
ui_checkpoint.change(checkpoint_change, inputs=[ui_checkpoint], show_progress=False)
|
||||
ui_vae.change(vae_change, inputs=[ui_vae], queue=False, show_progress=False)
|
||||
|
||||
return
|
||||
|
||||
|
||||
@@ -86,14 +101,17 @@ def refresh_memory_management_settings(model_memory, async_loading, pin_shared_m
|
||||
shared.opts.set('forge_inference_memory', inference_memory)
|
||||
shared.opts.set('forge_pin_shared_memory', pin_shared_memory)
|
||||
|
||||
stream.stream_activated = async_loading
|
||||
stream.stream_activated = async_loading == 'Async'
|
||||
memory_management.current_inference_memory = inference_memory * 1024 * 1024
|
||||
memory_management.PIN_SHARED_MEMORY = pin_shared_memory
|
||||
memory_management.PIN_SHARED_MEMORY = pin_shared_memory == 'Shared'
|
||||
|
||||
print(f'Stream Set to: {stream.stream_activated}')
|
||||
print(f'Stream Used by CUDA: {stream.should_use_stream()}')
|
||||
print(f'Current Inference Memory: {memory_management.minimum_inference_memory() / (1024 * 1024):.2f} MB')
|
||||
print(f'PIN Shared Memory: {pin_shared_memory}')
|
||||
log_dict = dict(
|
||||
stream=stream.should_use_stream(),
|
||||
inference_memory=memory_management.minimum_inference_memory() / (1024 * 1024),
|
||||
pin_shared_memory=memory_management.PIN_SHARED_MEMORY
|
||||
)
|
||||
|
||||
print(f'Environment vars changed: {log_dict}')
|
||||
|
||||
processing.need_global_unload = True
|
||||
return
|
||||
@@ -111,7 +129,7 @@ def refresh_model_loading_parameters():
|
||||
unet_storage_dtype=forge_unet_storage_dtype_options[shared.opts.forge_unet_storage_dtype]
|
||||
)
|
||||
|
||||
print(f'Loading parameters: {model_data.forge_loading_parameters}')
|
||||
print(f'Model selected: {model_data.forge_loading_parameters}')
|
||||
|
||||
return
|
||||
|
||||
@@ -131,10 +149,149 @@ def vae_change(vae_name):
|
||||
return
|
||||
|
||||
|
||||
def forge_main_entry():
|
||||
ui_checkpoint.change(checkpoint_change, inputs=[ui_checkpoint], show_progress=False)
|
||||
ui_vae.change(vae_change, inputs=[ui_vae], show_progress=False)
|
||||
def get_a1111_ui_component(tab, label):
    """Look up a registered A1111 infotext paste field by label or API name.

    Scans the paste fields registered for *tab* ('txt2img' / 'img2img') and
    returns the Gradio component of the first field whose ``label`` or
    ``api`` equals *label*; returns None when nothing matches.
    """
    for field in infotext_utils.paste_fields[tab]['fields']:
        if label in (field.label, field.api):
            return field.component
    return None
|
||||
|
||||
|
||||
def forge_main_entry():
    """Wire the Forge preset radio to the UI components it controls.

    Resolves the txt2img/img2img generation-parameter components registered
    by A1111's infotext machinery, binds ``on_preset_change`` to both the
    preset radio and the initial page load, and finally refreshes the
    model-loading parameters once.
    """
    # Generation-parameter components of the txt2img tab.
    ui_txt2img_width = get_a1111_ui_component('txt2img', 'Size-1')
    ui_txt2img_height = get_a1111_ui_component('txt2img', 'Size-2')
    ui_txt2img_cfg = get_a1111_ui_component('txt2img', 'CFG scale')
    ui_txt2img_distilled_cfg = get_a1111_ui_component('txt2img', 'Distilled CFG Scale')
    ui_txt2img_sampler = get_a1111_ui_component('txt2img', 'sampler_name')
    ui_txt2img_scheduler = get_a1111_ui_component('txt2img', 'scheduler')

    # Matching components of the img2img tab.
    ui_img2img_width = get_a1111_ui_component('img2img', 'Size-1')
    ui_img2img_height = get_a1111_ui_component('img2img', 'Size-2')
    ui_img2img_cfg = get_a1111_ui_component('img2img', 'CFG scale')
    ui_img2img_distilled_cfg = get_a1111_ui_component('img2img', 'Distilled CFG Scale')
    ui_img2img_sampler = get_a1111_ui_component('img2img', 'sampler_name')
    ui_img2img_scheduler = get_a1111_ui_component('img2img', 'scheduler')

    # NOTE: this order must match, element for element, the list of
    # gr.update() objects returned by on_preset_change.
    output_targets = [
        ui_vae,
        ui_vae_refresh_button,
        ui_clip_skip,
        ui_forge_unet_storage_dtype_options,
        ui_forge_async_loading,
        ui_forge_pin_shared_memory,
        ui_forge_inference_memory,
        ui_txt2img_width,
        ui_img2img_width,
        ui_txt2img_height,
        ui_img2img_height,
        ui_txt2img_cfg,
        ui_img2img_cfg,
        ui_txt2img_distilled_cfg,
        ui_img2img_distilled_cfg,
        ui_txt2img_sampler,
        ui_img2img_sampler,
        ui_txt2img_scheduler,
        ui_img2img_scheduler
    ]

    ui_forge_preset.change(on_preset_change, inputs=[ui_forge_preset], outputs=output_targets, queue=False, show_progress=False)
    # Also apply the persisted preset once when the page first loads.
    Context.root_block.load(on_preset_change, inputs=None, outputs=output_targets, queue=False, show_progress=False)

    # Load Model
    refresh_model_loading_parameters()
    return
|
||||
|
||||
|
||||
def _preset_ui_updates(show_vae, show_dtype, show_memory, show_distilled, width, height, cfg, sampler, scheduler):
    """Build the per-preset list of gr.update() objects.

    The element order must match ``output_targets`` in ``forge_main_entry``.
    Only visibility flags and generation defaults vary between presets; the
    reset values ('Automatic' VAE, clip skip 1, 'Auto' dtype, 'Queue'/'CPU'
    swap settings, distilled CFG 3.5) are shared by every preset.
    """
    return [
        gr.update(visible=show_vae, value='Automatic'),           # ui_vae
        gr.update(visible=show_vae),                              # ui_vae_refresh_button
        gr.update(visible=show_vae, value=1),                     # ui_clip_skip
        gr.update(visible=show_dtype, value='Auto'),              # ui_forge_unet_storage_dtype_options
        gr.update(visible=show_memory, value='Queue'),            # ui_forge_async_loading
        gr.update(visible=show_memory, value='CPU'),              # ui_forge_pin_shared_memory
        gr.update(visible=show_memory, value=total_vram - 1024),  # ui_forge_inference_memory
        gr.update(value=width),                                   # ui_txt2img_width
        gr.update(value=width),                                   # ui_img2img_width
        gr.update(value=height),                                  # ui_txt2img_height
        gr.update(value=height),                                  # ui_img2img_height
        gr.update(value=cfg),                                     # ui_txt2img_cfg
        gr.update(value=cfg),                                     # ui_img2img_cfg
        gr.update(visible=show_distilled, value=3.5),             # ui_txt2img_distilled_cfg
        gr.update(visible=show_distilled, value=3.5),             # ui_img2img_distilled_cfg
        gr.update(value=sampler),                                 # ui_txt2img_sampler
        gr.update(value=sampler),                                 # ui_img2img_sampler
        gr.update(value=scheduler),                               # ui_txt2img_scheduler
        gr.update(value=scheduler),                               # ui_img2img_scheduler
    ]


def on_preset_change(preset=None):
    """Persist the selected Forge preset and return the UI updates for it.

    Called both from the preset radio's change event (preset is the new
    value, which is saved to options) and from the initial page load
    (preset is None, so the stored option is applied without re-saving).
    Returns a list of gr.update() objects in ``output_targets`` order.
    """
    if preset is not None:
        shared.opts.set('forge_preset', preset)
        shared.opts.save(shared.config_filename)

    preset_name = shared.opts.forge_preset
    if preset_name == 'sd':
        return _preset_ui_updates(True, False, False, False, 512, 640, 7, 'Euler a', 'Automatic')
    if preset_name == 'xl':
        return _preset_ui_updates(False, True, False, False, 896, 1152, 5, 'DPM++ 2M SDE', 'Karras')
    if preset_name == 'flux':
        return _preset_ui_updates(False, True, True, True, 896, 1152, 1, 'Euler', 'Simple')
    # 'all' (or any unknown preset): show every control with generic defaults.
    return _preset_ui_updates(True, True, True, True, 896, 1152, 7, 'DPM++ 2M', 'Automatic')
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
|
||||
def register(options_templates, options_section, OptionInfo):
    """Register Forge's hidden option defaults into the options table.

    The section title is None so these entries never appear in the settings
    UI; they back the checkpoint-manager controls and the preset radio.
    """
    hidden_options = {
        "forge_unet_storage_dtype": OptionInfo('Auto'),
        "forge_inference_memory": OptionInfo(1024),
        "forge_async_loading": OptionInfo('Queue'),
        "forge_pin_shared_memory": OptionInfo('CPU'),
        "forge_preset": OptionInfo('sd'),
    }
    options_templates.update(options_section((None, "Forge Hidden options"), hidden_options))
|
||||
|
||||
Reference in New Issue
Block a user