Mirror of https://github.com/lllyasviel/stable-diffusion-webui-forge.git
(synced 2026-03-04 20:49:49 +00:00).
Commit: "better clipvision".
This commit is contained in:
@@ -1,52 +1,33 @@
|
||||
from modules_forge.supported_preprocessor import Preprocessor, PreprocessorParameter
|
||||
from modules_forge.shared import preprocessor_dir, add_supported_preprocessor
|
||||
from modules.modelloader import load_file_from_url
|
||||
from modules_forge.supported_preprocessor import PreprocessorClipVision
|
||||
from modules_forge.shared import add_supported_preprocessor
|
||||
from modules_forge.forge_util import numpy_to_pytorch
|
||||
|
||||
import ldm_patched.modules.clip_vision
|
||||
|
||||
|
||||
class PreprocessorClipVisionForIPAdapter(PreprocessorClipVision):
    """CLIP-vision preprocessor variant used by IP-Adapter control models.

    Checkpoint download/loading now lives in the ``PreprocessorClipVision``
    base class (``load_clipvision()`` — assumed to download the checkpoint on
    first use and cache the loaded model; confirm in
    ``modules_forge.supported_preprocessor``). This subclass only tags itself
    for the IP-Adapter UI section and returns the pair the IP-Adapter model
    patcher expects.
    """

    def __init__(self, name, url, filename):
        # name/url/filename bookkeeping is handled by the base class.
        super().__init__(name, url, filename)
        self.tags = ['IP-Adapter']
        self.model_filename_filters = ['IP-Adapter', 'IP_Adapter']
        self.sorting_priority = 1

    def __call__(self, input_image, resolution, slider_1=None, slider_2=None, slider_3=None, **kwargs):
        """Return ``(clipvision_model, image_tensor)`` for IP-Adapter.

        The image is NOT encoded here; the CLIP vision model and the raw
        image tensor are handed downstream together (IP-Adapter format).
        """
        clipvision = self.load_clipvision()
        return clipvision, numpy_to_pytorch(input_image)
|
||||
|
||||
|
||||
# ViT-H/14 image encoder shipped with the SD1.5 IP-Adapter weights.
add_supported_preprocessor(PreprocessorClipVisionForIPAdapter(
    name='CLIP-ViT-H (IPAdapter)',
    url='https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/model.safetensors',
    filename='CLIP-ViT-H-14.safetensors'
))
|
||||
|
||||
# ViT-bigG/14 image encoder for SDXL IP-Adapter models.
# NOTE(review): the original URL pointed at models/image_encoder, which is the
# ViT-H encoder; the bigG encoder is published under sdxl_models/image_encoder
# on the h94/IP-Adapter repo — confirm against the model card.
add_supported_preprocessor(PreprocessorClipVisionForIPAdapter(
    name='CLIP-ViT-bigG (IPAdapter)',
    url='https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/image_encoder/model.safetensors',
    filename='CLIP-ViT-bigG.safetensors'
))
|
||||
|
||||
# ViT-L/14 image encoder (OpenAI CLIP), used by some IP-Adapter variants.
add_supported_preprocessor(PreprocessorClipVisionForIPAdapter(
    name='CLIP-ViT-L (IPAdapter)',
    url='https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/pytorch_model.bin',
    # Was 'CLIP-ViT-bigG.safetensors': that name collides with the bigG entry
    # above (downloading ViT-L would overwrite the bigG checkpoint) and the
    # source file is a pickle .bin, not safetensors.
    filename='CLIP-ViT-L-14.bin'
))
|
||||
|
||||
Reference in New Issue
Block a user