Mirror of https://github.com/lllyasviel/stable-diffusion-webui-forge.git (synced 2026-01-26 19:09:45 +00:00)
revise space
@@ -64,7 +64,7 @@ def load_pipeline(model_name):
     return pipe
 
 
-with spaces.GPUObject() as gpu_object:
+with spaces.capture_gpu_object() as gpu_object:
     pipe = load_pipeline(MODEL)
     logger.info("Loaded on Device!")
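The next six hunks repeat the same one-line rename at other call sites; the final hunk adds requirements installation. As a rough sketch of the migrated pattern, assuming spaces.capture_gpu_object() is a drop-in context-manager replacement for spaces.GPUObject() that records the models instantiated inside the block — only spaces, gpu_object, load_pipeline and MODEL come from the hunk above, the rest is illustrative:

# Illustrative sketch, not part of the commit.
import spaces                          # Forge's Spaces helper module, as used in the hunks

MODEL = 'some/model-id'                # hypothetical model identifier

def load_pipeline(model_name):
    ...                                # placeholder for the loader shown in the hunk above

# Before this commit:
#     with spaces.GPUObject() as gpu_object:
#         pipe = load_pipeline(MODEL)

# After this commit:
with spaces.capture_gpu_object() as gpu_object:
    pipe = load_pipeline(MODEL)        # assumed: models loaded here are tracked via gpu_object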
@@ -11,7 +11,7 @@ from torchvision import transforms
 
 os.environ['HOME'] = spaces.convert_root_path() + 'home'
 
-with spaces.GPUObject() as birefnet_gpu_obj:
+with spaces.capture_gpu_object() as birefnet_gpu_obj:
     birefnet = AutoModelForImageSegmentation.from_pretrained(
         "ZhengPeng7/BiRefNet", trust_remote_code=True
     )
@@ -27,7 +27,7 @@ def fixed_get_imports(filename: str | os.PathLike) -> list[str]:
     return imports
 
 
-with spaces.GPUObject() as gpu_object:
+with spaces.capture_gpu_object() as gpu_object:
     with patch("transformers.dynamic_module_utils.get_imports", fixed_get_imports):
         models = {
             # 'microsoft/Florence-2-large-ft': AutoModelForCausalLM.from_pretrained('microsoft/Florence-2-large-ft', attn_implementation='sdpa', trust_remote_code=True).to("cuda").eval(),
@@ -46,7 +46,7 @@ from torchvision.transforms import InterpolationMode
 
 device = spaces.gpu
 
-with spaces.GPUObject() as gpu_object:
+with spaces.capture_gpu_object() as gpu_object:
     vae = AutoencoderKL.from_pretrained(spaces.convert_root_path(), subfolder='vae')
     scheduler = DDIMScheduler.from_pretrained(spaces.convert_root_path(), subfolder='scheduler')
     image_encoder = CLIPVisionModelWithProjection.from_pretrained(spaces.convert_root_path(), subfolder="image_encoder")
@@ -16,7 +16,7 @@ from enum import Enum
 # from torch.hub import download_url_to_file
 
 
-with spaces.GPUObject() as gpu_object:
+with spaces.capture_gpu_object() as gpu_object:
     # 'stablediffusionapi/realistic-vision-v51'
     # 'runwayml/stable-diffusion-v1-5'
     sd15_name = 'stablediffusionapi/realistic-vision-v51'
@@ -28,7 +28,7 @@ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionS
 BASE_MODEL = "SG161222/Realistic_Vision_V5.1_noVAE"
 
 
-with spaces.GPUObject() as gpu_object:
+with spaces.capture_gpu_object() as gpu_object:
     # Initialize both pipelines
     vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)
     controlnet = ControlNetModel.from_pretrained("monster-labs/control_v1p_sd15_qrcode_monster", torch_dtype=torch.float16)
@@ -45,7 +45,7 @@ if device == "mps":
     torch_dtype = torch.float16
 
 
-with spaces.GPUObject() as gpu_object:
+with spaces.capture_gpu_object() as gpu_object:
     # load adapter
     adapter = T2IAdapter.from_pretrained(
         "TencentARC/t2i-adapter-sketch-sdxl-1.0", torch_dtype=torch_dtype, variant="fp16"
@@ -108,6 +108,13 @@ class ForgeSpace:
         )
 
         print(f'Downloaded: {downloaded}')
+
+        requirements_filename = os.path.abspath(os.path.realpath(os.path.join(self.root_path, 'requirements.txt')))
+
+        if os.path.exists(requirements_filename):
+            from modules.launch_utils import run_pip
+            run_pip(f'install -r "{requirements_filename}"', desc=f"space requirements for [{self.title}]")
+
         return self.refresh_gradio()
 
     def uninstall(self):
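This last hunk makes each space install its own requirements.txt (via the WebUI's run_pip helper) before the Gradio UI is refreshed. A self-contained sketch of the same guard-then-install logic; the function name install_space_requirements and the plain pip subprocess call are illustrative stand-ins for modules.launch_utils.run_pip, and only the path handling is taken from the hunk above:

import os
import subprocess
import sys

def install_space_requirements(root_path: str, title: str) -> None:
    # Resolve symlinks and normalize to an absolute path, mirroring the added lines.
    requirements_filename = os.path.abspath(os.path.realpath(os.path.join(root_path, 'requirements.txt')))

    if not os.path.exists(requirements_filename):
        return  # the space ships no requirements.txt, so there is nothing to install

    # The commit delegates to modules.launch_utils.run_pip; a direct pip invocation is
    # the closest standalone equivalent.
    print(f'Installing space requirements for [{title}] ...')
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-r', requirements_filename])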