From 1cbd13c85a6d4700d51fd85679148720f8a13661 Mon Sep 17 00:00:00 2001
From: layerdiffusion <19834515+lllyasviel@users.noreply.github.com>
Date: Sun, 18 Aug 2024 20:05:16 -0700
Subject: [PATCH] Animagine XL 3.1 Official User Interface

---
 .../forge_space_animagine_xl_31/forge_app.py  | 400 ++++++++++++++++++
 .../space_meta.json                           |   6 +
 2 files changed, 406 insertions(+)
 create mode 100644 extensions-builtin/forge_space_animagine_xl_31/forge_app.py
 create mode 100644 extensions-builtin/forge_space_animagine_xl_31/space_meta.json

diff --git a/extensions-builtin/forge_space_animagine_xl_31/forge_app.py b/extensions-builtin/forge_space_animagine_xl_31/forge_app.py
new file mode 100644
index 00000000..fe3ebae0
--- /dev/null
+++ b/extensions-builtin/forge_space_animagine_xl_31/forge_app.py
@@ -0,0 +1,400 @@
+import spaces
+import os
+import gc
+import gradio as gr
+import numpy as np
+import torch
+import json
+import config
+import utils
+import logging
+from PIL import Image, PngImagePlugin
+from datetime import datetime
+from diffusers.models import AutoencoderKL
+from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+DESCRIPTION = "Animagine XL 3.1"
+if not torch.cuda.is_available():
+    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
+
+IS_COLAB = utils.is_google_colab() or os.getenv("IS_COLAB") == "1"
+HF_TOKEN = os.getenv("HF_TOKEN")
+CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"
+MIN_IMAGE_SIZE = int(os.getenv("MIN_IMAGE_SIZE", "512"))
+MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "2048"))
+USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
+ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
+OUTPUT_DIR = os.getenv("OUTPUT_DIR", "./outputs")
+
+MODEL = os.getenv(
+    "MODEL",
+    "https://huggingface.co/cagliostrolab/animagine-xl-3.1/blob/main/animagine-xl-3.1.safetensors",
+)
+
+# torch.backends.cudnn.deterministic = True
+# torch.backends.cudnn.benchmark = False
+
+device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+
+def load_pipeline(model_name):
+    vae = AutoencoderKL.from_pretrained(
+        "madebyollin/sdxl-vae-fp16-fix",
+        torch_dtype=torch.float16,
+    )
+    pipeline = (
+        StableDiffusionXLPipeline.from_single_file
+        if model_name.endswith(".safetensors")
+        else StableDiffusionXLPipeline.from_pretrained
+    )
+
+    pipe = pipeline(
+        model_name,
+        vae=vae,
+        torch_dtype=torch.float16,
+        custom_pipeline="lpw_stable_diffusion_xl",
+        use_safetensors=True,
+        add_watermarker=False,
+        use_auth_token=HF_TOKEN,
+    )
+
+    # pipe.to(device)
+    return pipe
+
+
+with spaces.GPUObject() as gpu_object:
+    pipe = load_pipeline(MODEL)
+    logger.info("Loaded on Device!")
+
+
+spaces.automatically_move_pipeline_components(pipe)
+spaces.change_attention_from_diffusers_to_forge(pipe.unet)
+spaces.change_attention_from_diffusers_to_forge(pipe.vae)
+
+
+@spaces.GPU(gpu_objects=[gpu_object], manual_load=True)
+def generate(
+    prompt: str,
+    negative_prompt: str = "",
+    seed: int = 0,
+    custom_width: int = 1024,
+    custom_height: int = 1024,
+    guidance_scale: float = 7.0,
+    num_inference_steps: int = 28,
+    sampler: str = "Euler a",
+    aspect_ratio_selector: str = "896 x 1152",
+    style_selector: str = "(None)",
+    quality_selector: str = "Standard v3.1",
+    use_upscaler: bool = False,
+    upscaler_strength: float = 0.55,
+    upscale_by: float = 1.5,
+    add_quality_tags: bool = True,
+    progress=gr.Progress(track_tqdm=True),
+):
+    generator = utils.seed_everything(seed)
+
+    width, height = utils.aspect_ratio_handler(
+        aspect_ratio_selector,
+        custom_width,
+        custom_height,
+    )
+
+    prompt = utils.add_wildcard(prompt, wildcard_files)
+
+    prompt, negative_prompt = utils.preprocess_prompt(
+        quality_prompt, quality_selector, prompt, negative_prompt, add_quality_tags
+    )
+    prompt, negative_prompt = utils.preprocess_prompt(
+        styles, style_selector, prompt, negative_prompt
+    )
+
+    width, height = utils.preprocess_image_dimensions(width, height)
+
+    backup_scheduler = pipe.scheduler
+    pipe.scheduler = utils.get_scheduler(pipe.scheduler.config, sampler)
+
+    if use_upscaler:
+        upscaler_pipe = StableDiffusionXLImg2ImgPipeline(**pipe.components)
+    metadata = {
+        "prompt": prompt,
+        "negative_prompt": negative_prompt,
+        "resolution": f"{width} x {height}",
+        "guidance_scale": guidance_scale,
+        "num_inference_steps": num_inference_steps,
+        "seed": seed,
+        "sampler": sampler,
+        "sdxl_style": style_selector,
+        "add_quality_tags": add_quality_tags,
+        "quality_tags": quality_selector,
+    }
+
+    if use_upscaler:
+        new_width = int(width * upscale_by)
+        new_height = int(height * upscale_by)
+        metadata["use_upscaler"] = {
+            "upscale_method": "nearest-exact",
+            "upscaler_strength": upscaler_strength,
+            "upscale_by": upscale_by,
+            "new_resolution": f"{new_width} x {new_height}",
+        }
+    else:
+        metadata["use_upscaler"] = None
+    metadata["Model"] = {
+        "Model": DESCRIPTION,
+        "Model hash": "e3c47aedb0",
+    }
+
+    logger.info(json.dumps(metadata, indent=4))
+
+    try:
+        if use_upscaler:
+            latents = pipe(
+                prompt=prompt,
+                negative_prompt=negative_prompt,
+                width=width,
+                height=height,
+                guidance_scale=guidance_scale,
+                num_inference_steps=num_inference_steps,
+                generator=generator,
+                output_type="latent",
+            ).images
+            upscaled_latents = utils.upscale(latents, "nearest-exact", upscale_by)
+            images = upscaler_pipe(
+                prompt=prompt,
+                negative_prompt=negative_prompt,
+                image=upscaled_latents,
+                guidance_scale=guidance_scale,
+                num_inference_steps=num_inference_steps,
+                strength=upscaler_strength,
+                generator=generator,
+                output_type="pil",
+            ).images
+        else:
+            images = pipe(
+                prompt=prompt,
+                negative_prompt=negative_prompt,
+                width=width,
+                height=height,
+                guidance_scale=guidance_scale,
+                num_inference_steps=num_inference_steps,
+                generator=generator,
+                output_type="pil",
+            ).images
+
+        image_paths = []  # defined up front so the return below never hits an unbound name
+        if images:
+            image_paths = [
+                utils.save_image(image, metadata, OUTPUT_DIR, IS_COLAB)
+                for image in images
+            ]
+
+            for image_path in image_paths:
+                logger.info(f"Image saved as {image_path} with metadata")
+
+        return image_paths, metadata
+    except Exception as e:
+        logger.exception(f"An error occurred: {e}")
+        raise
+    finally:
+        if use_upscaler:
+            del upscaler_pipe
+        pipe.scheduler = backup_scheduler
+        utils.free_memory()
+
+
+styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in config.style_list}
+quality_prompt = {
+    k["name"]: (k["prompt"], k["negative_prompt"]) for k in config.quality_prompt_list
+}
+
+wildcard_files = utils.load_wildcard_files(spaces.convert_root_path() + "wildcard")
+
+with gr.Blocks(css="style.css", theme="NoCrypt/miku@1.2.1") as demo:
+    title = gr.HTML(
+        f"""