From c4f52d8ffb738dc7189bd2177aec340beb26ef90 Mon Sep 17 00:00:00 2001
From: layerdiffusion <19834515+lllyasviel@users.noreply.github.com>
Date: Mon, 29 Jul 2024 11:27:58 -0600
Subject: [PATCH] add args for new backend

---
 backend/args.py | 48 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+)

diff --git a/backend/args.py b/backend/args.py
index 3ef5f5af..5368a25b 100644
--- a/backend/args.py
+++ b/backend/args.py
@@ -2,6 +2,54 @@ import argparse
 
 parser = argparse.ArgumentParser()
 
+parser.add_argument("--gpu-device-id", type=int, default=None, metavar="DEVICE_ID")
+parser.add_argument("--disable-attention-upcast", action="store_true")
+
+fp_group = parser.add_mutually_exclusive_group()
+fp_group.add_argument("--all-in-fp32", action="store_true")
+fp_group.add_argument("--all-in-fp16", action="store_true")
+
+fpunet_group = parser.add_mutually_exclusive_group()
+fpunet_group.add_argument("--unet-in-bf16", action="store_true")
+fpunet_group.add_argument("--unet-in-fp16", action="store_true")
+fpunet_group.add_argument("--unet-in-fp8-e4m3fn", action="store_true")
+fpunet_group.add_argument("--unet-in-fp8-e5m2", action="store_true")
+
+fpvae_group = parser.add_mutually_exclusive_group()
+fpvae_group.add_argument("--vae-in-fp16", action="store_true")
+fpvae_group.add_argument("--vae-in-fp32", action="store_true")
+fpvae_group.add_argument("--vae-in-bf16", action="store_true")
+
+parser.add_argument("--vae-in-cpu", action="store_true")
+
+fpte_group = parser.add_mutually_exclusive_group()
+fpte_group.add_argument("--clip-in-fp8-e4m3fn", action="store_true")
+fpte_group.add_argument("--clip-in-fp8-e5m2", action="store_true")
+fpte_group.add_argument("--clip-in-fp16", action="store_true")
+fpte_group.add_argument("--clip-in-fp32", action="store_true")
+
+attn_group = parser.add_mutually_exclusive_group()
+attn_group.add_argument("--attention-split", action="store_true")
+attn_group.add_argument("--attention-quad", action="store_true")
+attn_group.add_argument("--attention-pytorch", action="store_true")
+
+parser.add_argument("--disable-xformers", action="store_true")
+parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE", const=-1)
+parser.add_argument("--disable-ipex-hijack", action="store_true")
+
+vram_group = parser.add_mutually_exclusive_group()
+vram_group.add_argument("--always-gpu", action="store_true")
+vram_group.add_argument("--always-high-vram", action="store_true")
+vram_group.add_argument("--always-normal-vram", action="store_true")
+vram_group.add_argument("--always-low-vram", action="store_true")
+vram_group.add_argument("--always-no-vram", action="store_true")
+vram_group.add_argument("--always-cpu", action="store_true")
+
+parser.add_argument("--always-offload-from-vram", action="store_true")
+parser.add_argument("--pytorch-deterministic", action="store_true")
+
+parser.add_argument("--cuda-malloc", action="store_true")
 parser.add_argument("--cuda-stream", action="store_true")
+parser.add_argument("--pin-shared-memory", action="store_true")
 
 args = parser.parse_known_args()[0]
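
Note (not part of the patch): since args comes from parse_known_args(), the dashed flags land on the Namespace with underscores (e.g. --unet-in-bf16 becomes args.unet_in_bf16), and each mutually exclusive group guarantees at most one flag in it is set. Below is a minimal sketch of how a backend could map the new --unet-in-* flags onto a torch dtype; the helper name unet_dtype_from_args and the fp32 fallback are assumptions for illustration, not code from this commit.

    # Sketch only: maps the mutually exclusive --unet-in-* flags to a
    # torch dtype. Helper name and fp32 fallback are assumed, not from
    # this commit. Requires torch >= 2.1 for the float8 dtypes.
    import torch

    from backend.args import args


    def unet_dtype_from_args() -> torch.dtype:
        # argparse stores "--unet-in-bf16" as args.unet_in_bf16, etc.
        if args.unet_in_bf16:
            return torch.bfloat16
        if args.unet_in_fp16:
            return torch.float16
        if args.unet_in_fp8_e4m3fn:
            return torch.float8_e4m3fn
        if args.unet_in_fp8_e5m2:
            return torch.float8_e5m2
        # The mutually exclusive group ensures at most one flag is set;
        # when none is given, fall back to fp32 (assumed default).
        return torch.float32

With this sketch, launching with --unet-in-fp8-e4m3fn would make unet_dtype_from_args() return torch.float8_e4m3fn, while launching with no --unet-in-* flag would return torch.float32.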