implement attention for new backend

layerdiffusion
2024-07-29 11:46:16 -06:00
parent 99fcb35506
commit 68b672493a
3 changed files with 67 additions and 7 deletions

@@ -28,12 +28,7 @@ fpte_group.add_argument("--clip-in-fp8-e5m2", action="store_true")
fpte_group.add_argument("--clip-in-fp16", action="store_true")
fpte_group.add_argument("--clip-in-fp32", action="store_true")
attn_group = parser.add_mutually_exclusive_group()
attn_group.add_argument("--attention-split", action="store_true")
attn_group.add_argument("--attention-quad", action="store_true")
attn_group.add_argument("--attention-pytorch", action="store_true")
parser.add_argument("--disable-xformers", action="store_true")
parser.add_argument("--xformers", action="store_true")
parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE", const=-1)
parser.add_argument("--disable-ipex-hijack", action="store_true")