From e6c5aead3b4ecfabec37ec9b005756eb7e419679 Mon Sep 17 00:00:00 2001
From: Jaret Burkett
Date: Tue, 2 Dec 2025 16:14:34 -0700
Subject: [PATCH] Fix issue that prevented ramtorch layer offloading with
 z_image

---
 extensions_built_in/diffusion_models/z_image/z_image.py | 5 +++++
 toolkit/config_modules.py                               | 5 +++++
 2 files changed, 10 insertions(+)

diff --git a/extensions_built_in/diffusion_models/z_image/z_image.py b/extensions_built_in/diffusion_models/z_image/z_image.py
index 29542635..368ae9e7 100644
--- a/extensions_built_in/diffusion_models/z_image/z_image.py
+++ b/extensions_built_in/diffusion_models/z_image/z_image.py
@@ -124,6 +124,7 @@ class ZImageModel(BaseModel):
             is_transformer=True,
             target_lin_modules=self.target_lora_modules,
             is_assistant_adapter=True,
+            is_ara=True,
         )
         network.apply_to(None, transformer, apply_text_encoder=False, apply_unet=True)
         self.print_and_status_update("Merging in assistant LoRA")
@@ -189,6 +190,10 @@ class ZImageModel(BaseModel):
             transformer,
             self.device_torch,
             offload_percent=self.model_config.layer_offloading_transformer_percent,
+            ignore_modules=[
+                transformer.x_pad_token,
+                transformer.cap_pad_token,
+            ]
         )

         if self.model_config.low_vram:
diff --git a/toolkit/config_modules.py b/toolkit/config_modules.py
index 96617620..c2be1249 100644
--- a/toolkit/config_modules.py
+++ b/toolkit/config_modules.py
@@ -1308,6 +1308,11 @@ def validate_configs(
     if train_config.bypass_guidance_embedding and train_config.do_guidance_loss:
         raise ValueError("Cannot bypass guidance embedding and do guidance loss at the same time. "
                          "Please set bypass_guidance_embedding to False or do_guidance_loss to False.")
+
+    if model_config.accuracy_recovery_adapter is not None:
+        if model_config.assistant_lora_path is not None:
+            raise ValueError("Cannot use accuracy recovery adapter and assistant lora at the same time. "
+                             "Please set one of them to None.")

     # see if any datasets are caching text embeddings
     is_caching_text_embeddings = any(dataset.cache_text_embeddings for dataset in dataset_configs)