Fix issue that prevented ramtorch layer offloading with z_image

Author: Jaret Burkett
Date:   2025-12-02 16:14:34 -07:00
Parent: d42f5af2fc
Commit: e6c5aead3b
2 changed files with 10 additions and 0 deletions


@@ -124,6 +124,7 @@ class ZImageModel(BaseModel):
                 is_transformer=True,
                 target_lin_modules=self.target_lora_modules,
                 is_assistant_adapter=True,
+                is_ara=True,
             )
             network.apply_to(None, transformer, apply_text_encoder=False, apply_unet=True)
             self.print_and_status_update("Merging in assistant LoRA")
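The hunk above adds one line: the assistant network is now also tagged as an accuracy recovery adapter ("ara"). The network class itself isn't shown in this diff; a minimal sketch of how such a constructor might record the flag, assuming it is simply stored and branched on later (the class name and other arguments are illustrative, not the actual ai-toolkit API):

    # Illustrative only: a LoRA-style network that records the new flag.
    # `is_ara` mirrors the argument added above; everything else is assumed.
    class AssistantNetwork:
        def __init__(self, is_transformer=False, is_assistant_adapter=False, is_ara=False):
            self.is_transformer = is_transformer
            self.is_assistant_adapter = is_assistant_adapter
            # "ara" = accuracy recovery adapter; downstream code can branch
            # on this, e.g. when deciding how to merge or offload weights
            self.is_ara = is_ara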
@@ -189,6 +190,10 @@ class ZImageModel(BaseModel):
                 transformer,
                 self.device_torch,
                 offload_percent=self.model_config.layer_offloading_transformer_percent,
+                ignore_modules=[
+                    transformer.x_pad_token,
+                    transformer.cap_pad_token,
+                ]
             )
         if self.model_config.low_vram:
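This is the core of the fix: `x_pad_token` and `cap_pad_token` are presumably small learned pad-token tensors on the Z-Image transformer, and offloading them along with the transformer blocks is what broke ramtorch layer offloading, so they are now excluded. The diff doesn't show how ramtorch consumes `ignore_modules`; a rough sketch of the skip logic, assuming the offloader walks submodules and matches the ignore list by identity (the function names and offloading strategy here are assumptions, not ramtorch's real API):

    import torch.nn as nn

    def _in_ignore_list(module: nn.Module, ignore: list) -> bool:
        # Match by identity: the module itself, or a parameter it directly
        # owns (e.g. a learned pad-token embedding), may appear in the list.
        if any(module is item for item in ignore):
            return True
        return any(param is item
                   for param in module.parameters(recurse=False)
                   for item in ignore)

    def offload_layers(model: nn.Module, device, offload_percent=1.0, ignore_modules=None):
        # Hypothetical offloader: park a fraction of submodules on the CPU
        # and keep the rest, plus everything ignored, on the compute device.
        ignore = ignore_modules or []
        children = list(model.children())
        n_offload = int(len(children) * offload_percent)
        for i, child in enumerate(children):
            if _in_ignore_list(child, ignore) or i >= n_offload:
                child.to(device)
            else:
                child.to("cpu")  # real offloaders stream these back per forward pass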


@@ -1309,6 +1309,11 @@ def validate_configs(
         raise ValueError("Cannot bypass guidance embedding and do guidance loss at the same time. "
                          "Please set bypass_guidance_embedding to False or do_guidance_loss to False.")
+    if model_config.accuracy_recovery_adapter is not None:
+        if model_config.assistant_lora_path is not None:
+            raise ValueError("Cannot use accuracy recovery adapter and assistant lora at the same time. "
+                             "Please set one of them to None.")
     # see if any datasets are caching text embeddings
     is_caching_text_embeddings = any(dataset.cache_text_embeddings for dataset in dataset_configs)
     if is_caching_text_embeddings:
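This guard pairs with the `is_ara=True` change above: an accuracy recovery adapter and an assistant LoRA cannot be active at the same time. A quick illustration of the combination that now fails validation, using a stand-in config object (the real ModelConfig has many more fields; both paths are placeholders):

    from types import SimpleNamespace

    # Stand-in for ModelConfig with only the two fields the guard reads;
    # the paths are placeholders, not real files.
    model_config = SimpleNamespace(
        accuracy_recovery_adapter="adapters/ara.safetensors",
        assistant_lora_path="loras/assistant.safetensors",
    )

    # Mirrors the check added above: raises because both are set.
    if model_config.accuracy_recovery_adapter is not None:
        if model_config.assistant_lora_path is not None:
            raise ValueError("Cannot use accuracy recovery adapter and assistant lora at the same time. "
                             "Please set one of them to None.")

Setting either field to None passes validation again.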