Fixed error with Wan models when caching text embeddings

Jaret Burkett
2026-02-06 14:26:53 -07:00
parent 5c37db04f9
commit 115f0a3670


@@ -654,10 +654,10 @@ class Wan21(BaseModel):
         return latents.to(device, dtype=dtype)
 
     def get_model_has_grad(self):
-        return self.model.proj_out.weight.requires_grad
+        return False
 
     def get_te_has_grad(self):
-        return self.text_encoder.encoder.block[0].layer[0].SelfAttention.q.weight.requires_grad
+        return False
 
     def save_model(self, output_path, meta, save_dtype):
         # only save the unet
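
Reading the diff together with the commit message, the likely failure is that the old accessors dereference the text encoder (and transformer) even when they have been unloaded after text embeddings were cached, which raises an AttributeError. The sketch below only illustrates that assumption; aside from get_model_has_grad and get_te_has_grad, every name in it is hypothetical and not taken from the repository.

# Minimal sketch of the assumed failure mode (hypothetical names except
# get_model_has_grad / get_te_has_grad).
class Wan21Sketch:
    def __init__(self, model, text_encoder):
        self.model = model
        self.text_encoder = text_encoder

    def cache_text_embeddings(self, prompts):
        # Hypothetical caching step: encode the prompts once, then drop
        # the text encoder to free memory.
        embeddings = [self.text_encoder.encode(p) for p in prompts]
        self.text_encoder = None
        return embeddings

    def get_te_has_grad(self):
        # Old behavior: self.text_encoder.encoder.block[0]... raises
        # AttributeError once text_encoder has been unloaded.
        # New behavior (per the diff): report no trainable TE gradients.
        return False

    def get_model_has_grad(self):
        # Same idea for the transformer-side check in the diff.
        return False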