From 1204d490d9571bd6c2566c74253443cd27c967cf Mon Sep 17 00:00:00 2001
From: lllyasviel
Date: Tue, 6 Feb 2024 05:01:58 -0800
Subject: [PATCH] safer device

---
 modules_forge/forge_clip.py | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/modules_forge/forge_clip.py b/modules_forge/forge_clip.py
index 96936e8c..2743a20a 100644
--- a/modules_forge/forge_clip.py
+++ b/modules_forge/forge_clip.py
@@ -6,7 +6,10 @@ from modules.shared import opts
 class CLIP_SD_15_L(FrozenCLIPEmbedderWithCustomWords):
     def encode_with_transformers(self, tokens):
         model_management.load_model_gpu(self.forge_objects.clip.patcher)
-        self.wrapped.transformer.text_model.embeddings.to(tokens.device)
+
+        current_device = self.wrapped.transformer.text_model.embeddings.token_embedding.weight.device
+        tokens = tokens.to(current_device)
+
         outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=-opts.CLIP_stop_at_last_layers)
 
         if opts.CLIP_stop_at_last_layers > 1:
@@ -32,7 +35,10 @@ class CLIP_SD_21_H(FrozenCLIPEmbedderWithCustomWords):
 
     def encode_with_transformers(self, tokens):
         model_management.load_model_gpu(self.forge_objects.clip.patcher)
-        self.wrapped.transformer.text_model.embeddings.to(tokens.device)
+
+        current_device = self.wrapped.transformer.text_model.embeddings.token_embedding.weight.device
+        tokens = tokens.to(current_device)
+
         outputs = self.wrapped.transformer(tokens, output_hidden_states=self.wrapped.layer == "hidden")
 
         if self.wrapped.layer == "last":
@@ -49,7 +55,9 @@ class CLIP_SD_XL_L(FrozenCLIPEmbedderWithCustomWords):
         super().__init__(wrapped, hijack)
 
     def encode_with_transformers(self, tokens):
-        self.wrapped.transformer.text_model.embeddings.to(tokens.device)
+        current_device = self.wrapped.transformer.text_model.embeddings.token_embedding.weight.device
+        tokens = tokens.to(current_device)
+
         outputs = self.wrapped.transformer(tokens, output_hidden_states=self.wrapped.layer == "hidden")
 
         if self.wrapped.layer == "last":
@@ -73,7 +81,9 @@ class CLIP_SD_XL_G(FrozenCLIPEmbedderWithCustomWords):
         self.id_pad = 0
 
     def encode_with_transformers(self, tokens):
-        self.wrapped.transformer.text_model.embeddings.to(tokens.device)
+        current_device = self.wrapped.transformer.text_model.embeddings.token_embedding.weight.device
+        tokens = tokens.to(current_device)
+
         outputs = self.wrapped.transformer(tokens, output_hidden_states=self.wrapped.layer == "hidden")
 
         if self.wrapped.layer == "last":
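
The change in each hunk is the same: instead of relocating the whole embeddings
module to tokens.device, the device of the model's own token_embedding weight
is treated as the source of truth, and the (cheap) token tensor is moved to it.
Below is a minimal, self-contained sketch of that pattern; the bare
nn.Embedding and its vocabulary size/width are hypothetical stand-ins for
self.wrapped.transformer.text_model.embeddings, not code from this patch:

    import torch
    import torch.nn as nn

    # Hypothetical stand-in for the wrapped CLIP text model's token embeddings
    # (49408 x 768 are CLIP-L's dimensions, used here only for illustration).
    token_embedding = nn.Embedding(num_embeddings=49408, embedding_dim=768)

    def encode(tokens: torch.Tensor) -> torch.Tensor:
        # Old pattern (removed by this patch): mutate shared model state by
        # moving the module to wherever the input happens to live.
        #   token_embedding.to(tokens.device)
        #
        # New pattern: read the device the weights actually sit on and move
        # the small input tensor instead; the module is never mutated.
        current_device = token_embedding.weight.device
        tokens = tokens.to(current_device)
        return token_embedding(tokens)

    tokens = torch.tensor([[320, 1125, 539]])  # token ids, e.g. built on CPU
    print(encode(tokens).shape)  # torch.Size([1, 3, 768])

Moving the tensor rather than the module leaves the shared CLIP weights where
model_management placed them, which is presumably the "safer device" behavior
the subject line refers to.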