Mirror of https://github.com/lllyasviel/stable-diffusion-webui-forge.git (synced 2026-04-30 19:21:21 +00:00)
safer device
@@ -6,7 +6,10 @@ from modules.shared import opts
 class CLIP_SD_15_L(FrozenCLIPEmbedderWithCustomWords):
     def encode_with_transformers(self, tokens):
         model_management.load_model_gpu(self.forge_objects.clip.patcher)
-        self.wrapped.transformer.text_model.embeddings.to(tokens.device)
+
+        current_device = self.wrapped.transformer.text_model.embeddings.token_embedding.weight.device
+        tokens = tokens.to(current_device)
+
         outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=-opts.CLIP_stop_at_last_layers)
 
         if opts.CLIP_stop_at_last_layers > 1:
@@ -32,7 +35,10 @@ class CLIP_SD_21_H(FrozenCLIPEmbedderWithCustomWords):
 
     def encode_with_transformers(self, tokens):
         model_management.load_model_gpu(self.forge_objects.clip.patcher)
-        self.wrapped.transformer.text_model.embeddings.to(tokens.device)
+
+        current_device = self.wrapped.transformer.text_model.embeddings.token_embedding.weight.device
+        tokens = tokens.to(current_device)
+
         outputs = self.wrapped.transformer(tokens, output_hidden_states=self.wrapped.layer == "hidden")
 
         if self.wrapped.layer == "last":
@@ -49,7 +55,9 @@ class CLIP_SD_XL_L(FrozenCLIPEmbedderWithCustomWords):
         super().__init__(wrapped, hijack)
 
     def encode_with_transformers(self, tokens):
-        self.wrapped.transformer.text_model.embeddings.to(tokens.device)
+        current_device = self.wrapped.transformer.text_model.embeddings.token_embedding.weight.device
+        tokens = tokens.to(current_device)
+
         outputs = self.wrapped.transformer(tokens, output_hidden_states=self.wrapped.layer == "hidden")
 
         if self.wrapped.layer == "last":
@@ -73,7 +81,9 @@ class CLIP_SD_XL_G(FrozenCLIPEmbedderWithCustomWords):
         self.id_pad = 0
 
     def encode_with_transformers(self, tokens):
-        self.wrapped.transformer.text_model.embeddings.to(tokens.device)
+        current_device = self.wrapped.transformer.text_model.embeddings.token_embedding.weight.device
+        tokens = tokens.to(current_device)
+
         outputs = self.wrapped.transformer(tokens, output_hidden_states=self.wrapped.layer == "hidden")
 
         if self.wrapped.layer == "last":
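
All four hunks apply the same fix: instead of mutating the wrapped encoder with embeddings.to(tokens.device), the new code asks the token_embedding weights which device they currently occupy and moves the small token tensor there, which is presumably what "safer device" refers to. A minimal standalone sketch of the pattern in plain PyTorch (TinyEncoder and its fields are illustrative names, not Forge's API):

import torch
import torch.nn as nn

class TinyEncoder(nn.Module):
    def __init__(self, vocab_size=1000, dim=16):
        super().__init__()
        self.token_embedding = nn.Embedding(vocab_size, dim)

    def forward(self, tokens):
        # Read the authoritative device from the parameters themselves,
        # then move the input tensor to it, rather than moving the module.
        current_device = self.token_embedding.weight.device
        tokens = tokens.to(current_device)  # cheap: a small integer tensor
        return self.token_embedding(tokens)

encoder = TinyEncoder()
if torch.cuda.is_available():
    encoder.cuda()  # wherever the weights end up, forward() still works

out = encoder(torch.randint(0, 1000, (1, 77)))  # tokens may start on CPU
print(out.device)

Moving the token tensor costs almost nothing per call, whereas module.to(...) rewrites parameter storage and can conflict with a model-management layer (such as Forge's model_management) that decides where weights should live.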