From 0091e82f5396968d838cab72ea18ea165619aaaf Mon Sep 17 00:00:00 2001
From: layerdiffusion <19834515+lllyasviel@users.noreply.github.com>
Date: Sun, 4 Aug 2024 18:45:31 -0700
Subject: [PATCH] remove old stuff

---
 modules_forge/clip.py | 100 ------------------------------------------
 1 file changed, 100 deletions(-)

diff --git a/modules_forge/clip.py b/modules_forge/clip.py
index a9dccf50..777d94f6 100644
--- a/modules_forge/clip.py
+++ b/modules_forge/clip.py
@@ -1,7 +1,5 @@
-from modules.sd_hijack_clip import FrozenCLIPEmbedderWithCustomWords
 from backend import memory_management
 from modules import sd_models
-from modules.shared import opts
 
 
 def move_clip_to_gpu():
@@ -12,101 +10,3 @@ def move_clip_to_gpu():
     memory_management.load_model_gpu(sd_models.model_data.sd_model.forge_objects.clip.patcher)
     return
-
-
-class CLIP_SD_15_L(FrozenCLIPEmbedderWithCustomWords):
-    def __init__(self, wrapped, hijack):
-        super().__init__(wrapped, hijack)
-        self.minimal_clip_skip = 1
-
-    def encode_with_transformers(self, tokens):
-        move_clip_to_gpu()
-        self.wrapped.transformer.text_model.embeddings.to(tokens.device)
-        outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=-opts.CLIP_stop_at_last_layers)
-
-        if opts.CLIP_stop_at_last_layers > self.minimal_clip_skip:
-            z = outputs.hidden_states[-opts.CLIP_stop_at_last_layers]
-            z = self.wrapped.transformer.text_model.final_layer_norm(z)
-        else:
-            z = outputs.last_hidden_state
-
-        return z
-
-
-class CLIP_SD_21_H(FrozenCLIPEmbedderWithCustomWords):
-    def __init__(self, wrapped, hijack):
-        super().__init__(wrapped, hijack)
-
-        if self.wrapped.layer == "penultimate":
-            self.wrapped.layer = "hidden"
-            self.wrapped.layer_idx = -2
-
-        self.id_start = 49406
-        self.id_end = 49407
-        self.id_pad = 0
-        self.minimal_clip_skip = 2
-
-    def encode_with_transformers(self, tokens):
-        move_clip_to_gpu()
-        self.wrapped.transformer.text_model.embeddings.to(tokens.device)
-        outputs = self.wrapped.transformer(tokens, output_hidden_states=self.wrapped.layer == "hidden")
-
-        if opts.CLIP_stop_at_last_layers > self.minimal_clip_skip:
-            z = outputs.hidden_states[-opts.CLIP_stop_at_last_layers]
-            z = self.wrapped.transformer.text_model.final_layer_norm(z)
-        elif self.wrapped.layer == "last":
-            z = outputs.last_hidden_state
-        else:
-            z = outputs.hidden_states[self.wrapped.layer_idx]
-            z = self.wrapped.transformer.text_model.final_layer_norm(z)
-
-        return z
-
-
-class CLIP_SD_XL_L(FrozenCLIPEmbedderWithCustomWords):
-    def __init__(self, wrapped, hijack):
-        super().__init__(wrapped, hijack)
-        self.minimal_clip_skip = 2
-
-    def encode_with_transformers(self, tokens):
-        self.wrapped.transformer.text_model.embeddings.to(tokens.device)
-        outputs = self.wrapped.transformer(tokens, output_hidden_states=self.wrapped.layer == "hidden")
-
-        if opts.CLIP_stop_at_last_layers > self.minimal_clip_skip:
-            z = outputs.hidden_states[-opts.CLIP_stop_at_last_layers]
-        elif self.wrapped.layer == "last":
-            z = outputs.last_hidden_state
-        else:
-            z = outputs.hidden_states[self.wrapped.layer_idx]
-
-        return z
-
-
-class CLIP_SD_XL_G(FrozenCLIPEmbedderWithCustomWords):
-    def __init__(self, wrapped, hijack):
-        super().__init__(wrapped, hijack)
-
-        if self.wrapped.layer == "penultimate":
-            self.wrapped.layer = "hidden"
-            self.wrapped.layer_idx = -2
-
-        self.id_start = 49406
-        self.id_end = 49407
-        self.id_pad = 0
-        self.minimal_clip_skip = 2
-
-    def encode_with_transformers(self, tokens):
-        self.wrapped.transformer.text_model.embeddings.to(tokens.device)
-        outputs = self.wrapped.transformer(tokens, output_hidden_states=self.wrapped.layer == "hidden")
-
-        if opts.CLIP_stop_at_last_layers > self.minimal_clip_skip:
-            z = outputs.hidden_states[-opts.CLIP_stop_at_last_layers]
-        elif self.wrapped.layer == "last":
-            z = outputs.last_hidden_state
-        else:
-            z = outputs.hidden_states[self.wrapped.layer_idx]
-
-        pooled_output = outputs.pooler_output
-        text_projection = self.wrapped.text_projection
-        pooled_output = pooled_output.float().to(text_projection.device) @ text_projection.float()
-        z.pooled = pooled_output
-        return z