diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py index a479148f..bafc6782 100644 --- a/modules/sd_hijack_clip.py +++ b/modules/sd_hijack_clip.py @@ -1,384 +1,384 @@ -import math -from collections import namedtuple - -import torch - -from modules import prompt_parser, devices, sd_hijack, sd_emphasis -from modules.shared import opts - - -class PromptChunk: - """ - This object contains token ids, weight (multipliers:1.4) and textual inversion embedding info for a chunk of prompt. - If a prompt is short, it is represented by one PromptChunk, otherwise, multiple are necessary. - Each PromptChunk contains an exact amount of tokens - 77, which includes one for start and end token, - so just 75 tokens from prompt. - """ - - def __init__(self): - self.tokens = [] - self.multipliers = [] - self.fixes = [] - - -PromptChunkFix = namedtuple('PromptChunkFix', ['offset', 'embedding']) -"""An object of this type is a marker showing that textual inversion embedding's vectors have to placed at offset in the prompt -chunk. Those objects are found in PromptChunk.fixes and, are placed into FrozenCLIPEmbedderWithCustomWordsBase.hijack.fixes, and finally -are applied by sd_hijack.EmbeddingsWithFixes's forward function.""" - - -class TextConditionalModel(torch.nn.Module): - def __init__(self): - super().__init__() - - self.hijack = sd_hijack.model_hijack - self.chunk_length = 75 - - self.is_trainable = False - self.input_key = 'txt' - self.return_pooled = False - - self.comma_token = None - self.id_start = None - self.id_end = None - self.id_pad = None - - def empty_chunk(self): - """creates an empty PromptChunk and returns it""" - - chunk = PromptChunk() - chunk.tokens = [self.id_start] + [self.id_end] * (self.chunk_length + 1) - chunk.multipliers = [1.0] * (self.chunk_length + 2) - return chunk - - def get_target_prompt_token_count(self, token_count): - """returns the maximum number of tokens a prompt of a known length can have before it requires one more PromptChunk to be represented""" - - return math.ceil(max(token_count, 1) / self.chunk_length) * self.chunk_length - - def tokenize(self, texts): - """Converts a batch of texts into a batch of token ids""" - - raise NotImplementedError - - def encode_with_transformers(self, tokens): - """ - converts a batch of token ids (in python lists) into a single tensor with numeric representation of those tokens; - All python lists with tokens are assumed to have same length, usually 77. - if input is a list with B elements and each element has T tokens, expected output shape is (B, T, C), where C depends on - model - can be 768 and 1024. - Among other things, this call will read self.hijack.fixes, apply it to its inputs, and clear it (setting it to None). - """ - - raise NotImplementedError - - def encode_embedding_init_text(self, init_text, nvpt): - """Converts text into a tensor with this text's tokens' embeddings. Note that those are embeddings before they are passed through - transformers. nvpt is used as a maximum length in tokens. If text produces less teokens than nvpt, only this many is returned.""" - - raise NotImplementedError - - def tokenize_line(self, line): - """ - this transforms a single prompt into a list of PromptChunk objects - as many as needed to - represent the prompt. - Returns the list and the total number of tokens in the prompt. 
- """ - - if opts.emphasis != "None": - parsed = prompt_parser.parse_prompt_attention(line) - else: - parsed = [[line, 1.0]] - - tokenized = self.tokenize([text for text, _ in parsed]) - - chunks = [] - chunk = PromptChunk() - token_count = 0 - last_comma = -1 - - def next_chunk(is_last=False): - """puts current chunk into the list of results and produces the next one - empty; - if is_last is true, tokens tokens at the end won't add to token_count""" - nonlocal token_count - nonlocal last_comma - nonlocal chunk - - if is_last: - token_count += len(chunk.tokens) - else: - token_count += self.chunk_length - - to_add = self.chunk_length - len(chunk.tokens) - if to_add > 0: - chunk.tokens += [self.id_end] * to_add - chunk.multipliers += [1.0] * to_add - - chunk.tokens = [self.id_start] + chunk.tokens + [self.id_end] - chunk.multipliers = [1.0] + chunk.multipliers + [1.0] - - last_comma = -1 - chunks.append(chunk) - chunk = PromptChunk() - - for tokens, (text, weight) in zip(tokenized, parsed): - if text == 'BREAK' and weight == -1: - next_chunk() - continue - - position = 0 - while position < len(tokens): - token = tokens[position] - - if token == self.comma_token: - last_comma = len(chunk.tokens) - - # this is when we are at the end of allotted 75 tokens for the current chunk, and the current token is not a comma. opts.comma_padding_backtrack - # is a setting that specifies that if there is a comma nearby, the text after the comma should be moved out of this chunk and into the next. - elif opts.comma_padding_backtrack != 0 and len(chunk.tokens) == self.chunk_length and last_comma != -1 and len(chunk.tokens) - last_comma <= opts.comma_padding_backtrack: - break_location = last_comma + 1 - - reloc_tokens = chunk.tokens[break_location:] - reloc_mults = chunk.multipliers[break_location:] - - chunk.tokens = chunk.tokens[:break_location] - chunk.multipliers = chunk.multipliers[:break_location] - - next_chunk() - chunk.tokens = reloc_tokens - chunk.multipliers = reloc_mults - - if len(chunk.tokens) == self.chunk_length: - next_chunk() - - embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, position) - if embedding is None: - chunk.tokens.append(token) - chunk.multipliers.append(weight) - position += 1 - continue - - emb_len = int(embedding.vectors) - if len(chunk.tokens) + emb_len > self.chunk_length: - next_chunk() - - chunk.fixes.append(PromptChunkFix(len(chunk.tokens), embedding)) - - chunk.tokens += [0] * emb_len - chunk.multipliers += [weight] * emb_len - position += embedding_length_in_tokens - - if chunk.tokens or not chunks: - next_chunk(is_last=True) - - return chunks, token_count - - def process_texts(self, texts): - """ - Accepts a list of texts and calls tokenize_line() on each, with cache. Returns the list of results and maximum - length, in tokens, of all texts. - """ - - token_count = 0 - - cache = {} - batch_chunks = [] - for line in texts: - if line in cache: - chunks = cache[line] - else: - chunks, current_token_count = self.tokenize_line(line) - token_count = max(current_token_count, token_count) - - cache[line] = chunks - - batch_chunks.append(chunks) - - return batch_chunks, token_count - - def forward(self, texts): - """ - Accepts an array of texts; Passes texts through transformers network to create a tensor with numerical representation of those texts. 
- Returns a tensor with shape of (B, T, C), where B is length of the array; T is length, in tokens, of texts (including padding) - T will - be a multiple of 77; and C is dimensionality of each token - for SD1 it's 768, for SD2 it's 1024, and for SDXL it's 1280. - An example shape returned by this function can be: (2, 77, 768). - For SDXL, instead of returning one tensor avobe, it returns a tuple with two: the other one with shape (B, 1280) with pooled values. - Webui usually sends just one text at a time through this function - the only time when texts is an array with more than one element - is when you do prompt editing: "a picture of a [cat:dog:0.4] eating ice cream" - """ - - batch_chunks, token_count = self.process_texts(texts) - - used_embeddings = {} - chunk_count = max([len(x) for x in batch_chunks]) - - zs = [] - for i in range(chunk_count): - batch_chunk = [chunks[i] if i < len(chunks) else self.empty_chunk() for chunks in batch_chunks] - - tokens = [x.tokens for x in batch_chunk] - multipliers = [x.multipliers for x in batch_chunk] - self.hijack.fixes = [x.fixes for x in batch_chunk] - - for fixes in self.hijack.fixes: - for _position, embedding in fixes: - used_embeddings[embedding.name] = embedding - devices.torch_npu_set_device() - z = self.process_tokens(tokens, multipliers) - zs.append(z) - - if opts.textual_inversion_add_hashes_to_infotext and used_embeddings: - hashes = [] - for name, embedding in used_embeddings.items(): - shorthash = embedding.shorthash - if not shorthash: - continue - - name = name.replace(":", "").replace(",", "") - hashes.append(f"{name}: {shorthash}") - - if hashes: - if self.hijack.extra_generation_params.get("TI hashes"): - hashes.append(self.hijack.extra_generation_params.get("TI hashes")) - self.hijack.extra_generation_params["TI hashes"] = ", ".join(hashes) - - if any(x for x in texts if "(" in x or "[" in x) and opts.emphasis != "Original": - self.hijack.extra_generation_params["Emphasis"] = opts.emphasis - - if self.return_pooled: - return torch.hstack(zs), zs[0].pooled - else: - return torch.hstack(zs) - - def process_tokens(self, remade_batch_tokens, batch_multipliers): - """ - sends one single prompt chunk to be encoded by transformers neural network. - remade_batch_tokens is a batch of tokens - a list, where every element is a list of tokens; usually - there are exactly 77 tokens in the list. batch_multipliers is the same but for multipliers instead of tokens. - Multipliers are used to give more or less weight to the outputs of transformers network. Each multiplier - corresponds to one token. - """ - tokens = torch.asarray(remade_batch_tokens).to(devices.device) - - # this is for SD2: SD1 uses the same token for padding and end of text, while SD2 uses different ones. - if self.id_end != self.id_pad: - for batch_pos in range(len(remade_batch_tokens)): - index = remade_batch_tokens[batch_pos].index(self.id_end) - tokens[batch_pos, index+1:tokens.shape[1]] = self.id_pad - - z = self.encode_with_transformers(tokens) - - pooled = getattr(z, 'pooled', None) - - emphasis = sd_emphasis.get_current_option(opts.emphasis)() - emphasis.tokens = remade_batch_tokens - emphasis.multipliers = torch.asarray(batch_multipliers).to(devices.device) - emphasis.z = z - - emphasis.after_transformers() - - z = emphasis.z - - if pooled is not None: - z.pooled = pooled - - return z - - -class FrozenCLIPEmbedderWithCustomWordsBase(TextConditionalModel): - """A pytorch module that is a wrapper for FrozenCLIPEmbedder module. 
it enhances FrozenCLIPEmbedder, making it possible to - have unlimited prompt length and assign weights to tokens in prompt. - """ - - def __init__(self, wrapped, hijack): - super().__init__() - - self.hijack = hijack - - self.wrapped = wrapped - """Original FrozenCLIPEmbedder module; can also be FrozenOpenCLIPEmbedder or xlmr.BertSeriesModelWithTransformation, - depending on model.""" - - self.is_trainable = getattr(wrapped, 'is_trainable', False) - self.input_key = getattr(wrapped, 'input_key', 'txt') - self.return_pooled = getattr(self.wrapped, 'return_pooled', False) - - self.legacy_ucg_val = None # for sgm codebase - - def forward(self, texts): - if opts.use_old_emphasis_implementation: - import modules.sd_hijack_clip_old - return modules.sd_hijack_clip_old.forward_old(self, texts) - - return super().forward(texts) - - -class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWordsBase): - def __init__(self, wrapped, hijack): - super().__init__(wrapped, hijack) - self.tokenizer = wrapped.tokenizer - - vocab = self.tokenizer.get_vocab() - - self.comma_token = vocab.get(',', None) - - self.token_mults = {} - tokens_with_parens = [(k, v) for k, v in vocab.items() if '(' in k or ')' in k or '[' in k or ']' in k] - for text, ident in tokens_with_parens: - mult = 1.0 - for c in text: - if c == '[': - mult /= 1.1 - if c == ']': - mult *= 1.1 - if c == '(': - mult *= 1.1 - if c == ')': - mult /= 1.1 - - if mult != 1.0: - self.token_mults[ident] = mult - - self.id_start = self.wrapped.tokenizer.bos_token_id - self.id_end = self.wrapped.tokenizer.eos_token_id - self.id_pad = self.id_end - - def tokenize(self, texts): - tokenized = self.wrapped.tokenizer(texts, truncation=False, add_special_tokens=False)["input_ids"] - - return tokenized - - def encode_with_transformers(self, tokens): - outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=-opts.CLIP_stop_at_last_layers) - - if opts.CLIP_stop_at_last_layers > 1: - z = outputs.hidden_states[-opts.CLIP_stop_at_last_layers] - z = self.wrapped.transformer.text_model.final_layer_norm(z) - else: - z = outputs.last_hidden_state - - return z - - def encode_embedding_init_text(self, init_text, nvpt): - embedding_layer = self.wrapped.transformer.text_model.embeddings - ids = self.wrapped.tokenizer(init_text, max_length=nvpt, return_tensors="pt", add_special_tokens=False)["input_ids"] - embedded = embedding_layer.token_embedding.wrapped(ids.to(embedding_layer.token_embedding.wrapped.weight.device)).squeeze(0) - - return embedded - - -class FrozenCLIPEmbedderForSDXLWithCustomWords(FrozenCLIPEmbedderWithCustomWords): - def __init__(self, wrapped, hijack): - super().__init__(wrapped, hijack) - - def encode_with_transformers(self, tokens): - outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=self.wrapped.layer == "hidden") - - if opts.sdxl_clip_l_skip is True: - z = outputs.hidden_states[-opts.CLIP_stop_at_last_layers] - elif self.wrapped.layer == "last": - z = outputs.last_hidden_state - else: - z = outputs.hidden_states[self.wrapped.layer_idx] - - return z +# import math +# from collections import namedtuple +# +# import torch +# +# from modules import prompt_parser, devices, sd_hijack, sd_emphasis +# from modules.shared import opts +# +# +# class PromptChunk: +# """ +# This object contains token ids, weight (multipliers:1.4) and textual inversion embedding info for a chunk of prompt. +# If a prompt is short, it is represented by one PromptChunk, otherwise, multiple are necessary. 
+# Each PromptChunk contains an exact amount of tokens - 77, which includes one for start and end token, +# so just 75 tokens from prompt. +# """ +# +# def __init__(self): +# self.tokens = [] +# self.multipliers = [] +# self.fixes = [] +# +# +# PromptChunkFix = namedtuple('PromptChunkFix', ['offset', 'embedding']) +# """An object of this type is a marker showing that textual inversion embedding's vectors have to placed at offset in the prompt +# chunk. Those objects are found in PromptChunk.fixes and, are placed into FrozenCLIPEmbedderWithCustomWordsBase.hijack.fixes, and finally +# are applied by sd_hijack.EmbeddingsWithFixes's forward function.""" +# +# +# class TextConditionalModel(torch.nn.Module): +# def __init__(self): +# super().__init__() +# +# self.hijack = sd_hijack.model_hijack +# self.chunk_length = 75 +# +# self.is_trainable = False +# self.input_key = 'txt' +# self.return_pooled = False +# +# self.comma_token = None +# self.id_start = None +# self.id_end = None +# self.id_pad = None +# +# def empty_chunk(self): +# """creates an empty PromptChunk and returns it""" +# +# chunk = PromptChunk() +# chunk.tokens = [self.id_start] + [self.id_end] * (self.chunk_length + 1) +# chunk.multipliers = [1.0] * (self.chunk_length + 2) +# return chunk +# +# def get_target_prompt_token_count(self, token_count): +# """returns the maximum number of tokens a prompt of a known length can have before it requires one more PromptChunk to be represented""" +# +# return math.ceil(max(token_count, 1) / self.chunk_length) * self.chunk_length +# +# def tokenize(self, texts): +# """Converts a batch of texts into a batch of token ids""" +# +# raise NotImplementedError +# +# def encode_with_transformers(self, tokens): +# """ +# converts a batch of token ids (in python lists) into a single tensor with numeric representation of those tokens; +# All python lists with tokens are assumed to have same length, usually 77. +# if input is a list with B elements and each element has T tokens, expected output shape is (B, T, C), where C depends on +# model - can be 768 and 1024. +# Among other things, this call will read self.hijack.fixes, apply it to its inputs, and clear it (setting it to None). +# """ +# +# raise NotImplementedError +# +# def encode_embedding_init_text(self, init_text, nvpt): +# """Converts text into a tensor with this text's tokens' embeddings. Note that those are embeddings before they are passed through +# transformers. nvpt is used as a maximum length in tokens. If text produces less teokens than nvpt, only this many is returned.""" +# +# raise NotImplementedError +# +# def tokenize_line(self, line): +# """ +# this transforms a single prompt into a list of PromptChunk objects - as many as needed to +# represent the prompt. +# Returns the list and the total number of tokens in the prompt. 
+# """ +# +# if opts.emphasis != "None": +# parsed = prompt_parser.parse_prompt_attention(line) +# else: +# parsed = [[line, 1.0]] +# +# tokenized = self.tokenize([text for text, _ in parsed]) +# +# chunks = [] +# chunk = PromptChunk() +# token_count = 0 +# last_comma = -1 +# +# def next_chunk(is_last=False): +# """puts current chunk into the list of results and produces the next one - empty; +# if is_last is true, tokens tokens at the end won't add to token_count""" +# nonlocal token_count +# nonlocal last_comma +# nonlocal chunk +# +# if is_last: +# token_count += len(chunk.tokens) +# else: +# token_count += self.chunk_length +# +# to_add = self.chunk_length - len(chunk.tokens) +# if to_add > 0: +# chunk.tokens += [self.id_end] * to_add +# chunk.multipliers += [1.0] * to_add +# +# chunk.tokens = [self.id_start] + chunk.tokens + [self.id_end] +# chunk.multipliers = [1.0] + chunk.multipliers + [1.0] +# +# last_comma = -1 +# chunks.append(chunk) +# chunk = PromptChunk() +# +# for tokens, (text, weight) in zip(tokenized, parsed): +# if text == 'BREAK' and weight == -1: +# next_chunk() +# continue +# +# position = 0 +# while position < len(tokens): +# token = tokens[position] +# +# if token == self.comma_token: +# last_comma = len(chunk.tokens) +# +# # this is when we are at the end of allotted 75 tokens for the current chunk, and the current token is not a comma. opts.comma_padding_backtrack +# # is a setting that specifies that if there is a comma nearby, the text after the comma should be moved out of this chunk and into the next. +# elif opts.comma_padding_backtrack != 0 and len(chunk.tokens) == self.chunk_length and last_comma != -1 and len(chunk.tokens) - last_comma <= opts.comma_padding_backtrack: +# break_location = last_comma + 1 +# +# reloc_tokens = chunk.tokens[break_location:] +# reloc_mults = chunk.multipliers[break_location:] +# +# chunk.tokens = chunk.tokens[:break_location] +# chunk.multipliers = chunk.multipliers[:break_location] +# +# next_chunk() +# chunk.tokens = reloc_tokens +# chunk.multipliers = reloc_mults +# +# if len(chunk.tokens) == self.chunk_length: +# next_chunk() +# +# embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, position) +# if embedding is None: +# chunk.tokens.append(token) +# chunk.multipliers.append(weight) +# position += 1 +# continue +# +# emb_len = int(embedding.vectors) +# if len(chunk.tokens) + emb_len > self.chunk_length: +# next_chunk() +# +# chunk.fixes.append(PromptChunkFix(len(chunk.tokens), embedding)) +# +# chunk.tokens += [0] * emb_len +# chunk.multipliers += [weight] * emb_len +# position += embedding_length_in_tokens +# +# if chunk.tokens or not chunks: +# next_chunk(is_last=True) +# +# return chunks, token_count +# +# def process_texts(self, texts): +# """ +# Accepts a list of texts and calls tokenize_line() on each, with cache. Returns the list of results and maximum +# length, in tokens, of all texts. +# """ +# +# token_count = 0 +# +# cache = {} +# batch_chunks = [] +# for line in texts: +# if line in cache: +# chunks = cache[line] +# else: +# chunks, current_token_count = self.tokenize_line(line) +# token_count = max(current_token_count, token_count) +# +# cache[line] = chunks +# +# batch_chunks.append(chunks) +# +# return batch_chunks, token_count +# +# def forward(self, texts): +# """ +# Accepts an array of texts; Passes texts through transformers network to create a tensor with numerical representation of those texts. 
+# Returns a tensor with shape of (B, T, C), where B is length of the array; T is length, in tokens, of texts (including padding) - T will +# be a multiple of 77; and C is dimensionality of each token - for SD1 it's 768, for SD2 it's 1024, and for SDXL it's 1280. +# An example shape returned by this function can be: (2, 77, 768). +# For SDXL, instead of returning one tensor avobe, it returns a tuple with two: the other one with shape (B, 1280) with pooled values. +# Webui usually sends just one text at a time through this function - the only time when texts is an array with more than one element +# is when you do prompt editing: "a picture of a [cat:dog:0.4] eating ice cream" +# """ +# +# batch_chunks, token_count = self.process_texts(texts) +# +# used_embeddings = {} +# chunk_count = max([len(x) for x in batch_chunks]) +# +# zs = [] +# for i in range(chunk_count): +# batch_chunk = [chunks[i] if i < len(chunks) else self.empty_chunk() for chunks in batch_chunks] +# +# tokens = [x.tokens for x in batch_chunk] +# multipliers = [x.multipliers for x in batch_chunk] +# self.hijack.fixes = [x.fixes for x in batch_chunk] +# +# for fixes in self.hijack.fixes: +# for _position, embedding in fixes: +# used_embeddings[embedding.name] = embedding +# devices.torch_npu_set_device() +# z = self.process_tokens(tokens, multipliers) +# zs.append(z) +# +# if opts.textual_inversion_add_hashes_to_infotext and used_embeddings: +# hashes = [] +# for name, embedding in used_embeddings.items(): +# shorthash = embedding.shorthash +# if not shorthash: +# continue +# +# name = name.replace(":", "").replace(",", "") +# hashes.append(f"{name}: {shorthash}") +# +# if hashes: +# if self.hijack.extra_generation_params.get("TI hashes"): +# hashes.append(self.hijack.extra_generation_params.get("TI hashes")) +# self.hijack.extra_generation_params["TI hashes"] = ", ".join(hashes) +# +# if any(x for x in texts if "(" in x or "[" in x) and opts.emphasis != "Original": +# self.hijack.extra_generation_params["Emphasis"] = opts.emphasis +# +# if self.return_pooled: +# return torch.hstack(zs), zs[0].pooled +# else: +# return torch.hstack(zs) +# +# def process_tokens(self, remade_batch_tokens, batch_multipliers): +# """ +# sends one single prompt chunk to be encoded by transformers neural network. +# remade_batch_tokens is a batch of tokens - a list, where every element is a list of tokens; usually +# there are exactly 77 tokens in the list. batch_multipliers is the same but for multipliers instead of tokens. +# Multipliers are used to give more or less weight to the outputs of transformers network. Each multiplier +# corresponds to one token. +# """ +# tokens = torch.asarray(remade_batch_tokens).to(devices.device) +# +# # this is for SD2: SD1 uses the same token for padding and end of text, while SD2 uses different ones. 
+# if self.id_end != self.id_pad: +# for batch_pos in range(len(remade_batch_tokens)): +# index = remade_batch_tokens[batch_pos].index(self.id_end) +# tokens[batch_pos, index+1:tokens.shape[1]] = self.id_pad +# +# z = self.encode_with_transformers(tokens) +# +# pooled = getattr(z, 'pooled', None) +# +# emphasis = sd_emphasis.get_current_option(opts.emphasis)() +# emphasis.tokens = remade_batch_tokens +# emphasis.multipliers = torch.asarray(batch_multipliers).to(devices.device) +# emphasis.z = z +# +# emphasis.after_transformers() +# +# z = emphasis.z +# +# if pooled is not None: +# z.pooled = pooled +# +# return z +# +# +# class FrozenCLIPEmbedderWithCustomWordsBase(TextConditionalModel): +# """A pytorch module that is a wrapper for FrozenCLIPEmbedder module. it enhances FrozenCLIPEmbedder, making it possible to +# have unlimited prompt length and assign weights to tokens in prompt. +# """ +# +# def __init__(self, wrapped, hijack): +# super().__init__() +# +# self.hijack = hijack +# +# self.wrapped = wrapped +# """Original FrozenCLIPEmbedder module; can also be FrozenOpenCLIPEmbedder or xlmr.BertSeriesModelWithTransformation, +# depending on model.""" +# +# self.is_trainable = getattr(wrapped, 'is_trainable', False) +# self.input_key = getattr(wrapped, 'input_key', 'txt') +# self.return_pooled = getattr(self.wrapped, 'return_pooled', False) +# +# self.legacy_ucg_val = None # for sgm codebase +# +# def forward(self, texts): +# if opts.use_old_emphasis_implementation: +# import modules.sd_hijack_clip_old +# return modules.sd_hijack_clip_old.forward_old(self, texts) +# +# return super().forward(texts) +# +# +# class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWordsBase): +# def __init__(self, wrapped, hijack): +# super().__init__(wrapped, hijack) +# self.tokenizer = wrapped.tokenizer +# +# vocab = self.tokenizer.get_vocab() +# +# self.comma_token = vocab.get(',', None) +# +# self.token_mults = {} +# tokens_with_parens = [(k, v) for k, v in vocab.items() if '(' in k or ')' in k or '[' in k or ']' in k] +# for text, ident in tokens_with_parens: +# mult = 1.0 +# for c in text: +# if c == '[': +# mult /= 1.1 +# if c == ']': +# mult *= 1.1 +# if c == '(': +# mult *= 1.1 +# if c == ')': +# mult /= 1.1 +# +# if mult != 1.0: +# self.token_mults[ident] = mult +# +# self.id_start = self.wrapped.tokenizer.bos_token_id +# self.id_end = self.wrapped.tokenizer.eos_token_id +# self.id_pad = self.id_end +# +# def tokenize(self, texts): +# tokenized = self.wrapped.tokenizer(texts, truncation=False, add_special_tokens=False)["input_ids"] +# +# return tokenized +# +# def encode_with_transformers(self, tokens): +# outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=-opts.CLIP_stop_at_last_layers) +# +# if opts.CLIP_stop_at_last_layers > 1: +# z = outputs.hidden_states[-opts.CLIP_stop_at_last_layers] +# z = self.wrapped.transformer.text_model.final_layer_norm(z) +# else: +# z = outputs.last_hidden_state +# +# return z +# +# def encode_embedding_init_text(self, init_text, nvpt): +# embedding_layer = self.wrapped.transformer.text_model.embeddings +# ids = self.wrapped.tokenizer(init_text, max_length=nvpt, return_tensors="pt", add_special_tokens=False)["input_ids"] +# embedded = embedding_layer.token_embedding.wrapped(ids.to(embedding_layer.token_embedding.wrapped.weight.device)).squeeze(0) +# +# return embedded +# +# +# class FrozenCLIPEmbedderForSDXLWithCustomWords(FrozenCLIPEmbedderWithCustomWords): +# def __init__(self, wrapped, hijack): +# super().__init__(wrapped, 
hijack) +# +# def encode_with_transformers(self, tokens): +# outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=self.wrapped.layer == "hidden") +# +# if opts.sdxl_clip_l_skip is True: +# z = outputs.hidden_states[-opts.CLIP_stop_at_last_layers] +# elif self.wrapped.layer == "last": +# z = outputs.last_hidden_state +# else: +# z = outputs.hidden_states[self.wrapped.layer_idx] +# +# return z diff --git a/modules/sd_hijack_clip_old.py b/modules/sd_hijack_clip_old.py index 43e9b952..803a723d 100644 --- a/modules/sd_hijack_clip_old.py +++ b/modules/sd_hijack_clip_old.py @@ -1,82 +1,82 @@ -from modules import sd_hijack_clip -from modules import shared - - -def process_text_old(self: sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase, texts): - id_start = self.id_start - id_end = self.id_end - maxlen = self.wrapped.max_length # you get to stay at 77 - used_custom_terms = [] - remade_batch_tokens = [] - hijack_comments = [] - hijack_fixes = [] - token_count = 0 - - cache = {} - batch_tokens = self.tokenize(texts) - batch_multipliers = [] - for tokens in batch_tokens: - tuple_tokens = tuple(tokens) - - if tuple_tokens in cache: - remade_tokens, fixes, multipliers = cache[tuple_tokens] - else: - fixes = [] - remade_tokens = [] - multipliers = [] - mult = 1.0 - - i = 0 - while i < len(tokens): - token = tokens[i] - - embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i) - - mult_change = self.token_mults.get(token) if shared.opts.emphasis != "None" else None - if mult_change is not None: - mult *= mult_change - i += 1 - elif embedding is None: - remade_tokens.append(token) - multipliers.append(mult) - i += 1 - else: - emb_len = int(embedding.vec.shape[0]) - fixes.append((len(remade_tokens), embedding)) - remade_tokens += [0] * emb_len - multipliers += [mult] * emb_len - used_custom_terms.append((embedding.name, embedding.checksum())) - i += embedding_length_in_tokens - - if len(remade_tokens) > maxlen - 2: - vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()} - ovf = remade_tokens[maxlen - 2:] - overflowing_words = [vocab.get(int(x), "") for x in ovf] - overflowing_text = self.wrapped.tokenizer.convert_tokens_to_string(''.join(overflowing_words)) - hijack_comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n") - - token_count = len(remade_tokens) - remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens)) - remade_tokens = [id_start] + remade_tokens[0:maxlen - 2] + [id_end] - cache[tuple_tokens] = (remade_tokens, fixes, multipliers) - - multipliers = multipliers + [1.0] * (maxlen - 2 - len(multipliers)) - multipliers = [1.0] + multipliers[0:maxlen - 2] + [1.0] - - remade_batch_tokens.append(remade_tokens) - hijack_fixes.append(fixes) - batch_multipliers.append(multipliers) - return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count - - -def forward_old(self: sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase, texts): - batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = process_text_old(self, texts) - - self.hijack.comments += hijack_comments - - if used_custom_terms: - embedding_names = ", ".join(f"{word} [{checksum}]" for word, checksum in used_custom_terms) - self.hijack.comments.append(f"Used embeddings: {embedding_names}") - - self.hijack.fixes = hijack_fixes - return self.process_tokens(remade_batch_tokens, 
batch_multipliers) +# from modules import sd_hijack_clip +# from modules import shared +# +# +# def process_text_old(self: sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase, texts): +# id_start = self.id_start +# id_end = self.id_end +# maxlen = self.wrapped.max_length # you get to stay at 77 +# used_custom_terms = [] +# remade_batch_tokens = [] +# hijack_comments = [] +# hijack_fixes = [] +# token_count = 0 +# +# cache = {} +# batch_tokens = self.tokenize(texts) +# batch_multipliers = [] +# for tokens in batch_tokens: +# tuple_tokens = tuple(tokens) +# +# if tuple_tokens in cache: +# remade_tokens, fixes, multipliers = cache[tuple_tokens] +# else: +# fixes = [] +# remade_tokens = [] +# multipliers = [] +# mult = 1.0 +# +# i = 0 +# while i < len(tokens): +# token = tokens[i] +# +# embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i) +# +# mult_change = self.token_mults.get(token) if shared.opts.emphasis != "None" else None +# if mult_change is not None: +# mult *= mult_change +# i += 1 +# elif embedding is None: +# remade_tokens.append(token) +# multipliers.append(mult) +# i += 1 +# else: +# emb_len = int(embedding.vec.shape[0]) +# fixes.append((len(remade_tokens), embedding)) +# remade_tokens += [0] * emb_len +# multipliers += [mult] * emb_len +# used_custom_terms.append((embedding.name, embedding.checksum())) +# i += embedding_length_in_tokens +# +# if len(remade_tokens) > maxlen - 2: +# vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()} +# ovf = remade_tokens[maxlen - 2:] +# overflowing_words = [vocab.get(int(x), "") for x in ovf] +# overflowing_text = self.wrapped.tokenizer.convert_tokens_to_string(''.join(overflowing_words)) +# hijack_comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n") +# +# token_count = len(remade_tokens) +# remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens)) +# remade_tokens = [id_start] + remade_tokens[0:maxlen - 2] + [id_end] +# cache[tuple_tokens] = (remade_tokens, fixes, multipliers) +# +# multipliers = multipliers + [1.0] * (maxlen - 2 - len(multipliers)) +# multipliers = [1.0] + multipliers[0:maxlen - 2] + [1.0] +# +# remade_batch_tokens.append(remade_tokens) +# hijack_fixes.append(fixes) +# batch_multipliers.append(multipliers) +# return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count +# +# +# def forward_old(self: sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase, texts): +# batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = process_text_old(self, texts) +# +# self.hijack.comments += hijack_comments +# +# if used_custom_terms: +# embedding_names = ", ".join(f"{word} [{checksum}]" for word, checksum in used_custom_terms) +# self.hijack.comments.append(f"Used embeddings: {embedding_names}") +# +# self.hijack.fixes = hijack_fixes +# return self.process_tokens(remade_batch_tokens, batch_multipliers) diff --git a/modules/sd_hijack_ip2p.py b/modules/sd_hijack_ip2p.py index 6fe6b6ff..9a4d838b 100644 --- a/modules/sd_hijack_ip2p.py +++ b/modules/sd_hijack_ip2p.py @@ -1,10 +1,10 @@ -import os.path - - -def should_hijack_ip2p(checkpoint_info): - from modules import sd_models_config - - ckpt_basename = os.path.basename(checkpoint_info.filename).lower() - cfg_basename = os.path.basename(sd_models_config.find_checkpoint_config_near_filename(checkpoint_info)).lower() - - return "pix2pix" in 
ckpt_basename and "pix2pix" not in cfg_basename +# import os.path +# +# +# def should_hijack_ip2p(checkpoint_info): +# from modules import sd_models_config +# +# ckpt_basename = os.path.basename(checkpoint_info.filename).lower() +# cfg_basename = os.path.basename(sd_models_config.find_checkpoint_config_near_filename(checkpoint_info)).lower() +# +# return "pix2pix" in ckpt_basename and "pix2pix" not in cfg_basename diff --git a/modules/sd_hijack_open_clip.py b/modules/sd_hijack_open_clip.py index 25c5e983..96f33720 100644 --- a/modules/sd_hijack_open_clip.py +++ b/modules/sd_hijack_open_clip.py @@ -1,71 +1,71 @@ -import open_clip.tokenizer -import torch - -from modules import sd_hijack_clip, devices -from modules.shared import opts - -tokenizer = open_clip.tokenizer._tokenizer - - -class FrozenOpenCLIPEmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase): - def __init__(self, wrapped, hijack): - super().__init__(wrapped, hijack) - - self.comma_token = [v for k, v in tokenizer.encoder.items() if k == ','][0] - self.id_start = tokenizer.encoder[""] - self.id_end = tokenizer.encoder[""] - self.id_pad = 0 - - def tokenize(self, texts): - assert not opts.use_old_emphasis_implementation, 'Old emphasis implementation not supported for Open Clip' - - tokenized = [tokenizer.encode(text) for text in texts] - - return tokenized - - def encode_with_transformers(self, tokens): - # set self.wrapped.layer_idx here according to opts.CLIP_stop_at_last_layers - z = self.wrapped.encode_with_transformer(tokens) - - return z - - def encode_embedding_init_text(self, init_text, nvpt): - ids = tokenizer.encode(init_text) - ids = torch.asarray([ids], device=devices.device, dtype=torch.int) - embedded = self.wrapped.model.token_embedding.wrapped(ids).squeeze(0) - - return embedded - - -class FrozenOpenCLIPEmbedder2WithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase): - def __init__(self, wrapped, hijack): - super().__init__(wrapped, hijack) - - self.comma_token = [v for k, v in tokenizer.encoder.items() if k == ','][0] - self.id_start = tokenizer.encoder[""] - self.id_end = tokenizer.encoder[""] - self.id_pad = 0 - - def tokenize(self, texts): - assert not opts.use_old_emphasis_implementation, 'Old emphasis implementation not supported for Open Clip' - - tokenized = [tokenizer.encode(text) for text in texts] - - return tokenized - - def encode_with_transformers(self, tokens): - d = self.wrapped.encode_with_transformer(tokens) - z = d[self.wrapped.layer] - - pooled = d.get("pooled") - if pooled is not None: - z.pooled = pooled - - return z - - def encode_embedding_init_text(self, init_text, nvpt): - ids = tokenizer.encode(init_text) - ids = torch.asarray([ids], device=devices.device, dtype=torch.int) - embedded = self.wrapped.model.token_embedding.wrapped(ids.to(self.wrapped.model.token_embedding.wrapped.weight.device)).squeeze(0) - - return embedded +# import open_clip.tokenizer +# import torch +# +# from modules import sd_hijack_clip, devices +# from modules.shared import opts +# +# tokenizer = open_clip.tokenizer._tokenizer +# +# +# class FrozenOpenCLIPEmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase): +# def __init__(self, wrapped, hijack): +# super().__init__(wrapped, hijack) +# +# self.comma_token = [v for k, v in tokenizer.encoder.items() if k == ','][0] +# self.id_start = tokenizer.encoder[""] +# self.id_end = tokenizer.encoder[""] +# self.id_pad = 0 +# +# def tokenize(self, texts): +# assert not opts.use_old_emphasis_implementation, 'Old 
emphasis implementation not supported for Open Clip' +# +# tokenized = [tokenizer.encode(text) for text in texts] +# +# return tokenized +# +# def encode_with_transformers(self, tokens): +# # set self.wrapped.layer_idx here according to opts.CLIP_stop_at_last_layers +# z = self.wrapped.encode_with_transformer(tokens) +# +# return z +# +# def encode_embedding_init_text(self, init_text, nvpt): +# ids = tokenizer.encode(init_text) +# ids = torch.asarray([ids], device=devices.device, dtype=torch.int) +# embedded = self.wrapped.model.token_embedding.wrapped(ids).squeeze(0) +# +# return embedded +# +# +# class FrozenOpenCLIPEmbedder2WithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase): +# def __init__(self, wrapped, hijack): +# super().__init__(wrapped, hijack) +# +# self.comma_token = [v for k, v in tokenizer.encoder.items() if k == ','][0] +# self.id_start = tokenizer.encoder[""] +# self.id_end = tokenizer.encoder[""] +# self.id_pad = 0 +# +# def tokenize(self, texts): +# assert not opts.use_old_emphasis_implementation, 'Old emphasis implementation not supported for Open Clip' +# +# tokenized = [tokenizer.encode(text) for text in texts] +# +# return tokenized +# +# def encode_with_transformers(self, tokens): +# d = self.wrapped.encode_with_transformer(tokens) +# z = d[self.wrapped.layer] +# +# pooled = d.get("pooled") +# if pooled is not None: +# z.pooled = pooled +# +# return z +# +# def encode_embedding_init_text(self, init_text, nvpt): +# ids = tokenizer.encode(init_text) +# ids = torch.asarray([ids], device=devices.device, dtype=torch.int) +# embedded = self.wrapped.model.token_embedding.wrapped(ids.to(self.wrapped.model.token_embedding.wrapped.weight.device)).squeeze(0) +# +# return embedded diff --git a/modules/sd_hijack_xlmr.py b/modules/sd_hijack_xlmr.py index 28528329..091de420 100644 --- a/modules/sd_hijack_xlmr.py +++ b/modules/sd_hijack_xlmr.py @@ -1,32 +1,32 @@ -import torch - -from modules import sd_hijack_clip, devices - - -class FrozenXLMREmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords): - def __init__(self, wrapped, hijack): - super().__init__(wrapped, hijack) - - self.id_start = wrapped.config.bos_token_id - self.id_end = wrapped.config.eos_token_id - self.id_pad = wrapped.config.pad_token_id - - self.comma_token = self.tokenizer.get_vocab().get(',', None) # alt diffusion doesn't have bits for comma - - def encode_with_transformers(self, tokens): - # there's no CLIP Skip here because all hidden layers have size of 1024 and the last one uses a - # trained layer to transform those 1024 into 768 for unet; so you can't choose which transformer - # layer to work with - you have to use the last - - attention_mask = (tokens != self.id_pad).to(device=tokens.device, dtype=torch.int64) - features = self.wrapped(input_ids=tokens, attention_mask=attention_mask) - z = features['projection_state'] - - return z - - def encode_embedding_init_text(self, init_text, nvpt): - embedding_layer = self.wrapped.roberta.embeddings - ids = self.wrapped.tokenizer(init_text, max_length=nvpt, return_tensors="pt", add_special_tokens=False)["input_ids"] - embedded = embedding_layer.token_embedding.wrapped(ids.to(devices.device)).squeeze(0) - - return embedded +# import torch +# +# from modules import sd_hijack_clip, devices +# +# +# class FrozenXLMREmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords): +# def __init__(self, wrapped, hijack): +# super().__init__(wrapped, hijack) +# +# self.id_start = wrapped.config.bos_token_id +# self.id_end = 
wrapped.config.eos_token_id +# self.id_pad = wrapped.config.pad_token_id +# +# self.comma_token = self.tokenizer.get_vocab().get(',', None) # alt diffusion doesn't have bits for comma +# +# def encode_with_transformers(self, tokens): +# # there's no CLIP Skip here because all hidden layers have size of 1024 and the last one uses a +# # trained layer to transform those 1024 into 768 for unet; so you can't choose which transformer +# # layer to work with - you have to use the last +# +# attention_mask = (tokens != self.id_pad).to(device=tokens.device, dtype=torch.int64) +# features = self.wrapped(input_ids=tokens, attention_mask=attention_mask) +# z = features['projection_state'] +# +# return z +# +# def encode_embedding_init_text(self, init_text, nvpt): +# embedding_layer = self.wrapped.roberta.embeddings +# ids = self.wrapped.tokenizer(init_text, max_length=nvpt, return_tensors="pt", add_special_tokens=False)["input_ids"] +# embedded = embedding_layer.token_embedding.wrapped(ids.to(devices.device)).squeeze(0) +# +# return embedded
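
The tokenize_line docstring above describes how a prompt is split into PromptChunk objects of exactly 77 tokens each: one start token, up to 75 prompt tokens, and an end token used both as terminator and padding. The following is a minimal standalone sketch of that chunking idea, not the webui implementation; the id values are assumptions for illustration, and it deliberately omits the BREAK keyword, comma backtracking, and textual-inversion embedding handling done by the real method.

# Minimal sketch of the 75-token chunking described in tokenize_line's docstring.
CHUNK_LENGTH = 75
ID_START = 49406  # commonly the CLIP BOS id; treat as an assumption here
ID_END = 49407    # commonly the CLIP EOS id; SD1-style models also use it as padding

def chunk_tokens(tokens, multipliers):
    """Split (token, multiplier) lists into 77-entry chunks: BOS + 75 tokens + EOS/padding."""
    chunks = []
    for i in range(0, max(len(tokens), 1), CHUNK_LENGTH):
        toks = tokens[i:i + CHUNK_LENGTH]
        mults = multipliers[i:i + CHUNK_LENGTH]
        pad = CHUNK_LENGTH - len(toks)
        toks = [ID_START] + toks + [ID_END] * (pad + 1)
        mults = [1.0] + mults + [1.0] * (pad + 1)
        chunks.append((toks, mults))
    return chunks

# Example: 100 prompt tokens become two chunks of 77 entries each.
chunks = chunk_tokens(list(range(100)), [1.0] * 100)
assert len(chunks) == 2 and all(len(t) == 77 for t, _ in chunks)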
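
The emphasis handled through token_mults and prompt_parser.parse_prompt_attention scales a token's multiplier by 1.1 for each enclosing parenthesis and by 1/1.1 for each enclosing square bracket. A simplified per-character sketch of that nesting rule follows; the real parser also supports explicit weights like (word:1.4), escaping, and BREAK, none of which are modeled here.

# Simplified illustration of how nested emphasis brackets scale weights.
def emphasis_weights(prompt):
    weight = 1.0
    out = []  # (character, weight) pairs for non-bracket characters
    for ch in prompt:
        if ch == '(':
            weight *= 1.1
        elif ch == ')':
            weight /= 1.1
        elif ch == '[':
            weight /= 1.1
        elif ch == ']':
            weight *= 1.1
        else:
            out.append((ch, round(weight, 4)))
    return out

# Characters inside "(masterpiece)" get weight 1.1; inside "[blurry]" roughly 0.909.
print(emphasis_weights("(masterpiece), [blurry]"))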
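
FrozenCLIPEmbedderWithCustomWords.encode_with_transformers implements the "CLIP skip" setting by taking an earlier hidden state and running it through final_layer_norm instead of using the last hidden state. The same selection can be sketched against the Hugging Face CLIP text model directly; the model name and the setting value below are assumptions for illustration, not values taken from the diff.

# Sketch of the CLIP-skip selection, assuming the Hugging Face transformers CLIP classes.
import torch
from transformers import CLIPTokenizer, CLIPTextModel

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
model = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")

clip_stop_at_last_layers = 2  # 1 = normal output, 2 = skip the last transformer layer

tokens = tokenizer(["a photo of a cat"], padding="max_length", max_length=77,
                   return_tensors="pt")
with torch.no_grad():
    outputs = model(input_ids=tokens.input_ids, output_hidden_states=True)

if clip_stop_at_last_layers > 1:
    z = outputs.hidden_states[-clip_stop_at_last_layers]
    z = model.text_model.final_layer_norm(z)
else:
    z = outputs.last_hidden_state

print(z.shape)  # (1, 77, 768) for the SD1 text encoder

The webui code passes output_hidden_states=-opts.CLIP_stop_at_last_layers, a non-zero and therefore truthy value, so hidden states are requested either way; output_hidden_states=True above has the same effect.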
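
The forward docstring states that the per-chunk encodings are concatenated so the returned tensor has shape (B, T, C) with T a multiple of 77. A shape-only illustration of that concatenation, using zero tensors as stand-ins for encode_with_transformers outputs:

# Two 77-token chunks concatenated along the token axis give T = 154.
import torch

B, C = 2, 768                       # batch size and SD1 embedding width (assumed)
chunk1 = torch.zeros(B, 77, C)
chunk2 = torch.zeros(B, 77, C)

z = torch.hstack([chunk1, chunk2])  # for 3-D tensors, hstack concatenates along dim 1
print(z.shape)                      # torch.Size([2, 154, 768])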
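
process_tokens notes that SD2 uses a dedicated padding token while SD1 reuses the end-of-text token, and that everything after the first end token is overwritten with the pad id. A small sketch of that replacement, with assumed token ids:

# SD2-style padding: keep the first EOS, replace the trailing EOS padding with the pad id.
import torch

id_start, id_end, id_pad = 49406, 49407, 0          # assumed ids for illustration
row = [id_start, 320, 2368, id_end] + [id_end] * 73  # BOS, "a cat", EOS, EOS-padding
tokens = torch.tensor([row])

index = row.index(id_end)
tokens[0, index + 1:] = id_pad
print(tokens[0, :8])  # tensor([49406, 320, 2368, 49407, 0, 0, 0, 0])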