remove unused

layerdiffusion
2024-08-05 12:14:43 -07:00
parent 48dec215f3
commit d9fc9f40e6
5 changed files with 579 additions and 579 deletions


@@ -1,384 +1,384 @@
import math
from collections import namedtuple

import torch

from modules import prompt_parser, devices, sd_hijack, sd_emphasis
from modules.shared import opts


class PromptChunk:
""" # """
This object contains token ids, weight (multipliers:1.4) and textual inversion embedding info for a chunk of prompt. # This object contains token ids, weight (multipliers:1.4) and textual inversion embedding info for a chunk of prompt.
If a prompt is short, it is represented by one PromptChunk, otherwise, multiple are necessary. # If a prompt is short, it is represented by one PromptChunk, otherwise, multiple are necessary.
Each PromptChunk contains an exact amount of tokens - 77, which includes one for start and end token, # Each PromptChunk contains an exact amount of tokens - 77, which includes one for start and end token,
so just 75 tokens from prompt. # so just 75 tokens from prompt.
""" # """
#
def __init__(self): # def __init__(self):
self.tokens = [] # self.tokens = []
self.multipliers = [] # self.multipliers = []
self.fixes = [] # self.fixes = []
#
#
PromptChunkFix = namedtuple('PromptChunkFix', ['offset', 'embedding']) # PromptChunkFix = namedtuple('PromptChunkFix', ['offset', 'embedding'])
"""An object of this type is a marker showing that textual inversion embedding's vectors have to placed at offset in the prompt # """An object of this type is a marker showing that textual inversion embedding's vectors have to placed at offset in the prompt
chunk. Those objects are found in PromptChunk.fixes and, are placed into FrozenCLIPEmbedderWithCustomWordsBase.hijack.fixes, and finally # chunk. Those objects are found in PromptChunk.fixes and, are placed into FrozenCLIPEmbedderWithCustomWordsBase.hijack.fixes, and finally
are applied by sd_hijack.EmbeddingsWithFixes's forward function.""" # are applied by sd_hijack.EmbeddingsWithFixes's forward function."""
#
#
class TextConditionalModel(torch.nn.Module): # class TextConditionalModel(torch.nn.Module):
    def __init__(self):
        super().__init__()

        self.hijack = sd_hijack.model_hijack
        self.chunk_length = 75

        self.is_trainable = False
        self.input_key = 'txt'
        self.return_pooled = False

        self.comma_token = None
        self.id_start = None
        self.id_end = None
        self.id_pad = None

    def empty_chunk(self):
        """creates an empty PromptChunk and returns it"""

        chunk = PromptChunk()
        chunk.tokens = [self.id_start] + [self.id_end] * (self.chunk_length + 1)
        chunk.multipliers = [1.0] * (self.chunk_length + 2)
        return chunk

    def get_target_prompt_token_count(self, token_count):
        """returns the maximum number of tokens a prompt of a known length can have before it requires one more PromptChunk to be represented"""

        return math.ceil(max(token_count, 1) / self.chunk_length) * self.chunk_length

    def tokenize(self, texts):
        """Converts a batch of texts into a batch of token ids"""

        raise NotImplementedError

    def encode_with_transformers(self, tokens):
""" # """
converts a batch of token ids (in python lists) into a single tensor with numeric representation of those tokens; # converts a batch of token ids (in python lists) into a single tensor with numeric representation of those tokens;
All python lists with tokens are assumed to have same length, usually 77. # All python lists with tokens are assumed to have same length, usually 77.
if input is a list with B elements and each element has T tokens, expected output shape is (B, T, C), where C depends on # if input is a list with B elements and each element has T tokens, expected output shape is (B, T, C), where C depends on
model - can be 768 and 1024. # model - can be 768 and 1024.
Among other things, this call will read self.hijack.fixes, apply it to its inputs, and clear it (setting it to None). # Among other things, this call will read self.hijack.fixes, apply it to its inputs, and clear it (setting it to None).
""" # """
#
raise NotImplementedError # raise NotImplementedError
#
def encode_embedding_init_text(self, init_text, nvpt): # def encode_embedding_init_text(self, init_text, nvpt):
"""Converts text into a tensor with this text's tokens' embeddings. Note that those are embeddings before they are passed through # """Converts text into a tensor with this text's tokens' embeddings. Note that those are embeddings before they are passed through
transformers. nvpt is used as a maximum length in tokens. If text produces less teokens than nvpt, only this many is returned.""" # transformers. nvpt is used as a maximum length in tokens. If text produces less teokens than nvpt, only this many is returned."""
#
raise NotImplementedError # raise NotImplementedError
#
def tokenize_line(self, line): # def tokenize_line(self, line):
""" # """
this transforms a single prompt into a list of PromptChunk objects - as many as needed to # this transforms a single prompt into a list of PromptChunk objects - as many as needed to
represent the prompt. # represent the prompt.
Returns the list and the total number of tokens in the prompt. # Returns the list and the total number of tokens in the prompt.
""" # """
#
if opts.emphasis != "None": # if opts.emphasis != "None":
parsed = prompt_parser.parse_prompt_attention(line) # parsed = prompt_parser.parse_prompt_attention(line)
else: # else:
parsed = [[line, 1.0]] # parsed = [[line, 1.0]]
#
tokenized = self.tokenize([text for text, _ in parsed]) # tokenized = self.tokenize([text for text, _ in parsed])
#
chunks = [] # chunks = []
chunk = PromptChunk() # chunk = PromptChunk()
token_count = 0 # token_count = 0
last_comma = -1 # last_comma = -1
#
def next_chunk(is_last=False): # def next_chunk(is_last=False):
"""puts current chunk into the list of results and produces the next one - empty; # """puts current chunk into the list of results and produces the next one - empty;
if is_last is true, tokens <end-of-text> tokens at the end won't add to token_count""" # if is_last is true, tokens <end-of-text> tokens at the end won't add to token_count"""
            nonlocal token_count
            nonlocal last_comma
            nonlocal chunk

            if is_last:
                token_count += len(chunk.tokens)
            else:
                token_count += self.chunk_length

            to_add = self.chunk_length - len(chunk.tokens)
            if to_add > 0:
                chunk.tokens += [self.id_end] * to_add
                chunk.multipliers += [1.0] * to_add

            chunk.tokens = [self.id_start] + chunk.tokens + [self.id_end]
            chunk.multipliers = [1.0] + chunk.multipliers + [1.0]

            last_comma = -1
            chunks.append(chunk)
            chunk = PromptChunk()

        for tokens, (text, weight) in zip(tokenized, parsed):
            if text == 'BREAK' and weight == -1:
                next_chunk()
                continue

            position = 0
            while position < len(tokens):
                token = tokens[position]

                if token == self.comma_token:
                    last_comma = len(chunk.tokens)

                # this is when we are at the end of allotted 75 tokens for the current chunk, and the current token is not a comma. opts.comma_padding_backtrack
                # is a setting that specifies that if there is a comma nearby, the text after the comma should be moved out of this chunk and into the next.
                elif opts.comma_padding_backtrack != 0 and len(chunk.tokens) == self.chunk_length and last_comma != -1 and len(chunk.tokens) - last_comma <= opts.comma_padding_backtrack:
                    break_location = last_comma + 1

                    reloc_tokens = chunk.tokens[break_location:]
                    reloc_mults = chunk.multipliers[break_location:]

                    chunk.tokens = chunk.tokens[:break_location]
                    chunk.multipliers = chunk.multipliers[:break_location]

                    next_chunk()
                    chunk.tokens = reloc_tokens
                    chunk.multipliers = reloc_mults

                if len(chunk.tokens) == self.chunk_length:
                    next_chunk()

                embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, position)
                if embedding is None:
                    chunk.tokens.append(token)
                    chunk.multipliers.append(weight)
                    position += 1
                    continue

                emb_len = int(embedding.vectors)
                if len(chunk.tokens) + emb_len > self.chunk_length:
                    next_chunk()

                chunk.fixes.append(PromptChunkFix(len(chunk.tokens), embedding))

                chunk.tokens += [0] * emb_len
                chunk.multipliers += [weight] * emb_len
                position += embedding_length_in_tokens

        if chunk.tokens or not chunks:
            next_chunk(is_last=True)

        return chunks, token_count

    def process_texts(self, texts):
        """
        Accepts a list of texts and calls tokenize_line() on each, with cache. Returns the list of results and maximum
        length, in tokens, of all texts.
        """

        token_count = 0

        cache = {}
        batch_chunks = []
        for line in texts:
            if line in cache:
                chunks = cache[line]
            else:
                chunks, current_token_count = self.tokenize_line(line)
                token_count = max(current_token_count, token_count)

                cache[line] = chunks

            batch_chunks.append(chunks)

        return batch_chunks, token_count

    def forward(self, texts):
""" # """
Accepts an array of texts; Passes texts through transformers network to create a tensor with numerical representation of those texts. # Accepts an array of texts; Passes texts through transformers network to create a tensor with numerical representation of those texts.
Returns a tensor with shape of (B, T, C), where B is length of the array; T is length, in tokens, of texts (including padding) - T will # Returns a tensor with shape of (B, T, C), where B is length of the array; T is length, in tokens, of texts (including padding) - T will
be a multiple of 77; and C is dimensionality of each token - for SD1 it's 768, for SD2 it's 1024, and for SDXL it's 1280. # be a multiple of 77; and C is dimensionality of each token - for SD1 it's 768, for SD2 it's 1024, and for SDXL it's 1280.
An example shape returned by this function can be: (2, 77, 768). # An example shape returned by this function can be: (2, 77, 768).
For SDXL, instead of returning one tensor avobe, it returns a tuple with two: the other one with shape (B, 1280) with pooled values. # For SDXL, instead of returning one tensor avobe, it returns a tuple with two: the other one with shape (B, 1280) with pooled values.
Webui usually sends just one text at a time through this function - the only time when texts is an array with more than one element # Webui usually sends just one text at a time through this function - the only time when texts is an array with more than one element
is when you do prompt editing: "a picture of a [cat:dog:0.4] eating ice cream" # is when you do prompt editing: "a picture of a [cat:dog:0.4] eating ice cream"
""" # """
#
batch_chunks, token_count = self.process_texts(texts) # batch_chunks, token_count = self.process_texts(texts)
#
used_embeddings = {} # used_embeddings = {}
        chunk_count = max([len(x) for x in batch_chunks])

        zs = []
        for i in range(chunk_count):
            batch_chunk = [chunks[i] if i < len(chunks) else self.empty_chunk() for chunks in batch_chunks]

            tokens = [x.tokens for x in batch_chunk]
            multipliers = [x.multipliers for x in batch_chunk]
            self.hijack.fixes = [x.fixes for x in batch_chunk]

            for fixes in self.hijack.fixes:
                for _position, embedding in fixes:
                    used_embeddings[embedding.name] = embedding
            devices.torch_npu_set_device()
            z = self.process_tokens(tokens, multipliers)
            zs.append(z)

        if opts.textual_inversion_add_hashes_to_infotext and used_embeddings:
            hashes = []
            for name, embedding in used_embeddings.items():
                shorthash = embedding.shorthash
                if not shorthash:
                    continue

                name = name.replace(":", "").replace(",", "")
                hashes.append(f"{name}: {shorthash}")

            if hashes:
                if self.hijack.extra_generation_params.get("TI hashes"):
                    hashes.append(self.hijack.extra_generation_params.get("TI hashes"))
                self.hijack.extra_generation_params["TI hashes"] = ", ".join(hashes)

        if any(x for x in texts if "(" in x or "[" in x) and opts.emphasis != "Original":
            self.hijack.extra_generation_params["Emphasis"] = opts.emphasis

        if self.return_pooled:
            return torch.hstack(zs), zs[0].pooled
        else:
            return torch.hstack(zs)

    def process_tokens(self, remade_batch_tokens, batch_multipliers):
        """
        sends one prompt chunk to be encoded by the transformers neural network.
        remade_batch_tokens is a batch of tokens - a list, where every element is a list of tokens; usually
        there are exactly 77 tokens in the list. batch_multipliers is the same but for multipliers instead of tokens.
        Multipliers are used to give more or less weight to the outputs of the transformers network. Each multiplier
        corresponds to one token.
        """
        tokens = torch.asarray(remade_batch_tokens).to(devices.device)

        # this is for SD2: SD1 uses the same token for padding and end of text, while SD2 uses different ones.
        if self.id_end != self.id_pad:
            for batch_pos in range(len(remade_batch_tokens)):
                index = remade_batch_tokens[batch_pos].index(self.id_end)
                tokens[batch_pos, index+1:tokens.shape[1]] = self.id_pad

        z = self.encode_with_transformers(tokens)

        pooled = getattr(z, 'pooled', None)

        emphasis = sd_emphasis.get_current_option(opts.emphasis)()
        emphasis.tokens = remade_batch_tokens
        emphasis.multipliers = torch.asarray(batch_multipliers).to(devices.device)
        emphasis.z = z

        emphasis.after_transformers()

        z = emphasis.z

        if pooled is not None:
            z.pooled = pooled

        return z


class FrozenCLIPEmbedderWithCustomWordsBase(TextConditionalModel):
    """A PyTorch module that wraps the FrozenCLIPEmbedder module. It enhances FrozenCLIPEmbedder, making it possible to
    have unlimited prompt length and assign weights to tokens in the prompt.
""" # """
#
def __init__(self, wrapped, hijack): # def __init__(self, wrapped, hijack):
super().__init__() # super().__init__()
#
self.hijack = hijack # self.hijack = hijack
#
self.wrapped = wrapped # self.wrapped = wrapped
"""Original FrozenCLIPEmbedder module; can also be FrozenOpenCLIPEmbedder or xlmr.BertSeriesModelWithTransformation, # """Original FrozenCLIPEmbedder module; can also be FrozenOpenCLIPEmbedder or xlmr.BertSeriesModelWithTransformation,
depending on model.""" # depending on model."""
#
self.is_trainable = getattr(wrapped, 'is_trainable', False) # self.is_trainable = getattr(wrapped, 'is_trainable', False)
self.input_key = getattr(wrapped, 'input_key', 'txt') # self.input_key = getattr(wrapped, 'input_key', 'txt')
self.return_pooled = getattr(self.wrapped, 'return_pooled', False) # self.return_pooled = getattr(self.wrapped, 'return_pooled', False)
#
self.legacy_ucg_val = None # for sgm codebase # self.legacy_ucg_val = None # for sgm codebase
#
def forward(self, texts): # def forward(self, texts):
if opts.use_old_emphasis_implementation: # if opts.use_old_emphasis_implementation:
import modules.sd_hijack_clip_old # import modules.sd_hijack_clip_old
return modules.sd_hijack_clip_old.forward_old(self, texts) # return modules.sd_hijack_clip_old.forward_old(self, texts)
#
return super().forward(texts) # return super().forward(texts)
#
#
class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWordsBase): # class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWordsBase):
def __init__(self, wrapped, hijack): # def __init__(self, wrapped, hijack):
super().__init__(wrapped, hijack) # super().__init__(wrapped, hijack)
        self.tokenizer = wrapped.tokenizer

        vocab = self.tokenizer.get_vocab()

        self.comma_token = vocab.get(',</w>', None)

        self.token_mults = {}
        tokens_with_parens = [(k, v) for k, v in vocab.items() if '(' in k or ')' in k or '[' in k or ']' in k]
        for text, ident in tokens_with_parens:
            mult = 1.0
            for c in text:
                if c == '[':
                    mult /= 1.1
                if c == ']':
                    mult *= 1.1
                if c == '(':
                    mult *= 1.1
                if c == ')':
                    mult /= 1.1

            if mult != 1.0:
                self.token_mults[ident] = mult

        self.id_start = self.wrapped.tokenizer.bos_token_id
        self.id_end = self.wrapped.tokenizer.eos_token_id
        self.id_pad = self.id_end

    def tokenize(self, texts):
        tokenized = self.wrapped.tokenizer(texts, truncation=False, add_special_tokens=False)["input_ids"]

        return tokenized

    def encode_with_transformers(self, tokens):
        outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=-opts.CLIP_stop_at_last_layers)

        if opts.CLIP_stop_at_last_layers > 1:
            z = outputs.hidden_states[-opts.CLIP_stop_at_last_layers]
            z = self.wrapped.transformer.text_model.final_layer_norm(z)
        else:
            z = outputs.last_hidden_state

        return z

    def encode_embedding_init_text(self, init_text, nvpt):
        embedding_layer = self.wrapped.transformer.text_model.embeddings
        ids = self.wrapped.tokenizer(init_text, max_length=nvpt, return_tensors="pt", add_special_tokens=False)["input_ids"]
        embedded = embedding_layer.token_embedding.wrapped(ids.to(embedding_layer.token_embedding.wrapped.weight.device)).squeeze(0)

        return embedded


class FrozenCLIPEmbedderForSDXLWithCustomWords(FrozenCLIPEmbedderWithCustomWords):
    def __init__(self, wrapped, hijack):
        super().__init__(wrapped, hijack)

    def encode_with_transformers(self, tokens):
        outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=self.wrapped.layer == "hidden")

        if opts.sdxl_clip_l_skip is True:
            z = outputs.hidden_states[-opts.CLIP_stop_at_last_layers]
        elif self.wrapped.layer == "last":
            z = outputs.last_hidden_state
        else:
            z = outputs.hidden_states[self.wrapped.layer_idx]

        return z
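A minimal sketch of the chunking arithmetic above (illustrative only, not part of the diff): get_target_prompt_token_count rounds a prompt's token count up to a multiple of chunk_length, and each finished chunk is then wrapped to 77 ids with the start/end tokens.

import math

chunk_length = 75

def target_token_count(token_count):
    # mirrors TextConditionalModel.get_target_prompt_token_count
    return math.ceil(max(token_count, 1) / chunk_length) * chunk_length

assert target_token_count(10) == 75    # short prompt: a single chunk
assert target_token_count(75) == 75    # exactly one full chunk
assert target_token_count(76) == 150   # one token over: a second chunk is needed
# each chunk then becomes 77 ids: [id_start] + 75 tokens/padding + [id_end]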


@@ -1,82 +1,82 @@
from modules import sd_hijack_clip
from modules import shared


def process_text_old(self: sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase, texts):
    id_start = self.id_start
    id_end = self.id_end
    maxlen = self.wrapped.max_length  # you get to stay at 77
    used_custom_terms = []
    remade_batch_tokens = []
    hijack_comments = []
    hijack_fixes = []
    token_count = 0

    cache = {}
    batch_tokens = self.tokenize(texts)
    batch_multipliers = []
    for tokens in batch_tokens:
        tuple_tokens = tuple(tokens)

        if tuple_tokens in cache:
            remade_tokens, fixes, multipliers = cache[tuple_tokens]
        else:
            fixes = []
            remade_tokens = []
            multipliers = []
            mult = 1.0

            i = 0
            while i < len(tokens):
                token = tokens[i]

                embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i)

                mult_change = self.token_mults.get(token) if shared.opts.emphasis != "None" else None
                if mult_change is not None:
                    mult *= mult_change
                    i += 1
                elif embedding is None:
                    remade_tokens.append(token)
                    multipliers.append(mult)
                    i += 1
                else:
                    emb_len = int(embedding.vec.shape[0])
                    fixes.append((len(remade_tokens), embedding))
                    remade_tokens += [0] * emb_len
                    multipliers += [mult] * emb_len
                    used_custom_terms.append((embedding.name, embedding.checksum()))
                    i += embedding_length_in_tokens

            if len(remade_tokens) > maxlen - 2:
                vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()}
                ovf = remade_tokens[maxlen - 2:]
                overflowing_words = [vocab.get(int(x), "") for x in ovf]
                overflowing_text = self.wrapped.tokenizer.convert_tokens_to_string(''.join(overflowing_words))
                hijack_comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n")

            token_count = len(remade_tokens)
            remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens))
            remade_tokens = [id_start] + remade_tokens[0:maxlen - 2] + [id_end]
            cache[tuple_tokens] = (remade_tokens, fixes, multipliers)

        multipliers = multipliers + [1.0] * (maxlen - 2 - len(multipliers))
        multipliers = [1.0] + multipliers[0:maxlen - 2] + [1.0]

        remade_batch_tokens.append(remade_tokens)
        hijack_fixes.append(fixes)
        batch_multipliers.append(multipliers)
    return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count


def forward_old(self: sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase, texts):
    batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = process_text_old(self, texts)

    self.hijack.comments += hijack_comments

    if used_custom_terms:
        embedding_names = ", ".join(f"{word} [{checksum}]" for word, checksum in used_custom_terms)
        self.hijack.comments.append(f"Used embeddings: {embedding_names}")

    self.hijack.fixes = hijack_fixes
    return self.process_tokens(remade_batch_tokens, batch_multipliers)
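A rough sketch of how the old emphasis path above weights tokens (illustrative only): the per-character rule below is the same one used to build token_mults in the first file, with '(' and ']' scaling up and ')' and '[' scaling down by a factor of 1.1.

def bracket_multiplier(chars):
    # accumulate the same factors that token_mults assigns to bracket tokens
    mult = 1.0
    for c in chars:
        if c == '[':
            mult /= 1.1
        if c == ']':
            mult *= 1.1
        if c == '(':
            mult *= 1.1
        if c == ')':
            mult /= 1.1
    return mult

print(bracket_multiplier('(('))   # ~1.21: tokens inside "((...))" get extra weight
print(bracket_multiplier('['))    # ~0.91: tokens inside "[...]" are de-emphasised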


@@ -1,10 +1,10 @@
import os.path


def should_hijack_ip2p(checkpoint_info):
    from modules import sd_models_config

    ckpt_basename = os.path.basename(checkpoint_info.filename).lower()
    cfg_basename = os.path.basename(sd_models_config.find_checkpoint_config_near_filename(checkpoint_info)).lower()

    return "pix2pix" in ckpt_basename and "pix2pix" not in cfg_basename


@@ -1,71 +1,71 @@
import open_clip.tokenizer
import torch

from modules import sd_hijack_clip, devices
from modules.shared import opts

tokenizer = open_clip.tokenizer._tokenizer


class FrozenOpenCLIPEmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase):
    def __init__(self, wrapped, hijack):
        super().__init__(wrapped, hijack)

        self.comma_token = [v for k, v in tokenizer.encoder.items() if k == ',</w>'][0]
        self.id_start = tokenizer.encoder["<start_of_text>"]
        self.id_end = tokenizer.encoder["<end_of_text>"]
        self.id_pad = 0

    def tokenize(self, texts):
        assert not opts.use_old_emphasis_implementation, 'Old emphasis implementation not supported for Open Clip'

        tokenized = [tokenizer.encode(text) for text in texts]

        return tokenized

    def encode_with_transformers(self, tokens):
        # set self.wrapped.layer_idx here according to opts.CLIP_stop_at_last_layers
        z = self.wrapped.encode_with_transformer(tokens)

        return z

    def encode_embedding_init_text(self, init_text, nvpt):
        ids = tokenizer.encode(init_text)
        ids = torch.asarray([ids], device=devices.device, dtype=torch.int)
        embedded = self.wrapped.model.token_embedding.wrapped(ids).squeeze(0)

        return embedded


class FrozenOpenCLIPEmbedder2WithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase):
    def __init__(self, wrapped, hijack):
        super().__init__(wrapped, hijack)

        self.comma_token = [v for k, v in tokenizer.encoder.items() if k == ',</w>'][0]
        self.id_start = tokenizer.encoder["<start_of_text>"]
        self.id_end = tokenizer.encoder["<end_of_text>"]
        self.id_pad = 0

    def tokenize(self, texts):
        assert not opts.use_old_emphasis_implementation, 'Old emphasis implementation not supported for Open Clip'

        tokenized = [tokenizer.encode(text) for text in texts]

        return tokenized

    def encode_with_transformers(self, tokens):
        d = self.wrapped.encode_with_transformer(tokens)
        z = d[self.wrapped.layer]

        pooled = d.get("pooled")
        if pooled is not None:
            z.pooled = pooled

        return z

    def encode_embedding_init_text(self, init_text, nvpt):
        ids = tokenizer.encode(init_text)
        ids = torch.asarray([ids], device=devices.device, dtype=torch.int)
        embedded = self.wrapped.model.token_embedding.wrapped(ids.to(self.wrapped.model.token_embedding.wrapped.weight.device)).squeeze(0)

        return embedded
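Because id_pad (0) differs from id_end here, process_tokens in the first file rewrites everything after the first end-of-text token to the pad id before encoding. A small sketch (illustrative only; token ids are arbitrary except 49407, which is the usual <end_of_text> id in the standard CLIP BPE vocab):

import torch

id_end, id_pad = 49407, 0
row = [320, 1125, id_end, id_end, id_end]   # as produced by tokenize_line's end-token padding
tokens = torch.tensor([row])

index = row.index(id_end)
tokens[0, index + 1:] = id_pad              # keep the first end token, pad the rest

print(tokens)  # -> [[320, 1125, 49407, 0, 0]]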


@@ -1,32 +1,32 @@
import torch

from modules import sd_hijack_clip, devices


class FrozenXLMREmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords):
    def __init__(self, wrapped, hijack):
        super().__init__(wrapped, hijack)

        self.id_start = wrapped.config.bos_token_id
        self.id_end = wrapped.config.eos_token_id
        self.id_pad = wrapped.config.pad_token_id

        self.comma_token = self.tokenizer.get_vocab().get(',', None)  # alt diffusion doesn't have </w> bits for comma

    def encode_with_transformers(self, tokens):
        # there's no CLIP Skip here because all hidden layers have size of 1024 and the last one uses a
        # trained layer to transform those 1024 into 768 for unet; so you can't choose which transformer
        # layer to work with - you have to use the last

        attention_mask = (tokens != self.id_pad).to(device=tokens.device, dtype=torch.int64)
        features = self.wrapped(input_ids=tokens, attention_mask=attention_mask)
        z = features['projection_state']

        return z

    def encode_embedding_init_text(self, init_text, nvpt):
        embedding_layer = self.wrapped.roberta.embeddings
        ids = self.wrapped.tokenizer(init_text, max_length=nvpt, return_tensors="pt", add_special_tokens=False)["input_ids"]
        embedded = embedding_layer.token_embedding.wrapped(ids.to(devices.device)).squeeze(0)

        return embedded
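A short sketch of the attention mask built above (illustrative only): padded positions are zeroed out so the transformer ignores them. The token ids below are arbitrary; 1 is XLM-R's usual pad_token_id.

import torch

id_pad = 1
tokens = torch.tensor([[0, 581, 9898, 2, 1, 1]])     # a short sequence followed by padding

attention_mask = (tokens != id_pad).to(dtype=torch.int64)
print(attention_mask)  # -> [[1, 1, 1, 1, 0, 0]]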