Text Processing Engine is Finished

Reproduces 100% of all previous results, including TI embeddings, LoRAs in CLIP, emphasis settings, BREAK, timestep swap scheduling, AB mixture, advanced uncond, etc.
Backend is 85% finished
This commit is contained in:
layerdiffusion
2024-08-04 14:13:37 -07:00
parent 2791203d5b
commit a72154405e
8 changed files with 123 additions and 90 deletions

View File

@@ -498,8 +498,14 @@ class StableDiffusionProcessing:
with devices.autocast():
cache[1] = function(shared.sd_model, required_prompts, steps, hires_steps, shared.opts.use_old_scheduling)
import backend.text_processing.classic_engine
last_extra_generation_params = backend.text_processing.classic_engine.last_extra_generation_params.copy()
modules.sd_hijack.model_hijack.extra_generation_params.update(last_extra_generation_params)
if len(cache) > 2:
cache[2] = modules.sd_hijack.model_hijack.extra_generation_params
cache[2] = last_extra_generation_params
cache[0] = cached_params
return cache[1]
@@ -880,7 +886,9 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
p.all_subseeds = [int(subseed) + x for x in range(len(p.all_prompts))]
if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings:
model_hijack.embedding_db.load_textual_inversion_embeddings()
# todo: reload ti
# model_hijack.embedding_db.load_textual_inversion_embeddings()
pass
if p.scripts is not None:
p.scripts.process(p)

View File

@@ -127,14 +127,9 @@ class StableDiffusionModelHijack:
optimization_method = None
def __init__(self):
import modules.textual_inversion.textual_inversion
self.extra_generation_params = {}
self.comments = []
self.embedding_db = modules.textual_inversion.textual_inversion.EmbeddingDatabase()
self.embedding_db.add_embedding_dir(cmd_opts.embeddings_dir)
def apply_optimizations(self, option=None):
pass

View File

@@ -686,19 +686,10 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None):
model_data.set_sd_model(sd_model)
model_data.was_loaded_at_least_once = True
sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True) # Reload embeddings after model load as they may or may not fit the model
timer.record("load textual inversion embeddings")
script_callbacks.model_loaded_callback(sd_model)
timer.record("scripts callbacks")
with torch.no_grad():
sd_model.cond_stage_model_empty_prompt = get_empty_cond(sd_model)
timer.record("calculate empty prompt")
print(f"Model loaded in {timer.summary()}.")
return sd_model

View File

@@ -127,7 +127,7 @@ class EmbeddingDatabase:
return self.register_embedding_by_name(embedding, model, embedding.name)
def register_embedding_by_name(self, embedding, model, name):
ids = model.cond_stage_model.tokenize([name])[0]
ids = [0, 0, 0] # model.cond_stage_model.tokenize([name])[0]
first_id = ids[0]
if first_id not in self.ids_lookup:
self.ids_lookup[first_id] = []
@@ -183,11 +183,7 @@ class EmbeddingDatabase:
if data is not None:
embedding = create_embedding_from_data(data, name, filename=filename, filepath=path)
if self.expected_shape == -1 or self.expected_shape == embedding.shape:
self.register_embedding(embedding, shared.sd_model)
else:
self.skipped_embeddings[name] = embedding
self.register_embedding(embedding, None)
else:
print(f"Unable to load Textual inversion embedding due to data issue: '{name}'.")