From 9679232d2906bb04e0447db6b68d7340d778d6bc Mon Sep 17 00:00:00 2001
From: layerdiffusion <19834515+lllyasviel@users.noreply.github.com>
Date: Sun, 4 Aug 2024 18:49:35 -0700
Subject: [PATCH] fix TI embedding info text

original webui is also broken but forge is fixed now
---
 backend/text_processing/classic_engine.py | 2 --
 modules/processing.py                     | 3 +++
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/backend/text_processing/classic_engine.py b/backend/text_processing/classic_engine.py
index dcc7695b..7c16de70 100644
--- a/backend/text_processing/classic_engine.py
+++ b/backend/text_processing/classic_engine.py
@@ -262,8 +262,6 @@ class ClassicTextProcessingEngine(torch.nn.Module):
 
         global last_extra_generation_params
 
-        last_extra_generation_params = {}
-
         if used_embeddings:
             names = []
 
diff --git a/modules/processing.py b/modules/processing.py
index 08a3b781..d7d36eea 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -500,6 +500,7 @@ class StableDiffusionProcessing:
 
                 cache[1] = function(shared.sd_model, required_prompts, steps, hires_steps, shared.opts.use_old_scheduling)
 
                 import backend.text_processing.classic_engine
+                last_extra_generation_params = backend.text_processing.classic_engine.last_extra_generation_params.copy()
                 modules.sd_hijack.model_hijack.extra_generation_params.update(last_extra_generation_params)
 
@@ -507,6 +508,8 @@ class StableDiffusionProcessing:
             if len(cache) > 2:
                 cache[2] = last_extra_generation_params
 
+            backend.text_processing.classic_engine.last_extra_generation_params = {}
+
             cache[0] = cached_params
 
             return cache[1]