diff --git a/extensions-builtin/forge_preprocessor_revision/scripts/preprocessor_revision.py b/extensions-builtin/forge_preprocessor_revision/scripts/preprocessor_revision.py
index 46274b6d..eae93fce 100644
--- a/extensions-builtin/forge_preprocessor_revision/scripts/preprocessor_revision.py
+++ b/extensions-builtin/forge_preprocessor_revision/scripts/preprocessor_revision.py
@@ -36,6 +36,7 @@ def revision_conditioning_modifier(model, x, timestep, uncond, cond, cond_scale,
                                              noise_level=torch.tensor([noise_level], device=x.device))
     adm_out = torch.cat((c_adm, noise_level_emb), 1)
+    new_y = adm_out[:, :1280]
 
     cond = copy.deepcopy(cond)
     uncond = copy.deepcopy(uncond)
diff --git a/modules_forge/supported_preprocessor.py b/modules_forge/supported_preprocessor.py
index ae08847b..9be7ca45 100644
--- a/modules_forge/supported_preprocessor.py
+++ b/modules_forge/supported_preprocessor.py
@@ -125,6 +125,8 @@ class PreprocessorClipVision(Preprocessor):
         return self.clipvision
 
+    @torch.no_grad()
+    @torch.inference_mode()
     def __call__(self, input_image, resolution, slider_1=None, slider_2=None, slider_3=None, **kwargs):
         clipvision = self.load_clipvision()
         return clipvision.encode_image(numpy_to_pytorch(input_image))
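
For context, a minimal sketch of what the decorator stack added to `__call__` does, using a hypothetical `DummyEncoder` with a placeholder forward pass instead of the real CLIP Vision encoder (so this is not forge code, just the PyTorch semantics). `torch.inference_mode()` already disables gradient tracking; the extra `torch.no_grad()` is redundant but harmless and keeps the intent explicit.

```python
import torch


class DummyEncoder:
    """Hypothetical stand-in for PreprocessorClipVision.__call__; not forge code."""

    @torch.no_grad()
    @torch.inference_mode()
    def __call__(self, pixels: torch.Tensor) -> torch.Tensor:
        # No autograd graph is recorded here and the result is an
        # inference-mode tensor, so activations from the (placeholder)
        # forward pass are never kept around for a backward pass.
        return pixels.mean(dim=(2, 3))  # placeholder for clipvision.encode_image(...)


x = torch.rand(1, 3, 224, 224, requires_grad=True)
y = DummyEncoder()(x)
print(y.requires_grad)   # False
print(y.is_inference())  # True: produced under torch.inference_mode()
```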