From a8d7cac5031744ea2229b1ff130a1b07edad50cf Mon Sep 17 00:00:00 2001
From: layerdiffusion <19834515+lllyasviel@users.noreply.github.com>
Date: Sun, 11 Aug 2024 18:55:36 -0700
Subject: [PATCH] make transformers less verbose

---
 backend/loader.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/backend/loader.py b/backend/loader.py
index c8f8a256..b0a8da40 100644
--- a/backend/loader.py
+++ b/backend/loader.py
@@ -37,9 +37,14 @@ def load_huggingface_component(guess, component_name, lib_name, cls_name, repo_p
         return None
 
     if lib_name in ['transformers', 'diffusers']:
-        if component_name in ['scheduler'] or component_name.startswith('tokenizer'):
+        if component_name in ['scheduler']:
             cls = getattr(importlib.import_module(lib_name), cls_name)
             return cls.from_pretrained(os.path.join(repo_path, component_name))
+        if component_name.startswith('tokenizer'):
+            cls = getattr(importlib.import_module(lib_name), cls_name)
+            comp = cls.from_pretrained(os.path.join(repo_path, component_name))
+            comp._eventual_warn_about_too_long_sequence = lambda *args, **kwargs: None
+            return comp
 
     if cls_name in ['AutoencoderKL']:
         config = IntegratedAutoencoderKL.load_config(config_path)
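
Note on the change: the tokenizer branch now overrides the private transformers
hook _eventual_warn_about_too_long_sequence (defined on PreTrainedTokenizerBase)
with a no-op lambda, so loading a tokenizer no longer logs the "Token indices
sequence length is longer than the specified maximum sequence length" warning
for long prompts. Below is a minimal standalone sketch of the same technique,
separate from the patch; the CLIP checkpoint name is only an illustrative
assumption, not something taken from this commit.

    from transformers import CLIPTokenizer

    # Load any transformers tokenizer; CLIP's model_max_length is 77, so long
    # prompts normally trigger the length warning on encoding.
    tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")

    # Replace the per-instance warning hook with a no-op. This only silences
    # the too-long-sequence warning; other transformers logging is untouched.
    tokenizer._eventual_warn_about_too_long_sequence = lambda *args, **kwargs: None

    long_prompt = "a photo of a cat " * 50
    ids = tokenizer(long_prompt).input_ids  # no length warning is emitted
    print(len(ids))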