make transformers less verbose: silence tokenizer "sequence length is longer than the specified maximum" warnings

This commit is contained in:
layerdiffusion
2024-08-11 18:55:36 -07:00
parent 294416ed55
commit a8d7cac503

View File

@@ -37,9 +37,14 @@ def load_huggingface_component(guess, component_name, lib_name, cls_name, repo_p
return None
if lib_name in ['transformers', 'diffusers']:
if component_name in ['scheduler'] or component_name.startswith('tokenizer'):
if component_name in ['scheduler']:
cls = getattr(importlib.import_module(lib_name), cls_name)
return cls.from_pretrained(os.path.join(repo_path, component_name))
if component_name.startswith('tokenizer'):
cls = getattr(importlib.import_module(lib_name), cls_name)
comp = cls.from_pretrained(os.path.join(repo_path, component_name))
comp._eventual_warn_about_too_long_sequence = lambda *args, **kwargs: None
return comp
if cls_name in ['AutoencoderKL']:
config = IntegratedAutoencoderKL.load_config(config_path)