mirror of
https://github.com/Physton/sd-webui-prompt-all-in-one.git
synced 2026-01-26 11:19:55 +00:00
Fix: Use try-except to stabilize token counter during model access
This fixes a server crash (a NoneType TypeError raised from torch/nn/modules/module.py) when the token counter API is triggered during a model load or switch. Sequential null checks were not enough: the model reference can be torn down between the check and the access, and the failure then surfaces inside PyTorch's internal attribute lookup. The whole function body is now wrapped in try-except as a robust defense against this race condition.
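For background, this is a classic time-of-check-to-time-of-use race: a null check on sd_models.model_data.sd_model can pass, the loader thread can then clear the reference, and the very next attribute access blows up inside PyTorch's attribute lookup. The standalone sketch below reproduces the shape of the bug with placeholder names (ModelData, unload_soon, and the .upper() access are illustrative, not the webui's real objects, and it raises an AttributeError rather than PyTorch's TypeError, but the race is the same):

import threading
import time

class ModelData:
    # Illustrative stand-in for the webui's model holder; not the real sd_models object.
    def __init__(self):
        self.sd_model = "loaded"  # pretend a model is loaded

model_data = ModelData()

def unload_soon():
    # Simulates a model switch clearing the reference mid-request.
    time.sleep(0.001)
    model_data.sd_model = None

def token_count_with_null_check():
    # Look-before-you-leap: the check passes, the loader thread runs during
    # the sleep window, and the attribute access raises anyway.
    if model_data.sd_model is not None:
        time.sleep(0.005)
        return model_data.sd_model.upper()  # AttributeError on a torn-down model
    return "0"

def token_count_with_try_except():
    # The access itself is guarded, so a teardown at any point degrades to
    # the fallback value instead of crashing the request handler.
    try:
        time.sleep(0.005)
        return model_data.sd_model.upper()
    except Exception:
        return "0"

threading.Thread(target=unload_soon).start()
print(token_count_with_try_except())  # prints "0" instead of raising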
@@ -4,39 +4,41 @@ from functools import partial, reduce
 
 
 def get_token_counter(text, steps):
-    # FIX: Robust Null Check to prevent TypeError during model loading/unloading.
-    # Checks for the existence of model_data and its property sd_model sequentially.
-    if sd_models.model_data is None or sd_models.model_data.sd_model is None:
-        return {"token_count": 0, "max_length": 0}
-
-    # copy from modules.ui.py
-    try:
-        text, _ = extra_networks.parse_prompt(text)
-        _, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text])
-        prompt_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(prompt_flat_list, steps)
-    except Exception:
-        # a parsing error can happen here during typing, and we don't want to bother the user with
-        # messages related to it in console
-        prompt_schedules = [[[steps, text]]]
-
-    try:
-        from modules_forge import forge_version
-        forge = True
-    except:
-        forge = False
-
-    flat_prompts = reduce(lambda list1, list2: list1 + list2, prompt_schedules)
-    prompts = [prompt_text for step, prompt_text in flat_prompts]
-
-    if forge:
-        cond_stage_model = sd_models.model_data.sd_model.cond_stage_model
-        token_count, max_length = max([model_hijack.get_prompt_lengths(prompt,cond_stage_model) for prompt in prompts],
-                                      key=lambda args: args[0])
-    else:
-        token_count, max_length = max([model_hijack.get_prompt_lengths(prompt) for prompt in prompts],
-                                      key=lambda args: args[0])
-
-    return {"token_count": token_count, "max_length": max_length}
+    # FIX: Use try-except to safely handle PyTorch/model access errors (TypeError NoneType)
+    # that occur during model loading/switching when the token counter API is triggered.
+    try:
+        # copy from modules.ui.py
+        try:
+            text, _ = extra_networks.parse_prompt(text)
+            _, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text])
+            prompt_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(prompt_flat_list, steps)
+        except Exception:
+            # a parsing error can happen here during typing, and we don't want to bother the user with
+            # messages related to it in console
+            prompt_schedules = [[[steps, text]]]
+
+        try:
+            from modules_forge import forge_version
+            forge = True
+        except:
+            forge = False
+
+        flat_prompts = reduce(lambda list1, list2: list1 + list2, prompt_schedules)
+        prompts = [prompt_text for step, prompt_text in flat_prompts]
+
+        if forge:
+            cond_stage_model = sd_models.model_data.sd_model.cond_stage_model
+            token_count, max_length = max([model_hijack.get_prompt_lengths(prompt,cond_stage_model) for prompt in prompts],
+                                          key=lambda args: args[0])
+        else:
+            token_count, max_length = max([model_hijack.get_prompt_lengths(prompt) for prompt in prompts],
+                                          key=lambda args: args[0])
+
+        return {"token_count": token_count, "max_length": max_length}
+    except Exception as e:
+        # return 0 token count if any error (model instability, parsing error, etc.) occurs during calculation
+        return {"token_count": 0, "max_length": 0}
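A side note on the forge branch in the hunk above: it relies on import-based feature detection rather than a version flag, since Forge builds ship a modules_forge package that vanilla A1111 installs lack. The same pattern in isolation (runnable anywhere; the bare except of the original is narrowed to ImportError here, and the print line is just for demonstration):

# Import-based feature detection, as used in the hunk above: a successful
# import of modules_forge selects the Forge code path.
try:
    from modules_forge import forge_version  # present only on Forge builds
    forge = True
except ImportError:
    forge = False

# Downstream code then picks the matching call signature, e.g. passing
# cond_stage_model to get_prompt_lengths only when running under Forge.
print("forge build" if forge else "vanilla webui")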