From 88be6531e8c7cc29d8d8436912b1014a71500960 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E3=81=AD=E3=81=8A=E3=82=93?= <240400715+neon-aiart@users.noreply.github.com>
Date: Wed, 26 Nov 2025 16:22:35 +0900
Subject: [PATCH] Fix: Use try-except to stabilize token counter during model access

This fixes a server crash (a NoneType TypeError raised from
torch/nn/modules/module.py) that occurs when the token counter API is
triggered during a model load or switch. Sequential None checks were not
enough, because the crash happens inside PyTorch's internal attribute
lookup. The whole function is therefore wrapped in try-except as a robust
defense against this race condition.
---
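Notes (not part of the commit message): a minimal sketch of the failure mode
described above, assuming sd_models is imported in this file the same way it
is used elsewhere in the extension. The previous guard could pass and the
very next attribute access could still raise, because that access is resolved
through torch.nn.Module.__getattr__ on a model that is being loaded or torn
down concurrently:

    from modules import sd_models  # assumed import, matching the existing file

    def get_token_counter_guarded(text, steps):  # hypothetical name, for illustration only
        # The old guard: both checks can evaluate to False...
        if sd_models.model_data is None or sd_models.model_data.sd_model is None:
            return {"token_count": 0, "max_length": 0}
        # ...yet a model switch can land between the check and this access, and the
        # lookup then fails inside PyTorch's attribute machinery with a TypeError.
        cond_stage_model = sd_models.model_data.sd_model.cond_stage_model
        ...

Wrapping the whole body in try-except, as this patch does, catches the failure
wherever the attribute access happens to blow up.
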
 scripts/physton_prompt/get_token_counter.py | 72 +++++++++++----------
 1 file changed, 37 insertions(+), 35 deletions(-)

diff --git a/scripts/physton_prompt/get_token_counter.py b/scripts/physton_prompt/get_token_counter.py
index 462fc5c..169f6e5 100644
--- a/scripts/physton_prompt/get_token_counter.py
+++ b/scripts/physton_prompt/get_token_counter.py
@@ -4,39 +4,41 @@ from functools import partial, reduce
 
 
 def get_token_counter(text, steps):
-    # FIX: Robust Null Check to prevent TypeError during model loading/unloading.
-    # Checks for the existence of model_data and its property sd_model sequentially.
-    if sd_models.model_data is None or sd_models.model_data.sd_model is None:
+    # FIX: Use try-except to safely handle PyTorch/model access errors (TypeError NoneType)
+    # that occur during model loading/switching when the token counter API is triggered.
+    try:
+        # copy from modules.ui.py
+        try:
+            text, _ = extra_networks.parse_prompt(text)
+
+            _, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text])
+            prompt_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(prompt_flat_list, steps)
+
+        except Exception:
+            # a parsing error can happen here during typing, and we don't want to bother the user with
+            # messages related to it in console
+            prompt_schedules = [[[steps, text]]]
+
+        try:
+            from modules_forge import forge_version
+            forge = True
+
+        except:
+            forge = False
+
+        flat_prompts = reduce(lambda list1, list2: list1 + list2, prompt_schedules)
+        prompts = [prompt_text for step, prompt_text in flat_prompts]
+
+        if forge:
+            cond_stage_model = sd_models.model_data.sd_model.cond_stage_model
+            token_count, max_length = max([model_hijack.get_prompt_lengths(prompt,cond_stage_model) for prompt in prompts],
+                                          key=lambda args: args[0])
+        else:
+            token_count, max_length = max([model_hijack.get_prompt_lengths(prompt) for prompt in prompts],
+                                          key=lambda args: args[0])
+
+        return {"token_count": token_count, "max_length": max_length}
+
+    except Exception as e:
+        # return 0 token count if any error (model instability, parsing error, etc.) occurs during calculation
         return {"token_count": 0, "max_length": 0}
-
-    # copy from modules.ui.py
-    try:
-        text, _ = extra_networks.parse_prompt(text)
-
-        _, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text])
-        prompt_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(prompt_flat_list, steps)
-
-    except Exception:
-        # a parsing error can happen here during typing, and we don't want to bother the user with
-        # messages related to it in console
-        prompt_schedules = [[[steps, text]]]
-
-    try:
-        from modules_forge import forge_version
-        forge = True
-
-    except:
-        forge = False
-
-    flat_prompts = reduce(lambda list1, list2: list1 + list2, prompt_schedules)
-    prompts = [prompt_text for step, prompt_text in flat_prompts]
-
-    if forge:
-        cond_stage_model = sd_models.model_data.sd_model.cond_stage_model
-        token_count, max_length = max([model_hijack.get_prompt_lengths(prompt,cond_stage_model) for prompt in prompts],
-                                      key=lambda args: args[0])
-    else:
-        token_count, max_length = max([model_hijack.get_prompt_lengths(prompt) for prompt in prompts],
-                                      key=lambda args: args[0])
-
-    return {"token_count": token_count, "max_length": max_length}