Fix: Use try-except to stabilize token counter during model access

This fixes a server crash (a `TypeError` on a `NoneType` value raised inside `torch/nn/modules/module.py`) that occurred when the token counter API was triggered during a model load or model switch.

Standard sequential null checks were not sufficient, because the crash occurred inside PyTorch's internal attribute-lookup machinery rather than at the point of the check. The whole function is therefore wrapped in a try-except block as a robust defense against this race condition.
This commit is contained in:
ねおん
2025-11-26 16:22:35 +09:00
committed by GitHub
parent 077daf3f6e
commit 88be6531e8

View File

@@ -4,11 +4,9 @@ from functools import partial, reduce
def get_token_counter(text, steps):
# FIX: Robust Null Check to prevent TypeError during model loading/unloading.
# Checks for the existence of model_data and its property sd_model sequentially.
if sd_models.model_data is None or sd_models.model_data.sd_model is None:
return {"token_count": 0, "max_length": 0}
# FIX: Use try-except to safely handle PyTorch/model access errors (TypeError NoneType)
# that occur during model loading/switching when the token counter API is triggered.
try:
# copy from modules.ui.py
try:
text, _ = extra_networks.parse_prompt(text)
@@ -40,3 +38,7 @@ def get_token_counter(text, steps):
key=lambda args: args[0])
return {"token_count": token_count, "max_length": max_length}
except Exception as e:
# return 0 token count if any error (model instability, parsing error, etc.) occurs during calculation
return {"token_count": 0, "max_length": 0}