Be less strict when loading mixed ops weights. (#11769)

comfyanonymous
2026-01-09 11:21:06 -08:00
committed by GitHub
parent ec0a832acb
commit bd0e6825e8


@@ -546,7 +546,8 @@ def mixed_precision_ops(quant_config={}, compute_dtype=torch.bfloat16, full_prec
         weight_key = f"{prefix}weight"
         weight = state_dict.pop(weight_key, None)
         if weight is None:
-            raise ValueError(f"Missing weight for layer {layer_name}")
+            logging.warning(f"Missing weight for layer {layer_name}")
+            return
         manually_loaded_keys = [weight_key]
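
The practical effect: a checkpoint that lacks a weight for some layer now loads with a warning instead of aborting the whole load. Below is a minimal, runnable sketch of the pattern; the helper name load_layer_weight and the sample state dict are illustrative stand-ins, not names from the ComfyUI codebase.

import logging

import torch

def load_layer_weight(state_dict, prefix, layer_name):
    # Mirrors the patched behavior: a missing weight is logged and the
    # layer is skipped, so the rest of the model can still be loaded.
    weight_key = f"{prefix}weight"
    weight = state_dict.pop(weight_key, None)
    if weight is None:
        # Pre-commit behavior was raise ValueError(...), which failed
        # the entire load over one absent key.
        logging.warning(f"Missing weight for layer {layer_name}")
        return None
    return weight

sd = {"layer0.weight": torch.zeros(4, 4)}
print(load_layer_weight(dict(sd), "layer0.", "layer0"))  # returns the tensor
print(load_layer_weight(dict(sd), "layer1.", "layer1"))  # warns, returns None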