Compare commits


3 Commits

Author             SHA1         Message                                              Date
Jedrzej Kosinski   6e78fd5271   Merge branch 'master' into christian-byrne-patch-4   2026-02-23 22:46:13 -08:00
Christian Byrne    42ce30946f   Update requirements.txt                              2026-02-23 22:20:04 -08:00
Christian Byrne    8481dc6a3b   Update requirements.txt                              2026-02-23 21:29:07 -08:00
11 changed files with 39 additions and 84 deletions

View File

@@ -46,8 +46,6 @@ class NodeReplaceManager:
         connections: dict[str, list[tuple[str, str, int]]] = {}
         need_replacement: set[str] = set()
         for node_number, node_struct in prompt.items():
-            if "class_type" not in node_struct or "inputs" not in node_struct:
-                continue
             class_type = node_struct["class_type"]
             # need replacement if not in NODE_CLASS_MAPPINGS and has replacement
             if class_type not in nodes.NODE_CLASS_MAPPINGS.keys() and self.has_replacement(class_type):
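
Note: the guard removed above skipped prompt entries that lack "class_type" or "inputs"; without it, the loop indexes "class_type" unconditionally. A minimal standalone sketch of the guard pattern, using toy data rather than the repo's structures:

prompt = {
    "1": {"class_type": "KSampler", "inputs": {"seed": 0}},
    "2": {"extra_metadata": True},  # hypothetical entry that is not a well-formed node
}

for node_number, node_struct in prompt.items():
    if "class_type" not in node_struct or "inputs" not in node_struct:
        continue  # skip entries that are not well-formed nodes
    print(node_number, node_struct["class_type"])  # only node "1" is processed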

View File

@@ -271,7 +271,6 @@ class ModelPatcher:
         self.is_clip = False
         self.hook_mode = comfy.hooks.EnumHookMode.MaxSpeed
-        self.cached_patcher_init: tuple[Callable, tuple] | None = None
         if not hasattr(self.model, 'model_loaded_weight_memory'):
             self.model.model_loaded_weight_memory = 0
@@ -308,15 +307,8 @@ class ModelPatcher:
     def get_free_memory(self, device):
         return comfy.model_management.get_free_memory(device)

-    def clone(self, disable_dynamic=False):
-        class_ = self.__class__
-        model = self.model
-        if self.is_dynamic() and disable_dynamic:
-            class_ = ModelPatcher
-            temp_model_patcher = self.cached_patcher_init[0](*self.cached_patcher_init[1], disable_dynamic=True)
-            model = temp_model_patcher.model
-        n = class_(model, self.load_device, self.offload_device, self.model_size(), weight_inplace_update=self.weight_inplace_update)
+    def clone(self):
+        n = self.__class__(self.model, self.load_device, self.offload_device, self.model_size(), weight_inplace_update=self.weight_inplace_update)
         n.patches = {}
         for k in self.patches:
             n.patches[k] = self.patches[k][:]
@@ -370,8 +362,6 @@ class ModelPatcher:
         n.is_clip = self.is_clip
         n.hook_mode = self.hook_mode
-        n.cached_patcher_init = self.cached_patcher_init
         for callback in self.get_all_callbacks(CallbacksMP.ON_CLONE):
             callback(self, n)
         return n
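
Reviewer note: the clone() variant removed above relied on cached_patcher_init, a (factory, args) tuple recorded by the loaders in sd.py, to rebuild a plain non-dynamic patcher on demand. A hedged sketch of that cached-factory pattern with stand-in names, not the actual ModelPatcher internals:

from typing import Callable

class BasePatcher:
    def __init__(self, model):
        self.model = model
        # (factory, args) recorded by whichever loader built this patcher
        self.cached_patcher_init: tuple[Callable, tuple] | None = None

    def is_dynamic(self) -> bool:
        return False  # a dynamic subclass would return True

    def clone(self, disable_dynamic: bool = False) -> "BasePatcher":
        cls, model = self.__class__, self.model
        if self.is_dynamic() and disable_dynamic and self.cached_patcher_init:
            # Re-run the original loader call to get a non-dynamic model,
            # then clone as the plain base class instead of the subclass.
            factory, args = self.cached_patcher_init
            model = factory(*args, disable_dynamic=True).model
            cls = BasePatcher
        return cls(model)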

View File

@@ -19,7 +19,7 @@
 import torch
 import logging
 import comfy.model_management
-from comfy.cli_args import args, PerformanceFeature
+from comfy.cli_args import args, PerformanceFeature, enables_dynamic_vram
 import comfy.float
 import json
 import comfy.memory_management
@@ -296,7 +296,7 @@ class disable_weight_init:
     class Linear(torch.nn.Linear, CastWeightBiasOp):
         def __init__(self, in_features, out_features, bias=True, device=None, dtype=None):
-            if not comfy.model_management.WINDOWS or not comfy.memory_management.aimdo_enabled:
+            if not comfy.model_management.WINDOWS or not enables_dynamic_vram():
                 super().__init__(in_features, out_features, bias, device, dtype)
                 return
@@ -317,7 +317,7 @@ class disable_weight_init:
         def _load_from_state_dict(self, state_dict, prefix, local_metadata,
                                   strict, missing_keys, unexpected_keys, error_msgs):
-            if not comfy.model_management.WINDOWS or not comfy.memory_management.aimdo_enabled:
+            if not comfy.model_management.WINDOWS or not enables_dynamic_vram():
                 return super()._load_from_state_dict(state_dict, prefix, local_metadata, strict,
                                                      missing_keys, unexpected_keys, error_msgs)
             assign_to_params_buffers = local_metadata.get("assign_to_params_buffers", False)
@@ -827,10 +827,6 @@ def mixed_precision_ops(quant_config={}, compute_dtype=torch.bfloat16, full_prec
             else:
                 sd = {}
-            if not hasattr(self, 'weight'):
-                logging.warning("Warning: state dict on uninitialized op {}".format(prefix))
-                return sd
             if self.bias is not None:
                 sd["{}bias".format(prefix)] = self.bias
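
The recurring change in this file replaces direct reads of comfy.memory_management.aimdo_enabled with a single helper imported from comfy.cli_args. A hedged sketch of that kind of centralized feature gate; the real enables_dynamic_vram() body may differ:

class _Args:
    dynamic_vram = False  # stand-in for the parsed CLI argument namespace

args = _Args()

def enables_dynamic_vram() -> bool:
    # One authoritative check, instead of every call site reaching into
    # memory-management module state directly.
    return bool(args.dynamic_vram)

if not enables_dynamic_vram():
    print("taking the standard torch.nn.Linear init path")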

View File

@@ -1530,24 +1530,14 @@ def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_cl
     return (model, clip, vae)

-def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None, output_model=True, model_options={}, te_model_options={}, disable_dynamic=False):
+def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None, output_model=True, model_options={}, te_model_options={}):
     sd, metadata = comfy.utils.load_torch_file(ckpt_path, return_metadata=True)
-    out = load_state_dict_guess_config(sd, output_vae, output_clip, output_clipvision, embedding_directory, output_model, model_options, te_model_options=te_model_options, metadata=metadata, disable_dynamic=disable_dynamic)
+    out = load_state_dict_guess_config(sd, output_vae, output_clip, output_clipvision, embedding_directory, output_model, model_options, te_model_options=te_model_options, metadata=metadata)
     if out is None:
         raise RuntimeError("ERROR: Could not detect model type of: {}\n{}".format(ckpt_path, model_detection_error_hint(ckpt_path, sd)))
-    if output_model:
-        out[0].cached_patcher_init = (load_checkpoint_guess_config_model_only, (ckpt_path, embedding_directory, model_options, te_model_options))
     return out

-def load_checkpoint_guess_config_model_only(ckpt_path, embedding_directory=None, model_options={}, te_model_options={}, disable_dynamic=False):
-    model, *_ = load_checkpoint_guess_config(ckpt_path, False, False, False,
-                                             embedding_directory=embedding_directory,
-                                             model_options=model_options,
-                                             te_model_options=te_model_options,
-                                             disable_dynamic=disable_dynamic)
-    return model
-
-def load_state_dict_guess_config(sd, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None, output_model=True, model_options={}, te_model_options={}, metadata=None, disable_dynamic=False):
+def load_state_dict_guess_config(sd, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None, output_model=True, model_options={}, te_model_options={}, metadata=None):
     clip = None
     clipvision = None
     vae = None
@@ -1596,8 +1586,7 @@ def load_state_dict_guess_config(sd, output_vae=True, output_clip=True, output_c
     if output_model:
         inital_load_device = model_management.unet_inital_load_device(parameters, unet_dtype)
         model = model_config.get_model(sd, diffusion_model_prefix, device=inital_load_device)
-        ModelPatcher = comfy.model_patcher.ModelPatcher if disable_dynamic else comfy.model_patcher.CoreModelPatcher
-        model_patcher = ModelPatcher(model, load_device=load_device, offload_device=model_management.unet_offload_device())
+        model_patcher = comfy.model_patcher.CoreModelPatcher(model, load_device=load_device, offload_device=model_management.unet_offload_device())
         model.load_model_weights(sd, diffusion_model_prefix, assign=model_patcher.is_dynamic())

     if output_vae:
@@ -1648,7 +1637,7 @@ def load_state_dict_guess_config(sd, output_vae=True, output_clip=True, output_c
     return (model_patcher, clip, vae, clipvision)

-def load_diffusion_model_state_dict(sd, model_options={}, metadata=None, disable_dynamic=False):
+def load_diffusion_model_state_dict(sd, model_options={}, metadata=None):
     """
     Loads a UNet diffusion model from a state dictionary, supporting both diffusers and regular formats.
@@ -1732,8 +1721,7 @@ def load_diffusion_model_state_dict(sd, model_options={}, metadata=None, disable
         model_config.optimizations["fp8"] = True

     model = model_config.get_model(new_sd, "")
-    ModelPatcher = comfy.model_patcher.ModelPatcher if disable_dynamic else comfy.model_patcher.CoreModelPatcher
-    model_patcher = ModelPatcher(model, load_device=load_device, offload_device=offload_device)
+    model_patcher = comfy.model_patcher.CoreModelPatcher(model, load_device=load_device, offload_device=offload_device)
     if not model_management.is_device_cpu(offload_device):
         model.to(offload_device)
     model.load_model_weights(new_sd, "", assign=model_patcher.is_dynamic())
@@ -1742,13 +1730,12 @@ def load_diffusion_model_state_dict(sd, model_options={}, metadata=None, disable
         logging.info("left over keys in diffusion model: {}".format(left_over))
     return model_patcher

-def load_diffusion_model(unet_path, model_options={}, disable_dynamic=False):
+def load_diffusion_model(unet_path, model_options={}):
     sd, metadata = comfy.utils.load_torch_file(unet_path, return_metadata=True)
-    model = load_diffusion_model_state_dict(sd, model_options=model_options, metadata=metadata, disable_dynamic=disable_dynamic)
+    model = load_diffusion_model_state_dict(sd, model_options=model_options, metadata=metadata)
     if model is None:
         logging.error("ERROR UNSUPPORTED DIFFUSION MODEL {}".format(unet_path))
         raise RuntimeError("ERROR: Could not detect model type of: {}\n{}".format(unet_path, model_detection_error_hint(unet_path, sd)))
-    model.cached_patcher_init = (load_diffusion_model, (unet_path, model_options))
     return model

 def load_unet(unet_path, dtype=None):
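
For orientation, the disable_dynamic keyword deleted throughout this file was threaded through the loader chain purely to pick the patcher class. A simplified, hedged sketch of that threading, with stand-in classes and the wiring reduced to the relevant line:

class ModelPatcher:  # stand-ins for the comfy.model_patcher classes
    def __init__(self, model): self.model = model

class CoreModelPatcher(ModelPatcher):
    pass

def load_diffusion_model_state_dict(sd, disable_dynamic=False):
    # The revert hardcodes CoreModelPatcher instead of this selection.
    patcher_cls = ModelPatcher if disable_dynamic else CoreModelPatcher
    return patcher_cls(sd)

def load_diffusion_model(path, disable_dynamic=False):
    sd = {"path": path}  # stand-in for comfy.utils.load_torch_file(path)
    return load_diffusion_model_state_dict(sd, disable_dynamic=disable_dynamic)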

View File

@@ -29,7 +29,7 @@ import itertools
 from torch.nn.functional import interpolate
 from tqdm.auto import trange
 from einops import rearrange
-from comfy.cli_args import args
+from comfy.cli_args import args, enables_dynamic_vram
 import json
 import time
 import mmap
@@ -113,7 +113,7 @@ def load_torch_file(ckpt, safe_load=False, device=None, return_metadata=False):
     metadata = None
     if ckpt.lower().endswith(".safetensors") or ckpt.lower().endswith(".sft"):
         try:
-            if comfy.memory_management.aimdo_enabled:
+            if enables_dynamic_vram():
                 sd, metadata = load_safetensors(ckpt)
                 if not return_metadata:
                     metadata = None

View File

@@ -27,7 +27,6 @@ class Seedream4TaskCreationRequest(BaseModel):
     sequential_image_generation: str = Field("disabled")
     sequential_image_generation_options: Seedream4Options = Field(Seedream4Options(max_images=15))
     watermark: bool = Field(False)
-    output_format: str | None = None


 class ImageTaskCreationResponse(BaseModel):
@@ -107,7 +106,6 @@ RECOMMENDED_PRESETS_SEEDREAM_4 = [
     ("2496x1664 (3:2)", 2496, 1664),
     ("1664x2496 (2:3)", 1664, 2496),
     ("3024x1296 (21:9)", 3024, 1296),
-    ("3072x3072 (1:1)", 3072, 3072),
     ("4096x4096 (1:1)", 4096, 4096),
     ("Custom", None, None),
 ]

View File

@@ -37,12 +37,6 @@ from comfy_api_nodes.util import (
 BYTEPLUS_IMAGE_ENDPOINT = "/proxy/byteplus/api/v3/images/generations"

-SEEDREAM_MODELS = {
-    "seedream 5.0 lite": "seedream-5-0-260128",
-    "seedream-4-5-251128": "seedream-4-5-251128",
-    "seedream-4-0-250828": "seedream-4-0-250828",
-}

 # Long-running tasks endpoints(e.g., video)
 BYTEPLUS_TASK_ENDPOINT = "/proxy/byteplus/api/v3/contents/generations/tasks"
 BYTEPLUS_TASK_STATUS_ENDPOINT = "/proxy/byteplus/api/v3/contents/generations/tasks"  # + /{task_id}
@@ -186,13 +180,14 @@ class ByteDanceSeedreamNode(IO.ComfyNode):
     def define_schema(cls):
         return IO.Schema(
             node_id="ByteDanceSeedreamNode",
-            display_name="ByteDance Seedream 5.0",
+            display_name="ByteDance Seedream 4.5",
             category="api node/image/ByteDance",
             description="Unified text-to-image generation and precise single-sentence editing at up to 4K resolution.",
             inputs=[
                 IO.Combo.Input(
                     "model",
-                    options=list(SEEDREAM_MODELS.keys()),
+                    options=["seedream-4-5-251128", "seedream-4-0-250828"],
+                    tooltip="Model name",
                 ),
                 IO.String.Input(
                     "prompt",
@@ -203,7 +198,7 @@ class ByteDanceSeedreamNode(IO.ComfyNode):
                 IO.Image.Input(
                     "image",
                     tooltip="Input image(s) for image-to-image generation. "
-                            "Reference image(s) for single or multi-reference generation.",
+                            "List of 1-10 images for single or multi-reference generation.",
                     optional=True,
                 ),
                 IO.Combo.Input(
@@ -215,8 +210,8 @@ class ByteDanceSeedreamNode(IO.ComfyNode):
                     "width",
                     default=2048,
                     min=1024,
-                    max=6240,
-                    step=2,
+                    max=4096,
+                    step=8,
                     tooltip="Custom width for image. Value is working only if `size_preset` is set to `Custom`",
                     optional=True,
                 ),
@@ -224,8 +219,8 @@ class ByteDanceSeedreamNode(IO.ComfyNode):
                     "height",
                     default=2048,
                     min=1024,
-                    max=4992,
-                    step=2,
+                    max=4096,
+                    step=8,
                     tooltip="Custom height for image. Value is working only if `size_preset` is set to `Custom`",
                     optional=True,
                 ),
@@ -288,8 +283,7 @@ class ByteDanceSeedreamNode(IO.ComfyNode):
                 depends_on=IO.PriceBadgeDepends(widgets=["model"]),
                 expr="""
                 (
-                    $price := $contains(widgets.model, "5.0 lite") ? 0.035 :
-                              $contains(widgets.model, "4-5") ? 0.04 : 0.03;
+                    $price := $contains(widgets.model, "seedream-4-5-251128") ? 0.04 : 0.03;
                     {
                         "type":"usd",
                         "usd": $price,
@@ -315,7 +309,6 @@ class ByteDanceSeedreamNode(IO.ComfyNode):
         watermark: bool = False,
         fail_on_partial: bool = True,
     ) -> IO.NodeOutput:
-        model = SEEDREAM_MODELS[model]
         validate_string(prompt, strip_whitespace=True, min_length=1)
         w = h = None
         for label, tw, th in RECOMMENDED_PRESETS_SEEDREAM_4:
@@ -325,12 +318,15 @@ class ByteDanceSeedreamNode(IO.ComfyNode):
         if w is None or h is None:
             w, h = width, height
+            if not (1024 <= w <= 4096) or not (1024 <= h <= 4096):
+                raise ValueError(
+                    f"Custom size out of range: {w}x{h}. " "Both width and height must be between 1024 and 4096 pixels."
+                )
         out_num_pixels = w * h
         mp_provided = out_num_pixels / 1_000_000.0
-        if ("seedream-4-5" in model or "seedream-5-0" in model) and out_num_pixels < 3686400:
+        if "seedream-4-5" in model and out_num_pixels < 3686400:
             raise ValueError(
-                f"Minimum image resolution for the selected model is 3.68MP, "
+                f"Minimum image resolution that Seedream 4.5 can generate is 3.68MP, "
                 f"but {mp_provided:.2f}MP provided."
             )
         if "seedream-4-0" in model and out_num_pixels < 921600:
@@ -338,18 +334,9 @@ class ByteDanceSeedreamNode(IO.ComfyNode):
                 f"Minimum image resolution that the selected model can generate is 0.92MP, "
                 f"but {mp_provided:.2f}MP provided."
             )
-        max_pixels = 10_404_496 if "seedream-5-0" in model else 16_777_216
-        if out_num_pixels > max_pixels:
-            raise ValueError(
-                f"Maximum image resolution for the selected model is {max_pixels / 1_000_000:.2f}MP, "
-                f"but {mp_provided:.2f}MP provided."
-            )
         n_input_images = get_number_of_images(image) if image is not None else 0
-        max_num_of_images = 14 if model == "seedream-5-0-260128" else 10
-        if n_input_images > max_num_of_images:
-            raise ValueError(
-                f"Maximum of {max_num_of_images} reference images are supported, but {n_input_images} received."
-            )
+        if n_input_images > 10:
+            raise ValueError(f"Maximum of 10 reference images are supported, but {n_input_images} received.")
         if sequential_image_generation == "auto" and n_input_images + max_images > 15:
             raise ValueError(
                 "The maximum number of generated images plus the number of reference images cannot exceed 15."
@@ -377,7 +364,6 @@ class ByteDanceSeedreamNode(IO.ComfyNode):
                 sequential_image_generation=sequential_image_generation,
                 sequential_image_generation_options=Seedream4Options(max_images=max_images),
                 watermark=watermark,
-                output_format="png" if model == "seedream-5-0-260128" else None,
             ),
         )
         if len(response.data) == 1:
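
Sanity check on the thresholds above: 3686400 px is exactly 1920x1920, so the node's 2048x2048 default clears the Seedream 4.5 floor, while sizes near the widget's 1024-pixel minimum do not. A quick standalone verification:

for w, h in [(2048, 2048), (1024, 1024), (1920, 1920)]:
    px = w * h
    status = "ok for 4.5" if px >= 3_686_400 else "below the 3.68MP floor"
    print(f"{w}x{h}: {px / 1_000_000:.2f}MP -> {status}")
# 2048x2048: 4.19MP -> ok for 4.5
# 1024x1024: 1.05MP -> below the 3.68MP floor
# 1920x1920: 3.69MP -> ok for 4.5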

View File

@@ -25,7 +25,7 @@ class TorchCompileModel(io.ComfyNode):
     @classmethod
     def execute(cls, model, backend) -> io.NodeOutput:
-        m = model.clone(disable_dynamic=True)
+        m = model.clone()
         set_torch_compile_wrapper(model=m, backend=backend, options={"guard_filter_fn": skip_torch_compile_dict})
         return io.NodeOutput(m)

View File

@@ -1,3 +1,3 @@
 # This file is automatically generated by the build process when version is
 # updated in pyproject.toml.
-__version__ = "0.15.0"
+__version__ = "0.14.1"

View File

@@ -1,6 +1,6 @@
 [project]
 name = "ComfyUI"
-version = "0.15.0"
+version = "0.14.1"
 readme = "README.md"
 license = { file = "LICENSE" }
 requires-python = ">=3.10"

View File

@@ -1,6 +1,6 @@
comfyui-frontend-package==1.39.19
comfyui-workflow-templates==0.9.3
comfyui-embedded-docs==0.4.3
comfyui-frontend-package==1.39.16
comfyui-workflow-templates==0.9.2
comfyui-embedded-docs==0.4.1
torch
torchsde
torchvision
@@ -22,7 +22,7 @@ alembic
SQLAlchemy
av>=14.2.0
comfy-kitchen>=0.2.7
comfy-aimdo>=0.2.2
comfy-aimdo>=0.2.0
requests
#non essential dependencies: