mirror of https://github.com/comfyanonymous/ComfyUI.git
synced 2026-02-17 13:40:04 +00:00

Compare commits
1 commit
pysssss/no ... painter-no

| Author | SHA1 | Date |
|---|---|---|
|  | 2c5d49efd1 |  |
@@ -1,105 +0,0 @@
from __future__ import annotations

from aiohttp import web

from typing import TYPE_CHECKING, TypedDict
if TYPE_CHECKING:
    from comfy_api.latest._io_public import NodeReplace

from comfy_execution.graph_utils import is_link
import nodes


class NodeStruct(TypedDict):
    inputs: dict[str, str | int | float | bool | tuple[str, int]]
    class_type: str
    _meta: dict[str, str]


def copy_node_struct(node_struct: NodeStruct, empty_inputs: bool = False) -> NodeStruct:
    new_node_struct = node_struct.copy()
    if empty_inputs:
        new_node_struct["inputs"] = {}
    else:
        new_node_struct["inputs"] = node_struct["inputs"].copy()
    new_node_struct["_meta"] = node_struct["_meta"].copy()
    return new_node_struct


class NodeReplaceManager:
    """Manages node replacement registrations."""

    def __init__(self):
        self._replacements: dict[str, list[NodeReplace]] = {}

    def register(self, node_replace: NodeReplace):
        """Register a node replacement mapping."""
        self._replacements.setdefault(node_replace.old_node_id, []).append(node_replace)

    def get_replacement(self, old_node_id: str) -> list[NodeReplace] | None:
        """Get replacements for an old node ID."""
        return self._replacements.get(old_node_id)

    def has_replacement(self, old_node_id: str) -> bool:
        """Check if a replacement exists for an old node ID."""
        return old_node_id in self._replacements

    def apply_replacements(self, prompt: dict[str, NodeStruct]):
        connections: dict[str, list[tuple[str, str, int]]] = {}
        need_replacement: set[str] = set()
        for node_number, node_struct in prompt.items():
            class_type = node_struct["class_type"]
            # need replacement if not in NODE_CLASS_MAPPINGS and has replacement
            if class_type not in nodes.NODE_CLASS_MAPPINGS.keys() and self.has_replacement(class_type):
                need_replacement.add(node_number)
            # keep track of connections
            for input_id, input_value in node_struct["inputs"].items():
                if is_link(input_value):
                    conn_number = input_value[0]
                    connections.setdefault(conn_number, []).append((node_number, input_id, input_value[1]))
        for node_number in need_replacement:
            node_struct = prompt[node_number]
            class_type = node_struct["class_type"]
            replacements = self.get_replacement(class_type)
            if replacements is None:
                continue
            # just use the first replacement
            replacement = replacements[0]
            new_node_id = replacement.new_node_id
            # if replacement is not a valid node, skip trying to replace it as will only cause confusion
            if new_node_id not in nodes.NODE_CLASS_MAPPINGS.keys():
                continue
            # first, replace node id (class_type)
            new_node_struct = copy_node_struct(node_struct, empty_inputs=True)
            new_node_struct["class_type"] = new_node_id
            # TODO: consider replacing display_name in _meta as well for error reporting purposes; would need to query node schema
            # second, replace inputs
            if replacement.input_mapping is not None:
                for input_map in replacement.input_mapping:
                    if "set_value" in input_map:
                        new_node_struct["inputs"][input_map["new_id"]] = input_map["set_value"]
                    elif "old_id" in input_map:
                        new_node_struct["inputs"][input_map["new_id"]] = node_struct["inputs"][input_map["old_id"]]
            # finalize input replacement
            prompt[node_number] = new_node_struct
            # third, replace outputs
            if replacement.output_mapping is not None:
                # re-mapping outputs requires changing the input values of nodes that receive connections from this one
                if node_number in connections:
                    for conns in connections[node_number]:
                        conn_node_number, conn_input_id, old_output_idx = conns
                        for output_map in replacement.output_mapping:
                            if output_map["old_idx"] == old_output_idx:
                                new_output_idx = output_map["new_idx"]
                                previous_input = prompt[conn_node_number]["inputs"][conn_input_id]
                                previous_input[1] = new_output_idx

    def as_dict(self):
        """Serialize all replacements to dict."""
        return {
            k: [v.as_dict() for v in v_list]
            for k, v_list in self._replacements.items()
        }

    def add_routes(self, routes):
        @routes.get("/node_replacements")
        async def get_node_replacements(request):
            return web.json_response(self.as_dict())
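The removed manager rewrites API-format prompts in place: it notes which prompt entries reference unknown node classes, swaps in a registered replacement's class_type, remaps inputs, and fixes up downstream link indices. A minimal sketch of the data it operated on; the node names and input IDs here are hypothetical, not taken from the diff:

```python
# Illustrative only. Given a registered
# NodeReplace(new_node_id="ImageScale", old_node_id="ImageScaleOld",
#             input_mapping=[{"new_id": "image", "old_id": "image"},
#                            {"new_id": "upscale_method", "set_value": "nearest-exact"}]),
# apply_replacements would turn this prompt entry:
node_before = {
    "class_type": "ImageScaleOld",          # no longer present in NODE_CLASS_MAPPINGS
    "inputs": {"image": ["1", 0]},          # ["1", 0] is a link: node "1", output slot 0
    "_meta": {"title": "Image Scale (old)"},
}
# ...into this one, leaving _meta and incoming links intact:
node_after = {
    "class_type": "ImageScale",
    "inputs": {"image": ["1", 0], "upscale_method": "nearest-exact"},
    "_meta": {"title": "Image Scale (old)"},
}
```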
comfy/checkpoint_pickle.py (new file, 13 lines)
@@ -0,0 +1,13 @@
import pickle

load = pickle.load

class Empty:
    pass

class Unpickler(pickle.Unpickler):
    def find_class(self, module, name):
        #TODO: safe unpickle
        if module.startswith("pytorch_lightning"):
            return Empty
        return super().find_class(module, name)
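The `load_torch_file` hunk later in this diff passes this module as `pickle_module`, so legacy `.ckpt` files can still be deserialized while references to `pytorch_lightning` callback classes resolve to the `Empty` stub instead of importing the library. A minimal usage sketch; the checkpoint path is a placeholder:

```python
import torch
import comfy.checkpoint_pickle

# Mirrors the unsafe fallback path: torch.load uses the module's Unpickler
# (and `load` for the legacy serialization format).
state = torch.load("some_old_model.ckpt", map_location="cpu",
                   weights_only=False, pickle_module=comfy.checkpoint_pickle)
```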
@@ -102,7 +102,19 @@ class VideoConv3d(nn.Module):
        return self.conv(x)

def interpolate_up(x, scale_factor):
    return torch.nn.functional.interpolate(x, scale_factor=scale_factor, mode="nearest")
    try:
        return torch.nn.functional.interpolate(x, scale_factor=scale_factor, mode="nearest")
    except: #operation not implemented for bf16
        orig_shape = list(x.shape)
        out_shape = orig_shape[:2]
        for i in range(len(orig_shape) - 2):
            out_shape.append(round(orig_shape[i + 2] * scale_factor[i]))
        out = torch.empty(out_shape, dtype=x.dtype, layout=x.layout, device=x.device)
        split = 8
        l = out.shape[1] // split
        for i in range(0, out.shape[1], l):
            out[:,i:i+l] = torch.nn.functional.interpolate(x[:,i:i+l].to(torch.float32), scale_factor=scale_factor, mode="nearest").to(x.dtype)
        return out

class Upsample(nn.Module):
    def __init__(self, in_channels, with_conv, conv_op=ops.Conv2d, scale_factor=2.0):
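The fallback this hunk restores handles backends where nearest-neighbor interpolation is not implemented for bf16: the output is allocated in the original dtype and channel chunks are upsampled in float32, bounding the temporary memory. A small calling sketch, assuming `interpolate_up` is in scope; the shape is illustrative:

```python
import torch

# (N, C, T, H, W) video latent in bf16; keep time, double the spatial dims.
x = torch.randn(1, 16, 4, 32, 32, dtype=torch.bfloat16)
out = interpolate_up(x, scale_factor=(1.0, 2.0, 2.0))  # falls back to chunked fp32 if bf16 is unsupported
assert out.shape == (1, 16, 4, 64, 64) and out.dtype == torch.bfloat16
```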
@@ -374,31 +374,6 @@ def pad_tensor_to_shape(tensor: torch.Tensor, new_shape: list[int]) -> torch.Ten
    return padded_tensor

def calculate_shape(patches, weight, key, original_weights=None):
    current_shape = weight.shape

    for p in patches:
        v = p[1]
        offset = p[3]

        # Offsets restore the old shape; lists force a diff without metadata
        if offset is not None or isinstance(v, list):
            continue

        if isinstance(v, weight_adapter.WeightAdapterBase):
            adapter_shape = v.calculate_shape(key)
            if adapter_shape is not None:
                current_shape = adapter_shape
            continue

        # Standard diff logic with padding
        if len(v) == 2:
            patch_type, patch_data = v[0], v[1]
            if patch_type == "diff" and len(patch_data) > 1 and patch_data[1]['pad_weight']:
                current_shape = patch_data[0].shape

    return current_shape

def calculate_weight(patches, weight, key, intermediate_dtype=torch.float32, original_weights=None):
    for p in patches:
        strength = p[0]
@@ -1514,10 +1514,8 @@ class ModelPatcherDynamic(ModelPatcher):
            weight, _, _ = get_key_weight(self.model, key)
            if weight is None:
                return (False, 0)
                return 0
            if key in self.patches:
                if comfy.lora.calculate_shape(self.patches[key], weight, key) != weight.shape:
                    return (True, 0)
                setattr(m, param_key + "_lowvram_function", LowVramPatch(key, self.patches))
                num_patches += 1
            else:
@@ -1531,13 +1529,7 @@ class ModelPatcherDynamic(ModelPatcher):
                model_dtype = getattr(m, param_key + "_comfy_model_dtype", None) or weight.dtype
                weight._model_dtype = model_dtype
            geometry = comfy.memory_management.TensorGeometry(shape=weight.shape, dtype=model_dtype)
            return (False, comfy.memory_management.vram_aligned_size(geometry))

        def force_load_param(self, param_key, device_to):
            key = key_param_name_to_key(n, param_key)
            if key in self.backup:
                comfy.utils.set_attr_param(self.model, key, self.backup[key].weight)
            self.patch_weight_to_device(key, device_to=device_to)
            return comfy.memory_management.vram_aligned_size(geometry)

        if hasattr(m, "comfy_cast_weights"):
            m.comfy_cast_weights = True
@@ -1545,19 +1537,13 @@ class ModelPatcherDynamic(ModelPatcher):
            m.seed_key = n
            set_dirty(m, dirty)

            force_load, v_weight_size = setup_param(self, m, n, "weight")
            force_load_bias, v_weight_bias = setup_param(self, m, n, "bias")
            force_load = force_load or force_load_bias
            v_weight_size += v_weight_bias
            v_weight_size = 0
            v_weight_size += setup_param(self, m, n, "weight")
            v_weight_size += setup_param(self, m, n, "bias")

            if force_load:
                logging.info(f"Module {n} has resizing Lora - force loading")
                force_load_param(self, "weight", device_to)
                force_load_param(self, "bias", device_to)
            else:
                if vbar is not None and not hasattr(m, "_v"):
                    m._v = vbar.alloc(v_weight_size)
                    allocated_size += v_weight_size
            if vbar is not None and not hasattr(m, "_v"):
                m._v = vbar.alloc(v_weight_size)
                allocated_size += v_weight_size

        else:
            for param in params:
@@ -1620,11 +1606,6 @@ class ModelPatcherDynamic(ModelPatcher):
        for m in self.model.modules():
            move_weight_functions(m, device_to)

        keys = list(self.backup.keys())
        for k in keys:
            bk = self.backup[k]
            comfy.utils.set_attr_param(self.model, k, bk.weight)

    def partially_load(self, device_to, extra_memory=0, force_patch_weights=False):
        assert not force_patch_weights #See above
        with self.use_ejected(skip_and_inject_on_exit_only=True):
@@ -20,7 +20,7 @@
import torch
import math
import struct
import comfy.memory_management
import comfy.checkpoint_pickle
import safetensors.torch
import numpy as np
from PIL import Image
@@ -38,26 +38,26 @@ import warnings
MMAP_TORCH_FILES = args.mmap_torch_files
DISABLE_MMAP = args.disable_mmap


if True: # ckpt/pt file whitelist for safe loading of old sd files
ALWAYS_SAFE_LOAD = False
if hasattr(torch.serialization, "add_safe_globals"): # TODO: this was added in pytorch 2.4, the unsafe path should be removed once earlier versions are deprecated
    class ModelCheckpoint:
        pass
    ModelCheckpoint.__module__ = "pytorch_lightning.callbacks.model_checkpoint"

    def scalar(*args, **kwargs):
        return None
        from numpy.core.multiarray import scalar as sc
        return sc(*args, **kwargs)
    scalar.__module__ = "numpy.core.multiarray"

    from numpy import dtype
    from numpy.dtypes import Float64DType

    def encode(*args, **kwargs): # no longer necessary on newer torch
        return None
    encode.__module__ = "_codecs"
    from _codecs import encode

    torch.serialization.add_safe_globals([ModelCheckpoint, scalar, dtype, Float64DType, encode])
    ALWAYS_SAFE_LOAD = True
    logging.info("Checkpoint files will always be loaded safely.")

else:
    logging.warning("Warning, you are using an old pytorch version and some ckpt/pt files might be loaded unsafely. Upgrading to 2.4 or above is recommended as older versions of pytorch are no longer supported.")

# Current as of safetensors 0.7.0
_TYPES = {
@@ -140,8 +140,11 @@ def load_torch_file(ckpt, safe_load=False, device=None, return_metadata=False):
    if MMAP_TORCH_FILES:
        torch_args["mmap"] = True

    pl_sd = torch.load(ckpt, map_location=device, weights_only=True, **torch_args)

    if safe_load or ALWAYS_SAFE_LOAD:
        pl_sd = torch.load(ckpt, map_location=device, weights_only=True, **torch_args)
    else:
        logging.warning("WARNING: loading {} unsafely, upgrade your pytorch to 2.4 or newer to load this file safely.".format(ckpt))
        pl_sd = torch.load(ckpt, map_location=device, pickle_module=comfy.checkpoint_pickle)
    if "state_dict" in pl_sd:
        sd = pl_sd["state_dict"]
    else:
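The hunk above gates `weights_only=True` loading behind `ALWAYS_SAFE_LOAD`, which is only set when `torch.serialization.add_safe_globals` exists (PyTorch 2.4+). A short sketch of the allowlist mechanism it relies on; the checkpoint path is a placeholder:

```python
import torch

class ModelCheckpoint:  # stand-in for pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint
    pass
ModelCheckpoint.__module__ = "pytorch_lightning.callbacks.model_checkpoint"

# Globals registered here may be reconstructed under weights_only=True;
# anything not on the allowlist is rejected instead of executed.
torch.serialization.add_safe_globals([ModelCheckpoint])
state = torch.load("old_sd_checkpoint.ckpt", map_location="cpu", weights_only=True)
```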
@@ -49,12 +49,6 @@ class WeightAdapterBase:
        """
        raise NotImplementedError

    def calculate_shape(
        self,
        key
    ):
        return None

    def calculate_weight(
        self,
        weight,
@@ -214,13 +214,6 @@ class LoRAAdapter(WeightAdapterBase):
        else:
            return None

    def calculate_shape(
        self,
        key
    ):
        reshape = self.weights[5]
        return tuple(reshape) if reshape is not None else None

    def calculate_weight(
        self,
        weight,
@@ -14,7 +14,6 @@ SERVER_FEATURE_FLAGS: dict[str, Any] = {
    "supports_preview_metadata": True,
    "max_upload_size": args.max_upload_size * 1024 * 1024,  # Convert MB to bytes
    "extension": {"manager": {"supports_v4": True}},
    "node_replacements": True,
}
@@ -21,17 +21,6 @@ class ComfyAPI_latest(ComfyAPIBase):
    VERSION = "latest"
    STABLE = False

    def __init__(self):
        super().__init__()
        self.node_replacement = self.NodeReplacement()
        self.execution = self.Execution()

    class NodeReplacement(ProxiedSingleton):
        async def register(self, node_replace: io.NodeReplace) -> None:
            """Register a node replacement mapping."""
            from server import PromptServer
            PromptServer.instance.node_replace_manager.register(node_replace)

    class Execution(ProxiedSingleton):
        async def set_progress(
            self,
@@ -84,6 +73,8 @@ class ComfyAPI_latest(ComfyAPIBase):
                image=to_display,
            )

    execution: Execution

class ComfyExtension(ABC):
    async def on_load(self) -> None:
        """
@@ -1300,7 +1300,6 @@ class NodeInfoV1:
    name: str=None
    display_name: str=None
    description: str=None
    short_description: str=None
    python_module: Any=None
    category: str=None
    output_node: bool=None
@@ -1391,8 +1390,6 @@ class Schema:
    hidden: list[Hidden] = field(default_factory=list)
    description: str=""
    """Node description, shown as a tooltip when hovering over the node."""
    short_description: str=""
    """Short node description, shown in the node list/search."""
    search_aliases: list[str] = field(default_factory=list)
    """Alternative names for search. Useful for synonyms, abbreviations, or old names after renaming."""
    is_input_list: bool = False
@@ -1531,7 +1528,6 @@ class Schema:
            display_name=self.display_name,
            category=self.category,
            description=self.description,
            short_description=self.short_description,
            output_node=self.is_output_node,
            deprecated=self.is_deprecated,
            experimental=self.is_experimental,
@@ -1775,14 +1771,6 @@ class _ComfyNodeBaseInternal(_ComfyNodeInternal):
            cls.GET_SCHEMA()
        return cls._DESCRIPTION

    _SHORT_DESCRIPTION = None
    @final
    @classproperty
    def SHORT_DESCRIPTION(cls):  # noqa
        if cls._SHORT_DESCRIPTION is None:
            cls.GET_SCHEMA()
        return cls._SHORT_DESCRIPTION

    _CATEGORY = None
    @final
    @classproperty
@@ -1911,8 +1899,6 @@ class _ComfyNodeBaseInternal(_ComfyNodeInternal):
        schema.validate()
        if cls._DESCRIPTION is None:
            cls._DESCRIPTION = schema.description
        if cls._SHORT_DESCRIPTION is None:
            cls._SHORT_DESCRIPTION = schema.short_description
        if cls._CATEGORY is None:
            cls._CATEGORY = schema.category
        if cls._EXPERIMENTAL is None:
@@ -2044,68 +2030,6 @@ class _UIOutput(ABC):
        ...


class InputMapOldId(TypedDict):
    """Map an old node input to a new node input by ID."""
    new_id: str
    old_id: str

class InputMapSetValue(TypedDict):
    """Set a specific value for a new node input."""
    new_id: str
    set_value: Any

InputMap = InputMapOldId | InputMapSetValue
"""
Input mapping for node replacement. Type is inferred by dictionary keys:
- {"new_id": str, "old_id": str} - maps old input to new input
- {"new_id": str, "set_value": Any} - sets a specific value for new input
"""

class OutputMap(TypedDict):
    """Map outputs of node replacement via indexes."""
    new_idx: int
    old_idx: int

class NodeReplace:
    """
    Defines a possible node replacement, mapping inputs and outputs of the old node to the new node.

    Also supports assigning specific values to the input widgets of the new node.

    Args:
        new_node_id: The class name of the new replacement node.
        old_node_id: The class name of the deprecated node.
        old_widget_ids: Ordered list of input IDs for widgets that may not have an input slot
            connected. The workflow JSON stores widget values by their relative position index,
            not by ID. This list maps those positional indexes to input IDs, enabling the
            replacement system to correctly identify widget values during node migration.
        input_mapping: List of input mappings from old node to new node.
        output_mapping: List of output mappings from old node to new node.
    """
    def __init__(self,
                 new_node_id: str,
                 old_node_id: str,
                 old_widget_ids: list[str] | None=None,
                 input_mapping: list[InputMap] | None=None,
                 output_mapping: list[OutputMap] | None=None,
                 ):
        self.new_node_id = new_node_id
        self.old_node_id = old_node_id
        self.old_widget_ids = old_widget_ids
        self.input_mapping = input_mapping
        self.output_mapping = output_mapping

    def as_dict(self):
        """Create serializable representation of the node replacement."""
        return {
            "new_node_id": self.new_node_id,
            "old_node_id": self.old_node_id,
            "old_widget_ids": self.old_widget_ids,
            "input_mapping": list(self.input_mapping) if self.input_mapping else None,
            "output_mapping": list(self.output_mapping) if self.output_mapping else None,
        }


__all__ = [
    "FolderType",
    "UploadType",
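For reference, a `NodeReplace` built against the API being removed above would look like the following sketch; the node and input IDs are hypothetical:

```python
# Hypothetical migration: deprecated "ImageScaleOld" replaced by "ImageScale".
replacement = NodeReplace(
    new_node_id="ImageScale",
    old_node_id="ImageScaleOld",
    old_widget_ids=["upscale_method", "width", "height"],            # widget values by positional ID
    input_mapping=[
        {"new_id": "image", "old_id": "image"},                      # carry an input across
        {"new_id": "upscale_method", "set_value": "nearest-exact"},  # pin a widget value
    ],
    output_mapping=[{"new_idx": 0, "old_idx": 0}],                   # reroute downstream links
)
# Registration went through the NodeReplacement.register coroutine removed earlier in this diff.
```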
@@ -2197,5 +2121,4 @@ __all__ = [
    "ImageCompare",
    "PriceBadgeDepends",
    "PriceBadge",
    "NodeReplace",
]
@@ -45,55 +45,17 @@ class BriaEditImageRequest(BaseModel):
    )


class BriaRemoveBackgroundRequest(BaseModel):
    image: str = Field(...)
    sync: bool = Field(False)
    visual_input_content_moderation: bool = Field(
        False, description="If true, returns 422 on input image moderation failure."
    )
    visual_output_content_moderation: bool = Field(
        False, description="If true, returns 422 on visual output moderation failure."
    )
    seed: int = Field(...)


class BriaStatusResponse(BaseModel):
    request_id: str = Field(...)
    status_url: str = Field(...)
    warning: str | None = Field(None)


class BriaRemoveBackgroundResult(BaseModel):
    image_url: str = Field(...)


class BriaRemoveBackgroundResponse(BaseModel):
    status: str = Field(...)
    result: BriaRemoveBackgroundResult | None = Field(None)


class BriaImageEditResult(BaseModel):
class BriaResult(BaseModel):
    structured_prompt: str = Field(...)
    image_url: str = Field(...)


class BriaImageEditResponse(BaseModel):
class BriaResponse(BaseModel):
    status: str = Field(...)
    result: BriaImageEditResult | None = Field(None)


class BriaRemoveVideoBackgroundRequest(BaseModel):
    video: str = Field(...)
    background_color: str = Field(default="transparent", description="Background color for the output video.")
    output_container_and_codec: str = Field(...)
    preserve_audio: bool = Field(True)
    seed: int = Field(...)


class BriaRemoveVideoBackgroundResult(BaseModel):
    video_url: str = Field(...)


class BriaRemoveVideoBackgroundResponse(BaseModel):
    status: str = Field(...)
    result: BriaRemoveVideoBackgroundResult | None = Field(None)
    result: BriaResult | None = Field(None)
@@ -64,23 +64,3 @@ class To3DProTaskResultResponse(BaseModel):

class To3DProTaskQueryRequest(BaseModel):
    JobId: str = Field(...)


class To3DUVFileInput(BaseModel):
    Type: str = Field(..., description="File type: GLB, OBJ, or FBX")
    Url: str = Field(...)


class To3DUVTaskRequest(BaseModel):
    File: To3DUVFileInput = Field(...)


class TextureEditImageInfo(BaseModel):
    Url: str = Field(...)


class TextureEditTaskRequest(BaseModel):
    File3D: To3DUVFileInput = Field(...)
    Image: TextureEditImageInfo | None = Field(None)
    Prompt: str | None = Field(None)
    EnablePBR: bool | None = Field(None)
@@ -44,7 +44,6 @@ class FluxProUltraImageNode(IO.ComfyNode):
            display_name="Flux 1.1 [pro] Ultra Image",
            category="api node/image/BFL",
            description="Generates images using Flux Pro 1.1 Ultra via api based on prompt and resolution.",
            short_description="Generate images with Flux Pro 1.1 Ultra API.",
            inputs=[
                IO.String.Input(
                    "prompt",
@@ -155,17 +154,13 @@ class FluxProUltraImageNode(IO.ComfyNode):

class FluxKontextProImageNode(IO.ComfyNode):

    DESCRIPTION = "Edits images using Flux.1 Kontext [pro] via api based on prompt and aspect ratio."
    SHORT_DESCRIPTION = "Edit images with Flux.1 Kontext [pro] API."

    @classmethod
    def define_schema(cls) -> IO.Schema:
        return IO.Schema(
            node_id=cls.NODE_ID,
            display_name=cls.DISPLAY_NAME,
            category="api node/image/BFL",
            description=cls.DESCRIPTION,
            short_description=cls.SHORT_DESCRIPTION,
            description="Edits images using Flux.1 Kontext [pro] via api based on prompt and aspect ratio.",
            inputs=[
                IO.String.Input(
                    "prompt",
@@ -273,7 +268,6 @@ class FluxKontextProImageNode(IO.ComfyNode):

class FluxKontextMaxImageNode(FluxKontextProImageNode):

    DESCRIPTION = "Edits images using Flux.1 Kontext [max] via api based on prompt and aspect ratio."
    SHORT_DESCRIPTION = "Edit images with Flux.1 Kontext [max] API."
    BFL_PATH = "/proxy/bfl/flux-kontext-max/generate"
    NODE_ID = "FluxKontextMaxImageNode"
    DISPLAY_NAME = "Flux.1 Kontext [max] Image"
@@ -288,7 +282,6 @@ class FluxProExpandNode(IO.ComfyNode):
            display_name="Flux.1 Expand Image",
            category="api node/image/BFL",
            description="Outpaints image based on prompt.",
            short_description=None,
            inputs=[
                IO.Image.Input("image"),
                IO.String.Input(
@@ -425,7 +418,6 @@ class FluxProFillNode(IO.ComfyNode):
            display_name="Flux.1 Fill Image",
            category="api node/image/BFL",
            description="Inpaints image based on mask and prompt.",
            short_description=None,
            inputs=[
                IO.Image.Input("image"),
                IO.Mask.Input("mask"),
@@ -551,7 +543,6 @@ class Flux2ProImageNode(IO.ComfyNode):
            display_name=cls.DISPLAY_NAME,
            category="api node/image/BFL",
            description="Generates images synchronously based on prompt and resolution.",
            short_description=None,
            inputs=[
                IO.String.Input(
                    "prompt",
@@ -3,11 +3,7 @@ from typing_extensions import override
from comfy_api.latest import IO, ComfyExtension, Input
from comfy_api_nodes.apis.bria import (
    BriaEditImageRequest,
    BriaRemoveBackgroundRequest,
    BriaRemoveBackgroundResponse,
    BriaRemoveVideoBackgroundRequest,
    BriaRemoveVideoBackgroundResponse,
    BriaImageEditResponse,
    BriaResponse,
    BriaStatusResponse,
    InputModerationSettings,
)
@@ -15,12 +11,10 @@ from comfy_api_nodes.util import (
    ApiEndpoint,
    convert_mask_to_image,
    download_url_to_image_tensor,
    download_url_to_video_output,
    get_number_of_images,
    poll_op,
    sync_op,
    upload_image_to_comfyapi,
    upload_video_to_comfyapi,
    validate_video_duration,
    upload_images_to_comfyapi,
)

@@ -33,7 +27,6 @@ class BriaImageEditNode(IO.ComfyNode):
            display_name="Bria FIBO Image Edit",
            category="api node/image/Bria",
            description="Edit images using Bria latest model",
            short_description=None,
            inputs=[
                IO.Combo.Input("model", options=["FIBO"]),
                IO.Image.Input("image"),
@@ -80,15 +73,21 @@ class BriaImageEditNode(IO.ComfyNode):
                IO.DynamicCombo.Input(
                    "moderation",
                    options=[
                        IO.DynamicCombo.Option("false", []),
                        IO.DynamicCombo.Option(
                            "true",
                            [
                                IO.Boolean.Input("prompt_content_moderation", default=False),
                                IO.Boolean.Input("visual_input_moderation", default=False),
                                IO.Boolean.Input("visual_output_moderation", default=True),
                                IO.Boolean.Input(
                                    "prompt_content_moderation", default=False
                                ),
                                IO.Boolean.Input(
                                    "visual_input_moderation", default=False
                                ),
                                IO.Boolean.Input(
                                    "visual_output_moderation", default=True
                                ),
                            ],
                        ),
                        IO.DynamicCombo.Option("false", []),
                    ],
                    tooltip="Moderation settings",
                ),
@@ -128,26 +127,50 @@ class BriaImageEditNode(IO.ComfyNode):
        mask: Input.Image | None = None,
    ) -> IO.NodeOutput:
        if not prompt and not structured_prompt:
            raise ValueError("One of prompt or structured_prompt is required to be non-empty.")
            raise ValueError(
                "One of prompt or structured_prompt is required to be non-empty."
            )
        if get_number_of_images(image) != 1:
            raise ValueError("Exactly one input image is required.")
        mask_url = None
        if mask is not None:
            mask_url = await upload_image_to_comfyapi(cls, convert_mask_to_image(mask), wait_label="Uploading mask")
            mask_url = (
                await upload_images_to_comfyapi(
                    cls,
                    convert_mask_to_image(mask),
                    max_images=1,
                    mime_type="image/png",
                    wait_label="Uploading mask",
                )
            )[0]
        response = await sync_op(
            cls,
            ApiEndpoint(path="proxy/bria/v2/image/edit", method="POST"),
            data=BriaEditImageRequest(
                instruction=prompt if prompt else None,
                structured_instruction=structured_prompt if structured_prompt else None,
                images=[await upload_image_to_comfyapi(cls, image, wait_label="Uploading image")],
                images=await upload_images_to_comfyapi(
                    cls,
                    image,
                    max_images=1,
                    mime_type="image/png",
                    wait_label="Uploading image",
                ),
                mask=mask_url,
                negative_prompt=negative_prompt if negative_prompt else None,
                guidance_scale=guidance_scale,
                seed=seed,
                model_version=model,
                steps_num=steps,
                prompt_content_moderation=moderation.get("prompt_content_moderation", False),
                visual_input_content_moderation=moderation.get("visual_input_moderation", False),
                visual_output_content_moderation=moderation.get("visual_output_moderation", False),
                prompt_content_moderation=moderation.get(
                    "prompt_content_moderation", False
                ),
                visual_input_content_moderation=moderation.get(
                    "visual_input_moderation", False
                ),
                visual_output_content_moderation=moderation.get(
                    "visual_output_moderation", False
                ),
            ),
            response_model=BriaStatusResponse,
        )
@@ -155,7 +178,7 @@ class BriaImageEditNode(IO.ComfyNode):
            cls,
            ApiEndpoint(path=f"/proxy/bria/v2/status/{response.request_id}"),
            status_extractor=lambda r: r.status,
            response_model=BriaImageEditResponse,
            response_model=BriaResponse,
        )
        return IO.NodeOutput(
            await download_url_to_image_tensor(response.result.image_url),
@@ -163,167 +186,11 @@ class BriaImageEditNode(IO.ComfyNode):
        )


class BriaRemoveImageBackground(IO.ComfyNode):

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="BriaRemoveImageBackground",
            display_name="Bria Remove Image Background",
            category="api node/image/Bria",
            description="Remove the background from an image using Bria RMBG 2.0.",
            inputs=[
                IO.Image.Input("image"),
                IO.DynamicCombo.Input(
                    "moderation",
                    options=[
                        IO.DynamicCombo.Option("false", []),
                        IO.DynamicCombo.Option(
                            "true",
                            [
                                IO.Boolean.Input("visual_input_moderation", default=False),
                                IO.Boolean.Input("visual_output_moderation", default=True),
                            ],
                        ),
                    ],
                    tooltip="Moderation settings",
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed controls whether the node should re-run; "
                    "results are non-deterministic regardless of seed.",
                ),
            ],
            outputs=[IO.Image.Output()],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(
                expr="""{"type":"usd","usd":0.018}""",
            ),
        )

    @classmethod
    async def execute(
        cls,
        image: Input.Image,
        moderation: dict,
        seed: int,
    ) -> IO.NodeOutput:
        response = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/bria/v2/image/edit/remove_background", method="POST"),
            data=BriaRemoveBackgroundRequest(
                image=await upload_image_to_comfyapi(cls, image, wait_label="Uploading image"),
                sync=False,
                visual_input_content_moderation=moderation.get("visual_input_moderation", False),
                visual_output_content_moderation=moderation.get("visual_output_moderation", False),
                seed=seed,
            ),
            response_model=BriaStatusResponse,
        )
        response = await poll_op(
            cls,
            ApiEndpoint(path=f"/proxy/bria/v2/status/{response.request_id}"),
            status_extractor=lambda r: r.status,
            response_model=BriaRemoveBackgroundResponse,
        )
        return IO.NodeOutput(await download_url_to_image_tensor(response.result.image_url))


class BriaRemoveVideoBackground(IO.ComfyNode):

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="BriaRemoveVideoBackground",
            display_name="Bria Remove Video Background",
            category="api node/video/Bria",
            description="Remove the background from a video using Bria. ",
            inputs=[
                IO.Video.Input("video"),
                IO.Combo.Input(
                    "background_color",
                    options=[
                        "Black",
                        "White",
                        "Gray",
                        "Red",
                        "Green",
                        "Blue",
                        "Yellow",
                        "Cyan",
                        "Magenta",
                        "Orange",
                    ],
                    tooltip="Background color for the output video.",
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed controls whether the node should re-run; "
                    "results are non-deterministic regardless of seed.",
                ),
            ],
            outputs=[IO.Video.Output()],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(
                expr="""{"type":"usd","usd":0.14,"format":{"suffix":"/second"}}""",
            ),
        )

    @classmethod
    async def execute(
        cls,
        video: Input.Video,
        background_color: str,
        seed: int,
    ) -> IO.NodeOutput:
        validate_video_duration(video, max_duration=60.0)
        response = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/bria/v2/video/edit/remove_background", method="POST"),
            data=BriaRemoveVideoBackgroundRequest(
                video=await upload_video_to_comfyapi(cls, video),
                background_color=background_color,
                output_container_and_codec="mp4_h264",
                seed=seed,
            ),
            response_model=BriaStatusResponse,
        )
        response = await poll_op(
            cls,
            ApiEndpoint(path=f"/proxy/bria/v2/status/{response.request_id}"),
            status_extractor=lambda r: r.status,
            response_model=BriaRemoveVideoBackgroundResponse,
        )
        return IO.NodeOutput(await download_url_to_video_output(response.result.video_url))


class BriaExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
        return [
            BriaImageEditNode,
            BriaRemoveImageBackground,
            BriaRemoveVideoBackground,
        ]
@@ -60,7 +60,6 @@ class ByteDanceImageNode(IO.ComfyNode):
            display_name="ByteDance Image",
            category="api node/image/ByteDance",
            description="Generate images using ByteDance models via api based on prompt",
            short_description=None,
            inputs=[
                IO.Combo.Input("model", options=["seedream-3-0-t2i-250415"]),
                IO.String.Input(
@@ -183,7 +182,6 @@ class ByteDanceSeedreamNode(IO.ComfyNode):
            display_name="ByteDance Seedream 4.5",
            category="api node/image/ByteDance",
            description="Unified text-to-image generation and precise single-sentence editing at up to 4K resolution.",
            short_description="Text-to-image generation and editing up to 4K.",
            inputs=[
                IO.Combo.Input(
                    "model",
@@ -382,7 +380,6 @@ class ByteDanceTextToVideoNode(IO.ComfyNode):
            display_name="ByteDance Text to Video",
            category="api node/video/ByteDance",
            description="Generate video using ByteDance models via api based on prompt",
            short_description=None,
            inputs=[
                IO.Combo.Input(
                    "model",
@@ -508,7 +505,6 @@ class ByteDanceImageToVideoNode(IO.ComfyNode):
            display_name="ByteDance Image to Video",
            category="api node/video/ByteDance",
            description="Generate video using ByteDance models via api based on image and prompt",
            short_description="Generate video from image and prompt via ByteDance API.",
            inputs=[
                IO.Combo.Input(
                    "model",
@@ -643,7 +639,6 @@ class ByteDanceFirstLastFrameNode(IO.ComfyNode):
            display_name="ByteDance First-Last-Frame to Video",
            category="api node/video/ByteDance",
            description="Generate video using prompt and first and last frames.",
            short_description=None,
            inputs=[
                IO.Combo.Input(
                    "model",
@@ -789,7 +784,6 @@ class ByteDanceImageReferenceNode(IO.ComfyNode):
            display_name="ByteDance Reference Images to Video",
            category="api node/video/ByteDance",
            description="Generate video using prompt and reference images.",
            short_description=None,
            inputs=[
                IO.Combo.Input(
                    "model",
@@ -254,7 +254,6 @@ class GeminiNode(IO.ComfyNode):
            description="Generate text responses with Google's Gemini AI model. "
            "You can provide multiple types of inputs (text, images, audio, video) "
            "as context for generating more relevant and meaningful responses.",
            short_description="Generate text responses with Google's Gemini AI.",
            inputs=[
                IO.String.Input(
                    "prompt",
@@ -481,7 +480,6 @@ class GeminiInputFiles(IO.ComfyNode):
            "The files will be read by the Gemini model when generating a response. "
            "The contents of the text file count toward the token limit. "
            "🛈 TIP: Can be chained together with other Gemini Input File nodes.",
            short_description="Load and prepare input files for Gemini LLM nodes.",
            inputs=[
                IO.Combo.Input(
                    "file",
@@ -536,7 +534,6 @@ class GeminiImage(IO.ComfyNode):
            display_name="Nano Banana (Google Gemini Image)",
            category="api node/image/Gemini",
            description="Edit images synchronously via Google API.",
            short_description=None,
            inputs=[
                IO.String.Input(
                    "prompt",
@@ -668,7 +665,6 @@ class GeminiImage2(IO.ComfyNode):
            display_name="Nano Banana Pro (Google Gemini Image)",
            category="api node/image/Gemini",
            description="Generate or edit images synchronously via Google Vertex API.",
            short_description=None,
            inputs=[
                IO.String.Input(
                    "prompt",
@@ -36,7 +36,6 @@ class GrokImageNode(IO.ComfyNode):
            display_name="Grok Image",
            category="api node/image/Grok",
            description="Generate images using Grok based on a text prompt",
            short_description=None,
            inputs=[
                IO.Combo.Input("model", options=["grok-imagine-image-beta"]),
                IO.String.Input(
@@ -138,7 +137,6 @@ class GrokImageEditNode(IO.ComfyNode):
            display_name="Grok Image Edit",
            category="api node/image/Grok",
            description="Modify an existing image based on a text prompt",
            short_description=None,
            inputs=[
                IO.Combo.Input("model", options=["grok-imagine-image-beta"]),
                IO.Image.Input("image"),
@@ -228,7 +226,6 @@ class GrokVideoNode(IO.ComfyNode):
            display_name="Grok Video",
            category="api node/video/Grok",
            description="Generate video from a prompt or an image",
            short_description=None,
            inputs=[
                IO.Combo.Input("model", options=["grok-imagine-video-beta"]),
                IO.String.Input(
@@ -337,7 +334,6 @@ class GrokVideoEditNode(IO.ComfyNode):
            display_name="Grok Video Edit",
            category="api node/video/Grok",
            description="Edit an existing video based on a text prompt.",
            short_description=None,
            inputs=[
                IO.Combo.Input("model", options=["grok-imagine-video-beta"]),
                IO.String.Input(
@@ -74,7 +74,6 @@ class HitPawGeneralImageEnhance(IO.ComfyNode):
            category="api node/image/HitPaw",
            description="Upscale low-resolution images to super-resolution, eliminate artifacts and noise. "
            f"Maximum output: {MAX_MP_GENERATIVE} megapixels.",
            short_description="Upscale images to super-resolution, removing artifacts and noise.",
            inputs=[
                IO.Combo.Input("model", options=["generative_portrait", "generative"]),
                IO.Image.Input("image"),
@@ -206,7 +205,6 @@ class HitPawVideoEnhance(IO.ComfyNode):
            category="api node/video/HitPaw",
            description="Upscale low-resolution videos to high resolution, eliminate artifacts and noise. "
            "Prices shown are per second of video.",
            short_description="Upscale videos to high resolution, removing artifacts and noise.",
            inputs=[
                IO.DynamicCombo.Input("model", options=model_options),
                IO.Video.Input("video"),
@@ -1,48 +1,31 @@
from typing_extensions import override

from comfy_api.latest import IO, ComfyExtension, Input, Types
from comfy_api.latest import IO, ComfyExtension, Input
from comfy_api_nodes.apis.hunyuan3d import (
    Hunyuan3DViewImage,
    InputGenerateType,
    ResultFile3D,
    TextureEditTaskRequest,
    To3DProTaskCreateResponse,
    To3DProTaskQueryRequest,
    To3DProTaskRequest,
    To3DProTaskResultResponse,
    To3DUVFileInput,
    To3DUVTaskRequest,
)
from comfy_api_nodes.util import (
    ApiEndpoint,
    download_url_to_file_3d,
    download_url_to_image_tensor,
    downscale_image_tensor_by_max_side,
    poll_op,
    sync_op,
    upload_3d_model_to_comfyapi,
    upload_image_to_comfyapi,
    validate_image_dimensions,
    validate_string,
)


def _is_tencent_rate_limited(status: int, body: object) -> bool:
    return (
        status == 400
        and isinstance(body, dict)
        and "RequestLimitExceeded" in str(body.get("Response", {}).get("Error", {}).get("Code", ""))
    )


def get_file_from_response(
    response_objs: list[ResultFile3D], file_type: str, raise_if_not_found: bool = True
) -> ResultFile3D | None:
def get_file_from_response(response_objs: list[ResultFile3D], file_type: str) -> ResultFile3D | None:
    for i in response_objs:
        if i.Type.lower() == file_type.lower():
            return i
    if raise_if_not_found:
        raise ValueError(f"'{file_type}' file type is not found in the response.")
    return None
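`_is_tencent_rate_limited` lets the request helpers treat one specific HTTP 400 response body as a retryable rate limit rather than a hard failure. A sketch of the body shape it matches; the field values are illustrative:

```python
body = {"Response": {"Error": {"Code": "RequestLimitExceeded", "Message": "rate limited"}}}
assert _is_tencent_rate_limited(400, body)                                                # retryable
assert not _is_tencent_rate_limited(400, {"Response": {"Error": {"Code": "InvalidParameter"}}})
assert not _is_tencent_rate_limited(429, body)   # only the 400 + RequestLimitExceeded pattern counts
```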
@@ -52,10 +35,8 @@ class TencentTextToModelNode(IO.ComfyNode):
    def define_schema(cls):
        return IO.Schema(
            node_id="TencentTextToModelNode",
            display_name="Hunyuan3D: Text to Model",
            display_name="Hunyuan3D: Text to Model (Pro)",
            category="api node/3d/Tencent",
            description="Generate 3D models from text prompts using Hunyuan3D Pro with configurable face count and geometry options.",
            short_description="Generate 3D models from text using Hunyuan3D Pro.",
            inputs=[
                IO.Combo.Input(
                    "model",
@@ -139,7 +120,6 @@ class TencentTextToModelNode(IO.ComfyNode):
                EnablePBR=generate_type.get("pbr", None),
                PolygonType=generate_type.get("polygon_type", None),
            ),
            is_rate_limited=_is_tencent_rate_limited,
        )
        if response.Error:
            raise ValueError(f"Task creation failed with code {response.Error.Code}: {response.Error.Message}")
@@ -151,14 +131,11 @@ class TencentTextToModelNode(IO.ComfyNode):
            response_model=To3DProTaskResultResponse,
            status_extractor=lambda r: r.Status,
        )
        glb_result = get_file_from_response(result.ResultFile3Ds, "glb")
        obj_result = get_file_from_response(result.ResultFile3Ds, "obj")
        file_glb = await download_url_to_file_3d(glb_result.Url, "glb", task_id=task_id) if glb_result else None
        return IO.NodeOutput(
            f"{task_id}.glb",
            await download_url_to_file_3d(
                get_file_from_response(result.ResultFile3Ds, "glb").Url, "glb", task_id=task_id
            ),
            await download_url_to_file_3d(
                get_file_from_response(result.ResultFile3Ds, "obj").Url, "obj", task_id=task_id
            ),
            file_glb, file_glb, await download_url_to_file_3d(obj_result.Url, "obj", task_id=task_id) if obj_result else None
        )


@@ -168,10 +145,8 @@ class TencentImageToModelNode(IO.ComfyNode):
    def define_schema(cls):
        return IO.Schema(
            node_id="TencentImageToModelNode",
            display_name="Hunyuan3D: Image(s) to Model",
            display_name="Hunyuan3D: Image(s) to Model (Pro)",
            category="api node/3d/Tencent",
            description="Generate 3D models from images using Hunyuan3D Pro with optional multi-view inputs and configurable geometry.",
            short_description="Generate 3D models from images using Hunyuan3D Pro.",
            inputs=[
                IO.Combo.Input(
                    "model",
@@ -293,7 +268,6 @@ class TencentImageToModelNode(IO.ComfyNode):
                EnablePBR=generate_type.get("pbr", None),
                PolygonType=generate_type.get("polygon_type", None),
            ),
            is_rate_limited=_is_tencent_rate_limited,
        )
        if response.Error:
            raise ValueError(f"Task creation failed with code {response.Error.Code}: {response.Error.Message}")
@@ -305,257 +279,11 @@ class TencentImageToModelNode(IO.ComfyNode):
            response_model=To3DProTaskResultResponse,
            status_extractor=lambda r: r.Status,
        )
        glb_result = get_file_from_response(result.ResultFile3Ds, "glb")
        obj_result = get_file_from_response(result.ResultFile3Ds, "obj")
        file_glb = await download_url_to_file_3d(glb_result.Url, "glb", task_id=task_id) if glb_result else None
        return IO.NodeOutput(
            f"{task_id}.glb",
            await download_url_to_file_3d(
                get_file_from_response(result.ResultFile3Ds, "glb").Url, "glb", task_id=task_id
            ),
            await download_url_to_file_3d(
                get_file_from_response(result.ResultFile3Ds, "obj").Url, "obj", task_id=task_id
            ),
        )


class TencentModelTo3DUVNode(IO.ComfyNode):

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="TencentModelTo3DUVNode",
            display_name="Hunyuan3D: Model to UV",
            category="api node/3d/Tencent",
            description="Perform UV unfolding on a 3D model to generate UV texture. "
            "Input model must have less than 30000 faces.",
            inputs=[
                IO.MultiType.Input(
                    "model_3d",
                    types=[IO.File3DGLB, IO.File3DOBJ, IO.File3DFBX, IO.File3DAny],
                    tooltip="Input 3D model (GLB, OBJ, or FBX)",
                ),
                IO.Int.Input(
                    "seed",
                    default=1,
                    min=0,
                    max=2147483647,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed controls whether the node should re-run; "
                    "results are non-deterministic regardless of seed.",
                ),
            ],
            outputs=[
                IO.File3DOBJ.Output(display_name="OBJ"),
                IO.File3DFBX.Output(display_name="FBX"),
                IO.Image.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(expr='{"type":"usd","usd":0.2}'),
        )

    SUPPORTED_FORMATS = {"glb", "obj", "fbx"}

    @classmethod
    async def execute(
        cls,
        model_3d: Types.File3D,
        seed: int,
    ) -> IO.NodeOutput:
        _ = seed
        file_format = model_3d.format.lower()
        if file_format not in cls.SUPPORTED_FORMATS:
            raise ValueError(
                f"Unsupported file format: '{file_format}'. "
                f"Supported formats: {', '.join(sorted(cls.SUPPORTED_FORMATS))}."
            )
        response = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/tencent/hunyuan/3d-uv", method="POST"),
            response_model=To3DProTaskCreateResponse,
            data=To3DUVTaskRequest(
                File=To3DUVFileInput(
                    Type=file_format.upper(),
                    Url=await upload_3d_model_to_comfyapi(cls, model_3d, file_format),
                )
            ),
            is_rate_limited=_is_tencent_rate_limited,
        )
        if response.Error:
            raise ValueError(f"Task creation failed with code {response.Error.Code}: {response.Error.Message}")
        result = await poll_op(
            cls,
            ApiEndpoint(path="/proxy/tencent/hunyuan/3d-uv/query", method="POST"),
            data=To3DProTaskQueryRequest(JobId=response.JobId),
            response_model=To3DProTaskResultResponse,
            status_extractor=lambda r: r.Status,
        )
        return IO.NodeOutput(
            await download_url_to_file_3d(get_file_from_response(result.ResultFile3Ds, "obj").Url, "obj"),
            await download_url_to_file_3d(get_file_from_response(result.ResultFile3Ds, "fbx").Url, "fbx"),
            await download_url_to_image_tensor(get_file_from_response(result.ResultFile3Ds, "image").Url),
        )


class Tencent3DTextureEditNode(IO.ComfyNode):

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="Tencent3DTextureEditNode",
            display_name="Hunyuan3D: 3D Texture Edit",
            category="api node/3d/Tencent",
            description="After inputting the 3D model, perform 3D model texture redrawing.",
            inputs=[
                IO.MultiType.Input(
                    "model_3d",
                    types=[IO.File3DFBX, IO.File3DAny],
                    tooltip="3D model in FBX format. Model should have less than 100000 faces.",
                ),
                IO.String.Input(
                    "prompt",
                    multiline=True,
                    default="",
                    tooltip="Describes texture editing. Supports up to 1024 UTF-8 characters.",
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed controls whether the node should re-run; "
                    "results are non-deterministic regardless of seed.",
                ),
            ],
            outputs=[
                IO.File3DGLB.Output(display_name="GLB"),
                IO.File3DFBX.Output(display_name="FBX"),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(
                expr="""{"type":"usd","usd": 0.6}""",
            ),
        )

    @classmethod
    async def execute(
        cls,
        model_3d: Types.File3D,
        prompt: str,
        seed: int,
    ) -> IO.NodeOutput:
        _ = seed
        file_format = model_3d.format.lower()
        if file_format != "fbx":
            raise ValueError(f"Unsupported file format: '{file_format}'. Only FBX format is supported.")
        validate_string(prompt, field_name="prompt", min_length=1, max_length=1024)
        model_url = await upload_3d_model_to_comfyapi(cls, model_3d, file_format)
        response = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/tencent/hunyuan/3d-texture-edit", method="POST"),
            response_model=To3DProTaskCreateResponse,
            data=TextureEditTaskRequest(
                File3D=To3DUVFileInput(Type=file_format.upper(), Url=model_url),
                Prompt=prompt,
                EnablePBR=True,
            ),
            is_rate_limited=_is_tencent_rate_limited,
        )
        if response.Error:
            raise ValueError(f"Task creation failed with code {response.Error.Code}: {response.Error.Message}")

        result = await poll_op(
            cls,
            ApiEndpoint(path="/proxy/tencent/hunyuan/3d-texture-edit/query", method="POST"),
            data=To3DProTaskQueryRequest(JobId=response.JobId),
            response_model=To3DProTaskResultResponse,
            status_extractor=lambda r: r.Status,
        )
        return IO.NodeOutput(
            await download_url_to_file_3d(get_file_from_response(result.ResultFile3Ds, "glb").Url, "glb"),
            await download_url_to_file_3d(get_file_from_response(result.ResultFile3Ds, "fbx").Url, "fbx"),
        )


class Tencent3DPartNode(IO.ComfyNode):

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="Tencent3DPartNode",
            display_name="Hunyuan3D: 3D Part",
            category="api node/3d/Tencent",
            description="Automatically perform component identification and generation based on the model structure.",
            inputs=[
                IO.MultiType.Input(
                    "model_3d",
                    types=[IO.File3DFBX, IO.File3DAny],
                    tooltip="3D model in FBX format. Model should have less than 30000 faces.",
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed controls whether the node should re-run; "
                    "results are non-deterministic regardless of seed.",
                ),
            ],
            outputs=[
                IO.File3DFBX.Output(display_name="FBX"),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(expr='{"type":"usd","usd":0.6}'),
        )

    @classmethod
    async def execute(
        cls,
        model_3d: Types.File3D,
        seed: int,
    ) -> IO.NodeOutput:
        _ = seed
        file_format = model_3d.format.lower()
        if file_format != "fbx":
            raise ValueError(f"Unsupported file format: '{file_format}'. Only FBX format is supported.")
        model_url = await upload_3d_model_to_comfyapi(cls, model_3d, file_format)
        response = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/tencent/hunyuan/3d-part", method="POST"),
            response_model=To3DProTaskCreateResponse,
            data=To3DUVTaskRequest(
                File=To3DUVFileInput(Type=file_format.upper(), Url=model_url),
            ),
            is_rate_limited=_is_tencent_rate_limited,
        )
        if response.Error:
            raise ValueError(f"Task creation failed with code {response.Error.Code}: {response.Error.Message}")
        result = await poll_op(
            cls,
            ApiEndpoint(path="/proxy/tencent/hunyuan/3d-part/query", method="POST"),
            data=To3DProTaskQueryRequest(JobId=response.JobId),
            response_model=To3DProTaskResultResponse,
            status_extractor=lambda r: r.Status,
        )
        return IO.NodeOutput(
            await download_url_to_file_3d(get_file_from_response(result.ResultFile3Ds, "fbx").Url, "fbx"),
            file_glb, file_glb, await download_url_to_file_3d(obj_result.Url, "obj", task_id=task_id) if obj_result else None
        )
@@ -565,9 +293,6 @@ class TencentHunyuan3DExtension(ComfyExtension):
        return [
            TencentTextToModelNode,
            TencentImageToModelNode,
            # TencentModelTo3DUVNode,
            # Tencent3DTextureEditNode,
            Tencent3DPartNode,
        ]
@@ -236,7 +236,6 @@ class IdeogramV1(IO.ComfyNode):
            display_name="Ideogram V1",
            category="api node/image/Ideogram",
            description="Generates images using the Ideogram V1 model.",
            short_description=None,
            inputs=[
                IO.String.Input(
                    "prompt",
@@ -362,7 +361,6 @@ class IdeogramV2(IO.ComfyNode):
            display_name="Ideogram V2",
            category="api node/image/Ideogram",
            description="Generates images using the Ideogram V2 model.",
            short_description=None,
            inputs=[
                IO.String.Input(
                    "prompt",
@@ -528,7 +526,6 @@ class IdeogramV3(IO.ComfyNode):
            category="api node/image/Ideogram",
            description="Generates images using the Ideogram V3 model. "
            "Supports both regular image generation from text prompts and image editing with mask.",
            short_description="Generate and edit images with Ideogram V3.",
            inputs=[
                IO.String.Input(
                    "prompt",
@@ -642,7 +642,6 @@ class KlingCameraControls(IO.ComfyNode):
            display_name="Kling Camera Controls",
            category="api node/video/Kling",
            description="Allows specifying configuration options for Kling Camera Controls and motion control effects.",
            short_description="Configure Kling camera controls and motion effects.",
            inputs=[
                IO.Combo.Input("camera_control_type", options=KlingCameraControlType),
                IO.Float.Input(
@@ -763,7 +762,6 @@ class KlingTextToVideoNode(IO.ComfyNode):
            display_name="Kling Text to Video",
            category="api node/video/Kling",
            description="Kling Text to Video Node",
            short_description=None,
            inputs=[
                IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"),
                IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"),
@@ -851,7 +849,6 @@ class OmniProTextToVideoNode(IO.ComfyNode):
            display_name="Kling 3.0 Omni Text to Video",
            category="api node/video/Kling",
            description="Use text prompts to generate videos with the latest Kling model.",
            short_description=None,
            inputs=[
                IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]),
                IO.String.Input(
@@ -992,7 +989,6 @@ class OmniProFirstLastFrameNode(IO.ComfyNode):
            display_name="Kling 3.0 Omni First-Last-Frame to Video",
            category="api node/video/Kling",
            description="Use a start frame, an optional end frame, or reference images with the latest Kling model.",
            short_description="Generate video from start/end frames or reference images.",
            inputs=[
                IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]),
                IO.String.Input(
@@ -1191,7 +1187,6 @@ class OmniProImageToVideoNode(IO.ComfyNode):
            display_name="Kling 3.0 Omni Image to Video",
            category="api node/video/Kling",
            description="Use up to 7 reference images to generate a video with the latest Kling model.",
            short_description="Generate video from up to 7 reference images.",
            inputs=[
                IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]),
                IO.String.Input(
@@ -1352,7 +1347,6 @@ class OmniProVideoToVideoNode(IO.ComfyNode):
            display_name="Kling 3.0 Omni Video to Video",
            category="api node/video/Kling",
            description="Use a video and up to 4 reference images to generate a video with the latest Kling model.",
            short_description="Generate video from a video and reference images.",
            inputs=[
                IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]),
                IO.String.Input(
@@ -1464,7 +1458,6 @@ class OmniProEditVideoNode(IO.ComfyNode):
            display_name="Kling 3.0 Omni Edit Video",
            category="api node/video/Kling",
            description="Edit an existing video with the latest model from Kling.",
            short_description=None,
            inputs=[
                IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]),
                IO.String.Input(
@@ -1572,7 +1565,6 @@ class OmniProImageNode(IO.ComfyNode):
            display_name="Kling 3.0 Omni Image",
            category="api node/image/Kling",
            description="Create or edit images with the latest model from Kling.",
            short_description=None,
            inputs=[
                IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-image-o1"]),
                IO.String.Input(
@@ -1701,7 +1693,6 @@ class KlingCameraControlT2VNode(IO.ComfyNode):
            display_name="Kling Text to Video (Camera Control)",
            category="api node/video/Kling",
            description="Transform text into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original text.",
            short_description="Generate videos from text with camera movement controls.",
            inputs=[
                IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"),
                IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"),
@@ -1763,8 +1754,6 @@ class KlingImage2VideoNode(IO.ComfyNode):
            node_id="KlingImage2VideoNode",
            display_name="Kling Image(First Frame) to Video",
            category="api node/video/Kling",
            description="Generate a video from a first-frame image with configurable model, mode, aspect ratio, and duration settings.",
            short_description="Generate video from a first-frame reference image.",
            inputs=[
                IO.Image.Input("start_frame", tooltip="The reference image used to generate the video."),
                IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"),
@@ -1865,7 +1854,6 @@ class KlingCameraControlI2VNode(IO.ComfyNode):
            display_name="Kling Image to Video (Camera Control)",
            category="api node/video/Kling",
description="Transform still images into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original image.",
|
||||
short_description="Generate videos from images with camera movement controls.",
|
||||
inputs=[
|
||||
IO.Image.Input(
|
||||
"start_frame",
|
||||
@@ -1937,7 +1925,6 @@ class KlingStartEndFrameNode(IO.ComfyNode):
|
||||
display_name="Kling Start-End Frame to Video",
|
||||
category="api node/video/Kling",
|
||||
description="Generate a video sequence that transitions between your provided start and end images. The node creates all frames in between, producing a smooth transformation from the first frame to the last.",
|
||||
short_description="Generate video transitioning between start and end frame images.",
|
||||
inputs=[
|
||||
IO.Image.Input(
|
||||
"start_frame",
|
||||
@@ -2032,7 +2019,6 @@ class KlingVideoExtendNode(IO.ComfyNode):
|
||||
display_name="Kling Video Extend",
|
||||
category="api node/video/Kling",
|
||||
description="Kling Video Extend Node. Extend videos made by other Kling nodes. The video_id is created by using other Kling Nodes.",
|
||||
short_description="Extend videos generated by other Kling nodes.",
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -2114,7 +2100,6 @@ class KlingDualCharacterVideoEffectNode(IO.ComfyNode):
|
||||
display_name="Kling Dual Character Video Effects",
|
||||
category="api node/video/Kling",
|
||||
description="Achieve different special effects when generating a video based on the effect_scene. First image will be positioned on left side, second on right side of the composite.",
|
||||
short_description="Apply dual-character video effects from two images.",
|
||||
inputs=[
|
||||
IO.Image.Input("image_left", tooltip="Left side image"),
|
||||
IO.Image.Input("image_right", tooltip="Right side image"),
|
||||
@@ -2205,7 +2190,6 @@ class KlingSingleImageVideoEffectNode(IO.ComfyNode):
|
||||
display_name="Kling Video Effects",
|
||||
category="api node/video/Kling",
|
||||
description="Achieve different special effects when generating a video based on the effect_scene.",
|
||||
short_description="Apply special video effects to a single image.",
|
||||
inputs=[
|
||||
IO.Image.Input(
|
||||
"image",
|
||||
@@ -2279,7 +2263,6 @@ class KlingLipSyncAudioToVideoNode(IO.ComfyNode):
|
||||
display_name="Kling Lip Sync Video with Audio",
|
||||
category="api node/video/Kling",
|
||||
description="Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file. When using, ensure that the audio contains clearly distinguishable vocals and that the video contains a distinct face. The audio file should not be larger than 5MB. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.",
|
||||
short_description="Sync video mouth movements to audio content.",
|
||||
inputs=[
|
||||
IO.Video.Input("video"),
|
||||
IO.Audio.Input("audio"),
|
||||
@@ -2331,7 +2314,6 @@ class KlingLipSyncTextToVideoNode(IO.ComfyNode):
|
||||
display_name="Kling Lip Sync Video with Text",
|
||||
category="api node/video/Kling",
|
||||
description="Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.",
|
||||
short_description="Sync video mouth movements to a text prompt.",
|
||||
inputs=[
|
||||
IO.Video.Input("video"),
|
||||
IO.String.Input(
|
||||
@@ -2399,7 +2381,6 @@ class KlingVirtualTryOnNode(IO.ComfyNode):
|
||||
display_name="Kling Virtual Try On",
|
||||
category="api node/image/Kling",
|
||||
description="Kling Virtual Try On Node. Input a human image and a cloth image to try on the cloth on the human. You can merge multiple clothing item pictures into one image with a white background.",
|
||||
short_description="Virtually try clothing onto a human image.",
|
||||
inputs=[
|
||||
IO.Image.Input("human_image"),
|
||||
IO.Image.Input("cloth_image"),
|
||||
@@ -2467,7 +2448,6 @@ class KlingImageGenerationNode(IO.ComfyNode):
|
||||
display_name="Kling 3.0 Image",
|
||||
category="api node/image/Kling",
|
||||
description="Kling Image Generation Node. Generate an image from a text prompt with an optional reference image.",
|
||||
short_description="Generate images from text with optional reference image.",
|
||||
inputs=[
|
||||
IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"),
|
||||
IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"),
|
||||
@@ -2601,8 +2581,6 @@ class TextToVideoWithAudio(IO.ComfyNode):
|
||||
node_id="KlingTextToVideoWithAudio",
|
||||
display_name="Kling 2.6 Text to Video with Audio",
|
||||
category="api node/video/Kling",
|
||||
description="Generate a video with synchronized audio from a text prompt using the Kling v2-6 model.",
|
||||
short_description="Generate video with audio from text using Kling v2-6.",
|
||||
inputs=[
|
||||
IO.Combo.Input("model_name", options=["kling-v2-6"]),
|
||||
IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt."),
|
||||
@@ -2671,8 +2649,6 @@ class ImageToVideoWithAudio(IO.ComfyNode):
|
||||
node_id="KlingImageToVideoWithAudio",
|
||||
display_name="Kling 2.6 Image(First Frame) to Video with Audio",
|
||||
category="api node/video/Kling",
|
||||
description="Generate a video with synchronized audio from a first-frame image and text prompt using the Kling v2-6 model.",
|
||||
short_description="Generate video with audio from an image using Kling v2-6.",
|
||||
inputs=[
|
||||
IO.Combo.Input("model_name", options=["kling-v2-6"]),
|
||||
IO.Image.Input("start_frame"),
|
||||
@@ -2743,8 +2719,6 @@ class MotionControl(IO.ComfyNode):
|
||||
node_id="KlingMotionControl",
|
||||
display_name="Kling Motion Control",
|
||||
category="api node/video/Kling",
|
||||
description="Drive character movement and expression in video using a reference image and motion reference video.",
|
||||
short_description="Control video character motion using reference image and video.",
|
||||
inputs=[
|
||||
IO.String.Input("prompt", multiline=True),
|
||||
IO.Image.Input("reference_image"),
|
||||
@@ -2841,7 +2815,6 @@ class KlingVideoNode(IO.ComfyNode):
|
||||
category="api node/video/Kling",
|
||||
description="Generate videos with Kling V3. "
|
||||
"Supports text-to-video and image-to-video with optional storyboard multi-prompt and audio generation.",
|
||||
short_description="Generate videos with Kling V3 from text or images.",
|
||||
inputs=[
|
||||
IO.DynamicCombo.Input(
|
||||
"multi_shot",
@@ -52,7 +52,6 @@ class TextToVideoNode(IO.ComfyNode):
|
||||
display_name="LTXV Text To Video",
|
||||
category="api node/video/LTXV",
|
||||
description="Professional-quality videos with customizable duration and resolution.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=list(MODELS_MAP.keys())),
|
||||
IO.String.Input(
|
||||
@@ -129,7 +128,6 @@ class ImageToVideoNode(IO.ComfyNode):
|
||||
display_name="LTXV Image To Video",
|
||||
category="api node/video/LTXV",
|
||||
description="Professional-quality videos with customizable duration and resolution based on start image.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image", tooltip="First frame to be used for the video."),
|
||||
IO.Combo.Input("model", options=list(MODELS_MAP.keys())),
@@ -46,7 +46,6 @@ class LumaReferenceNode(IO.ComfyNode):
|
||||
display_name="Luma Reference",
|
||||
category="api node/image/Luma",
|
||||
description="Holds an image and weight for use with Luma Generate Image node.",
|
||||
short_description="Image and weight input for Luma generation.",
|
||||
inputs=[
|
||||
IO.Image.Input(
|
||||
"image",
|
||||
@@ -86,7 +85,6 @@ class LumaConceptsNode(IO.ComfyNode):
|
||||
display_name="Luma Concepts",
|
||||
category="api node/video/Luma",
|
||||
description="Camera Concepts for use with Luma Text to Video and Luma Image to Video nodes.",
|
||||
short_description="Camera concepts for Luma video generation nodes.",
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"concept1",
|
||||
@@ -136,7 +134,6 @@ class LumaImageGenerationNode(IO.ComfyNode):
|
||||
display_name="Luma Text to Image",
|
||||
category="api node/image/Luma",
|
||||
description="Generates images synchronously based on prompt and aspect ratio.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -281,7 +278,6 @@ class LumaImageModifyNode(IO.ComfyNode):
|
||||
display_name="Luma Image to Image",
|
||||
category="api node/image/Luma",
|
||||
description="Modifies images synchronously based on prompt and aspect ratio.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input(
|
||||
"image",
|
||||
@@ -375,7 +371,6 @@ class LumaTextToVideoGenerationNode(IO.ComfyNode):
|
||||
display_name="Luma Text to Video",
|
||||
category="api node/video/Luma",
|
||||
description="Generates videos synchronously based on prompt and output_size.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -477,7 +472,6 @@ class LumaImageToVideoGenerationNode(IO.ComfyNode):
|
||||
display_name="Luma Image to Video",
|
||||
category="api node/video/Luma",
|
||||
description="Generates videos synchronously based on prompt, input images, and output_size.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
@@ -242,7 +242,6 @@ class MagnificImageUpscalerPreciseV2Node(IO.ComfyNode):
|
||||
category="api node/image/Magnific",
|
||||
description="High-fidelity upscaling with fine control over sharpness, grain, and detail. "
|
||||
"Maximum output: 10060×10060 pixels.",
|
||||
short_description="High-fidelity upscaling with sharpness, grain, and detail control.",
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.Combo.Input("scale_factor", options=["2x", "4x", "8x", "16x"]),
|
||||
@@ -402,7 +401,6 @@ class MagnificImageStyleTransferNode(IO.ComfyNode):
|
||||
display_name="Magnific Image Style Transfer",
|
||||
category="api node/image/Magnific",
|
||||
description="Transfer the style from a reference image to your input image.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image", tooltip="The image to apply style transfer to."),
|
||||
IO.Image.Input("reference_image", tooltip="The reference image to extract style from."),
|
||||
@@ -551,7 +549,6 @@ class MagnificImageRelightNode(IO.ComfyNode):
|
||||
display_name="Magnific Image Relight",
|
||||
category="api node/image/Magnific",
|
||||
description="Relight an image with lighting adjustments and optional reference-based light transfer.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image", tooltip="The image to relight."),
|
||||
IO.String.Input(
|
||||
@@ -790,7 +787,6 @@ class MagnificImageSkinEnhancerNode(IO.ComfyNode):
|
||||
display_name="Magnific Image Skin Enhancer",
|
||||
category="api node/image/Magnific",
|
||||
description="Skin enhancement for portraits with multiple processing modes.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image", tooltip="The portrait image to enhance."),
|
||||
IO.Int.Input(
@@ -34,8 +34,6 @@ class MeshyTextToModelNode(IO.ComfyNode):
|
||||
node_id="MeshyTextToModelNode",
|
||||
display_name="Meshy: Text to Model",
|
||||
category="api node/3d/Meshy",
|
||||
description="Generate a 3D model from a text prompt using the Meshy API.",
|
||||
short_description="Generate a 3D model from a text prompt.",
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["latest"]),
|
||||
IO.String.Input("prompt", multiline=True, default=""),
|
||||
@@ -148,7 +146,6 @@ class MeshyRefineNode(IO.ComfyNode):
|
||||
display_name="Meshy: Refine Draft Model",
|
||||
category="api node/3d/Meshy",
|
||||
description="Refine a previously created draft model.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["latest"]),
|
||||
IO.Custom("MESHY_TASK_ID").Input("meshy_task_id"),
|
||||
@@ -242,8 +239,6 @@ class MeshyImageToModelNode(IO.ComfyNode):
|
||||
node_id="MeshyImageToModelNode",
|
||||
display_name="Meshy: Image to Model",
|
||||
category="api node/3d/Meshy",
|
||||
description="Generate a 3D model from a single image using the Meshy API.",
|
||||
short_description="Generate a 3D model from an image.",
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["latest"]),
|
||||
IO.Image.Input("image"),
|
||||
@@ -408,7 +403,6 @@ class MeshyMultiImageToModelNode(IO.ComfyNode):
|
||||
node_id="MeshyMultiImageToModelNode",
|
||||
display_name="Meshy: Multi-Image to Model",
|
||||
category="api node/3d/Meshy",
|
||||
description="Generate a 3D model from multiple images using the Meshy API.",
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["latest"]),
|
||||
IO.Autogrow.Input(
|
||||
@@ -581,7 +575,6 @@ class MeshyRigModelNode(IO.ComfyNode):
|
||||
description="Provides a rigged character in standard formats. "
|
||||
"Auto-rigging is currently not suitable for untextured meshes, non-humanoid assets, "
|
||||
"or humanoid assets with unclear limb and body structure.",
|
||||
short_description="Rig a character model for animation.",
|
||||
inputs=[
|
||||
IO.Custom("MESHY_TASK_ID").Input("meshy_task_id"),
|
||||
IO.Float.Input(
|
||||
@@ -661,7 +654,6 @@ class MeshyAnimateModelNode(IO.ComfyNode):
|
||||
display_name="Meshy: Animate Model",
|
||||
category="api node/3d/Meshy",
|
||||
description="Apply a specific animation action to a previously rigged character.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Custom("MESHY_RIGGED_TASK_ID").Input("rig_task_id"),
|
||||
IO.Int.Input(
|
||||
@@ -727,7 +719,6 @@ class MeshyTextureNode(IO.ComfyNode):
|
||||
node_id="MeshyTextureNode",
|
||||
display_name="Meshy: Texture Model",
|
||||
category="api node/3d/Meshy",
|
||||
description="Apply textures to an existing 3D model using the Meshy API.",
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["latest"]),
|
||||
IO.Custom("MESHY_TASK_ID").Input("meshy_task_id"),
@@ -103,7 +103,6 @@ class MinimaxTextToVideoNode(IO.ComfyNode):
|
||||
display_name="MiniMax Text to Video",
|
||||
category="api node/video/MiniMax",
|
||||
description="Generates videos synchronously based on a prompt, and optional parameters.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt_text",
|
||||
@@ -166,7 +165,6 @@ class MinimaxImageToVideoNode(IO.ComfyNode):
|
||||
display_name="MiniMax Image to Video",
|
||||
category="api node/video/MiniMax",
|
||||
description="Generates videos synchronously based on an image and prompt, and optional parameters.",
|
||||
short_description="Generate videos from an image, prompt, and optional parameters.",
|
||||
inputs=[
|
||||
IO.Image.Input(
|
||||
"image",
|
||||
@@ -234,7 +232,6 @@ class MinimaxSubjectToVideoNode(IO.ComfyNode):
|
||||
display_name="MiniMax Subject to Video",
|
||||
category="api node/video/MiniMax",
|
||||
description="Generates videos synchronously based on an image and prompt, and optional parameters.",
|
||||
short_description="Subject-driven video generation from image and prompt.",
|
||||
inputs=[
|
||||
IO.Image.Input(
|
||||
"subject",
|
||||
@@ -299,7 +296,6 @@ class MinimaxHailuoVideoNode(IO.ComfyNode):
|
||||
display_name="MiniMax Hailuo Video",
|
||||
category="api node/video/MiniMax",
|
||||
description="Generates videos from prompt, with optional start frame using the new MiniMax Hailuo-02 model.",
|
||||
short_description="Generate videos with optional start frame using Hailuo-02.",
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt_text",
@@ -166,7 +166,6 @@ class MoonvalleyImg2VideoNode(IO.ComfyNode):
|
||||
display_name="Moonvalley Marey Image to Video",
|
||||
category="api node/video/Moonvalley Marey",
|
||||
description="Moonvalley Marey Image to Video Node",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input(
|
||||
"image",
|
||||
@@ -291,8 +290,7 @@ class MoonvalleyVideo2VideoNode(IO.ComfyNode):
node_id="MoonvalleyVideo2VideoNode",
display_name="Moonvalley Marey Video to Video",
category="api node/video/Moonvalley Marey",
description="Transform an input video into a new video using a text prompt and motion or pose control.",
short_description="Transform video using text prompt with motion or pose control.",
description="",
inputs=[
IO.String.Input(
"prompt",
@@ -417,8 +415,7 @@ class MoonvalleyTxt2VideoNode(IO.ComfyNode):
node_id="MoonvalleyTxt2VideoNode",
display_name="Moonvalley Marey Text to Video",
category="api node/video/Moonvalley Marey",
description="Generate a video from a text prompt using the Moonvalley Marey model.",
short_description="Generate video from a text prompt using Moonvalley Marey.",
description="",
inputs=[
IO.String.Input(
"prompt",
@@ -43,6 +43,7 @@ class SupportedOpenAIModel(str, Enum):
o1 = "o1"
o3 = "o3"
o1_pro = "o1-pro"
gpt_4o = "gpt-4o"
gpt_4_1 = "gpt-4.1"
gpt_4_1_mini = "gpt-4.1-mini"
gpt_4_1_nano = "gpt-4.1-nano"
@@ -98,7 +99,6 @@ class OpenAIDalle2(IO.ComfyNode):
|
||||
display_name="OpenAI DALL·E 2",
|
||||
category="api node/image/OpenAI",
|
||||
description="Generates images synchronously via OpenAI's DALL·E 2 endpoint.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -249,7 +249,6 @@ class OpenAIDalle3(IO.ComfyNode):
|
||||
display_name="OpenAI DALL·E 3",
|
||||
category="api node/image/OpenAI",
|
||||
description="Generates images synchronously via OpenAI's DALL·E 3 endpoint.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -368,7 +367,6 @@ class OpenAIGPTImage1(IO.ComfyNode):
|
||||
display_name="OpenAI GPT Image 1.5",
|
||||
category="api node/image/OpenAI",
|
||||
description="Generates images synchronously via OpenAI's GPT Image endpoint.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -579,7 +577,6 @@ class OpenAIChatNode(IO.ComfyNode):
|
||||
display_name="OpenAI ChatGPT",
|
||||
category="api node/text/OpenAI",
|
||||
description="Generate text responses from an OpenAI model.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -652,6 +649,11 @@ class OpenAIChatNode(IO.ComfyNode):
"usd": [0.01, 0.04],
"format": { "approximate": true, "separator": "-", "suffix": " per 1K tokens" }
}
: $contains($m, "gpt-4o") ? {
"type": "list_usd",
"usd": [0.0025, 0.01],
"format": { "approximate": true, "separator": "-", "suffix": " per 1K tokens" }
}
: $contains($m, "gpt-4.1-nano") ? {
"type": "list_usd",
"usd": [0.0001, 0.0004],
@@ -807,7 +809,6 @@ class OpenAIInputFiles(IO.ComfyNode):
|
||||
display_name="OpenAI ChatGPT Input Files",
|
||||
category="api node/text/OpenAI",
|
||||
description="Loads and prepares input files (text, pdf, etc.) to include as inputs for the OpenAI Chat Node. The files will be read by the OpenAI model when generating a response. 🛈 TIP: Can be chained together with other OpenAI Input File nodes.",
|
||||
short_description="Load and prepare input files for OpenAI Chat.",
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"file",
|
||||
@@ -855,7 +856,6 @@ class OpenAIChatConfig(IO.ComfyNode):
|
||||
display_name="OpenAI ChatGPT Advanced Options",
|
||||
category="api node/text/OpenAI",
|
||||
description="Allows specifying advanced configuration options for the OpenAI Chat Nodes.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"truncation",
@@ -54,8 +54,6 @@ class PixverseTemplateNode(IO.ComfyNode):
|
||||
node_id="PixverseTemplateNode",
|
||||
display_name="PixVerse Template",
|
||||
category="api node/video/PixVerse",
|
||||
description="Select a style template for PixVerse video generation.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("template", options=list(pixverse_templates.keys())),
|
||||
],
|
||||
@@ -78,7 +76,6 @@ class PixverseTextToVideoNode(IO.ComfyNode):
|
||||
display_name="PixVerse Text to Video",
|
||||
category="api node/video/PixVerse",
|
||||
description="Generates videos based on prompt and output_size.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -197,7 +194,6 @@ class PixverseImageToVideoNode(IO.ComfyNode):
|
||||
display_name="PixVerse Image to Video",
|
||||
category="api node/video/PixVerse",
|
||||
description="Generates videos based on prompt and output_size.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.String.Input(
|
||||
@@ -316,7 +312,6 @@ class PixverseTransitionVideoNode(IO.ComfyNode):
|
||||
display_name="PixVerse Transition Video",
|
||||
category="api node/video/PixVerse",
|
||||
description="Generates videos based on prompt and output_size.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("first_frame"),
|
||||
IO.Image.Input("last_frame"),
|
|
||||
display_name="Recraft Color RGB",
|
||||
category="api node/image/Recraft",
|
||||
description="Create Recraft Color by choosing specific RGB values.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Int.Input("r", default=0, min=0, max=255, tooltip="Red value of color."),
|
||||
IO.Int.Input("g", default=0, min=0, max=255, tooltip="Green value of color."),
|
||||
@@ -207,7 +206,6 @@ class RecraftControlsNode(IO.ComfyNode):
|
||||
display_name="Recraft Controls",
|
||||
category="api node/image/Recraft",
|
||||
description="Create Recraft Controls for customizing Recraft generation.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Custom(RecraftIO.COLOR).Input("colors", optional=True),
|
||||
IO.Custom(RecraftIO.COLOR).Input("background_color", optional=True),
|
||||
@@ -232,7 +230,6 @@ class RecraftStyleV3RealisticImageNode(IO.ComfyNode):
|
||||
display_name="Recraft Style - Realistic Image",
|
||||
category="api node/image/Recraft",
|
||||
description="Select realistic_image style and optional substyle.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE)),
|
||||
],
|
||||
@@ -257,8 +254,7 @@ class RecraftStyleV3DigitalIllustrationNode(RecraftStyleV3RealisticImageNode):
|
||||
node_id="RecraftStyleV3DigitalIllustration",
|
||||
display_name="Recraft Style - Digital Illustration",
|
||||
category="api node/image/Recraft",
|
||||
description="Select digital_illustration style and optional substyle.",
|
||||
short_description=None,
|
||||
description="Select realistic_image style and optional substyle.",
|
||||
inputs=[
|
||||
IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE)),
|
||||
],
|
||||
@@ -275,10 +271,9 @@ class RecraftStyleV3VectorIllustrationNode(RecraftStyleV3RealisticImageNode):
def define_schema(cls):
return IO.Schema(
node_id="RecraftStyleV3VectorIllustrationNode",
display_name="Recraft Style - Vector Illustration",
display_name="Recraft Style - Realistic Image",
category="api node/image/Recraft",
description="Select vector_illustration style and optional substyle.",
short_description=None,
description="Select realistic_image style and optional substyle.",
inputs=[
IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE)),
],
@@ -297,8 +292,7 @@ class RecraftStyleV3LogoRasterNode(RecraftStyleV3RealisticImageNode):
|
||||
node_id="RecraftStyleV3LogoRaster",
|
||||
display_name="Recraft Style - Logo Raster",
|
||||
category="api node/image/Recraft",
|
||||
description="Select logo_raster style and optional substyle.",
|
||||
short_description=None,
|
||||
description="Select realistic_image style and optional substyle.",
|
||||
inputs=[
|
||||
IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE, include_none=False)),
|
||||
],
|
||||
@@ -316,7 +310,6 @@ class RecraftStyleInfiniteStyleLibrary(IO.ComfyNode):
|
||||
display_name="Recraft Style - Infinite Style Library",
|
||||
category="api node/image/Recraft",
|
||||
description="Select style based on preexisting UUID from Recraft's Infinite Style Library.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input("style_id", default="", tooltip="UUID of style from Infinite Style Library."),
|
||||
],
|
||||
@@ -342,7 +335,6 @@ class RecraftCreateStyleNode(IO.ComfyNode):
|
||||
description="Create a custom style from reference images. "
|
||||
"Upload 1-5 images to use as style references. "
|
||||
"Total size of all images is limited to 5 MB.",
|
||||
short_description="Create a custom style from 1-5 reference images.",
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"style",
|
||||
@@ -410,7 +402,6 @@ class RecraftTextToImageNode(IO.ComfyNode):
|
||||
display_name="Recraft Text to Image",
|
||||
category="api node/image/Recraft",
|
||||
description="Generates images synchronously based on prompt and resolution.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input("prompt", multiline=True, default="", tooltip="Prompt for the image generation."),
|
||||
IO.Combo.Input(
|
||||
@@ -523,7 +514,6 @@ class RecraftImageToImageNode(IO.ComfyNode):
|
||||
display_name="Recraft Image to Image",
|
||||
category="api node/image/Recraft",
|
||||
description="Modify image based on prompt and strength.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.String.Input("prompt", multiline=True, default="", tooltip="Prompt for the image generation."),
|
||||
@@ -642,7 +632,6 @@ class RecraftImageInpaintingNode(IO.ComfyNode):
|
||||
display_name="Recraft Image Inpainting",
|
||||
category="api node/image/Recraft",
|
||||
description="Modify image based on prompt and mask.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.Mask.Input("mask"),
|
||||
@@ -745,7 +734,6 @@ class RecraftTextToVectorNode(IO.ComfyNode):
|
||||
display_name="Recraft Text to Vector",
|
||||
category="api node/image/Recraft",
|
||||
description="Generates SVG synchronously based on prompt and resolution.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input("prompt", default="", tooltip="Prompt for the image generation.", multiline=True),
|
||||
IO.Combo.Input("substyle", options=get_v3_substyles(RecraftStyleV3.vector_illustration)),
|
||||
@@ -846,7 +834,6 @@ class RecraftVectorizeImageNode(IO.ComfyNode):
|
||||
display_name="Recraft Vectorize Image",
|
||||
category="api node/image/Recraft",
|
||||
description="Generates SVG synchronously from an input image.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
],
|
||||
@@ -890,7 +877,6 @@ class RecraftReplaceBackgroundNode(IO.ComfyNode):
|
||||
display_name="Recraft Replace Background",
|
||||
category="api node/image/Recraft",
|
||||
description="Replace background on image, based on provided prompt.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.String.Input("prompt", tooltip="Prompt for the image generation.", default="", multiline=True),
|
||||
@@ -978,7 +964,6 @@ class RecraftRemoveBackgroundNode(IO.ComfyNode):
|
||||
display_name="Recraft Remove Background",
|
||||
category="api node/image/Recraft",
|
||||
description="Remove background from image, and return processed image and mask.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
],
|
||||
@@ -1027,9 +1012,8 @@ class RecraftCrispUpscaleNode(IO.ComfyNode):
|
||||
display_name="Recraft Crisp Upscale Image",
|
||||
category="api node/image/Recraft",
|
||||
description="Upscale image synchronously.\n"
|
||||
"Enhances a given raster image using 'crisp upscale' tool, "
|
||||
"Enhances a given raster image using ‘crisp upscale’ tool, "
|
||||
"increasing image resolution, making the image sharper and cleaner.",
|
||||
short_description="Crisp upscale to sharpen and increase image resolution.",
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
],
|
||||
@@ -1074,9 +1058,8 @@ class RecraftCreativeUpscaleNode(RecraftCrispUpscaleNode):
|
||||
display_name="Recraft Creative Upscale Image",
|
||||
category="api node/image/Recraft",
|
||||
description="Upscale image synchronously.\n"
|
||||
"Enhances a given raster image using 'creative upscale' tool, "
|
||||
"Enhances a given raster image using ‘creative upscale’ tool, "
|
||||
"boosting resolution with a focus on refining small details and faces.",
|
||||
short_description="Creative upscale focusing on small details and faces.",
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
],
@@ -238,7 +238,6 @@ class Rodin3D_Regular(IO.ComfyNode):
|
||||
display_name="Rodin 3D Generate - Regular Generate",
|
||||
category="api node/3d/Rodin",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("Images"),
|
||||
*COMMON_PARAMETERS,
|
||||
@@ -298,7 +297,6 @@ class Rodin3D_Detail(IO.ComfyNode):
|
||||
display_name="Rodin 3D Generate - Detail Generate",
|
||||
category="api node/3d/Rodin",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("Images"),
|
||||
*COMMON_PARAMETERS,
|
||||
@@ -358,7 +356,6 @@ class Rodin3D_Smooth(IO.ComfyNode):
|
||||
display_name="Rodin 3D Generate - Smooth Generate",
|
||||
category="api node/3d/Rodin",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("Images"),
|
||||
*COMMON_PARAMETERS,
|
||||
@@ -417,7 +414,6 @@ class Rodin3D_Sketch(IO.ComfyNode):
|
||||
display_name="Rodin 3D Generate - Sketch Generate",
|
||||
category="api node/3d/Rodin",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("Images"),
|
||||
IO.Int.Input(
|
||||
@@ -480,7 +476,6 @@ class Rodin3D_Gen2(IO.ComfyNode):
|
||||
display_name="Rodin 3D Generate - Gen-2 Generate",
|
||||
category="api node/3d/Rodin",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("Images"),
|
||||
IO.Int.Input(
@@ -145,7 +145,6 @@ class RunwayImageToVideoNodeGen3a(IO.ComfyNode):
|
||||
"Before diving in, review these best practices to ensure that "
|
||||
"your input selections will set your generation up for success: "
|
||||
"https://help.runwayml.com/hc/en-us/articles/33927968552339-Creating-with-Act-One-on-Gen-3-Alpha-and-Turbo.",
|
||||
short_description="Generate video from a starting frame using Gen3a Turbo.",
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -240,7 +239,6 @@ class RunwayImageToVideoNodeGen4(IO.ComfyNode):
|
||||
"Before diving in, review these best practices to ensure that "
|
||||
"your input selections will set your generation up for success: "
|
||||
"https://help.runwayml.com/hc/en-us/articles/37327109429011-Creating-with-Gen-4-Video.",
|
||||
short_description="Generate video from a starting frame using Gen4 Turbo.",
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -339,7 +337,6 @@ class RunwayFirstLastFrameNode(IO.ComfyNode):
|
||||
"Before diving in, review these best practices to ensure that your input selections "
|
||||
"will set your generation up for success: "
|
||||
"https://help.runwayml.com/hc/en-us/articles/34170748696595-Creating-with-Keyframes-on-Gen-3.",
|
||||
short_description="Generate video from first and last keyframes with a prompt.",
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -446,7 +443,6 @@ class RunwayTextToImageNode(IO.ComfyNode):
|
||||
category="api node/image/Runway",
|
||||
description="Generate an image from a text prompt using Runway's Gen 4 model. "
|
||||
"You can also include reference image to guide the generation.",
|
||||
short_description="Generate an image from text using Runway Gen 4.",
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
@@ -36,7 +36,6 @@ class OpenAIVideoSora2(IO.ComfyNode):
|
||||
display_name="OpenAI Sora - Video",
|
||||
category="api node/video/Sora",
|
||||
description="OpenAI video and audio generation.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"model",
@@ -64,7 +64,6 @@ class StabilityStableImageUltraNode(IO.ComfyNode):
|
||||
display_name="Stability AI Stable Image Ultra",
|
||||
category="api node/image/Stability AI",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -198,7 +197,6 @@ class StabilityStableImageSD_3_5Node(IO.ComfyNode):
|
||||
display_name="Stability AI Stable Diffusion 3.5 Image",
|
||||
category="api node/image/Stability AI",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -354,7 +352,6 @@ class StabilityUpscaleConservativeNode(IO.ComfyNode):
|
||||
display_name="Stability AI Upscale Conservative",
|
||||
category="api node/image/Stability AI",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.String.Input(
|
||||
@@ -457,7 +454,6 @@ class StabilityUpscaleCreativeNode(IO.ComfyNode):
|
||||
display_name="Stability AI Upscale Creative",
|
||||
category="api node/image/Stability AI",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.String.Input(
|
||||
@@ -577,7 +573,6 @@ class StabilityUpscaleFastNode(IO.ComfyNode):
|
||||
display_name="Stability AI Upscale Fast",
|
||||
category="api node/image/Stability AI",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description="Quickly upscale an image to 4x its original size.",
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
],
|
||||
@@ -630,7 +625,6 @@ class StabilityTextToAudio(IO.ComfyNode):
|
||||
display_name="Stability AI Text To Audio",
|
||||
category="api node/audio/Stability AI",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"model",
|
||||
@@ -707,7 +701,6 @@ class StabilityAudioToAudio(IO.ComfyNode):
|
||||
display_name="Stability AI Audio To Audio",
|
||||
category="api node/audio/Stability AI",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"model",
|
||||
@@ -801,7 +794,6 @@ class StabilityAudioInpaint(IO.ComfyNode):
|
||||
display_name="Stability AI Audio Inpaint",
|
||||
category="api node/audio/Stability AI",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"model",
@@ -49,7 +49,6 @@ class TopazImageEnhance(IO.ComfyNode):
|
||||
display_name="Topaz Image Enhance",
|
||||
category="api node/image/Topaz",
|
||||
description="Industry-standard upscaling and image enhancement.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["Reimagine"]),
|
||||
IO.Image.Input("image"),
|
||||
@@ -224,7 +223,6 @@ class TopazVideoEnhance(IO.ComfyNode):
|
||||
display_name="Topaz Video Enhance",
|
||||
category="api node/video/Topaz",
|
||||
description="Breathe new life into video with powerful upscaling and recovery technology.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Video.Input("video"),
|
||||
IO.Boolean.Input("upscaler_enabled", default=True),
@@ -80,7 +80,6 @@ class TripoTextToModelNode(IO.ComfyNode):
|
||||
node_id="TripoTextToModelNode",
|
||||
display_name="Tripo: Text to Model",
|
||||
category="api node/3d/Tripo",
|
||||
description="Generate a 3D model from a text prompt using Tripo's API.",
|
||||
inputs=[
|
||||
IO.String.Input("prompt", multiline=True),
|
||||
IO.String.Input("negative_prompt", multiline=True, optional=True),
|
||||
@@ -200,7 +199,6 @@ class TripoImageToModelNode(IO.ComfyNode):
|
||||
node_id="TripoImageToModelNode",
|
||||
display_name="Tripo: Image to Model",
|
||||
category="api node/3d/Tripo",
|
||||
description="Generate a 3D model from a single image using Tripo's API.",
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.Combo.Input(
|
||||
@@ -333,7 +331,6 @@ class TripoMultiviewToModelNode(IO.ComfyNode):
|
||||
node_id="TripoMultiviewToModelNode",
|
||||
display_name="Tripo: Multiview to Model",
|
||||
category="api node/3d/Tripo",
|
||||
description="Generate a 3D model from multiple view images using Tripo's API.",
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.Image.Input("image_left", optional=True),
|
||||
@@ -473,7 +470,6 @@ class TripoTextureNode(IO.ComfyNode):
|
||||
node_id="TripoTextureNode",
|
||||
display_name="Tripo: Texture model",
|
||||
category="api node/3d/Tripo",
|
||||
description="Apply textures to an existing 3D model using Tripo's API.",
|
||||
inputs=[
|
||||
IO.Custom("MODEL_TASK_ID").Input("model_task_id"),
|
||||
IO.Boolean.Input("texture", default=True, optional=True),
|
||||
@@ -542,7 +538,6 @@ class TripoRefineNode(IO.ComfyNode):
|
||||
display_name="Tripo: Refine Draft model",
|
||||
category="api node/3d/Tripo",
|
||||
description="Refine a draft model created by v1.4 Tripo models only.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Custom("MODEL_TASK_ID").Input("model_task_id", tooltip="Must be a v1.4 Tripo model"),
|
||||
],
|
||||
@@ -582,8 +577,6 @@ class TripoRigNode(IO.ComfyNode):
|
||||
node_id="TripoRigNode",
|
||||
display_name="Tripo: Rig model",
|
||||
category="api node/3d/Tripo",
|
||||
description="Add a skeleton rig to an existing 3D model using Tripo's API.",
|
||||
short_description="Add a skeleton rig to a 3D model.",
|
||||
inputs=[IO.Custom("MODEL_TASK_ID").Input("original_model_task_id")],
|
||||
outputs=[
|
||||
IO.String.Output(display_name="model_file"), # for backward compatibility only
|
||||
@@ -621,8 +614,6 @@ class TripoRetargetNode(IO.ComfyNode):
|
||||
node_id="TripoRetargetNode",
|
||||
display_name="Tripo: Retarget rigged model",
|
||||
category="api node/3d/Tripo",
|
||||
description="Apply a preset animation to a rigged 3D model using Tripo's API.",
|
||||
short_description="Apply a preset animation to a rigged model.",
|
||||
inputs=[
|
||||
IO.Custom("RIG_TASK_ID").Input("original_model_task_id"),
|
||||
IO.Combo.Input(
|
||||
@@ -688,8 +679,6 @@ class TripoConversionNode(IO.ComfyNode):
|
||||
node_id="TripoConversionNode",
|
||||
display_name="Tripo: Convert model",
|
||||
category="api node/3d/Tripo",
|
||||
description="Convert a 3D model to different formats with optional post-processing using Tripo's API.",
|
||||
short_description="Convert a 3D model to different formats.",
|
||||
inputs=[
|
||||
IO.Custom("MODEL_TASK_ID,RIG_TASK_ID,RETARGET_TASK_ID").Input("original_model_task_id"),
|
||||
IO.Combo.Input("format", options=["GLTF", "USDZ", "FBX", "OBJ", "STL", "3MF"]),
@@ -46,7 +46,6 @@ class VeoVideoGenerationNode(IO.ComfyNode):
|
||||
display_name="Google Veo 2 Video Generation",
|
||||
category="api node/video/Veo",
|
||||
description="Generates videos from text prompts using Google's Veo 2 API",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -265,7 +264,6 @@ class Veo3VideoGenerationNode(VeoVideoGenerationNode):
|
||||
display_name="Google Veo 3 Video Generation",
|
||||
category="api node/video/Veo",
|
||||
description="Generates videos from text prompts using Google's Veo 3 API",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -379,7 +377,6 @@ class Veo3FirstLastFrameNode(IO.ComfyNode):
|
||||
display_name="Google Veo 3 First-Last-Frame to Video",
|
||||
category="api node/video/Veo",
|
||||
description="Generate video using prompt and first and last frames.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
@@ -72,7 +72,6 @@ class ViduTextToVideoNode(IO.ComfyNode):
|
||||
display_name="Vidu Text To Video Generation",
|
||||
category="api node/video/Vidu",
|
||||
description="Generate video from a text prompt",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"),
|
||||
IO.String.Input(
|
||||
@@ -169,7 +168,6 @@ class ViduImageToVideoNode(IO.ComfyNode):
|
||||
display_name="Vidu Image To Video Generation",
|
||||
category="api node/video/Vidu",
|
||||
description="Generate video from image and optional prompt",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"),
|
||||
IO.Image.Input(
|
||||
@@ -272,7 +270,6 @@ class ViduReferenceVideoNode(IO.ComfyNode):
|
||||
display_name="Vidu Reference To Video Generation",
|
||||
category="api node/video/Vidu",
|
||||
description="Generate video from multiple images and a prompt",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"),
|
||||
IO.Image.Input(
|
||||
@@ -386,7 +383,6 @@ class ViduStartEndToVideoNode(IO.ComfyNode):
|
||||
display_name="Vidu Start End To Video Generation",
|
||||
category="api node/video/Vidu",
|
||||
description="Generate a video from start and end frames and a prompt",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"),
|
||||
IO.Image.Input(
|
||||
@@ -489,7 +485,6 @@ class Vidu2TextToVideoNode(IO.ComfyNode):
|
||||
display_name="Vidu2 Text-to-Video Generation",
|
||||
category="api node/video/Vidu",
|
||||
description="Generate video from a text prompt",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["viduq2"]),
|
||||
IO.String.Input(
|
||||
@@ -581,7 +576,6 @@ class Vidu2ImageToVideoNode(IO.ComfyNode):
|
||||
display_name="Vidu2 Image-to-Video Generation",
|
||||
category="api node/video/Vidu",
|
||||
description="Generate a video from an image and an optional prompt.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["viduq2-pro-fast", "viduq2-pro", "viduq2-turbo"]),
|
||||
IO.Image.Input(
|
||||
@@ -710,7 +704,6 @@ class Vidu2ReferenceVideoNode(IO.ComfyNode):
|
||||
display_name="Vidu2 Reference-to-Video Generation",
|
||||
category="api node/video/Vidu",
|
||||
description="Generate a video from multiple reference images and a prompt.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["viduq2"]),
|
||||
IO.Autogrow.Input(
|
||||
@@ -844,7 +837,6 @@ class Vidu2StartEndToVideoNode(IO.ComfyNode):
|
||||
display_name="Vidu2 Start/End Frame-to-Video Generation",
|
||||
category="api node/video/Vidu",
|
||||
description="Generate a video from a start frame, an end frame, and a prompt.",
|
||||
short_description="Generate video from start frame, end frame, and prompt.",
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["viduq2-pro-fast", "viduq2-pro", "viduq2-turbo"]),
|
||||
IO.Image.Input("first_frame"),
|
||||
@@ -964,7 +956,6 @@ class ViduExtendVideoNode(IO.ComfyNode):
|
||||
display_name="Vidu Video Extension",
|
||||
category="api node/video/Vidu",
|
||||
description="Extend an existing video by generating additional frames.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.DynamicCombo.Input(
|
||||
"model",
|
||||
@@ -1135,7 +1126,6 @@ class ViduMultiFrameVideoNode(IO.ComfyNode):
|
||||
display_name="Vidu Multi-Frame Video Generation",
|
||||
category="api node/video/Vidu",
|
||||
description="Generate a video with multiple keyframe transitions.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["viduq2-pro", "viduq2-turbo"]),
|
||||
IO.Image.Input(
|
||||
@@ -1282,7 +1272,6 @@ class Vidu3TextToVideoNode(IO.ComfyNode):
|
||||
display_name="Vidu Q3 Text-to-Video Generation",
|
||||
category="api node/video/Vidu",
|
||||
description="Generate video from a text prompt.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.DynamicCombo.Input(
|
||||
"model",
|
||||
@@ -1391,7 +1380,6 @@ class Vidu3ImageToVideoNode(IO.ComfyNode):
|
||||
display_name="Vidu Q3 Image-to-Video Generation",
|
||||
category="api node/video/Vidu",
|
||||
description="Generate a video from an image and an optional prompt.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.DynamicCombo.Input(
|
||||
"model",
@@ -175,7 +175,6 @@ class WanTextToImageApi(IO.ComfyNode):
|
||||
display_name="Wan Text to Image",
|
||||
category="api node/image/Wan",
|
||||
description="Generates an image based on a text prompt.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"model",
|
||||
@@ -299,7 +298,6 @@ class WanImageToImageApi(IO.ComfyNode):
|
||||
category="api node/image/Wan",
|
||||
description="Generates an image from one or two input images and a text prompt. "
|
||||
"The output image is currently fixed at 1.6 MP, and its aspect ratio matches the input image(s).",
|
||||
short_description="Generate an image from input images and a text prompt.",
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"model",
|
||||
@@ -426,7 +424,6 @@ class WanTextToVideoApi(IO.ComfyNode):
|
||||
display_name="Wan Text to Video",
|
||||
category="api node/video/Wan",
|
||||
description="Generates a video based on a text prompt.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"model",
|
||||
@@ -606,7 +603,6 @@ class WanImageToVideoApi(IO.ComfyNode):
|
||||
display_name="Wan Image to Video",
|
||||
category="api node/video/Wan",
|
||||
description="Generates a video from the first frame and a text prompt.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"model",
|
||||
@@ -783,7 +779,6 @@ class WanReferenceVideoApi(IO.ComfyNode):
|
||||
category="api node/video/Wan",
|
||||
description="Use the character and voice from input videos, combined with a prompt, "
|
||||
"to generate a new video that maintains character consistency.",
|
||||
short_description="Generate character-consistent video from reference videos and prompt.",
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["wan2.6-r2v"]),
|
||||
IO.String.Input(
@@ -30,7 +30,6 @@ class WavespeedFlashVSRNode(IO.ComfyNode):
|
||||
category="api node/video/WaveSpeed",
|
||||
description="Fast, high-quality video upscaler that "
|
||||
"boosts resolution and restores clarity for low-resolution or blurry footage.",
|
||||
short_description="Fast video upscaler that boosts resolution and restores clarity.",
|
||||
inputs=[
|
||||
IO.Video.Input("video"),
|
||||
IO.Combo.Input("target_resolution", options=["720p", "1080p", "2K", "4K"]),
|
||||
@@ -102,7 +101,6 @@ class WavespeedImageUpscaleNode(IO.ComfyNode):
|
||||
display_name="WaveSpeed Image Upscale",
|
||||
category="api node/image/WaveSpeed",
|
||||
description="Boost image resolution and quality, upscaling photos to 4K or 8K for sharp, detailed results.",
|
||||
short_description="Upscale images to 4K or 8K with enhanced quality.",
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["SeedVR2", "Ultimate"]),
|
||||
IO.Image.Input("image"),
|
|
||||
download_url_to_video_output,
|
||||
)
|
||||
from .upload_helpers import (
|
||||
upload_3d_model_to_comfyapi,
|
||||
upload_audio_to_comfyapi,
|
||||
upload_file_to_comfyapi,
|
||||
upload_image_to_comfyapi,
|
||||
@@ -63,7 +62,6 @@ __all__ = [
|
||||
"sync_op",
|
||||
"sync_op_raw",
|
||||
# Upload helpers
|
||||
"upload_3d_model_to_comfyapi",
|
||||
"upload_audio_to_comfyapi",
|
||||
"upload_file_to_comfyapi",
|
||||
"upload_image_to_comfyapi",
|
||||
|
||||
@@ -57,7 +57,7 @@ def tensor_to_bytesio(
image: torch.Tensor,
*,
total_pixels: int | None = 2048 * 2048,
mime_type: str | None = "image/png",
mime_type: str = "image/png",
) -> BytesIO:
"""Converts a torch.Tensor image to a named BytesIO object.
@@ -164,27 +164,6 @@ async def upload_video_to_comfyapi(
return await upload_file_to_comfyapi(cls, video_bytes_io, filename, upload_mime_type, wait_label)

_3D_MIME_TYPES = {
"glb": "model/gltf-binary",
"obj": "model/obj",
"fbx": "application/octet-stream",
}

async def upload_3d_model_to_comfyapi(
cls: type[IO.ComfyNode],
model_3d: Types.File3D,
file_format: str,
) -> str:
"""Uploads a 3D model file to ComfyUI API and returns its download URL."""
return await upload_file_to_comfyapi(
cls,
model_3d.get_data(),
f"{uuid.uuid4()}.{file_format}",
_3D_MIME_TYPES.get(file_format, "application/octet-stream"),
)

async def upload_file_to_comfyapi(
cls: type[IO.ComfyNode],
file_bytes_io: BytesIO,
@@ -12,8 +12,6 @@ class TextEncodeAceStepAudio(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="TextEncodeAceStepAudio",
|
||||
category="conditioning",
|
||||
description="Encodes tags and lyrics into conditioning for ACE-Step 1.0 audio generation with adjustable lyrics strength.",
|
||||
short_description="Encodes tags and lyrics for ACE-Step 1.0 audio.",
|
||||
inputs=[
|
||||
io.Clip.Input("clip"),
|
||||
io.String.Input("tags", multiline=True, dynamic_prompts=True),
|
||||
@@ -36,8 +34,6 @@ class TextEncodeAceStepAudio15(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="TextEncodeAceStepAudio1.5",
|
||||
category="conditioning",
|
||||
description="Encodes tags, lyrics, and music parameters like BPM, key, and language into conditioning for ACE-Step 1.5 audio generation.",
|
||||
short_description="Encodes text and music parameters for ACE-Step 1.5.",
|
||||
inputs=[
|
||||
io.Clip.Input("clip"),
|
||||
io.String.Input("tags", multiline=True, dynamic_prompts=True),
|
||||
@@ -72,8 +68,6 @@ class EmptyAceStepLatentAudio(io.ComfyNode):
|
||||
node_id="EmptyAceStepLatentAudio",
|
||||
display_name="Empty Ace Step 1.0 Latent Audio",
|
||||
category="latent/audio",
|
||||
description="Creates an empty latent audio tensor for ACE-Step 1.0 with a specified duration and batch size.",
|
||||
short_description="Creates an empty ACE-Step 1.0 audio latent.",
|
||||
inputs=[
|
||||
io.Float.Input("seconds", default=120.0, min=1.0, max=1000.0, step=0.1),
|
||||
io.Int.Input(
|
||||
@@ -97,8 +91,6 @@ class EmptyAceStep15LatentAudio(io.ComfyNode):
|
||||
node_id="EmptyAceStep1.5LatentAudio",
|
||||
display_name="Empty Ace Step 1.5 Latent Audio",
|
||||
category="latent/audio",
|
||||
description="Creates an empty latent audio tensor for ACE-Step 1.5 with a specified duration and batch size.",
|
||||
short_description="Creates an empty ACE-Step 1.5 audio latent.",
|
||||
inputs=[
|
||||
io.Float.Input("seconds", default=120.0, min=1.0, max=1000.0, step=0.01),
|
||||
io.Int.Input(
|
||||
@@ -123,7 +115,6 @@ class ReferenceAudio(io.ComfyNode):
|
||||
category="advanced/conditioning/audio",
|
||||
is_experimental=True,
|
||||
description="This node sets the reference audio for ace step 1.5",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
io.Conditioning.Input("conditioning"),
|
||||
io.Latent.Input("latent", optional=True),
|
|
||||
return io.Schema(
|
||||
node_id="SamplerLCMUpscale",
|
||||
category="sampling/custom_sampling/samplers",
|
||||
description="Sampler that progressively upscales the latent during LCM sampling steps, combining denoising with gradual resolution increase.",
|
||||
short_description="LCM sampler with progressive latent upscaling.",
|
||||
inputs=[
|
||||
io.Float.Input("scale_ratio", default=1.0, min=0.1, max=20.0, step=0.01),
|
||||
io.Int.Input("scale_steps", default=-1, min=-1, max=1000, step=1),
|
||||
@@ -95,8 +93,6 @@ class SamplerEulerCFGpp(io.ComfyNode):
|
||||
node_id="SamplerEulerCFGpp",
|
||||
display_name="SamplerEulerCFG++",
|
||||
category="_for_testing", # "sampling/custom_sampling/samplers"
|
||||
description="Euler sampler variant using the CFG++ formulation, which modifies the denoising direction using unconditional predictions for improved guidance.",
|
||||
short_description="Euler sampler using CFG++ guidance formulation.",
|
||||
inputs=[
|
||||
io.Combo.Input("version", options=["regular", "alternative"]),
|
||||
],
|
||||
|
||||
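Note: SamplerLCMUpscale grows the latent resolution during sampling by scale_ratio over scale_steps. A minimal sketch of the resize step only (the actual node interleaves this with LCM denoising and may use a different resampling mode):

import torch
import torch.nn.functional as F

def upscale_latent(latent: torch.Tensor, scale_ratio: float) -> torch.Tensor:
    # latent is [batch, channels, height, width]; bicubic mode is an assumption
    h = round(latent.shape[-2] * scale_ratio)
    w = round(latent.shape[-1] * scale_ratio)
    return F.interpolate(latent, size=(h, w), mode="bicubic")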
@@ -30,8 +30,6 @@ class AlignYourStepsScheduler(io.ComfyNode):
node_id="AlignYourStepsScheduler",
search_aliases=["AYS scheduler"],
category="sampling/custom_sampling/schedulers",
description="Generates an optimized noise schedule using the Align Your Steps method with log-linear interpolation.",
short_description="Optimized noise schedule using Align Your Steps.",
inputs=[
io.Combo.Input("model_type", options=["SD1", "SDXL", "SVD"]),
io.Int.Input("steps", default=10, min=1, max=10000),
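Note: the Align Your Steps scheduler starts from a small table of pre-optimized sigmas per model type and resamples it to the requested step count, linearly in log space. A generic sketch of that interpolation over a positive sigma table (the actual per-model tables live in the node file and are not shown in this hunk):

import numpy as np

def loglinear_interp(base_sigmas, steps):
    # Resample a precomputed sigma table to `steps` values, linear in log(sigma).
    xs = np.linspace(0.0, 1.0, len(base_sigmas))
    new_xs = np.linspace(0.0, 1.0, steps)
    return np.exp(np.interp(new_xs, xs, np.log(np.asarray(base_sigmas))))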
@@ -17,8 +17,6 @@ class APG(io.ComfyNode):
node_id="APG",
display_name="Adaptive Projected Guidance",
category="sampling/custom_sampling",
description="Applies Adaptive Projected Guidance to a model, decomposing CFG guidance into parallel and orthogonal components with optional momentum and norm thresholding for improved sampling quality.",
short_description="Decomposes CFG guidance with projection and normalization.",
inputs=[
io.Model.Input("model"),
io.Float.Input(
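Note: per the description, APG splits the CFG delta (cond minus uncond) into components parallel and orthogonal to the conditional prediction and reweights them; momentum and norm thresholding are applied on top. A sketch of just the projection step, not the full node:

import torch

def project(guidance: torch.Tensor, cond: torch.Tensor):
    # Decompose `guidance` into parts parallel/orthogonal to `cond`, per batch element.
    v = cond.flatten(1)
    g = guidance.flatten(1)
    parallel = (g * v).sum(dim=1, keepdim=True) / (v * v).sum(dim=1, keepdim=True) * v
    orthogonal = g - parallel
    return parallel.view_as(guidance), orthogonal.view_as(guidance)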
@@ -26,8 +26,6 @@ class UNetSelfAttentionMultiply(io.ComfyNode):
return io.Schema(
node_id="UNetSelfAttentionMultiply",
category="_for_testing/attention_experiments",
description="Scales the query, key, value, and output weights of UNet self-attention layers by specified multipliers to experiment with attention behavior.",
short_description="Scale UNet self-attention Q/K/V/Out weights.",
inputs=[
io.Model.Input("model"),
io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01),
@@ -51,8 +49,6 @@ class UNetCrossAttentionMultiply(io.ComfyNode):
return io.Schema(
node_id="UNetCrossAttentionMultiply",
category="_for_testing/attention_experiments",
description="Scales the query, key, value, and output weights of UNet cross-attention layers by specified multipliers to experiment with text-to-image attention.",
short_description="Scale UNet cross-attention Q/K/V/Out weights.",
inputs=[
io.Model.Input("model"),
io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01),
@@ -77,8 +73,6 @@ class CLIPAttentionMultiply(io.ComfyNode):
node_id="CLIPAttentionMultiply",
search_aliases=["clip attention scale", "text encoder attention"],
category="_for_testing/attention_experiments",
description="Scales the query, key, value, and output projection weights of CLIP text encoder self-attention layers by specified multipliers.",
short_description="Scale CLIP text encoder attention weights.",
inputs=[
io.Clip.Input("clip"),
io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01),
@@ -113,8 +107,6 @@ class UNetTemporalAttentionMultiply(io.ComfyNode):
return io.Schema(
node_id="UNetTemporalAttentionMultiply",
category="_for_testing/attention_experiments",
description="Scales the output weights of UNet temporal and structural attention layers independently, allowing fine-grained control over video model attention behavior.",
short_description="Scale UNet temporal and structural attention weights.",
inputs=[
io.Model.Input("model"),
io.Float.Input("self_structural", default=1.0, min=0.0, max=10.0, step=0.01),
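Note: all four attention-multiply nodes above boil down to scaling attention projection weights by per-projection factors. A generic sketch of the idea; the substring patterns are illustrative assumptions, since the real nodes target the specific key names of self-, cross-, CLIP, and temporal attention layers:

def scale_attention_weights(state_dict: dict, q=1.0, k=1.0, v=1.0, out=1.0) -> dict:
    # Multiply q/k/v/out projection weights by their factors, leave everything else as-is.
    factors = {".to_q.": q, ".to_k.": k, ".to_v.": v, ".to_out.": out}
    patched = {}
    for name, tensor in state_dict.items():
        scale = next((f for pat, f in factors.items() if pat in name), 1.0)
        patched[name] = tensor * scale if scale != 1.0 else tensor
    return patched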
@@ -19,8 +19,6 @@ class EmptyLatentAudio(IO.ComfyNode):
node_id="EmptyLatentAudio",
display_name="Empty Latent Audio",
category="latent/audio",
description="Creates an empty latent audio tensor with a specified duration and batch size for Stable Audio generation.",
short_description="Creates an empty latent audio tensor.",
inputs=[
IO.Float.Input("seconds", default=47.6, min=1.0, max=1000.0, step=0.1),
IO.Int.Input(
@@ -45,8 +43,6 @@ class ConditioningStableAudio(IO.ComfyNode):
return IO.Schema(
node_id="ConditioningStableAudio",
category="conditioning",
description="Sets the start time and total duration on Stable Audio positive and negative conditioning.",
short_description="Sets timing parameters on Stable Audio conditioning.",
inputs=[
IO.Conditioning.Input("positive"),
IO.Conditioning.Input("negative"),
@@ -76,8 +72,6 @@ class VAEEncodeAudio(IO.ComfyNode):
search_aliases=["audio to latent"],
display_name="VAE Encode Audio",
category="latent/audio",
description="Encodes an audio waveform into a latent representation using a VAE, resampling if needed.",
short_description="Encodes audio into latent via VAE.",
inputs=[
IO.Audio.Input("audio"),
IO.Vae.Input("vae"),
@@ -121,8 +115,6 @@ class VAEDecodeAudio(IO.ComfyNode):
search_aliases=["latent to audio"],
display_name="VAE Decode Audio",
category="latent/audio",
description="Decodes a latent representation back into an audio waveform using a VAE.",
short_description="Decodes latent into audio via VAE.",
inputs=[
IO.Latent.Input("samples"),
IO.Vae.Input("vae"),
@@ -145,8 +137,6 @@ class VAEDecodeAudioTiled(IO.ComfyNode):
search_aliases=["latent to audio"],
display_name="VAE Decode Audio (Tiled)",
category="latent/audio",
description="Decodes a latent representation into audio using tiled VAE decoding to reduce memory usage.",
short_description="Tiled VAE decoding of latent into audio.",
inputs=[
IO.Latent.Input("samples"),
IO.Vae.Input("vae"),
@@ -169,8 +159,6 @@ class SaveAudio(IO.ComfyNode):
search_aliases=["export flac"],
display_name="Save Audio (FLAC)",
category="audio",
description="Saves audio to disk in FLAC format with a configurable filename prefix.",
short_description="Saves audio to disk in FLAC format.",
inputs=[
IO.Audio.Input("audio"),
IO.String.Input("filename_prefix", default="audio/ComfyUI"),
@@ -196,8 +184,6 @@ class SaveAudioMP3(IO.ComfyNode):
search_aliases=["export mp3"],
display_name="Save Audio (MP3)",
category="audio",
description="Saves audio to disk in MP3 format with configurable quality and filename prefix.",
short_description="Saves audio to disk in MP3 format.",
inputs=[
IO.Audio.Input("audio"),
IO.String.Input("filename_prefix", default="audio/ComfyUI"),
@@ -226,8 +212,6 @@ class SaveAudioOpus(IO.ComfyNode):
search_aliases=["export opus"],
display_name="Save Audio (Opus)",
category="audio",
description="Saves audio to disk in Opus format with configurable quality and filename prefix.",
short_description="Saves audio to disk in Opus format.",
inputs=[
IO.Audio.Input("audio"),
IO.String.Input("filename_prefix", default="audio/ComfyUI"),
@@ -256,8 +240,6 @@ class PreviewAudio(IO.ComfyNode):
search_aliases=["play audio"],
display_name="Preview Audio",
category="audio",
description="Plays back audio in the UI for previewing.",
short_description=None,
inputs=[
IO.Audio.Input("audio"),
],
@@ -318,8 +300,6 @@ class LoadAudio(IO.ComfyNode):
search_aliases=["import audio", "open audio", "audio file"],
display_name="Load Audio",
category="audio",
description="Loads an audio or video file from disk and outputs the audio as a single Audio output.",
short_description="Loads an audio file from disk.",
inputs=[
IO.Combo.Input("audio", upload=IO.UploadType.audio, options=sorted(files)),
],
@@ -358,7 +338,6 @@ class RecordAudio(IO.ComfyNode):
search_aliases=["microphone input", "audio capture", "voice input"],
display_name="Record Audio",
category="audio",
description="Records audio from a microphone input and outputs the captured audio.",
inputs=[
IO.Custom("AUDIO_RECORD").Input("audio"),
],
@@ -384,7 +363,6 @@ class TrimAudioDuration(IO.ComfyNode):
search_aliases=["cut audio", "audio clip", "shorten audio"],
display_name="Trim Audio Duration",
description="Trim audio tensor into chosen time range.",
short_description=None,
category="audio",
inputs=[
IO.Audio.Input("audio"),
@@ -438,7 +416,6 @@ class SplitAudioChannels(IO.ComfyNode):
search_aliases=["stereo to mono"],
display_name="Split Audio Channels",
description="Separates the audio into left and right channels.",
short_description=None,
category="audio",
inputs=[
IO.Audio.Input("audio"),
@@ -471,7 +448,6 @@ class JoinAudioChannels(IO.ComfyNode):
node_id="JoinAudioChannels",
display_name="Join Audio Channels",
description="Joins left and right mono audio channels into a stereo audio.",
short_description=None,
category="audio",
inputs=[
IO.Audio.Input("audio_left"),
@@ -541,7 +517,6 @@ class AudioConcat(IO.ComfyNode):
search_aliases=["join audio", "combine audio", "append audio"],
display_name="Audio Concat",
description="Concatenates the audio1 to audio2 in the specified direction.",
short_description=None,
category="audio",
inputs=[
IO.Audio.Input("audio1"),
@@ -590,7 +565,6 @@ class AudioMerge(IO.ComfyNode):
search_aliases=["mix audio", "overlay audio", "layer audio"],
display_name="Audio Merge",
description="Combine two audio tracks by overlaying their waveforms.",
short_description=None,
category="audio",
inputs=[
IO.Audio.Input("audio1"),
@@ -652,8 +626,6 @@ class AudioAdjustVolume(IO.ComfyNode):
search_aliases=["audio gain", "loudness", "audio level"],
display_name="Audio Adjust Volume",
category="audio",
description="Adjusts audio volume by a specified number of decibels.",
short_description=None,
inputs=[
IO.Audio.Input("audio"),
IO.Int.Input(
@@ -690,8 +662,6 @@ class EmptyAudio(IO.ComfyNode):
search_aliases=["blank audio"],
display_name="Empty Audio",
category="audio",
description="Creates a silent audio clip with configurable duration, sample rate, and channel count.",
short_description="Creates a silent audio clip.",
inputs=[
IO.Float.Input(
"duration",
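Note: AudioAdjustVolume takes a change in decibels; the usual conversion to a linear gain is 10 ** (dB / 20), so +6 dB roughly doubles the amplitude. A minimal sketch of that mapping (any clipping or limiting the node applies is not shown in this hunk):

def adjust_volume(waveform, gain_db: float):
    # Decibel change -> linear amplitude gain.
    return waveform * (10.0 ** (gain_db / 20.0))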
@@ -11,8 +11,6 @@ class AudioEncoderLoader(io.ComfyNode):
return io.Schema(
node_id="AudioEncoderLoader",
category="loaders",
description="Loads an audio encoder model from a checkpoint file for encoding audio into embeddings.",
short_description="Loads an audio encoder model from a checkpoint.",
inputs=[
io.Combo.Input(
"audio_encoder_name",
@@ -38,8 +36,6 @@ class AudioEncoderEncode(io.ComfyNode):
return io.Schema(
node_id="AudioEncoderEncode",
category="conditioning",
description="Encodes audio into embeddings using a loaded audio encoder model.",
short_description=None,
inputs=[
io.AudioEncoder.Input("audio_encoder"),
io.Audio.Input("audio"),
@@ -154,8 +154,6 @@ class WanCameraEmbedding(io.ComfyNode):
return io.Schema(
node_id="WanCameraEmbedding",
category="camera",
description="Generates Plucker camera embeddings from a selected camera motion trajectory for Wan video generation.",
short_description="Generates camera embeddings for Wan video generation.",
inputs=[
io.Combo.Input(
"camera_pose",
@@ -12,8 +12,6 @@ class Canny(io.ComfyNode):
node_id="Canny",
search_aliases=["edge detection", "outline", "contour detection", "line art"],
category="image/preprocessors",
description="Detects edges in an image using the Canny edge detection algorithm with configurable low and high thresholds.",
short_description="Canny edge detection on images.",
inputs=[
io.Image.Input("image"),
io.Float.Input("low_threshold", default=0.4, min=0.01, max=0.99, step=0.01),
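Note: the thresholds exposed above are normalized floats (0.01 to 0.99). As a point of comparison, the same operation with OpenCV would take thresholds on the gradient-magnitude scale; a sketch, assuming an 8-bit grayscale array gray_u8 (a hypothetical variable, and the x255 mapping is an assumption about how the normalized values relate):

import cv2

edges = cv2.Canny(gray_u8, int(0.4 * 255), int(0.8 * 255))  # low, high thresholds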
@@ -27,8 +27,6 @@ class CFGZeroStar(io.ComfyNode):
return io.Schema(
node_id="CFGZeroStar",
category="advanced/guidance",
description="Applies CFG-Zero* post-CFG correction that computes an optimal scaling factor between conditional and unconditional predictions to reduce CFG artifacts.",
short_description="CFG-Zero* guidance correction to reduce artifacts.",
inputs=[
io.Model.Input("model"),
],
@@ -56,8 +54,6 @@ class CFGNorm(io.ComfyNode):
return io.Schema(
node_id="CFGNorm",
category="advanced/guidance",
description="Constrains the CFG-guided prediction norm to not exceed the conditional prediction norm, helping to prevent oversaturation at high CFG scales.",
short_description="Constrain CFG output norm to conditional prediction norm.",
inputs=[
io.Model.Input("model"),
io.Float.Input("strength", default=1.0, min=0.0, max=100.0, step=0.01),
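Note: CFGNorm caps the norm of the CFG-combined prediction at the norm of the conditional prediction. A sketch of how such a cap could look; whether the norm is taken per batch element and exactly how the strength input blends toward the capped result are assumptions, not taken from this diff:

import torch

def cfg_norm(cond, uncond, cfg_scale):
    # Standard CFG combine, then rescale so the result's norm never exceeds cond's norm.
    guided = uncond + cfg_scale * (cond - uncond)
    cap = torch.clamp(
        torch.linalg.vector_norm(cond) / (torch.linalg.vector_norm(guided) + 1e-8),
        max=1.0,
    )
    return guided * cap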
@@ -14,8 +14,6 @@ class EmptyChromaRadianceLatentImage(io.ComfyNode):
return io.Schema(
node_id="EmptyChromaRadianceLatentImage",
category="latent/chroma_radiance",
description="Creates an empty Chroma Radiance latent image tensor with the specified width, height, and batch size.",
short_description="Creates an empty Chroma Radiance latent image.",
inputs=[
io.Int.Input(id="width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
io.Int.Input(id="height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
@@ -37,7 +35,6 @@ class ChromaRadianceOptions(io.ComfyNode):
node_id="ChromaRadianceOptions",
category="model_patches/chroma_radiance",
description="Allows setting advanced options for the Chroma Radiance model.",
short_description=None,
inputs=[
io.Model.Input(id="model"),
io.Boolean.Input(
@@ -10,8 +10,6 @@ class CLIPTextEncodeSDXLRefiner(io.ComfyNode):
return io.Schema(
node_id="CLIPTextEncodeSDXLRefiner",
category="advanced/conditioning",
description="Encodes text for SDXL refiner models with aesthetic score and resolution conditioning parameters.",
short_description="Encodes text for SDXL refiner models.",
inputs=[
io.Float.Input("ascore", default=6.0, min=0.0, max=1000.0, step=0.01),
io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION),
@@ -33,8 +31,6 @@ class CLIPTextEncodeSDXL(io.ComfyNode):
return io.Schema(
node_id="CLIPTextEncodeSDXL",
category="advanced/conditioning",
description="Encodes separate G and L text prompts for SDXL models with resolution and crop conditioning parameters.",
short_description="Encodes dual text prompts for SDXL models.",
inputs=[
io.Clip.Input("clip"),
io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION),
@@ -10,7 +10,6 @@ class ColorToRGBInt(io.ComfyNode):
display_name="Color to RGB Int",
category="utils",
description="Convert a color to a RGB integer value.",
short_description=None,
inputs=[
io.Color.Input("color"),
],
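Note: assuming the Color input arrives as a hex string such as "#RRGGBB" (an assumption; the diff does not show the input's wire format), the conversion to a single RGB integer is just a base-16 parse; packing separate channels would be (r << 16) | (g << 8) | b.

def color_to_rgb_int(color: str) -> int:
    # "#RRGGBB" or "RRGGBB" -> integer 0xRRGGBB
    return int(color.lstrip("#"), 16)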
@@ -112,8 +112,6 @@ class PorterDuffImageComposite(io.ComfyNode):
search_aliases=["alpha composite", "blend modes", "layer blend", "transparency blend"],
display_name="Porter-Duff Image Composite",
category="mask/compositing",
description="Composites two images with alpha masks using Porter-Duff blend modes.",
short_description="",
inputs=[
io.Image.Input("source"),
io.Mask.Input("source_alpha"),
@@ -171,8 +169,6 @@ class SplitImageWithAlpha(io.ComfyNode):
search_aliases=["extract alpha", "separate transparency", "remove alpha"],
display_name="Split Image with Alpha",
category="mask/compositing",
description="Separates an RGBA image into its RGB color channels and an alpha transparency mask.",
short_description="Split RGBA image into RGB and alpha mask.",
inputs=[
io.Image.Input("image"),
],
@@ -197,8 +193,6 @@ class JoinImageWithAlpha(io.ComfyNode):
search_aliases=["add transparency", "apply alpha", "composite alpha", "RGBA"],
display_name="Join Image with Alpha",
category="mask/compositing",
description="Combines an RGB image with an alpha mask to produce an RGBA image with transparency.",
short_description="Combine RGB image and alpha into RGBA.",
inputs=[
io.Image.Input("image"),
io.Mask.Input("alpha"),
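Note: "source over" is one of the classic Porter-Duff modes the composite node exposes. The standard formula, as a sketch over float tensors with per-pixel alphas in [0, 1]:

def source_over(src_rgb, src_a, dst_rgb, dst_a):
    # Porter-Duff "source over": source drawn on top of destination.
    out_a = src_a + dst_a * (1.0 - src_a)
    out_rgb = (src_rgb * src_a + dst_rgb * dst_a * (1.0 - src_a)) / out_a.clamp(min=1e-8)
    return out_rgb, out_a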
@@ -9,8 +9,6 @@ class CLIPTextEncodeControlnet(io.ComfyNode):
return io.Schema(
node_id="CLIPTextEncodeControlnet",
category="_for_testing/conditioning",
description="Encodes text with CLIP and attaches the result as cross-attention controlnet conditioning to existing conditioning data.",
short_description="CLIP text encode for controlnet cross-attention conditioning.",
inputs=[
io.Clip.Input("clip"),
io.Conditioning.Input("conditioning"),
@@ -38,8 +36,6 @@ class T5TokenizerOptions(io.ComfyNode):
return io.Schema(
node_id="T5TokenizerOptions",
category="_for_testing/conditioning",
description="Configures minimum padding and length options for T5-family tokenizers used in CLIP text encoding.",
short_description="Set T5 tokenizer padding and length options.",
inputs=[
io.Clip.Input("clip"),
io.Int.Input("min_padding", default=0, min=0, max=10000, step=1),
@@ -12,7 +12,6 @@ class ContextWindowsManualNode(io.ComfyNode):
display_name="Context Windows (Manual)",
category="context",
description="Manually set context windows.",
short_description=None,
inputs=[
io.Model.Input("model", tooltip="The model to apply context windows to during sampling."),
io.Int.Input("context_length", min=1, default=16, tooltip="The length of the context window."),
@@ -66,7 +65,6 @@ class WanContextWindowsManualNode(ContextWindowsManualNode):
schema.node_id = "WanContextWindowsManual"
schema.display_name = "WAN Context Windows (Manual)"
schema.description = "Manually set context windows for WAN-like models (dim=2)."
schema.short_description = None
schema.inputs = [
io.Model.Input("model", tooltip="The model to apply context windows to during sampling."),
io.Int.Input("context_length", min=1, max=nodes.MAX_RESOLUTION, step=4, default=81, tooltip="The length of the context window."),
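Note: context-window sampling slides a fixed-length window along the frame axis so long videos can be denoised in overlapping chunks. A generic sketch of how such windows could be laid out; only context_length is visible in the hunks above, so the overlap parameter here is an assumption:

def context_windows(num_frames: int, context_length: int, overlap: int):
    # Return (start, end) index pairs covering the frame axis with overlapping windows.
    stride = max(context_length - overlap, 1)
    starts = range(0, max(num_frames - context_length, 0) + 1, stride)
    return [(s, min(s + context_length, num_frames)) for s in starts]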
@@ -10,8 +10,6 @@ class SetUnionControlNetType(io.ComfyNode):
return io.Schema(
node_id="SetUnionControlNetType",
category="conditioning/controlnet",
description="Sets the control type for a Union ControlNet, selecting which conditioning mode to use.",
short_description="Select control mode for Union ControlNet.",
inputs=[
io.ControlNet.Input("control_net"),
io.Combo.Input("type", options=["auto"] + list(UNION_CONTROLNET_TYPES.keys())),
@@ -42,8 +40,6 @@ class ControlNetInpaintingAliMamaApply(io.ComfyNode):
node_id="ControlNetInpaintingAliMamaApply",
search_aliases=["masked controlnet"],
category="conditioning/controlnet",
description="Applies an AliMama inpainting ControlNet to positive and negative conditioning using an image and mask with VAE encoding.",
short_description="Applies AliMama inpainting ControlNet with mask.",
inputs=[
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),
@@ -14,7 +14,6 @@ class EmptyCosmosLatentVideo(io.ComfyNode):
return io.Schema(
node_id="EmptyCosmosLatentVideo",
category="latent/video",
description="Creates an empty latent tensor sized for Cosmos video generation.",
inputs=[
io.Int.Input("width", default=1280, min=16, max=nodes.MAX_RESOLUTION, step=16),
io.Int.Input("height", default=704, min=16, max=nodes.MAX_RESOLUTION, step=16),
@@ -47,8 +46,6 @@ class CosmosImageToVideoLatent(io.ComfyNode):
return io.Schema(
node_id="CosmosImageToVideoLatent",
category="conditioning/inpaint",
description="Creates an inpainting video latent for Cosmos by encoding optional start and end images with a noise mask.",
short_description="Cosmos inpainting video latent from start/end images.",
inputs=[
io.Vae.Input("vae"),
io.Int.Input("width", default=1280, min=16, max=nodes.MAX_RESOLUTION, step=16),
@@ -92,8 +89,6 @@ class CosmosPredict2ImageToVideoLatent(io.ComfyNode):
return io.Schema(
node_id="CosmosPredict2ImageToVideoLatent",
category="conditioning/inpaint",
description="Creates an inpainting video latent for Cosmos Predict2 by encoding optional start and end images with Wan latent format processing.",
short_description="Cosmos Predict2 inpainting video latent from images.",
inputs=[
io.Vae.Input("vae"),
io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16),
@@ -18,8 +18,6 @@ class BasicScheduler(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="BasicScheduler",
|
||||
category="sampling/custom_sampling/schedulers",
|
||||
description="Generates a sigma schedule from a model using a selected scheduler algorithm, step count, and denoise strength.",
|
||||
short_description="Generate sigma schedule from model and scheduler.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Combo.Input("scheduler", options=comfy.samplers.SCHEDULER_NAMES),
|
||||
@@ -50,8 +48,6 @@ class KarrasScheduler(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="KarrasScheduler",
|
||||
category="sampling/custom_sampling/schedulers",
|
||||
description="Generates a sigma schedule using the Karras noise schedule with configurable sigma range and rho parameter.",
|
||||
short_description="Generate sigmas using Karras noise schedule.",
|
||||
inputs=[
|
||||
io.Int.Input("steps", default=20, min=1, max=10000),
|
||||
io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False),
|
||||
@@ -74,8 +70,6 @@ class ExponentialScheduler(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="ExponentialScheduler",
|
||||
category="sampling/custom_sampling/schedulers",
|
||||
description="Generates a sigma schedule using an exponential noise schedule with configurable sigma range.",
|
||||
short_description="Generate sigmas using exponential noise schedule.",
|
||||
inputs=[
|
||||
io.Int.Input("steps", default=20, min=1, max=10000),
|
||||
io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False),
|
||||
@@ -97,8 +91,6 @@ class PolyexponentialScheduler(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="PolyexponentialScheduler",
|
||||
category="sampling/custom_sampling/schedulers",
|
||||
description="Generates a sigma schedule using a polyexponential noise schedule with configurable sigma range and rho parameter.",
|
||||
short_description="Generate sigmas using polyexponential noise schedule.",
|
||||
inputs=[
|
||||
io.Int.Input("steps", default=20, min=1, max=10000),
|
||||
io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False),
|
||||
@@ -121,8 +113,6 @@ class LaplaceScheduler(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="LaplaceScheduler",
|
||||
category="sampling/custom_sampling/schedulers",
|
||||
description="Generates a sigma schedule using a Laplace distribution-based noise schedule with configurable mu and beta parameters.",
|
||||
short_description="Generate sigmas using Laplace distribution schedule.",
|
||||
inputs=[
|
||||
io.Int.Input("steps", default=20, min=1, max=10000),
|
||||
io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False),
|
||||
@@ -147,8 +137,6 @@ class SDTurboScheduler(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SDTurboScheduler",
|
||||
category="sampling/custom_sampling/schedulers",
|
||||
description="Generates a sigma schedule optimized for SD Turbo models with very few steps and adjustable denoise strength.",
|
||||
short_description="Generate sigma schedule for SD Turbo models.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Int.Input("steps", default=1, min=1, max=10),
|
||||
@@ -173,8 +161,6 @@ class BetaSamplingScheduler(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="BetaSamplingScheduler",
|
||||
category="sampling/custom_sampling/schedulers",
|
||||
description="Generates a sigma schedule using a beta distribution with configurable alpha and beta shape parameters.",
|
||||
short_description="Generate sigmas using beta distribution schedule.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Int.Input("steps", default=20, min=1, max=10000),
|
||||
@@ -197,8 +183,6 @@ class VPScheduler(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="VPScheduler",
|
||||
category="sampling/custom_sampling/schedulers",
|
||||
description="Generates a sigma schedule using the Variance Preserving (VP) SDE formulation with configurable beta and epsilon parameters.",
|
||||
short_description="Generate sigmas using VP SDE schedule.",
|
||||
inputs=[
|
||||
io.Int.Input("steps", default=20, min=1, max=10000),
|
||||
io.Float.Input("beta_d", default=19.9, min=0.0, max=5000.0, step=0.01, round=False), #TODO: fix default values
|
||||
@@ -221,8 +205,6 @@ class SplitSigmas(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SplitSigmas",
|
||||
category="sampling/custom_sampling/sigmas",
|
||||
description="Splits a sigma sequence into high and low portions at a specified step index for multi-pass sampling.",
|
||||
short_description="Split sigmas into high and low at a step.",
|
||||
inputs=[
|
||||
io.Sigmas.Input("sigmas"),
|
||||
io.Int.Input("step", default=0, min=0, max=10000),
|
||||
@@ -247,8 +229,6 @@ class SplitSigmasDenoise(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SplitSigmasDenoise",
|
||||
category="sampling/custom_sampling/sigmas",
|
||||
description="Splits a sigma sequence into high and low portions based on a denoise ratio for multi-pass sampling workflows.",
|
||||
short_description="Split sigmas by denoise ratio.",
|
||||
inputs=[
|
||||
io.Sigmas.Input("sigmas"),
|
||||
io.Float.Input("denoise", default=1.0, min=0.0, max=1.0, step=0.01),
|
||||
@@ -275,8 +255,6 @@ class FlipSigmas(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="FlipSigmas",
|
||||
category="sampling/custom_sampling/sigmas",
|
||||
description="Reverses the order of a sigma sequence, useful for converting between ascending and descending noise schedules.",
|
||||
short_description="Reverse the order of a sigma sequence.",
|
||||
inputs=[io.Sigmas.Input("sigmas")],
|
||||
outputs=[io.Sigmas.Output()]
|
||||
)
|
||||
@@ -299,8 +277,6 @@ class SetFirstSigma(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SetFirstSigma",
|
||||
category="sampling/custom_sampling/sigmas",
|
||||
description="Overrides the first sigma value in a sequence with a custom value, allowing manual control of the initial noise level.",
|
||||
short_description="Override the first sigma value in a sequence.",
|
||||
inputs=[
|
||||
io.Sigmas.Input("sigmas"),
|
||||
io.Float.Input("sigma", default=136.0, min=0.0, max=20000.0, step=0.001, round=False),
|
||||
@@ -323,8 +299,6 @@ class ExtendIntermediateSigmas(io.ComfyNode):
|
||||
node_id="ExtendIntermediateSigmas",
|
||||
search_aliases=["interpolate sigmas"],
|
||||
category="sampling/custom_sampling/sigmas",
|
||||
description="Interpolates additional intermediate sigma values between existing steps using selectable spacing within a specified sigma range.",
|
||||
short_description="Interpolate additional sigma steps between existing values.",
|
||||
inputs=[
|
||||
io.Sigmas.Input("sigmas"),
|
||||
io.Int.Input("steps", default=2, min=1, max=100),
|
||||
@@ -378,8 +352,6 @@ class SamplingPercentToSigma(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SamplingPercentToSigma",
|
||||
category="sampling/custom_sampling/sigmas",
|
||||
description="Converts a sampling percentage (0.0 to 1.0) to the corresponding sigma value using a model's noise schedule.",
|
||||
short_description="Convert sampling percentage to sigma value.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Float.Input("sampling_percent", default=0.0, min=0.0, max=1.0, step=0.0001),
|
||||
@@ -408,8 +380,6 @@ class KSamplerSelect(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="KSamplerSelect",
|
||||
category="sampling/custom_sampling/samplers",
|
||||
description="Selects a sampler algorithm by name from the list of available samplers and outputs the sampler object.",
|
||||
short_description="Select a sampler algorithm by name.",
|
||||
inputs=[io.Combo.Input("sampler_name", options=comfy.samplers.SAMPLER_NAMES)],
|
||||
outputs=[io.Sampler.Output()]
|
||||
)
|
||||
@@ -427,8 +397,6 @@ class SamplerDPMPP_3M_SDE(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SamplerDPMPP_3M_SDE",
|
||||
category="sampling/custom_sampling/samplers",
|
||||
description="Creates a DPM++ 3M SDE sampler with configurable eta, noise scale, and GPU or CPU noise generation.",
|
||||
short_description="Create a DPM++ 3M SDE sampler.",
|
||||
inputs=[
|
||||
io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
|
||||
io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
|
||||
@@ -454,8 +422,6 @@ class SamplerDPMPP_2M_SDE(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SamplerDPMPP_2M_SDE",
|
||||
category="sampling/custom_sampling/samplers",
|
||||
description="Creates a DPM++ 2M SDE sampler with configurable solver type, eta, noise scale, and noise device.",
|
||||
short_description="Create a DPM++ 2M SDE sampler.",
|
||||
inputs=[
|
||||
io.Combo.Input("solver_type", options=['midpoint', 'heun']),
|
||||
io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
|
||||
@@ -483,8 +449,6 @@ class SamplerDPMPP_SDE(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SamplerDPMPP_SDE",
|
||||
category="sampling/custom_sampling/samplers",
|
||||
description="Creates a DPM++ SDE sampler with configurable eta, noise scale, r parameter, and noise device.",
|
||||
short_description="Create a DPM++ SDE sampler.",
|
||||
inputs=[
|
||||
io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
|
||||
io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
|
||||
@@ -511,8 +475,6 @@ class SamplerDPMPP_2S_Ancestral(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SamplerDPMPP_2S_Ancestral",
|
||||
category="sampling/custom_sampling/samplers",
|
||||
description="Creates a DPM++ 2S Ancestral sampler with configurable eta and noise scale parameters.",
|
||||
short_description="Create a DPM++ 2S Ancestral sampler.",
|
||||
inputs=[
|
||||
io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
|
||||
io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
|
||||
@@ -533,8 +495,6 @@ class SamplerEulerAncestral(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SamplerEulerAncestral",
|
||||
category="sampling/custom_sampling/samplers",
|
||||
description="Creates an Euler Ancestral sampler with configurable eta and noise scale for stochastic sampling.",
|
||||
short_description="Create an Euler Ancestral stochastic sampler.",
|
||||
inputs=[
|
||||
io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
|
||||
io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
|
||||
@@ -556,8 +516,6 @@ class SamplerEulerAncestralCFGPP(io.ComfyNode):
|
||||
node_id="SamplerEulerAncestralCFGPP",
|
||||
display_name="SamplerEulerAncestralCFG++",
|
||||
category="sampling/custom_sampling/samplers",
|
||||
description="Creates an Euler Ancestral CFG++ sampler that applies classifier-free guidance with improved stability.",
|
||||
short_description="Create an Euler Ancestral CFG++ sampler.",
|
||||
inputs=[
|
||||
io.Float.Input("eta", default=1.0, min=0.0, max=1.0, step=0.01, round=False),
|
||||
io.Float.Input("s_noise", default=1.0, min=0.0, max=10.0, step=0.01, round=False),
|
||||
@@ -580,8 +538,6 @@ class SamplerLMS(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SamplerLMS",
|
||||
category="sampling/custom_sampling/samplers",
|
||||
description="Creates a Linear Multi-Step (LMS) sampler with a configurable order parameter.",
|
||||
short_description="Create a Linear Multi-Step (LMS) sampler.",
|
||||
inputs=[io.Int.Input("order", default=4, min=1, max=100)],
|
||||
outputs=[io.Sampler.Output()]
|
||||
)
|
||||
@@ -599,8 +555,6 @@ class SamplerDPMAdaptative(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SamplerDPMAdaptative",
|
||||
category="sampling/custom_sampling/samplers",
|
||||
description="Creates a DPM Adaptive sampler with configurable order, tolerances, PID coefficients, and stochastic noise parameters for adaptive step-size sampling.",
|
||||
short_description="Create a DPM Adaptive step-size sampler.",
|
||||
inputs=[
|
||||
io.Int.Input("order", default=3, min=2, max=3),
|
||||
io.Float.Input("rtol", default=0.05, min=0.0, max=100.0, step=0.01, round=False),
|
||||
@@ -632,8 +586,6 @@ class SamplerER_SDE(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SamplerER_SDE",
|
||||
category="sampling/custom_sampling/samplers",
|
||||
description="Creates an ER-SDE sampler supporting ER-SDE, reverse-time SDE, and ODE solver types with configurable stochastic strength and staging.",
|
||||
short_description="Create an ER-SDE, reverse-time SDE, or ODE sampler.",
|
||||
inputs=[
|
||||
io.Combo.Input("solver_type", options=["ER-SDE", "Reverse-time SDE", "ODE"]),
|
||||
io.Int.Input("max_stage", default=3, min=1, max=3),
|
||||
@@ -672,8 +624,6 @@ class SamplerSASolver(io.ComfyNode):
|
||||
node_id="SamplerSASolver",
|
||||
search_aliases=["sde"],
|
||||
category="sampling/custom_sampling/samplers",
|
||||
description="Creates an SA-Solver sampler with configurable predictor/corrector orders, SDE region, and PECE mode for high-order diffusion sampling.",
|
||||
short_description="Create an SA-Solver high-order diffusion sampler.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Float.Input("eta", default=1.0, min=0.0, max=10.0, step=0.01, round=False),
|
||||
@@ -734,8 +684,7 @@ class SamplerSEEDS2(io.ComfyNode):
|
||||
"- solver_type=phi_2, r=1.0, eta=0.0\n\n"
|
||||
"exp_heun_2_x0_sde\n"
|
||||
"- solver_type=phi_2, r=1.0, eta=1.0, s_noise=1.0"
|
||||
),
|
||||
short_description="SEEDS2 sampler with configurable solver and SDE settings.",
|
||||
)
|
||||
)
|
||||
|
||||
@classmethod
|
||||
@@ -779,8 +728,6 @@ class SamplerCustom(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SamplerCustom",
|
||||
category="sampling/custom_sampling",
|
||||
description="Runs a complete custom sampling pass by combining a model, sampler, sigmas, and conditioning with optional noise injection.",
|
||||
short_description="Run custom sampling with manual sampler and sigmas.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Boolean.Input("add_noise", default=True),
|
||||
@@ -847,8 +794,6 @@ class BasicGuider(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="BasicGuider",
|
||||
category="sampling/custom_sampling/guiders",
|
||||
description="Creates a basic guider that applies a single conditioning input to guide the diffusion model without classifier-free guidance.",
|
||||
short_description="Create a single-conditioning guider without CFG.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Conditioning.Input("conditioning"),
|
||||
@@ -870,8 +815,6 @@ class CFGGuider(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="CFGGuider",
|
||||
category="sampling/custom_sampling/guiders",
|
||||
description="Creates a classifier-free guidance guider that combines positive and negative conditioning with an adjustable CFG scale.",
|
||||
short_description="Create a CFG guider with positive/negative conditioning.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Conditioning.Input("positive"),
|
||||
@@ -926,8 +869,6 @@ class DualCFGGuider(io.ComfyNode):
|
||||
node_id="DualCFGGuider",
|
||||
search_aliases=["dual prompt guidance"],
|
||||
category="sampling/custom_sampling/guiders",
|
||||
description="Creates a dual classifier-free guidance guider that blends two conditioning inputs against a negative with independent CFG scales and regular or nested styles.",
|
||||
short_description="Create a dual CFG guider with two conditionings.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Conditioning.Input("cond1"),
|
||||
@@ -956,8 +897,6 @@ class DisableNoise(io.ComfyNode):
|
||||
node_id="DisableNoise",
|
||||
search_aliases=["zero noise"],
|
||||
category="sampling/custom_sampling/noise",
|
||||
description="Produces a zero-noise source that disables noise injection, useful for deterministic sampling or img2img without added noise.",
|
||||
short_description="Produce zero noise to disable noise injection.",
|
||||
inputs=[],
|
||||
outputs=[io.Noise.Output()]
|
||||
)
|
||||
@@ -975,8 +914,6 @@ class RandomNoise(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="RandomNoise",
|
||||
category="sampling/custom_sampling/noise",
|
||||
description="Produces a random noise source from a seed value for use in custom sampling workflows.",
|
||||
short_description="Produce seeded random noise for sampling.",
|
||||
inputs=[io.Int.Input("noise_seed", default=0, min=0, max=0xffffffffffffffff, control_after_generate=True)],
|
||||
outputs=[io.Noise.Output()]
|
||||
)
|
||||
@@ -994,8 +931,6 @@ class SamplerCustomAdvanced(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SamplerCustomAdvanced",
|
||||
category="sampling/custom_sampling",
|
||||
description="Runs an advanced custom sampling pass using separate noise, guider, sampler, and sigmas inputs for maximum control over the diffusion process.",
|
||||
short_description="Run advanced custom sampling with separate components.",
|
||||
inputs=[
|
||||
io.Noise.Input("noise"),
|
||||
io.Guider.Input("guider"),
|
||||
@@ -1050,8 +985,6 @@ class AddNoise(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="AddNoise",
|
||||
category="_for_testing/custom_sampling/noise",
|
||||
description="Adds scaled noise to a latent image using the model's noise schedule and sigma values for manual noise injection.",
|
||||
short_description="Add scaled noise to a latent image.",
|
||||
is_experimental=True,
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
@@ -1102,8 +1035,6 @@ class ManualSigmas(io.ComfyNode):
|
||||
node_id="ManualSigmas",
|
||||
search_aliases=["custom noise schedule", "define sigmas"],
|
||||
category="_for_testing/custom_sampling",
|
||||
description="Defines a custom sigma sequence by manually entering comma-separated numeric values as a text string.",
|
||||
short_description="Define custom sigmas from comma-separated values.",
|
||||
is_experimental=True,
|
||||
inputs=[
|
||||
io.String.Input("sigmas", default="1, 0.5", multiline=False)
|
||||
|
||||
@@ -49,8 +49,6 @@ class LoadImageDataSetFromFolderNode(io.ComfyNode):
|
||||
node_id="LoadImageDataSetFromFolder",
|
||||
display_name="Load Image Dataset from Folder",
|
||||
category="dataset",
|
||||
description="Loads all images from a selected input subfolder and outputs them as a list of image tensors.",
|
||||
short_description="Loads images from a folder as a list.",
|
||||
is_experimental=True,
|
||||
inputs=[
|
||||
io.Combo.Input(
|
||||
@@ -88,8 +86,6 @@ class LoadImageTextDataSetFromFolderNode(io.ComfyNode):
|
||||
node_id="LoadImageTextDataSetFromFolder",
|
||||
display_name="Load Image and Text Dataset from Folder",
|
||||
category="dataset",
|
||||
description="Loads paired images and text captions from a folder, matching each image with its corresponding text file.",
|
||||
short_description="Loads paired images and text captions from folder.",
|
||||
is_experimental=True,
|
||||
inputs=[
|
||||
io.Combo.Input(
|
||||
@@ -212,8 +208,6 @@ class SaveImageDataSetToFolderNode(io.ComfyNode):
|
||||
node_id="SaveImageDataSetToFolder",
|
||||
display_name="Save Image Dataset to Folder",
|
||||
category="dataset",
|
||||
description="Saves a list of images to a named folder in the output directory with configurable filename prefix.",
|
||||
short_description="Saves image list to an output folder.",
|
||||
is_experimental=True,
|
||||
is_output_node=True,
|
||||
is_input_list=True, # Receive images as list
|
||||
@@ -253,8 +247,6 @@ class SaveImageTextDataSetToFolderNode(io.ComfyNode):
|
||||
node_id="SaveImageTextDataSetToFolder",
|
||||
display_name="Save Image and Text Dataset to Folder",
|
||||
category="dataset",
|
||||
description="Saves paired images and text captions to a named folder in the output directory with configurable filename prefix.",
|
||||
short_description="Saves paired images and text to output folder.",
|
||||
is_experimental=True,
|
||||
is_output_node=True,
|
||||
is_input_list=True, # Receive both images and texts as lists
|
||||
@@ -409,8 +401,6 @@ class ImageProcessingNode(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id=cls.node_id,
|
||||
display_name=cls.display_name or cls.node_id,
|
||||
description=getattr(cls, 'description', ''),
|
||||
short_description=getattr(cls, 'short_description', ''),
|
||||
category="dataset/image",
|
||||
is_experimental=True,
|
||||
is_input_list=is_group, # True for group, False for individual
|
||||
@@ -560,8 +550,6 @@ class TextProcessingNode(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id=cls.node_id,
|
||||
display_name=cls.display_name or cls.node_id,
|
||||
description=getattr(cls, 'description', ''),
|
||||
short_description=getattr(cls, 'short_description', ''),
|
||||
category="dataset/text",
|
||||
is_experimental=True,
|
||||
is_input_list=is_group, # True for group, False for individual
|
||||
@@ -639,7 +627,6 @@ class ResizeImagesByShorterEdgeNode(ImageProcessingNode):
|
||||
node_id = "ResizeImagesByShorterEdge"
|
||||
display_name = "Resize Images by Shorter Edge"
|
||||
description = "Resize images so that the shorter edge matches the specified length while preserving aspect ratio."
|
||||
short_description = "Resizes images by shorter edge preserving aspect ratio."
|
||||
extra_inputs = [
|
||||
io.Int.Input(
|
||||
"shorter_edge",
|
||||
@@ -668,7 +655,6 @@ class ResizeImagesByLongerEdgeNode(ImageProcessingNode):
|
||||
node_id = "ResizeImagesByLongerEdge"
|
||||
display_name = "Resize Images by Longer Edge"
|
||||
description = "Resize images so that the longer edge matches the specified length while preserving aspect ratio."
|
||||
short_description = "Resizes images by longer edge preserving aspect ratio."
|
||||
extra_inputs = [
|
||||
io.Int.Input(
|
||||
"longer_edge",
|
||||
@@ -700,7 +686,6 @@ class CenterCropImagesNode(ImageProcessingNode):
|
||||
node_id = "CenterCropImages"
|
||||
display_name = "Center Crop Images"
|
||||
description = "Center crop all images to the specified dimensions."
|
||||
short_description = None
|
||||
extra_inputs = [
|
||||
io.Int.Input("width", default=512, min=1, max=8192, tooltip="Crop width."),
|
||||
io.Int.Input("height", default=512, min=1, max=8192, tooltip="Crop height."),
|
||||
@@ -723,7 +708,6 @@ class RandomCropImagesNode(ImageProcessingNode):
|
||||
description = (
|
||||
"Randomly crop all images to the specified dimensions (for data augmentation)."
|
||||
)
|
||||
short_description = None
|
||||
extra_inputs = [
|
||||
io.Int.Input("width", default=512, min=1, max=8192, tooltip="Crop width."),
|
||||
io.Int.Input("height", default=512, min=1, max=8192, tooltip="Crop height."),
|
||||
@@ -750,7 +734,6 @@ class NormalizeImagesNode(ImageProcessingNode):
|
||||
node_id = "NormalizeImages"
|
||||
display_name = "Normalize Images"
|
||||
description = "Normalize images using mean and standard deviation."
|
||||
short_description = None
|
||||
extra_inputs = [
|
||||
io.Float.Input(
|
||||
"mean",
|
||||
@@ -777,7 +760,6 @@ class AdjustBrightnessNode(ImageProcessingNode):
|
||||
node_id = "AdjustBrightness"
|
||||
display_name = "Adjust Brightness"
|
||||
description = "Adjust brightness of all images."
|
||||
short_description = None
|
||||
extra_inputs = [
|
||||
io.Float.Input(
|
||||
"factor",
|
||||
@@ -797,7 +779,6 @@ class AdjustContrastNode(ImageProcessingNode):
|
||||
node_id = "AdjustContrast"
|
||||
display_name = "Adjust Contrast"
|
||||
description = "Adjust contrast of all images."
|
||||
short_description = None
|
||||
extra_inputs = [
|
||||
io.Float.Input(
|
||||
"factor",
|
||||
@@ -817,7 +798,6 @@ class ShuffleDatasetNode(ImageProcessingNode):
|
||||
node_id = "ShuffleDataset"
|
||||
display_name = "Shuffle Image Dataset"
|
||||
description = "Randomly shuffle the order of images in the dataset."
|
||||
short_description = None
|
||||
is_group_process = True # Requires full list to shuffle
|
||||
extra_inputs = [
|
||||
io.Int.Input(
|
||||
@@ -841,8 +821,6 @@ class ShuffleImageTextDatasetNode(io.ComfyNode):
|
||||
node_id="ShuffleImageTextDataset",
|
||||
display_name="Shuffle Image-Text Dataset",
|
||||
category="dataset/image",
|
||||
description="Randomly shuffles paired image and text lists together using a seed, preserving their correspondence.",
|
||||
short_description="Shuffles paired image-text lists together.",
|
||||
is_experimental=True,
|
||||
is_input_list=True,
|
||||
inputs=[
|
||||
@@ -885,7 +863,6 @@ class TextToLowercaseNode(TextProcessingNode):
|
||||
node_id = "TextToLowercase"
|
||||
display_name = "Text to Lowercase"
|
||||
description = "Convert all texts to lowercase."
|
||||
short_description = None
|
||||
|
||||
@classmethod
|
||||
def _process(cls, text):
|
||||
@@ -896,7 +873,6 @@ class TextToUppercaseNode(TextProcessingNode):
|
||||
node_id = "TextToUppercase"
|
||||
display_name = "Text to Uppercase"
|
||||
description = "Convert all texts to uppercase."
|
||||
short_description = None
|
||||
|
||||
@classmethod
|
||||
def _process(cls, text):
|
||||
@@ -907,7 +883,6 @@ class TruncateTextNode(TextProcessingNode):
|
||||
node_id = "TruncateText"
|
||||
display_name = "Truncate Text"
|
||||
description = "Truncate all texts to a maximum length."
|
||||
short_description = None
|
||||
extra_inputs = [
|
||||
io.Int.Input(
|
||||
"max_length", default=77, min=1, max=10000, tooltip="Maximum text length."
|
||||
@@ -923,7 +898,6 @@ class AddTextPrefixNode(TextProcessingNode):
|
||||
node_id = "AddTextPrefix"
|
||||
display_name = "Add Text Prefix"
|
||||
description = "Add a prefix to all texts."
|
||||
short_description = None
|
||||
extra_inputs = [
|
||||
io.String.Input("prefix", default="", tooltip="Prefix to add."),
|
||||
]
|
||||
@@ -937,7 +911,6 @@ class AddTextSuffixNode(TextProcessingNode):
|
||||
node_id = "AddTextSuffix"
|
||||
display_name = "Add Text Suffix"
|
||||
description = "Add a suffix to all texts."
|
||||
short_description = None
|
||||
extra_inputs = [
|
||||
io.String.Input("suffix", default="", tooltip="Suffix to add."),
|
||||
]
|
||||
@@ -951,7 +924,6 @@ class ReplaceTextNode(TextProcessingNode):
|
||||
node_id = "ReplaceText"
|
||||
display_name = "Replace Text"
|
||||
description = "Replace text in all texts."
|
||||
short_description = None
|
||||
extra_inputs = [
|
||||
io.String.Input("find", default="", tooltip="Text to find."),
|
||||
io.String.Input("replace", default="", tooltip="Text to replace with."),
|
||||
@@ -966,7 +938,6 @@ class StripWhitespaceNode(TextProcessingNode):
|
||||
node_id = "StripWhitespace"
|
||||
display_name = "Strip Whitespace"
|
||||
description = "Strip leading and trailing whitespace from all texts."
|
||||
short_description = None
|
||||
|
||||
@classmethod
|
||||
def _process(cls, text):
|
||||
@@ -982,7 +953,6 @@ class ImageDeduplicationNode(ImageProcessingNode):
|
||||
node_id = "ImageDeduplication"
|
||||
display_name = "Image Deduplication"
|
||||
description = "Remove duplicate or very similar images from the dataset."
|
||||
short_description = None
|
||||
is_group_process = True # Requires full list to compare images
|
||||
extra_inputs = [
|
||||
io.Float.Input(
|
||||
@@ -1053,7 +1023,6 @@ class ImageGridNode(ImageProcessingNode):
|
||||
node_id = "ImageGrid"
|
||||
display_name = "Image Grid"
|
||||
description = "Arrange multiple images into a grid layout."
|
||||
short_description = None
|
||||
is_group_process = True # Requires full list to create grid
|
||||
is_output_list = False # Outputs single grid image
|
||||
extra_inputs = [
|
||||
@@ -1128,7 +1097,6 @@ class MergeImageListsNode(ImageProcessingNode):
|
||||
node_id = "MergeImageLists"
|
||||
display_name = "Merge Image Lists"
|
||||
description = "Concatenate multiple image lists into one."
|
||||
short_description = None
|
||||
is_group_process = True # Receives images as list
|
||||
|
||||
@classmethod
|
||||
@@ -1146,7 +1114,6 @@ class MergeTextListsNode(TextProcessingNode):
|
||||
node_id = "MergeTextLists"
|
||||
display_name = "Merge Text Lists"
|
||||
description = "Concatenate multiple text lists into one."
|
||||
short_description = None
|
||||
is_group_process = True # Receives texts as list
|
||||
|
||||
@classmethod
|
||||
@@ -1170,8 +1137,6 @@ class ResolutionBucket(io.ComfyNode):
|
||||
node_id="ResolutionBucket",
|
||||
display_name="Resolution Bucket",
|
||||
category="dataset",
|
||||
description="Groups latents and conditioning by resolution into batched buckets for efficient training with mixed aspect ratios.",
|
||||
short_description="Groups latents by resolution into training buckets.",
|
||||
is_experimental=True,
|
||||
is_input_list=True,
|
||||
inputs=[
|
||||
@@ -1265,8 +1230,6 @@ class MakeTrainingDataset(io.ComfyNode):
|
||||
search_aliases=["encode dataset"],
|
||||
display_name="Make Training Dataset",
|
||||
category="dataset",
|
||||
description="Encodes images with a VAE and text captions with CLIP to create paired latent and conditioning training data.",
|
||||
short_description="Encodes images and text into training data.",
|
||||
is_experimental=True,
|
||||
is_input_list=True, # images and texts as lists
|
||||
inputs=[
|
||||
@@ -1353,8 +1316,6 @@ class SaveTrainingDataset(io.ComfyNode):
|
||||
search_aliases=["export training data"],
|
||||
display_name="Save Training Dataset",
|
||||
category="dataset",
|
||||
description="Saves encoded latent and conditioning training data to disk in sharded files with configurable shard size.",
|
||||
short_description="Saves encoded training data to sharded files.",
|
||||
is_experimental=True,
|
||||
is_output_node=True,
|
||||
is_input_list=True, # Receive lists
|
||||
@@ -1456,8 +1417,6 @@ class LoadTrainingDataset(io.ComfyNode):
|
||||
search_aliases=["import dataset", "training data"],
|
||||
display_name="Load Training Dataset",
|
||||
category="dataset",
|
||||
description="Loads a previously saved training dataset of latents and conditioning from sharded files on disk.",
|
||||
short_description="Loads saved training dataset from disk.",
|
||||
is_experimental=True,
|
||||
inputs=[
|
||||
io.String.Input(
|
||||
|
||||
@@ -14,8 +14,6 @@ class DifferentialDiffusion(io.ComfyNode):
search_aliases=["inpaint gradient", "variable denoise strength"],
display_name="Differential Diffusion",
category="_for_testing",
description="Enables per-pixel variable denoise strength using a mask, where mask intensity controls how much each region is denoised during sampling.",
short_description="Per-pixel variable denoise strength via mask.",
inputs=[
io.Model.Input("model"),
io.Float.Input(
@@ -363,7 +363,6 @@ class EasyCacheNode(io.ComfyNode):
node_id="EasyCache",
display_name="EasyCache",
description="Native EasyCache implementation.",
short_description=None,
category="advanced/debug/model",
is_experimental=True,
inputs=[
@@ -497,7 +496,6 @@ class LazyCacheNode(io.ComfyNode):
node_id="LazyCache",
display_name="LazyCache",
description="A homebrew version of EasyCache - even 'easier' version of EasyCache to implement. Overall works worse than EasyCache, but better in some rare cases AND universal compatibility with everything in ComfyUI.",
short_description="Simpler EasyCache alternative with universal ComfyUI compatibility.",
category="advanced/debug/model",
is_experimental=True,
inputs=[
@@ -10,7 +10,6 @@ class ReferenceLatent(io.ComfyNode):
node_id="ReferenceLatent",
category="advanced/conditioning/edit_models",
description="This node sets the guiding latent for an edit model. If the model supports it you can chain multiple to set multiple reference images.",
short_description="Sets guiding latent for edit models with chaining support.",
inputs=[
io.Conditioning.Input("conditioning"),
io.Latent.Input("latent", optional=True),
@@ -19,8 +19,6 @@ class EpsilonScaling(io.ComfyNode):
return io.Schema(
node_id="Epsilon Scaling",
category="model_patches/unet",
description="Applies epsilon scaling to mitigate exposure bias in diffusion models by scaling the predicted noise after CFG, improving sample quality.",
short_description="Scale predicted noise to reduce exposure bias.",
inputs=[
io.Model.Input("model"),
io.Float.Input(
@@ -123,7 +121,6 @@ class TemporalScoreRescaling(io.ComfyNode):
"TSR - Temporal Score Rescaling (2510.01184)\n\n"
"Rescaling the model's score or noise to steer the sampling diversity.\n"
),
short_description="Rescales temporal scores to control sampling diversity.",
)

@classmethod
@@ -13,8 +13,6 @@ class CLIPTextEncodeFlux(io.ComfyNode):
return io.Schema(
node_id="CLIPTextEncodeFlux",
category="advanced/conditioning/flux",
description="Encodes separate CLIP-L and T5-XXL text prompts with a guidance value into Flux conditioning.",
short_description="Encodes CLIP-L and T5-XXL prompts for Flux.",
inputs=[
io.Clip.Input("clip"),
io.String.Input("clip_l", multiline=True, dynamic_prompts=True),
@@ -42,8 +40,6 @@ class EmptyFlux2LatentImage(io.ComfyNode):
node_id="EmptyFlux2LatentImage",
display_name="Empty Flux 2 Latent",
category="latent",
description="Creates an empty Flux 2 latent image tensor with the specified width, height, and batch size.",
short_description="Creates an empty Flux 2 latent image tensor.",
inputs=[
io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
io.Int.Input("height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
@@ -65,8 +61,6 @@ class FluxGuidance(io.ComfyNode):
return io.Schema(
node_id="FluxGuidance",
category="advanced/conditioning/flux",
description="Sets the guidance strength value on Flux conditioning to control how closely generation follows the prompt.",
short_description="Sets guidance strength on Flux conditioning.",
inputs=[
io.Conditioning.Input("conditioning"),
io.Float.Input("guidance", default=3.5, min=0.0, max=100.0, step=0.1),
@@ -91,7 +85,6 @@ class FluxDisableGuidance(io.ComfyNode):
node_id="FluxDisableGuidance",
category="advanced/conditioning/flux",
description="This node completely disables the guidance embed on Flux and Flux like models",
short_description="Disables guidance embed on Flux and Flux-like models.",
inputs=[
io.Conditioning.Input("conditioning"),
],
@@ -136,7 +129,6 @@ class FluxKontextImageScale(io.ComfyNode):
node_id="FluxKontextImageScale",
category="advanced/conditioning/flux",
description="This node resizes the image to one that is more optimal for flux kontext.",
short_description="Resizes images to optimal dimensions for Flux Kontext.",
inputs=[
io.Image.Input("image"),
],
@@ -164,8 +156,6 @@ class FluxKontextMultiReferenceLatentMethod(io.ComfyNode):
node_id="FluxKontextMultiReferenceLatentMethod",
display_name="Edit Model Reference Method",
category="advanced/conditioning/flux",
description="Selects the method used for handling multiple reference latents in Flux Kontext edit models.",
short_description="Selects reference latent method for Flux Kontext.",
inputs=[
io.Conditioning.Input("conditioning"),
io.Combo.Input(
@@ -224,8 +214,6 @@ class Flux2Scheduler(io.ComfyNode):
return io.Schema(
node_id="Flux2Scheduler",
category="sampling/custom_sampling/schedulers",
description="Generates a sigma schedule for Flux 2 sampling based on step count and image resolution.",
short_description="Generates a sigma schedule for Flux 2 sampling.",
inputs=[
io.Int.Input("steps", default=20, min=1, max=4096),
io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=1),
@@ -30,8 +30,6 @@ class FreeU(IO.ComfyNode):
return IO.Schema(
node_id="FreeU",
category="model_patches/unet",
description="Applies FreeU v1 to a UNet model, boosting backbone features and filtering skip connections using Fourier transforms for improved quality.",
short_description="Applies FreeU v1 backbone boost and skip filtering.",
inputs=[
IO.Model.Input("model"),
IO.Float.Input("b1", default=1.1, min=0.0, max=10.0, step=0.01),
@@ -79,8 +77,6 @@ class FreeU_V2(IO.ComfyNode):
return IO.Schema(
node_id="FreeU_V2",
category="model_patches/unet",
description="Applies FreeU v2 to a UNet model with adaptive backbone scaling based on hidden state magnitude and Fourier skip filtering.",
short_description="Applies FreeU v2 with adaptive scaling.",
inputs=[
IO.Model.Input("model"),
IO.Float.Input("b1", default=1.3, min=0.0, max=10.0, step=0.01),

@@ -62,7 +62,6 @@ class FreSca(io.ComfyNode):
display_name="FreSca",
category="_for_testing",
description="Applies frequency-dependent scaling to the guidance",
short_description=None,
inputs=[
io.Model.Input("model"),
io.Float.Input("scale_low", default=1.0, min=0, max=10, step=0.01,

@@ -341,8 +341,6 @@ class GITSScheduler(io.ComfyNode):
return io.Schema(
node_id="GITSScheduler",
category="sampling/custom_sampling/schedulers",
description="Generates a noise schedule using the GITS method with precomputed optimal sigma levels and configurable coefficient.",
short_description="Generates a GITS noise schedule with optimal sigma levels.",
inputs=[
io.Float.Input("coeff", default=1.20, min=0.80, max=1.50, step=0.05),
io.Int.Input("steps", default=10, min=2, max=1000),

@@ -13,7 +13,6 @@ class QuadrupleCLIPLoader(io.ComfyNode):
node_id="QuadrupleCLIPLoader",
category="advanced/loaders",
description="[Recipes]\n\nhidream: long clip-l, long clip-g, t5xxl, llama_8b_3.1_instruct",
short_description=None,
inputs=[
io.Combo.Input("clip_name1", options=folder_paths.get_filename_list("text_encoders")),
io.Combo.Input("clip_name2", options=folder_paths.get_filename_list("text_encoders")),
@@ -41,8 +40,6 @@ class CLIPTextEncodeHiDream(io.ComfyNode):
node_id="CLIPTextEncodeHiDream",
search_aliases=["hidream prompt"],
category="advanced/conditioning",
description="Encodes separate CLIP-L, CLIP-G, T5-XXL, and Llama text prompts into HiDream conditioning.",
short_description="Encodes multi-encoder text prompts for HiDream.",
inputs=[
io.Clip.Input("clip"),
io.String.Input("clip_l", multiline=True, dynamic_prompts=True),

@@ -38,8 +38,6 @@ class PairConditioningSetProperties:
RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
RETURN_NAMES = ("positive", "negative")
CATEGORY = "advanced/hooks/cond pair"
DESCRIPTION = "Set properties like strength, mask, hooks, and timesteps on a positive/negative conditioning pair."
SHORT_DESCRIPTION = "Set properties on a positive/negative conditioning pair."
FUNCTION = "set_properties"

def set_properties(self, positive_NEW, negative_NEW,
@@ -75,8 +73,6 @@ class PairConditioningSetPropertiesAndCombine:
RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
RETURN_NAMES = ("positive", "negative")
CATEGORY = "advanced/hooks/cond pair"
DESCRIPTION = "Set properties on new conditioning pair and combine with existing positive/negative conditioning."
SHORT_DESCRIPTION = "Set properties on new cond pair, combine with existing."
FUNCTION = "set_properties"

def set_properties(self, positive, negative, positive_NEW, negative_NEW,
@@ -108,8 +104,6 @@ class ConditioningSetProperties:
EXPERIMENTAL = True
RETURN_TYPES = ("CONDITIONING",)
CATEGORY = "advanced/hooks/cond single"
DESCRIPTION = "Set properties like strength, mask, hooks, and timesteps on a single conditioning input."
SHORT_DESCRIPTION = "Set properties on a single conditioning input."
FUNCTION = "set_properties"

def set_properties(self, cond_NEW,
@@ -142,8 +136,6 @@ class ConditioningSetPropertiesAndCombine:
EXPERIMENTAL = True
RETURN_TYPES = ("CONDITIONING",)
CATEGORY = "advanced/hooks/cond single"
DESCRIPTION = "Set properties on new conditioning and combine it with an existing conditioning input."
SHORT_DESCRIPTION = "Set properties on new conditioning, combine with existing."
FUNCTION = "set_properties"

def set_properties(self, cond, cond_NEW,
@@ -172,8 +164,6 @@ class PairConditioningCombine:
RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
RETURN_NAMES = ("positive", "negative")
CATEGORY = "advanced/hooks/cond pair"
DESCRIPTION = "Combine two positive/negative conditioning pairs into a single pair."
SHORT_DESCRIPTION = None
FUNCTION = "combine"

def combine(self, positive_A, negative_A, positive_B, negative_B):
@@ -201,8 +191,6 @@ class PairConditioningSetDefaultAndCombine:
RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
RETURN_NAMES = ("positive", "negative")
CATEGORY = "advanced/hooks/cond pair"
DESCRIPTION = "Set default conditioning pair and combine it with existing positive/negative conditioning and optional hooks."
SHORT_DESCRIPTION = "Set default cond pair and combine with existing."
FUNCTION = "set_default_and_combine"

def set_default_and_combine(self, positive, negative, positive_DEFAULT, negative_DEFAULT,
@@ -229,8 +217,6 @@ class ConditioningSetDefaultAndCombine:
EXPERIMENTAL = True
RETURN_TYPES = ("CONDITIONING",)
CATEGORY = "advanced/hooks/cond single"
DESCRIPTION = "Set default conditioning and combine it with existing conditioning input and optional hooks."
SHORT_DESCRIPTION = "Set default conditioning and combine with existing."
FUNCTION = "set_default_and_combine"

def set_default_and_combine(self, cond, cond_DEFAULT,
@@ -258,8 +244,6 @@ class SetClipHooks:
EXPERIMENTAL = True
RETURN_TYPES = ("CLIP",)
CATEGORY = "advanced/hooks/clip"
DESCRIPTION = "Apply hooks to a CLIP model, optionally propagating them to conditioning outputs and enabling CLIP scheduling."
SHORT_DESCRIPTION = "Apply hooks to a CLIP model with scheduling options."
FUNCTION = "apply_hooks"

def apply_hooks(self, clip: CLIP, schedule_clip: bool, apply_to_conds: bool, hooks: comfy.hooks.HookGroup=None):
@@ -291,8 +275,6 @@ class ConditioningTimestepsRange:
RETURN_TYPES = ("TIMESTEPS_RANGE", "TIMESTEPS_RANGE", "TIMESTEPS_RANGE")
RETURN_NAMES = ("TIMESTEPS_RANGE", "BEFORE_RANGE", "AFTER_RANGE")
CATEGORY = "advanced/hooks"
DESCRIPTION = "Define a timestep percentage range and output the range plus its complement before and after segments."
SHORT_DESCRIPTION = "Define a timestep range with before/after complements."
FUNCTION = "create_range"

def create_range(self, start_percent: float, end_percent: float):
@@ -326,8 +308,6 @@ class CreateHookLora:
EXPERIMENTAL = True
RETURN_TYPES = ("HOOKS",)
CATEGORY = "advanced/hooks/create"
DESCRIPTION = "Create a LoRA hook with separate model and CLIP strength that can be scheduled on conditioning."
SHORT_DESCRIPTION = "Create a LoRA hook with model and CLIP strength."
FUNCTION = "create_hook"

def create_hook(self, lora_name: str, strength_model: float, strength_clip: float, prev_hooks: comfy.hooks.HookGroup=None):
@@ -373,8 +353,6 @@ class CreateHookLoraModelOnly(CreateHookLora):
EXPERIMENTAL = True
RETURN_TYPES = ("HOOKS",)
CATEGORY = "advanced/hooks/create"
DESCRIPTION = "Create a LoRA hook that only affects the model (not CLIP) for scheduling on conditioning."
SHORT_DESCRIPTION = "Create a model-only LoRA hook."
FUNCTION = "create_hook_model_only"

def create_hook_model_only(self, lora_name: str, strength_model: float, prev_hooks: comfy.hooks.HookGroup=None):
@@ -405,8 +383,6 @@ class CreateHookModelAsLora:
EXPERIMENTAL = True
RETURN_TYPES = ("HOOKS",)
CATEGORY = "advanced/hooks/create"
DESCRIPTION = "Create a hook from a full checkpoint treated as a LoRA, with separate model and CLIP strength controls."
SHORT_DESCRIPTION = "Create a hook from a checkpoint treated as LoRA."
FUNCTION = "create_hook"

def create_hook(self, ckpt_name: str, strength_model: float, strength_clip: float,
@@ -455,8 +431,6 @@ class CreateHookModelAsLoraModelOnly(CreateHookModelAsLora):
EXPERIMENTAL = True
RETURN_TYPES = ("HOOKS",)
CATEGORY = "advanced/hooks/create"
DESCRIPTION = "Create a model-only hook from a full checkpoint treated as a LoRA, without affecting CLIP."
SHORT_DESCRIPTION = "Create a model-only hook from a checkpoint as LoRA."
FUNCTION = "create_hook_model_only"

def create_hook_model_only(self, ckpt_name: str, strength_model: float,
@@ -486,8 +460,6 @@ class SetHookKeyframes:
EXPERIMENTAL = True
RETURN_TYPES = ("HOOKS",)
CATEGORY = "advanced/hooks/scheduling"
DESCRIPTION = "Assign keyframe schedules to hooks for controlling their strength over time during sampling."
SHORT_DESCRIPTION = "Assign keyframe schedules to hooks over time."
FUNCTION = "set_hook_keyframes"

def set_hook_keyframes(self, hooks: comfy.hooks.HookGroup, hook_kf: comfy.hooks.HookKeyframeGroup=None):
@@ -516,8 +488,6 @@ class CreateHookKeyframe:
RETURN_TYPES = ("HOOK_KEYFRAMES",)
RETURN_NAMES = ("HOOK_KF",)
CATEGORY = "advanced/hooks/scheduling"
DESCRIPTION = "Create a single hook keyframe with a strength multiplier at a specific timestep percentage."
SHORT_DESCRIPTION = "Create a hook keyframe at a specific timestep."
FUNCTION = "create_hook_keyframe"

def create_hook_keyframe(self, strength_mult: float, start_percent: float, prev_hook_kf: comfy.hooks.HookKeyframeGroup=None):
@@ -553,8 +523,6 @@ class CreateHookKeyframesInterpolated:
RETURN_TYPES = ("HOOK_KEYFRAMES",)
RETURN_NAMES = ("HOOK_KF",)
CATEGORY = "advanced/hooks/scheduling"
DESCRIPTION = "Generate multiple interpolated hook keyframes between start and end strength values over a timestep range."
SHORT_DESCRIPTION = "Generate interpolated hook keyframes over a timestep range."
FUNCTION = "create_hook_keyframes"

def create_hook_keyframes(self, strength_start: float, strength_end: float, interpolation: str,
@@ -600,8 +568,6 @@ class CreateHookKeyframesFromFloats:
RETURN_TYPES = ("HOOK_KEYFRAMES",)
RETURN_NAMES = ("HOOK_KF",)
CATEGORY = "advanced/hooks/scheduling"
DESCRIPTION = "Create hook keyframes from a list of float values distributed evenly across a timestep percentage range."
SHORT_DESCRIPTION = "Create hook keyframes from a list of float values."
FUNCTION = "create_hook_keyframes"

def create_hook_keyframes(self, floats_strength: Union[float, list[float]],
@@ -673,8 +639,6 @@ class CombineHooks:
EXPERIMENTAL = True
RETURN_TYPES = ("HOOKS",)
CATEGORY = "advanced/hooks/combine"
DESCRIPTION = "Combine two hook groups into one."
SHORT_DESCRIPTION = None
FUNCTION = "combine_hooks"

def combine_hooks(self,
@@ -702,8 +666,6 @@ class CombineHooksFour:
EXPERIMENTAL = True
RETURN_TYPES = ("HOOKS",)
CATEGORY = "advanced/hooks/combine"
DESCRIPTION = "Combine up to four hook groups into one."
SHORT_DESCRIPTION = None
FUNCTION = "combine_hooks"

def combine_hooks(self,
@@ -737,8 +699,6 @@ class CombineHooksEight:
EXPERIMENTAL = True
RETURN_TYPES = ("HOOKS",)
CATEGORY = "advanced/hooks/combine"
DESCRIPTION = "Combine up to eight hook groups into one."
SHORT_DESCRIPTION = None
FUNCTION = "combine_hooks"

def combine_hooks(self,

@@ -15,8 +15,6 @@ class CLIPTextEncodeHunyuanDiT(io.ComfyNode):
return io.Schema(
node_id="CLIPTextEncodeHunyuanDiT",
category="advanced/conditioning",
description="Encodes text using both BERT and mT5-XL tokenizers for Hunyuan DiT conditioning.",
short_description="Dual-tokenizer text encoding for Hunyuan DiT.",
inputs=[
io.Clip.Input("clip"),
io.String.Input("bert", multiline=True, dynamic_prompts=True),
@@ -44,8 +42,6 @@ class EmptyHunyuanLatentVideo(io.ComfyNode):
node_id="EmptyHunyuanLatentVideo",
display_name="Empty HunyuanVideo 1.0 Latent",
category="latent/video",
description="Creates an empty latent tensor sized for HunyuanVideo 1.0 video generation.",
short_description="Empty latent for HunyuanVideo 1.0 generation.",
inputs=[
io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16),
io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
@@ -71,8 +67,6 @@ class EmptyHunyuanVideo15Latent(EmptyHunyuanLatentVideo):
schema = super().define_schema()
schema.node_id = "EmptyHunyuanVideo15Latent"
schema.display_name = "Empty HunyuanVideo 1.5 Latent"
schema.description = "Creates an empty latent tensor sized for HunyuanVideo 1.5 video generation with 16x spatial downscale."
schema.short_description = "Empty latent for HunyuanVideo 1.5 generation."
return schema

@classmethod
@@ -88,8 +82,6 @@ class HunyuanVideo15ImageToVideo(io.ComfyNode):
return io.Schema(
node_id="HunyuanVideo15ImageToVideo",
category="conditioning/video_models",
description="Prepares conditioning and latent for HunyuanVideo 1.5 image-to-video generation with start image and CLIP vision support.",
short_description="HunyuanVideo 1.5 image-to-video conditioning setup.",
inputs=[
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),
@@ -139,9 +131,6 @@ class HunyuanVideo15SuperResolution(io.ComfyNode):
def define_schema(cls):
return io.Schema(
node_id="HunyuanVideo15SuperResolution",
category="conditioning/video_models",
description="Sets up conditioning for HunyuanVideo 1.5 super-resolution upscaling of a latent with noise augmentation and optional image guidance.",
short_description="HunyuanVideo 1.5 super-resolution latent conditioning.",
inputs=[
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),
@@ -188,8 +177,6 @@ class LatentUpscaleModelLoader(io.ComfyNode):
node_id="LatentUpscaleModelLoader",
display_name="Load Latent Upscale Model",
category="loaders",
description="Loads a latent upscale model from disk, supporting HunyuanVideo 720p, 1080p, and other latent upsampler architectures.",
short_description="Load a latent upscale model from file.",
inputs=[
io.Combo.Input("model_name", options=folder_paths.get_filename_list("latent_upscale_models")),
],
@@ -239,8 +226,6 @@ class HunyuanVideo15LatentUpscaleWithModel(io.ComfyNode):
node_id="HunyuanVideo15LatentUpscaleWithModel",
display_name="Hunyuan Video 15 Latent Upscale With Model",
category="latent",
description="Upscales a video latent to a target resolution using a loaded latent upscale model and configurable upscale method.",
short_description="Upscale video latent using a latent upscale model.",
inputs=[
io.LatentUpscaleModel.Input("model"),
io.Latent.Input("samples"),
@@ -290,8 +275,6 @@ class TextEncodeHunyuanVideo_ImageToVideo(io.ComfyNode):
return io.Schema(
node_id="TextEncodeHunyuanVideo_ImageToVideo",
category="advanced/conditioning",
description="Encodes text with CLIP vision image embeddings for HunyuanVideo image-to-video conditioning using an interleaved template.",
short_description="Text and image encoding for HunyuanVideo image-to-video.",
inputs=[
io.Clip.Input("clip"),
io.ClipVisionOutput.Input("clip_vision_output"),
@@ -323,8 +306,6 @@ class HunyuanImageToVideo(io.ComfyNode):
return io.Schema(
node_id="HunyuanImageToVideo",
category="conditioning/video_models",
description="Prepares conditioning and latent for Hunyuan image-to-video generation with selectable guidance type.",
short_description="Hunyuan image-to-video conditioning with guidance options.",
inputs=[
io.Conditioning.Input("positive"),
io.Vae.Input("vae"),
@@ -376,8 +357,6 @@ class EmptyHunyuanImageLatent(io.ComfyNode):
return io.Schema(
node_id="EmptyHunyuanImageLatent",
category="latent",
description="Creates an empty latent tensor sized for Hunyuan image generation.",
short_description="Empty latent for Hunyuan image generation.",
inputs=[
io.Int.Input("width", default=2048, min=64, max=nodes.MAX_RESOLUTION, step=32),
io.Int.Input("height", default=2048, min=64, max=nodes.MAX_RESOLUTION, step=32),
@@ -401,9 +380,6 @@ class HunyuanRefinerLatent(io.ComfyNode):
def define_schema(cls):
return io.Schema(
node_id="HunyuanRefinerLatent",
category="conditioning/video_models",
description="Prepares conditioning for a Hunyuan refiner pass by concatenating the input latent with noise augmentation settings.",
short_description="Hunyuan refiner conditioning with noise augmentation.",
inputs=[
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),

@@ -18,8 +18,6 @@ class EmptyLatentHunyuan3Dv2(IO.ComfyNode):
return IO.Schema(
node_id="EmptyLatentHunyuan3Dv2",
category="latent/3d",
description="Creates an empty latent tensor for Hunyuan 3D v2 generation with configurable resolution and batch size.",
short_description="Empty latent for Hunyuan 3D v2 generation.",
inputs=[
IO.Int.Input("resolution", default=3072, min=1, max=8192),
IO.Int.Input("batch_size", default=1, min=1, max=4096, tooltip="The number of latent images in the batch."),
@@ -43,8 +41,6 @@ class Hunyuan3Dv2Conditioning(IO.ComfyNode):
return IO.Schema(
node_id="Hunyuan3Dv2Conditioning",
category="conditioning/video_models",
description="Creates positive and negative conditioning for Hunyuan 3D v2 from a CLIP vision output embedding.",
short_description="Conditioning from CLIP vision for Hunyuan 3D v2.",
inputs=[
IO.ClipVisionOutput.Input("clip_vision_output"),
],
@@ -70,8 +66,6 @@ class Hunyuan3Dv2ConditioningMultiView(IO.ComfyNode):
return IO.Schema(
node_id="Hunyuan3Dv2ConditioningMultiView",
category="conditioning/video_models",
description="Creates multi-view conditioning for Hunyuan 3D v2 from up to four directional CLIP vision outputs with positional encoding.",
short_description="Multi-view conditioning for Hunyuan 3D v2.",
inputs=[
IO.ClipVisionOutput.Input("front", optional=True),
IO.ClipVisionOutput.Input("left", optional=True),
@@ -109,8 +103,6 @@ class VAEDecodeHunyuan3D(IO.ComfyNode):
return IO.Schema(
node_id="VAEDecodeHunyuan3D",
category="latent/3d",
description="Decodes a Hunyuan 3D latent into a voxel grid using a VAE with configurable chunk size and octree resolution.",
short_description="Decodes Hunyuan 3D latent into voxels.",
inputs=[
IO.Latent.Input("samples"),
IO.Vae.Input("vae"),
@@ -433,8 +425,6 @@ class VoxelToMeshBasic(IO.ComfyNode):
return IO.Schema(
node_id="VoxelToMeshBasic",
category="3d",
description="Converts a voxel grid to a 3D mesh using basic cube-based surface extraction with adjustable threshold.",
short_description="Converts voxels to mesh using basic extraction.",
inputs=[
IO.Voxel.Input("voxel"),
IO.Float.Input("threshold", default=0.6, min=-1.0, max=1.0, step=0.01),
@@ -464,8 +454,6 @@ class VoxelToMesh(IO.ComfyNode):
return IO.Schema(
node_id="VoxelToMesh",
category="3d",
description="Converts a voxel grid to a 3D mesh using selectable surface net or basic algorithm with adjustable threshold.",
short_description="Converts voxels to mesh with algorithm selection.",
inputs=[
IO.Voxel.Input("voxel"),
IO.Combo.Input("algorithm", options=["surface net", "basic"]),
@@ -633,8 +621,6 @@ class SaveGLB(IO.ComfyNode):
display_name="Save 3D Model",
search_aliases=["export 3d model", "save mesh"],
category="3d",
description="Saves a 3D mesh or model file to disk in GLB format with optional workflow metadata embedding.",
short_description="Saves 3D mesh or model to GLB file.",
is_output_node=True,
inputs=[
IO.MultiType.Input(

@@ -103,8 +103,6 @@ class HypernetworkLoader(IO.ComfyNode):
return IO.Schema(
node_id="HypernetworkLoader",
category="loaders",
description="Loads a hypernetwork and patches it onto a diffusion model's attention layers with adjustable strength.",
short_description="Loads and applies a hypernetwork to a model.",
inputs=[
IO.Model.Input("model"),
IO.Combo.Input("hypernetwork_name", options=folder_paths.get_filename_list("hypernetworks")),

@@ -28,8 +28,6 @@ class HyperTile(io.ComfyNode):
return io.Schema(
node_id="HyperTile",
category="model_patches/unet",
description="Patches the model to split self-attention into smaller tiles during inference, reducing memory usage and speeding up generation at higher resolutions.",
short_description="Tile self-attention for faster high-res generation.",
inputs=[
io.Model.Input("model"),
io.Int.Input("tile_size", default=256, min=1, max=2048),

@@ -13,7 +13,6 @@ class ImageCompare(IO.ComfyNode):
node_id="ImageCompare",
display_name="Image Compare",
description="Compares two images side by side with a slider.",
short_description=None,
category="image",
is_experimental=True,
is_output_node=True,

@@ -25,8 +25,6 @@ class ImageCrop(IO.ComfyNode):
search_aliases=["trim"],
display_name="Image Crop",
category="image/transform",
description="Crops a rectangular region from an image at the specified position and dimensions.",
short_description="Crops a region from an image.",
inputs=[
IO.Image.Input("image"),
IO.Int.Input("width", default=512, min=1, max=nodes.MAX_RESOLUTION, step=1),
@@ -56,8 +54,6 @@ class RepeatImageBatch(IO.ComfyNode):
node_id="RepeatImageBatch",
search_aliases=["duplicate image", "clone image"],
category="image/batch",
description="Repeats an image a specified number of times to create a batch of identical images.",
short_description="Repeats an image to create a batch.",
inputs=[
IO.Image.Input("image"),
IO.Int.Input("amount", default=1, min=1, max=4096),
@@ -80,8 +76,6 @@ class ImageFromBatch(IO.ComfyNode):
node_id="ImageFromBatch",
search_aliases=["select image", "pick from batch", "extract image"],
category="image/batch",
description="Selects a contiguous range of images from a batch starting at a given index.",
short_description="Selects images from a batch by index.",
inputs=[
IO.Image.Input("image"),
IO.Int.Input("batch_index", default=0, min=0, max=4095),
@@ -108,8 +102,6 @@ class ImageAddNoise(IO.ComfyNode):
node_id="ImageAddNoise",
search_aliases=["film grain"],
category="image",
description="Adds random noise to an image with adjustable strength, useful for film grain effects.",
short_description="Adds random noise to an image.",
inputs=[
IO.Image.Input("image"),
IO.Int.Input(
@@ -142,8 +134,6 @@ class SaveAnimatedWEBP(IO.ComfyNode):
return IO.Schema(
node_id="SaveAnimatedWEBP",
category="image/animation",
description="Saves a sequence of images as an animated WEBP file with configurable FPS, quality, and compression.",
short_description="Saves images as an animated WEBP file.",
inputs=[
IO.Image.Input("images"),
IO.String.Input("filename_prefix", default="ComfyUI"),
@@ -181,8 +171,6 @@ class SaveAnimatedPNG(IO.ComfyNode):
return IO.Schema(
node_id="SaveAnimatedPNG",
category="image/animation",
description="Saves a sequence of images as an animated PNG (APNG) file with configurable FPS and compression level.",
short_description="Saves images as an animated PNG file.",
inputs=[
IO.Image.Input("images"),
IO.String.Input("filename_prefix", default="ComfyUI"),
@@ -219,7 +207,6 @@ class ImageStitch(IO.ComfyNode):
description="Stitches image2 to image1 in the specified direction.\n"
"If image2 is not provided, returns image1 unchanged.\n"
"Optional spacing can be added between images.",
short_description="Joins two images together in a specified direction.",
category="image/transform",
inputs=[
IO.Image.Input("image1"),
@@ -392,8 +379,6 @@ class ResizeAndPadImage(IO.ComfyNode):
node_id="ResizeAndPadImage",
search_aliases=["fit to size"],
category="image/transform",
description="Resizes an image to fit within target dimensions while preserving aspect ratio, then pads with a solid color to fill the target size.",
short_description="Resizes an image to fit and pads the remainder.",
inputs=[
IO.Image.Input("image"),
IO.Int.Input("target_width", default=512, min=1, max=nodes.MAX_RESOLUTION, step=1),
@@ -445,7 +430,6 @@ class SaveSVGNode(IO.ComfyNode):
node_id="SaveSVGNode",
search_aliases=["export vector", "save vector graphics"],
description="Save SVG files on disk.",
short_description=None,
category="image/save",
inputs=[
IO.SVG.Input("svg"),
@@ -518,7 +502,7 @@ class GetImageSize(IO.ComfyNode):
node_id="GetImageSize",
search_aliases=["dimensions", "resolution", "image info"],
display_name="Get Image Size",
description="Returns the width, height, and batch size of an image.",
description="Returns width and height of the image, and passes it through unchanged.",
category="image",
inputs=[
IO.Image.Input("image"),
@@ -553,8 +537,6 @@ class ImageRotate(IO.ComfyNode):
node_id="ImageRotate",
search_aliases=["turn", "flip orientation"],
category="image/transform",
description="Rotates an image by 90, 180, or 270 degrees.",
short_description=None,
inputs=[
IO.Image.Input("image"),
IO.Combo.Input("rotation", options=["none", "90 degrees", "180 degrees", "270 degrees"]),
@@ -585,8 +567,6 @@ class ImageFlip(IO.ComfyNode):
node_id="ImageFlip",
search_aliases=["mirror", "reflect"],
category="image/transform",
description="Flips an image horizontally or vertically.",
short_description=None,
inputs=[
IO.Image.Input("image"),
IO.Combo.Input("flip_method", options=["x-axis: vertically", "y-axis: horizontally"]),
@@ -613,8 +593,6 @@ class ImageScaleToMaxDimension(IO.ComfyNode):
return IO.Schema(
node_id="ImageScaleToMaxDimension",
category="image/upscaling",
description="Scales an image so its largest dimension matches the specified size while preserving aspect ratio.",
short_description="Scales image to a target max dimension size.",
inputs=[
IO.Image.Input("image"),
IO.Combo.Input(

@@ -10,8 +10,6 @@ class InstructPixToPixConditioning(io.ComfyNode):
return io.Schema(
node_id="InstructPixToPixConditioning",
category="conditioning/instructpix2pix",
description="Prepares conditioning for InstructPix2Pix image editing by encoding the input image through a VAE and attaching it as concat latent to both positive and negative conditioning.",
short_description="Prepare conditioning for InstructPix2Pix editing.",
inputs=[
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),

@@ -14,8 +14,6 @@ class Kandinsky5ImageToVideo(io.ComfyNode):
return io.Schema(
node_id="Kandinsky5ImageToVideo",
category="conditioning/video_models",
description="Sets up Kandinsky 5 image-to-video generation by creating an empty video latent and optionally encoding a start image for conditioning.",
short_description="Sets up Kandinsky 5 image-to-video conditioning.",
inputs=[
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),
@@ -75,7 +73,6 @@ class NormalizeVideoLatentStart(io.ComfyNode):
node_id="NormalizeVideoLatentStart",
category="conditioning/video_models",
description="Normalizes the initial frames of a video latent to match the mean and standard deviation of subsequent reference frames. Helps reduce differences between the starting frames and the rest of the video.",
short_description="Normalizes initial video latent frames to match reference frames.",
inputs=[
io.Latent.Input("latent"),
io.Int.Input("start_frame_count", default=4, min=1, max=nodes.MAX_RESOLUTION, step=1, tooltip="Number of latent frames to normalize, counted from the start"),
@@ -109,8 +106,6 @@ class CLIPTextEncodeKandinsky5(io.ComfyNode):
node_id="CLIPTextEncodeKandinsky5",
search_aliases=["kandinsky prompt"],
category="advanced/conditioning/kandinsky5",
description="Encodes separate CLIP-L and Qwen 2.5 7B text prompts into Kandinsky 5 conditioning.",
short_description="Encodes CLIP-L and Qwen prompts for Kandinsky 5.",
inputs=[
io.Clip.Input("clip"),
io.String.Input("clip_l", multiline=True, dynamic_prompts=True),

@@ -23,8 +23,6 @@ class LatentAdd(io.ComfyNode):
node_id="LatentAdd",
search_aliases=["combine latents", "sum latents"],
category="latent/advanced",
description="Adds two latent tensors element-wise, automatically resizing the second to match the first.",
short_description="Add two latent tensors element-wise.",
inputs=[
io.Latent.Input("samples1"),
io.Latent.Input("samples2"),
@@ -52,8 +50,6 @@ class LatentSubtract(io.ComfyNode):
node_id="LatentSubtract",
search_aliases=["difference latent", "remove features"],
category="latent/advanced",
description="Subtracts one latent tensor from another element-wise, automatically resizing the second to match the first.",
short_description="Subtract one latent tensor from another.",
inputs=[
io.Latent.Input("samples1"),
io.Latent.Input("samples2"),
@@ -81,8 +77,6 @@ class LatentMultiply(io.ComfyNode):
node_id="LatentMultiply",
search_aliases=["scale latent", "amplify latent", "latent gain"],
category="latent/advanced",
description="Multiplies a latent tensor by a scalar value to scale its magnitude up or down.",
short_description="Scale a latent tensor by a multiplier.",
inputs=[
io.Latent.Input("samples"),
io.Float.Input("multiplier", default=1.0, min=-10.0, max=10.0, step=0.01),
@@ -107,8 +101,6 @@ class LatentInterpolate(io.ComfyNode):
node_id="LatentInterpolate",
search_aliases=["blend latent", "mix latent", "lerp latent", "transition"],
category="latent/advanced",
description="Interpolates between two latent tensors using a ratio, preserving magnitude for smoother blending than linear interpolation.",
short_description="Interpolate between two latent tensors.",
inputs=[
io.Latent.Input("samples1"),
io.Latent.Input("samples2"),
@@ -148,8 +140,6 @@ class LatentConcat(io.ComfyNode):
node_id="LatentConcat",
search_aliases=["join latents", "stitch latents"],
category="latent/advanced",
description="Concatenates two latent tensors along a chosen spatial or temporal dimension (x, y, or t) with optional reversal.",
short_description="Concatenate two latents along a chosen dimension.",
inputs=[
io.Latent.Input("samples1"),
io.Latent.Input("samples2"),
@@ -190,8 +180,6 @@ class LatentCut(io.ComfyNode):
node_id="LatentCut",
search_aliases=["crop latent", "slice latent", "extract region"],
category="latent/advanced",
description="Extracts a contiguous slice from a latent tensor along a chosen spatial or temporal dimension at a specified index and size.",
short_description="Extract a slice from a latent along a dimension.",
inputs=[
io.Latent.Input("samples"),
io.Combo.Input("dim", options=["x", "y", "t"]),
@@ -233,8 +221,6 @@ class LatentCutToBatch(io.ComfyNode):
node_id="LatentCutToBatch",
search_aliases=["slice to batch", "split latent", "tile latent"],
category="latent/advanced",
description="Slices a latent tensor along a chosen dimension into equal-sized chunks and reshapes them into the batch dimension.",
short_description="Slice latent along a dimension into batch chunks.",
inputs=[
io.Latent.Input("samples"),
io.Combo.Input("dim", options=["t", "x", "y"]),
@@ -277,8 +263,6 @@ class LatentBatch(io.ComfyNode):
node_id="LatentBatch",
search_aliases=["combine latents", "merge latents", "join latents"],
category="latent/batch",
description="Concatenates two latent tensors along the batch dimension, preserving batch index metadata.",
short_description="Concatenate two latents along the batch dimension.",
is_deprecated=True,
inputs=[
io.Latent.Input("samples1"),
@@ -307,8 +291,6 @@ class LatentBatchSeedBehavior(io.ComfyNode):
return io.Schema(
node_id="LatentBatchSeedBehavior",
category="latent/advanced",
description="Controls whether each item in a latent batch receives a random or fixed noise seed during sampling.",
short_description="Set random or fixed seed behavior for batches.",
inputs=[
io.Latent.Input("samples"),
io.Combo.Input("seed_behavior", options=["random", "fixed"], default="fixed"),
@@ -338,8 +320,6 @@ class LatentApplyOperation(io.ComfyNode):
node_id="LatentApplyOperation",
search_aliases=["transform latent"],
category="latent/advanced/operations",
description="Applies a latent operation (such as tonemap or sharpen) directly to a latent tensor.",
short_description="Apply a latent operation to a latent tensor.",
is_experimental=True,
inputs=[
io.Latent.Input("samples"),
@@ -364,8 +344,6 @@ class LatentApplyOperationCFG(io.ComfyNode):
return io.Schema(
node_id="LatentApplyOperationCFG",
category="latent/advanced/operations",
description="Applies a latent operation during the CFG pre-processing stage of sampling, modifying the model's prediction before guidance is applied.",
short_description="Apply a latent operation during CFG pre-processing.",
is_experimental=True,
inputs=[
io.Model.Input("model"),
@@ -398,8 +376,6 @@ class LatentOperationTonemapReinhard(io.ComfyNode):
node_id="LatentOperationTonemapReinhard",
search_aliases=["hdr latent"],
category="latent/advanced/operations",
description="Creates a Reinhard tonemapping operation that compresses high-magnitude latent values to reduce blown-out artifacts.",
short_description="Create a Reinhard tonemapping latent operation.",
is_experimental=True,
inputs=[
io.Float.Input("multiplier", default=1.0, min=0.0, max=100.0, step=0.01),
@@ -435,8 +411,6 @@ class LatentOperationSharpen(io.ComfyNode):
return io.Schema(
node_id="LatentOperationSharpen",
category="latent/advanced/operations",
description="Creates a sharpening operation that enhances detail in latent space using a Gaussian-based unsharp mask with configurable radius, sigma, and strength.",
short_description="Create a Gaussian-based latent sharpening operation.",
is_experimental=True,
inputs=[
io.Int.Input("sharpen_radius", default=9, min=1, max=31, step=1),
@@ -474,8 +448,6 @@ class ReplaceVideoLatentFrames(io.ComfyNode):
return io.Schema(
node_id="ReplaceVideoLatentFrames",
category="latent/batch",
description="Replaces a range of frames in a destination video latent with frames from a source latent at a specified index.",
short_description="Replace video latent frames at a given index.",
inputs=[
io.Latent.Input("destination", tooltip="The destination latent where frames will be replaced."),
io.Latent.Input("source", optional=True, tooltip="The source latent providing frames to insert into the destination latent. If not provided, the destination latent is returned unchanged."),

@@ -31,8 +31,6 @@ class Load3D(IO.ComfyNode):
node_id="Load3D",
display_name="Load 3D & Animation",
category="3d",
description="Loads a 3D model file and renders it to produce an image, mask, normal map, camera info, recording video, and 3D file output.",
short_description="Loads and renders a 3D model file.",
is_experimental=True,
inputs=[
IO.Combo.Input("model_file", options=sorted(files), upload=IO.UploadType.model),
@@ -83,8 +81,6 @@ class Preview3D(IO.ComfyNode):
search_aliases=["view mesh", "3d viewer"],
display_name="Preview 3D & Animation",
category="3d",
description="Previews a 3D model or file in the UI with optional camera info and background image overlay.",
short_description="Previews a 3D model in the UI.",
is_experimental=True,
is_output_node=True,
inputs=[

@@ -16,8 +16,6 @@ class SwitchNode(io.ComfyNode):
node_id="ComfySwitchNode",
display_name="Switch",
category="logic",
description="Routes one of two inputs to the output based on a boolean switch value, evaluating only the selected branch lazily.",
short_description="Route one of two inputs based on a boolean.",
is_experimental=True,
inputs=[
io.Boolean.Input("switch"),
@@ -49,8 +47,6 @@ class SoftSwitchNode(io.ComfyNode):
node_id="ComfySoftSwitchNode",
display_name="Soft Switch",
category="logic",
description="Routes one of two optional inputs to the output based on a boolean, falling back to whichever input is connected if only one is provided.",
short_description="Switch with optional fallback to connected input.",
is_experimental=True,
inputs=[
io.Boolean.Input("switch"),
@@ -106,8 +102,6 @@ class CustomComboNode(io.ComfyNode):
node_id="CustomCombo",
display_name="Custom Combo",
category="utils",
description="Provides a user-defined dropdown combo box where options are written by the user, outputting the selected string and its index.",
short_description="User-defined dropdown outputting string and index.",
is_experimental=True,
inputs=[io.Combo.Input("choice", options=[])],
outputs=[
@@ -143,8 +137,6 @@ class DCTestNode(io.ComfyNode):
node_id="DCTestNode",
display_name="DCTest",
category="logic",
description="Test node demonstrating DynamicCombo inputs with nested sub-options that conditionally show different input types.",
short_description="Test node for DynamicCombo nested inputs.",
is_output_node=True,
inputs=[io.DynamicCombo.Input("combo", options=[
io.DynamicCombo.Option("option1", [io.String.Input("string")]),
@@ -183,8 +175,6 @@ class AutogrowNamesTestNode(io.ComfyNode):
node_id="AutogrowNamesTestNode",
display_name="AutogrowNamesTest",
category="logic",
description="Test node demonstrating Autogrow inputs with named template slots that dynamically add float inputs.",
short_description="Test node for Autogrow named template inputs.",
inputs=[
_io.Autogrow.Input("autogrow", template=template)
],
@@ -205,8 +195,6 @@ class AutogrowPrefixTestNode(io.ComfyNode):
node_id="AutogrowPrefixTestNode",
display_name="AutogrowPrefixTest",
category="logic",
description="Test node demonstrating Autogrow inputs with prefix-based template slots that dynamically add numbered float inputs.",
short_description="Test node for Autogrow prefix template inputs.",
inputs=[
_io.Autogrow.Input("autogrow", template=template)
],
@@ -226,8 +214,6 @@ class ComboOutputTestNode(io.ComfyNode):
node_id="ComboOptionTestNode",
display_name="ComboOptionTest",
category="logic",
description="Test node demonstrating combo output types by passing two selected combo values through as outputs.",
short_description="Test node for combo output passthrough.",
inputs=[io.Combo.Input("combo", options=["option1", "option2", "option3"]),
io.Combo.Input("combo2", options=["option4", "option5", "option6"])],
outputs=[io.Combo.Output(), io.Combo.Output()],
@@ -245,8 +231,6 @@ class ConvertStringToComboNode(io.ComfyNode):
search_aliases=["string to dropdown", "text to combo"],
display_name="Convert String to Combo",
category="logic",
description="Converts a string value into a combo type output so it can be used as a dropdown selection in downstream nodes.",
short_description="Convert a string to a combo type output.",
inputs=[io.String.Input("string")],
outputs=[io.Combo.Output()],
)
@@ -263,8 +247,6 @@ class InvertBooleanNode(io.ComfyNode):
search_aliases=["not", "toggle", "negate", "flip boolean"],
display_name="Invert Boolean",
category="logic",
description="Inverts a boolean value, outputting true when input is false and vice versa.",
short_description="Invert a boolean value.",
inputs=[io.Boolean.Input("boolean")],
outputs=[io.Boolean.Output()],
)

@@ -32,7 +32,6 @@ class LoraLoaderBypass:

CATEGORY = "loaders"
DESCRIPTION = "Apply LoRA in bypass mode. Unlike regular LoRA, this doesn't modify model weights - instead it injects the LoRA computation during forward pass. Useful for training scenarios."
SHORT_DESCRIPTION = "Applies LoRA via forward pass injection, not weight modification."
EXPERIMENTAL = True

def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
@@ -63,8 +62,6 @@ class LoraLoaderBypassModelOnly(LoraLoaderBypass):
"strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}),
}}
RETURN_TYPES = ("MODEL",)
DESCRIPTION = "Apply LoRA in bypass mode to only the diffusion model without modifying base weights or affecting CLIP."
SHORT_DESCRIPTION = "Apply bypass LoRA to model only, no CLIP."
FUNCTION = "load_lora_model_only"

def load_lora_model_only(self, model, lora_name, strength_model):

@@ -7,7 +7,6 @@ import logging
from enum import Enum
from typing_extensions import override
from comfy_api.latest import ComfyExtension, io
from tqdm.auto import trange

CLAMP_QUANTILE = 0.99

@@ -50,22 +49,12 @@ LORA_TYPES = {"standard": LORAType.STANDARD,
"full_diff": LORAType.FULL_DIFF}

def calc_lora_model(model_diff, rank, prefix_model, prefix_lora, output_sd, lora_type, bias_diff=False):
comfy.model_management.load_models_gpu([model_diff])
comfy.model_management.load_models_gpu([model_diff], force_patch_weights=True)
sd = model_diff.model_state_dict(filter_prefix=prefix_model)

sd_keys = list(sd.keys())
for index in trange(len(sd_keys), unit="weight"):
k = sd_keys[index]
op_keys = sd_keys[index].rsplit('.', 1)
if len(op_keys) < 2 or op_keys[1] not in ["weight", "bias"] or (op_keys[1] == "bias" and not bias_diff):
continue
op = comfy.utils.get_attr(model_diff.model, op_keys[0])
if hasattr(op, "comfy_cast_weights") and not getattr(op, "comfy_patched_weights", False):
weight_diff = model_diff.patch_weight_to_device(k, model_diff.load_device, return_weight=True)
else:
for k in sd:
if k.endswith(".weight"):
weight_diff = sd[k]

if op_keys[1] == "weight":
if lora_type == LORAType.STANDARD:
if weight_diff.ndim < 2:
if bias_diff:
@@ -80,8 +69,8 @@ def calc_lora_model(model_diff, rank, prefix_model, prefix_lora, output_sd, lora
elif lora_type == LORAType.FULL_DIFF:
output_sd["{}{}.diff".format(prefix_lora, k[len(prefix_model):-7])] = weight_diff.contiguous().half().cpu()

elif bias_diff and op_keys[1] == "bias":
output_sd["{}{}.diff_b".format(prefix_lora, k[len(prefix_model):-5])] = weight_diff.contiguous().half().cpu()
elif bias_diff and k.endswith(".bias"):
output_sd["{}{}.diff_b".format(prefix_lora, k[len(prefix_model):-5])] = sd[k].contiguous().half().cpu()
return output_sd

class LoraSave(io.ComfyNode):
@@ -92,8 +81,6 @@ class LoraSave(io.ComfyNode):
search_aliases=["export lora"],
display_name="Extract and Save Lora",
category="_for_testing",
description="Extracts LoRA weights from a model or text encoder diff using SVD decomposition and saves them as a safetensors file, supporting standard and full diff modes.",
short_description="Extract and save LoRA from model diff.",
inputs=[
io.String.Input("filename_prefix", default="loras/ComfyUI_extracted_lora"),
io.Int.Input("rank", default=8, min=1, max=4096, step=1),

@@ -11,8 +11,6 @@ class LotusConditioning(io.ComfyNode):
return io.Schema(
node_id="LotusConditioning",
category="conditioning/lotus",
description="Provides precomputed null conditioning embeddings for the Lotus depth/normal estimation model, avoiding the need for a separate text encoder.",
short_description="Precomputed null conditioning for Lotus model.",
inputs=[],
outputs=[io.Conditioning.Output(display_name="conditioning")],
)

@@ -18,8 +18,6 @@ class EmptyLTXVLatentVideo(io.ComfyNode):
return io.Schema(
node_id="EmptyLTXVLatentVideo",
category="latent/video/ltxv",
description="Creates an empty LTXV video latent tensor with the specified dimensions and batch size.",
short_description="Creates an empty LTXV video latent tensor.",
inputs=[
io.Int.Input("width", default=768, min=64, max=nodes.MAX_RESOLUTION, step=32),
io.Int.Input("height", default=512, min=64, max=nodes.MAX_RESOLUTION, step=32),
@@ -44,8 +42,6 @@ class LTXVImgToVideo(io.ComfyNode):
return io.Schema(
node_id="LTXVImgToVideo",
category="conditioning/video_models",
description="Encodes an image through a VAE and sets up conditioning for LTXV image-to-video generation with adjustable strength.",
short_description="Sets up LTXV image-to-video conditioning.",
inputs=[
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),
@@ -91,8 +87,6 @@ class LTXVImgToVideoInplace(io.ComfyNode):
return io.Schema(
node_id="LTXVImgToVideoInplace",
category="conditioning/video_models",
description="Encodes an image through a VAE and injects it into an existing latent for in-place LTXV image-to-video conditioning.",
short_description="In-place LTXV image-to-video latent conditioning.",
inputs=[
io.Vae.Input("vae"),
io.Image.Input("image"),
@@ -177,8 +171,6 @@ class LTXVAddGuide(io.ComfyNode):
return io.Schema(
node_id="LTXVAddGuide",
category="conditioning/video_models",
description="Adds a guiding image or video to LTXV conditioning at a specified frame index to control video generation.",
short_description="Adds a guiding image or video to LTXV conditioning.",
inputs=[
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),
@@ -343,8 +335,6 @@ class LTXVCropGuides(io.ComfyNode):
return io.Schema(
node_id="LTXVCropGuides",
category="conditioning/video_models",
description="Removes appended keyframe guide latents from an LTXV latent and resets keyframe indices in the conditioning.",
short_description="Removes keyframe guide latents from LTXV conditioning.",
inputs=[
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),
@@ -383,8 +373,6 @@ class LTXVConditioning(io.ComfyNode):
return io.Schema(
node_id="LTXVConditioning",
category="conditioning/video_models",
description="Sets the frame rate on LTXV positive and negative conditioning for video generation.",
short_description="Sets frame rate on LTXV conditioning.",
inputs=[
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),
@@ -409,8 +397,6 @@ class ModelSamplingLTXV(io.ComfyNode):
return io.Schema(
node_id="ModelSamplingLTXV",
category="advanced/model",
description="Configures LTXV model sampling by computing a shift parameter from max_shift, base_shift, and latent token count.",
short_description="Configures LTXV model sampling shift parameters.",
inputs=[
io.Model.Input("model"),
io.Float.Input("max_shift", default=2.05, min=0.0, max=100.0, step=0.01),
@@ -456,8 +442,6 @@ class LTXVScheduler(io.ComfyNode):
return io.Schema(
node_id="LTXVScheduler",
category="sampling/custom_sampling/schedulers",
description="Generates a sigma schedule for LTXV sampling with configurable shift parameters, stretch, and terminal value.",
short_description="Generates a sigma schedule for LTXV sampling.",
inputs=[
io.Int.Input("steps", default=20, min=1, max=10000),
io.Float.Input("max_shift", default=2.05, min=0.0, max=100.0, step=0.01),
@@ -562,8 +546,6 @@ class LTXVPreprocess(io.ComfyNode):
return io.Schema(
node_id="LTXVPreprocess",
category="image",
description="Applies H.264 video compression preprocessing to images to improve LTXV generation quality.",
short_description="Applies video compression preprocessing for LTXV.",
inputs=[
io.Image.Input("image"),
io.Int.Input(
@@ -592,8 +574,6 @@ class LTXVConcatAVLatent(io.ComfyNode):
return io.Schema(
node_id="LTXVConcatAVLatent",
category="latent/video/ltxv",
description="Concatenates separate video and audio latents into a combined audio-video latent for LTXV processing.",
short_description="Concatenates video and audio latents for LTXV.",
inputs=[
io.Latent.Input("video_latent"),
io.Latent.Input("audio_latent"),
@@ -629,8 +609,7 @@ class LTXVSeparateAVLatent(io.ComfyNode):
return io.Schema(
node_id="LTXVSeparateAVLatent",
category="latent/video/ltxv",
description="Separates a combined audio-video latent into individual video and audio latents.",
short_description=None,
description="LTXV Separate AV Latent",
inputs=[
io.Latent.Input("av_latent"),
],

@@ -14,8 +14,6 @@ class LTXVAudioVAELoader(io.ComfyNode):
node_id="LTXVAudioVAELoader",
display_name="LTXV Audio VAE Loader",
category="audio",
description="Loads an LTXV Audio VAE model from a checkpoint file for audio encoding and decoding.",
short_description="Loads an LTXV Audio VAE model checkpoint.",
inputs=[
io.Combo.Input(
"ckpt_name",
@@ -40,7 +38,6 @@ class LTXVAudioVAEEncode(io.ComfyNode):
node_id="LTXVAudioVAEEncode",
display_name="LTXV Audio VAE Encode",
category="audio",
description="Encodes audio into latent representations using the LTXV Audio VAE model.",
inputs=[
io.Audio.Input("audio", tooltip="The audio to be encoded."),
io.Vae.Input(
@@ -71,8 +68,6 @@ class LTXVAudioVAEDecode(io.ComfyNode):
node_id="LTXVAudioVAEDecode",
display_name="LTXV Audio VAE Decode",
category="audio",
description="Decodes latent representations back into audio using the LTXV Audio VAE model.",
short_description="Decodes latents back to audio via LTXV Audio VAE.",
inputs=[
io.Latent.Input("samples", tooltip="The latent to be decoded."),
io.Vae.Input(
@@ -106,8 +101,6 @@ class LTXVEmptyLatentAudio(io.ComfyNode):
node_id="LTXVEmptyLatentAudio",
display_name="LTXV Empty Latent Audio",
category="latent/audio",
description="Creates an empty LTXV audio latent tensor sized according to the frame count, frame rate, and Audio VAE configuration.",
short_description="Creates an empty LTXV audio latent tensor.",
inputs=[
io.Int.Input(
"frames_number",
@@ -184,7 +177,6 @@ class LTXAVTextEncoderLoader(io.ComfyNode):
display_name="LTXV Audio Text Encoder Loader",
category="advanced/loaders",
description="[Recipes]\n\nltxav: gemma 3 12B",
short_description=None,
inputs=[
io.Combo.Input(
"text_encoder",

@@ -19,8 +19,6 @@ class LTXVLatentUpsampler:
RETURN_TYPES = ("LATENT",)
FUNCTION = "upsample_latent"
CATEGORY = "latent/video"
DESCRIPTION = "Upsample an LTXV video latent by a factor of 2 using a dedicated latent upscale model."
SHORT_DESCRIPTION = "Upsample an LTXV video latent by 2x."
EXPERIMENTAL = True

def upsample_latent(

@@ -10,8 +10,6 @@ class RenormCFG(io.ComfyNode):
return io.Schema(
node_id="RenormCFG",
category="advanced/model",
description="Applies renormalized classifier-free guidance with configurable truncation threshold and renormalization strength to control CFG output magnitude.",
short_description="Applies renormalized classifier-free guidance with truncation.",
inputs=[
io.Model.Input("model"),
io.Float.Input("cfg_trunc", default=100, min=0.0, max=100.0, step=0.01),
@@ -86,7 +84,6 @@ class CLIPTextEncodeLumina2(io.ComfyNode):
category="conditioning",
description="Encodes a system prompt and a user prompt using a CLIP model into an embedding "
"that can be used to guide the diffusion model towards generating specific images.",
short_description="Encodes system and user prompts via CLIP for Lumina2.",
inputs=[
io.Combo.Input(
"system_prompt",

@@ -13,7 +13,6 @@ class Mahiro(io.ComfyNode):
display_name="Mahiro CFG",
category="_for_testing",
description="Modify the guidance to scale more on the 'direction' of the positive prompt rather than the difference between the negative prompt.",
short_description="Scales guidance toward positive prompt direction over negative difference.",
inputs=[
io.Model.Input("model"),
],

@@ -52,8 +52,6 @@ class LatentCompositeMasked(IO.ComfyNode):
node_id="LatentCompositeMasked",
search_aliases=["overlay latent", "layer latent", "paste latent", "inpaint latent"],
category="latent",
description="Composites a source latent onto a destination latent at a specified position with optional mask and resize support.",
short_description="Composites one latent onto another with masking.",
inputs=[
IO.Latent.Input("destination"),
IO.Latent.Input("source"),
@@ -83,8 +81,6 @@ class ImageCompositeMasked(IO.ComfyNode):
node_id="ImageCompositeMasked",
search_aliases=["paste image", "overlay", "layer"],
category="image",
description="Composites a source image onto a destination image at a specified position with optional mask and resize support.",
short_description="Composites one image onto another with masking.",
inputs=[
IO.Image.Input("destination"),
IO.Image.Input("source"),
@@ -114,8 +110,6 @@ class MaskToImage(IO.ComfyNode):
search_aliases=["convert mask"],
display_name="Convert Mask to Image",
category="mask",
description="Converts a single-channel mask into a three-channel grayscale image.",
short_description=None,
inputs=[
IO.Mask.Input("mask"),
],
@@ -138,7 +132,6 @@ class ImageToMask(IO.ComfyNode):
search_aliases=["extract channel", "channel to mask"],
display_name="Convert Image to Mask",
category="mask",
description="Extracts a selected color channel from an image as a mask.",
inputs=[
IO.Image.Input("image"),
IO.Combo.Input("channel", options=["red", "green", "blue", "alpha"]),
@@ -162,8 +155,6 @@ class ImageColorToMask(IO.ComfyNode):
node_id="ImageColorToMask",
search_aliases=["color keying", "chroma key"],
category="mask",
description="Creates a mask from an image where pixels matching a specified RGB color value become white.",
short_description="Creates a mask from pixels matching a color.",
inputs=[
IO.Image.Input("image"),
IO.Int.Input("color", default=0, min=0, max=0xFFFFFF, step=1, display_mode=IO.NumberDisplay.number),
@@ -187,8 +178,6 @@ class SolidMask(IO.ComfyNode):
return IO.Schema(
node_id="SolidMask",
category="mask",
description="Creates a uniform solid mask filled with a single value at the specified dimensions.",
short_description="Creates a solid mask with a uniform value.",
inputs=[
IO.Float.Input("value", default=1.0, min=0.0, max=1.0, step=0.01),
IO.Int.Input("width", default=512, min=1, max=nodes.MAX_RESOLUTION, step=1),
@@ -212,8 +201,6 @@ class InvertMask(IO.ComfyNode):
node_id="InvertMask",
search_aliases=["reverse mask", "flip mask"],
category="mask",
description="Inverts a mask so white becomes black and vice versa.",
short_description=None,
inputs=[
IO.Mask.Input("mask"),
],
@@ -235,8 +222,6 @@ class CropMask(IO.ComfyNode):
node_id="CropMask",
search_aliases=["cut mask", "extract mask region", "mask slice"],
category="mask",
description="Crops a rectangular region from a mask at the specified position and dimensions.",
short_description="Crops a rectangular region from a mask.",
inputs=[
IO.Mask.Input("mask"),
IO.Int.Input("x", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1),
@@ -263,8 +248,6 @@ class MaskComposite(IO.ComfyNode):
node_id="MaskComposite",
search_aliases=["combine masks", "blend masks", "layer masks"],
category="mask",
description="Composites a source mask onto a destination mask at a specified position using selectable blend operations.",
short_description="Composites masks with selectable blend operations.",
inputs=[
IO.Mask.Input("destination"),
IO.Mask.Input("source"),
@@ -314,8 +297,6 @@ class FeatherMask(IO.ComfyNode):
node_id="FeatherMask",
search_aliases=["soft edge mask", "blur mask edges", "gradient mask edge"],
category="mask",
description="Applies a soft gradient feather to the edges of a mask with independent control for each side.",
short_description="Feathers mask edges with per-side control.",
inputs=[
IO.Mask.Input("mask"),
IO.Int.Input("left", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1),
@@ -364,8 +345,6 @@ class GrowMask(IO.ComfyNode):
search_aliases=["expand mask", "shrink mask"],
display_name="Grow Mask",
category="mask",
description="Expands or shrinks a mask by a specified number of pixels using morphological dilation or erosion with optional tapered corners.",
short_description="Expands or shrinks a mask by pixel amount.",
inputs=[
IO.Mask.Input("mask"),
IO.Int.Input("expand", default=0, min=-nodes.MAX_RESOLUTION, max=nodes.MAX_RESOLUTION, step=1),
@@ -403,8 +382,6 @@ class ThresholdMask(IO.ComfyNode):
node_id="ThresholdMask",
search_aliases=["binary mask"],
category="mask",
description="Converts a mask to binary by setting pixels above a threshold to white and below to black.",
short_description="Converts a mask to binary using a threshold.",
inputs=[
IO.Mask.Input("mask"),
IO.Float.Input("value", default=0.5, min=0.0, max=1.0, step=0.01),
@@ -431,8 +408,7 @@ class MaskPreview(IO.ComfyNode):
search_aliases=["show mask", "view mask", "inspect mask", "debug mask"],
display_name="Preview Mask",
category="mask",
description="Previews a mask in the UI by rendering it as a grayscale image.",
short_description="Previews a mask as a grayscale image.",
description="Saves the input images to your ComfyUI output directory.",
inputs=[
IO.Mask.Input("mask"),
],

@@ -11,8 +11,6 @@ class EmptyMochiLatentVideo(io.ComfyNode):
return io.Schema(
node_id="EmptyMochiLatentVideo",
category="latent/video",
description="Creates an empty latent tensor sized for Mochi video generation with configurable width, height, frame length, and batch size.",
short_description="Create empty latent for Mochi video generation.",
inputs=[
io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16),
io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),

@@ -60,8 +60,6 @@ class ModelSamplingDiscrete:
FUNCTION = "patch"

CATEGORY = "advanced/model"
DESCRIPTION = "Override the model's sampling method to use a discrete noise schedule with a selectable prediction type."
SHORT_DESCRIPTION = "Override model sampling to a discrete noise schedule."

def patch(self, model, sampling, zsnr):
m = model.clone()
@@ -98,8 +96,6 @@ class ModelSamplingStableCascade:
FUNCTION = "patch"

CATEGORY = "advanced/model"
DESCRIPTION = "Override the model's sampling to use Stable Cascade noise scheduling with an adjustable shift parameter."
SHORT_DESCRIPTION = "Override sampling to Stable Cascade noise scheduling."

def patch(self, model, shift):
m = model.clone()
@@ -126,8 +122,6 @@ class ModelSamplingSD3:
FUNCTION = "patch"

CATEGORY = "advanced/model"
DESCRIPTION = "Override the model's sampling to use the SD3 discrete flow noise schedule with an adjustable shift parameter."
SHORT_DESCRIPTION = "Override sampling to SD3 discrete flow schedule."

def patch(self, model, shift, multiplier=1000):
m = model.clone()
@@ -150,8 +144,6 @@ class ModelSamplingAuraFlow(ModelSamplingSD3):
"shift": ("FLOAT", {"default": 1.73, "min": 0.0, "max": 100.0, "step":0.01}),
}}

DESCRIPTION = "Override the model's sampling to use the AuraFlow discrete flow noise schedule with an adjustable shift."
SHORT_DESCRIPTION = "Override sampling to AuraFlow discrete flow schedule."
FUNCTION = "patch_aura"

def patch_aura(self, model, shift):
@@ -171,8 +163,6 @@ class ModelSamplingFlux:
FUNCTION = "patch"

CATEGORY = "advanced/model"
DESCRIPTION = "Override the model's sampling to use the Flux flow schedule with resolution-dependent shift computed from base and max shift values."
SHORT_DESCRIPTION = "Override sampling to Flux flow schedule with resolution shift."

def patch(self, model, max_shift, base_shift, width, height):
m = model.clone()
@@ -208,8 +198,6 @@ class ModelSamplingContinuousEDM:
FUNCTION = "patch"

CATEGORY = "advanced/model"
DESCRIPTION = "Override the model's sampling to use a continuous EDM noise schedule with configurable sigma range and prediction type."
SHORT_DESCRIPTION = "Override sampling to continuous EDM noise schedule."

def patch(self, model, sampling, sigma_max, sigma_min):
m = model.clone()
@@ -255,8 +243,6 @@ class ModelSamplingContinuousV:
FUNCTION = "patch"

CATEGORY = "advanced/model"
DESCRIPTION = "Override the model's sampling to use a continuous V-prediction noise schedule with configurable sigma range."
SHORT_DESCRIPTION = "Override sampling to continuous V-prediction schedule."

def patch(self, model, sampling, sigma_max, sigma_min):
m = model.clone()
@@ -283,8 +269,6 @@ class RescaleCFG:
FUNCTION = "patch"

CATEGORY = "advanced/model"
DESCRIPTION = "Apply Rescale CFG to the model, which normalizes the CFG output to match the standard deviation of the positive conditioning prediction."
SHORT_DESCRIPTION = "Normalize CFG output to match positive conditioning std."

def patch(self, model, multiplier):
def rescale_cfg(args):
@@ -326,7 +310,6 @@ class ModelComputeDtype:
FUNCTION = "patch"

CATEGORY = "advanced/debug/model"
DESCRIPTION = "Override the compute dtype used by the model during inference."

def patch(self, model, dtype):
m = model.clone()

@@ -11,8 +11,6 @@ class PatchModelAddDownscale(io.ComfyNode):
node_id="PatchModelAddDownscale",
display_name="PatchModelAddDownscale (Kohya Deep Shrink)",
category="model_patches/unet",
description="Patches the UNet to downscale internal feature maps at a specified block during a configurable sigma range, then upscale on output, implementing the Kohya Deep Shrink technique for faster generation.",
short_description="Kohya Deep Shrink: downscale UNet internals for speed.",
inputs=[
io.Model.Input("model"),
io.Int.Input("block_number", default=3, min=1, max=32, step=1),

@@ -22,8 +22,6 @@ class ModelMergeSimple:
FUNCTION = "merge"

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Merge two diffusion models using a simple ratio to blend all weights uniformly."
SHORT_DESCRIPTION = "Merge two models with a uniform blend ratio."

def merge(self, model1, model2, ratio):
m = model1.clone()
@@ -43,8 +41,6 @@ class ModelSubtract:
FUNCTION = "merge"

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Subtract one diffusion model's weights from another with an adjustable multiplier for extracting differences."
SHORT_DESCRIPTION = "Subtract model weights with adjustable multiplier."

def merge(self, model1, model2, multiplier):
m = model1.clone()
@@ -63,8 +59,6 @@ class ModelAdd:
FUNCTION = "merge"

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Add the weights of one diffusion model on top of another."
SHORT_DESCRIPTION = None

def merge(self, model1, model2):
m = model1.clone()
@@ -85,8 +79,6 @@ class CLIPMergeSimple:
FUNCTION = "merge"

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Merge two CLIP text encoder models using a simple ratio to blend all weights uniformly."
SHORT_DESCRIPTION = "Merge two CLIP models with a uniform blend ratio."

def merge(self, clip1, clip2, ratio):
m = clip1.clone()
@@ -110,8 +102,6 @@ class CLIPSubtract:
FUNCTION = "merge"

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Subtract one CLIP model's weights from another with an adjustable multiplier for extracting differences."
SHORT_DESCRIPTION = "Subtract CLIP weights with adjustable multiplier."

def merge(self, clip1, clip2, multiplier):
m = clip1.clone()
@@ -134,8 +124,6 @@ class CLIPAdd:
FUNCTION = "merge"

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Add the weights of one CLIP model on top of another."
SHORT_DESCRIPTION = None

def merge(self, clip1, clip2):
m = clip1.clone()
@@ -160,8 +148,6 @@ class ModelMergeBlocks:
FUNCTION = "merge"

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Merge two diffusion models with separate blend ratios for input, middle, and output blocks."
SHORT_DESCRIPTION = "Merge two models with per-block blend ratios."

def merge(self, model1, model2, **kwargs):
m = model1.clone()
@@ -242,8 +228,6 @@ def save_checkpoint(model, clip=None, vae=None, clip_vision=None, filename_prefi

class CheckpointSave:
SEARCH_ALIASES = ["save model", "export checkpoint", "merge save"]
DESCRIPTION = "Saves a model, CLIP, and VAE as a combined checkpoint file in safetensors format with optional workflow metadata."
SHORT_DESCRIPTION = "Saves model, CLIP, and VAE as a checkpoint."
def __init__(self):
self.output_dir = folder_paths.get_output_directory()

@@ -278,8 +262,6 @@ class CLIPSave:
OUTPUT_NODE = True

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Save a CLIP text encoder model to safetensors files, splitting by model component."
SHORT_DESCRIPTION = "Save a CLIP model to safetensors files."

def save(self, clip, filename_prefix, prompt=None, extra_pnginfo=None):
prompt_info = ""
@@ -337,8 +319,6 @@ class VAESave:
OUTPUT_NODE = True

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Save a VAE model to a safetensors file."
SHORT_DESCRIPTION = None

def save(self, vae, filename_prefix, prompt=None, extra_pnginfo=None):
full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)
@@ -374,8 +354,6 @@ class ModelSave:
OUTPUT_NODE = True

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Save a diffusion model to a safetensors file."
SHORT_DESCRIPTION = None

def save(self, model, filename_prefix, prompt=None, extra_pnginfo=None):
save_checkpoint(model, filename_prefix=filename_prefix, output_dir=self.output_dir, prompt=prompt, extra_pnginfo=extra_pnginfo)

@@ -2,8 +2,6 @@ import comfy_extras.nodes_model_merging

class ModelMergeSD1(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two SD1 models with per-block weight control over input, middle, and output blocks."
SHORT_DESCRIPTION = "Merge two SD1 models with per-block control."
@classmethod
def INPUT_TYPES(s):
arg_dict = { "model1": ("MODEL",),
@@ -28,15 +26,8 @@ class ModelMergeSD1(comfy_extras.nodes_model_merging.ModelMergeBlocks):
return {"required": arg_dict}


class ModelMergeSD2(ModelMergeSD1):
DESCRIPTION = "Merge two SD2 models with per-block weight control over input, middle, and output blocks."
SHORT_DESCRIPTION = "Merge two SD2 models with per-block control."


class ModelMergeSDXL(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two SDXL models with per-block weight control over input, middle, and output blocks."
SHORT_DESCRIPTION = "Merge two SDXL models with per-block control."

@classmethod
def INPUT_TYPES(s):
@@ -63,8 +54,6 @@ class ModelMergeSDXL(comfy_extras.nodes_model_merging.ModelMergeBlocks):

class ModelMergeSD3_2B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two SD3 2B models with per-block weight control over 24 joint blocks and embedders."
SHORT_DESCRIPTION = "Merge two SD3 2B models with per-block control."

@classmethod
def INPUT_TYPES(s):
@@ -89,8 +78,6 @@ class ModelMergeSD3_2B(comfy_extras.nodes_model_merging.ModelMergeBlocks):

class ModelMergeAuraflow(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two AuraFlow models with per-block weight control over double and single layers."
SHORT_DESCRIPTION = "Merge two AuraFlow models with per-block control."

@classmethod
def INPUT_TYPES(s):
@@ -118,8 +105,6 @@ class ModelMergeAuraflow(comfy_extras.nodes_model_merging.ModelMergeBlocks):

class ModelMergeFlux1(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Flux1 models with per-block weight control over 19 double blocks and 38 single blocks."
SHORT_DESCRIPTION = "Merge two Flux1 models with per-block control."

@classmethod
def INPUT_TYPES(s):
@@ -146,8 +131,6 @@ class ModelMergeFlux1(comfy_extras.nodes_model_merging.ModelMergeBlocks):

class ModelMergeSD35_Large(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two SD3.5 Large models with per-block weight control over 38 joint blocks and embedders."
SHORT_DESCRIPTION = "Merge two SD3.5 Large models with per-block control."

@classmethod
def INPUT_TYPES(s):
@@ -171,8 +154,6 @@ class ModelMergeSD35_Large(comfy_extras.nodes_model_merging.ModelMergeBlocks):

class ModelMergeMochiPreview(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Mochi Preview models with per-block weight control over 48 blocks and embedders."
SHORT_DESCRIPTION = "Merge two Mochi Preview models with per-block control."

@classmethod
def INPUT_TYPES(s):
@@ -195,8 +176,6 @@ class ModelMergeMochiPreview(comfy_extras.nodes_model_merging.ModelMergeBlocks):

class ModelMergeLTXV(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two LTXV models with per-block weight control over 28 transformer blocks."
SHORT_DESCRIPTION = "Merge two LTXV models with per-block control."

@classmethod
def INPUT_TYPES(s):
@@ -219,8 +198,6 @@ class ModelMergeLTXV(comfy_extras.nodes_model_merging.ModelMergeBlocks):

class ModelMergeCosmos7B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Cosmos 7B models with per-block weight control over 28 blocks and embedders."
SHORT_DESCRIPTION = "Merge two Cosmos 7B models with per-block control."

@classmethod
def INPUT_TYPES(s):
@@ -245,8 +222,6 @@ class ModelMergeCosmos7B(comfy_extras.nodes_model_merging.ModelMergeBlocks):

class ModelMergeCosmos14B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Cosmos 14B models with per-block weight control over 36 blocks and embedders."
SHORT_DESCRIPTION = "Merge two Cosmos 14B models with per-block control."

@classmethod
def INPUT_TYPES(s):
@@ -272,7 +247,6 @@ class ModelMergeCosmos14B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
class ModelMergeWAN2_1(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "1.3B model has 30 blocks, 14B model has 40 blocks. Image to video model has the extra img_emb."
SHORT_DESCRIPTION = "WAN 2.1 model merging with block-level control."

@classmethod
def INPUT_TYPES(s):
@@ -296,8 +270,6 @@ class ModelMergeWAN2_1(comfy_extras.nodes_model_merging.ModelMergeBlocks):

class ModelMergeCosmosPredict2_2B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Cosmos Predict2 2B models with per-block weight control over 28 blocks and embedders."
SHORT_DESCRIPTION = "Merge two Cosmos Predict2 2B models with per-block control."

@classmethod
def INPUT_TYPES(s):
@@ -321,8 +293,6 @@ class ModelMergeCosmosPredict2_2B(comfy_extras.nodes_model_merging.ModelMergeBlo

class ModelMergeCosmosPredict2_14B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Cosmos Predict2 14B models with per-block weight control over 36 blocks and embedders."
SHORT_DESCRIPTION = "Merge two Cosmos Predict2 14B models with per-block control."

@classmethod
def INPUT_TYPES(s):
@@ -346,8 +316,6 @@ class ModelMergeCosmosPredict2_14B(comfy_extras.nodes_model_merging.ModelMergeBl

class ModelMergeQwenImage(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Qwen Image models with per-block weight control over 60 transformer blocks."
SHORT_DESCRIPTION = "Merge two Qwen Image models with per-block control."

@classmethod
def INPUT_TYPES(s):
@@ -371,7 +339,7 @@ class ModelMergeQwenImage(comfy_extras.nodes_model_merging.ModelMergeBlocks):

NODE_CLASS_MAPPINGS = {
"ModelMergeSD1": ModelMergeSD1,
"ModelMergeSD2": ModelMergeSD2, #SD1 and SD2 have the same blocks
"ModelMergeSD2": ModelMergeSD1, #SD1 and SD2 have the same blocks
"ModelMergeSDXL": ModelMergeSDXL,
"ModelMergeSD3_2B": ModelMergeSD3_2B,
"ModelMergeAuraflow": ModelMergeAuraflow,

@@ -230,8 +230,6 @@ class ModelPatchLoader:
EXPERIMENTAL = True

CATEGORY = "advanced/loaders"
DESCRIPTION = "Load a model patch file such as a controlnet or style reference patch for use with compatible model nodes."
SHORT_DESCRIPTION = "Load a model patch file for controlnet or style."

def load_model_patch(self, name):
model_patch_path = folder_paths.get_full_path_or_raise("model_patches", name)
@@ -458,8 +456,6 @@ class QwenImageDiffsynthControlnet:
EXPERIMENTAL = True

CATEGORY = "advanced/loaders/qwen"
DESCRIPTION = "Apply a DiffSynth-style controlnet patch to a Qwen Image model using a VAE-encoded control image."
SHORT_DESCRIPTION = "Apply DiffSynth controlnet to a Qwen Image model."

def diffsynth_controlnet(self, model, model_patch, vae, image=None, strength=1.0, inpaint_image=None, mask=None):
model_patched = model.clone()
@@ -493,8 +489,6 @@ class ZImageFunControlnet(QwenImageDiffsynthControlnet):
"optional": {"image": ("IMAGE",), "inpaint_image": ("IMAGE",), "mask": ("MASK",)}}

CATEGORY = "advanced/loaders/zimage"
DESCRIPTION = "Apply a Z-Image Fun controlnet patch to a model with optional control image, inpaint image, and mask inputs."
SHORT_DESCRIPTION = "Apply Z-Image Fun controlnet with optional inpainting."

class UsoStyleProjectorPatch:
def __init__(self, model_patch, encoded_image):
@@ -531,8 +525,6 @@ class USOStyleReference:
EXPERIMENTAL = True

CATEGORY = "advanced/model_patches/flux"
DESCRIPTION = "Apply a USO style reference patch to a Flux model using multi-layer SigLIP features from CLIP vision output."
SHORT_DESCRIPTION = "Apply USO style reference to a Flux model."

def apply_patch(self, model, model_patch, clip_vision_output):
encoded_image = torch.stack((clip_vision_output.all_hidden_states[:, -20], clip_vision_output.all_hidden_states[:, -11], clip_vision_output.penultimate_hidden_states))

Some files were not shown because too many files have changed in this diff.