Mirror of https://github.com/comfyanonymous/ComfyUI.git, synced 2026-02-18 14:10:07 +00:00

Compare commits: pysssss/no ... v0.14.2 (13 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 185c61dc26 | |
| | 19236edfa4 | |
| | 73c3f86973 | |
| | 262abf437b | |
| | 5284e6bf69 | |
| | 44f8598521 | |
| | fe52843fe5 | |
| | c39653163d | |
| | 18927538a1 | |
| | 8a6fbc2dc2 | |
| | b44fc4c589 | |
| | 4454fab7f0 | |
| | 1978f59ffd | |
.gitignore (vendored): 2 changes

@@ -11,7 +11,7 @@ extra_model_paths.yaml
 /.vs
 .vscode/
 .idea/
-venv/
+venv*/
 .venv/
 /web/extensions/*
 !/web/extensions/logging.js.example
@@ -179,8 +179,8 @@ class LLMAdapter(nn.Module):
 if source_attention_mask.ndim == 2:
 source_attention_mask = source_attention_mask.unsqueeze(1).unsqueeze(1)

-x = self.in_proj(self.embed(target_input_ids))
 context = source_hidden_states
+x = self.in_proj(self.embed(target_input_ids, out_dtype=context.dtype))
 position_ids = torch.arange(x.shape[1], device=x.device).unsqueeze(0)
 position_ids_context = torch.arange(context.shape[1], device=x.device).unsqueeze(0)
 position_embeddings = self.rotary_emb(x, position_ids)
@@ -152,6 +152,7 @@ class Chroma(nn.Module):
 transformer_options={},
 attn_mask: Tensor = None,
 ) -> Tensor:
+transformer_options = transformer_options.copy()
 patches_replace = transformer_options.get("patches_replace", {})

 # running on sequences img

@@ -228,6 +229,7 @@ class Chroma(nn.Module):

 transformer_options["total_blocks"] = len(self.single_blocks)
 transformer_options["block_type"] = "single"
+transformer_options["img_slice"] = [txt.shape[1], img.shape[1]]
 for i, block in enumerate(self.single_blocks):
 transformer_options["block_index"] = i
 if i not in self.skip_dit:
@@ -196,6 +196,9 @@ class DoubleStreamBlock(nn.Module):
 else:
 (img_mod1, img_mod2), (txt_mod1, txt_mod2) = vec

+transformer_patches = transformer_options.get("patches", {})
+extra_options = transformer_options.copy()
+
 # prepare image for attention
 img_modulated = self.img_norm1(img)
 img_modulated = apply_mod(img_modulated, (1 + img_mod1.scale), img_mod1.shift, modulation_dims_img)

@@ -224,6 +227,12 @@ class DoubleStreamBlock(nn.Module):
 attn = attention(q, k, v, pe=pe, mask=attn_mask, transformer_options=transformer_options)
 del q, k, v

+if "attn1_output_patch" in transformer_patches:
+extra_options["img_slice"] = [txt.shape[1], attn.shape[1]]
+patch = transformer_patches["attn1_output_patch"]
+for p in patch:
+attn = p(attn, extra_options)
+
 txt_attn, img_attn = attn[:, : txt.shape[1]], attn[:, txt.shape[1]:]

 # calculate the img bloks

@@ -303,6 +312,9 @@ class SingleStreamBlock(nn.Module):
 else:
 mod = vec

+transformer_patches = transformer_options.get("patches", {})
+extra_options = transformer_options.copy()
+
 qkv, mlp = torch.split(self.linear1(apply_mod(self.pre_norm(x), (1 + mod.scale), mod.shift, modulation_dims)), [3 * self.hidden_size, self.mlp_hidden_dim_first], dim=-1)

 q, k, v = qkv.view(qkv.shape[0], qkv.shape[1], 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)

@@ -312,6 +324,12 @@ class SingleStreamBlock(nn.Module):
 # compute attention
 attn = attention(q, k, v, pe=pe, mask=attn_mask, transformer_options=transformer_options)
 del q, k, v
+
+if "attn1_output_patch" in transformer_patches:
+patch = transformer_patches["attn1_output_patch"]
+for p in patch:
+attn = p(attn, extra_options)
+
 # compute activation in mlp stream, cat again and run second linear layer
 if self.yak_mlp:
 mlp = self.mlp_act(mlp[..., self.mlp_hidden_dim_first // 2:]) * mlp[..., :self.mlp_hidden_dim_first // 2]
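The hunks above wire an "attn1_output_patch" hook into the double- and single-stream blocks: each registered callable receives the raw attention output and the per-call extra_options dict, which now carries "img_slice" = [txt length, image/total length]. A minimal sketch of such a patch, assuming only what the diff shows (the registration path in model_options is an assumption, everything besides the img_slice key and the (attn, extra_options) call shape is illustrative):

# Hypothetical attn1_output_patch: dampen the image-token part of the attention output.
def scale_image_attention(attn, extra_options):
    txt_len, _ = extra_options["img_slice"]   # split point between text and image tokens
    attn = attn.clone()
    attn[:, txt_len:] = attn[:, txt_len:] * 0.9
    return attn

# Assumed registration path (not taken from this diff):
# model_options["transformer_options"]["patches"]["attn1_output_patch"] = [scale_image_attention]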
@@ -142,6 +142,7 @@ class Flux(nn.Module):
 attn_mask: Tensor = None,
 ) -> Tensor:

+transformer_options = transformer_options.copy()
 patches = transformer_options.get("patches", {})
 patches_replace = transformer_options.get("patches_replace", {})
 if img.ndim != 3 or txt.ndim != 3:

@@ -231,6 +232,7 @@ class Flux(nn.Module):

 transformer_options["total_blocks"] = len(self.single_blocks)
 transformer_options["block_type"] = "single"
+transformer_options["img_slice"] = [txt.shape[1], img.shape[1]]
 for i, block in enumerate(self.single_blocks):
 transformer_options["block_index"] = i
 if ("single_block", i) in blocks_replace:
@@ -304,6 +304,7 @@ class HunyuanVideo(nn.Module):
 control=None,
 transformer_options={},
 ) -> Tensor:
+transformer_options = transformer_options.copy()
 patches_replace = transformer_options.get("patches_replace", {})

 initial_shape = list(img.shape)

@@ -416,6 +417,7 @@ class HunyuanVideo(nn.Module):

 transformer_options["total_blocks"] = len(self.single_blocks)
 transformer_options["block_type"] = "single"
+transformer_options["img_slice"] = [txt.shape[1], img.shape[1]]
 for i, block in enumerate(self.single_blocks):
 transformer_options["block_index"] = i
 if ("single_block", i) in blocks_replace:
@@ -178,10 +178,7 @@ class BaseModel(torch.nn.Module):
 xc = torch.cat([xc] + [comfy.model_management.cast_to_device(c_concat, xc.device, xc.dtype)], dim=1)

 context = c_crossattn
-dtype = self.get_dtype()
-
-if self.manual_cast_dtype is not None:
-dtype = self.manual_cast_dtype
+dtype = self.get_dtype_inference()

 xc = xc.to(dtype)
 device = xc.device

@@ -218,6 +215,13 @@ class BaseModel(torch.nn.Module):
 def get_dtype(self):
 return self.diffusion_model.dtype

+def get_dtype_inference(self):
+dtype = self.get_dtype()
+
+if self.manual_cast_dtype is not None:
+dtype = self.manual_cast_dtype
+return dtype
+
 def encode_adm(self, **kwargs):
 return None

@@ -372,9 +376,7 @@ class BaseModel(torch.nn.Module):
 input_shapes += shape

 if comfy.model_management.xformers_enabled() or comfy.model_management.pytorch_attention_flash_attention():
-dtype = self.get_dtype()
-if self.manual_cast_dtype is not None:
-dtype = self.manual_cast_dtype
+dtype = self.get_dtype_inference()
 #TODO: this needs to be tweaked
 area = sum(map(lambda input_shape: input_shape[0] * math.prod(input_shape[2:]), input_shapes))
 return (area * comfy.model_management.dtype_size(dtype) * 0.01 * self.memory_usage_factor) * (1024 * 1024)

@@ -1165,7 +1167,7 @@ class Anima(BaseModel):
 t5xxl_ids = t5xxl_ids.unsqueeze(0)

 if torch.is_inference_mode_enabled(): # if not we are training
-cross_attn = self.diffusion_model.preprocess_text_embeds(cross_attn.to(device=device, dtype=self.get_dtype()), t5xxl_ids.to(device=device), t5xxl_weights=t5xxl_weights.to(device=device, dtype=self.get_dtype()))
+cross_attn = self.diffusion_model.preprocess_text_embeds(cross_attn.to(device=device, dtype=self.get_dtype_inference()), t5xxl_ids.to(device=device), t5xxl_weights=t5xxl_weights.to(device=device, dtype=self.get_dtype_inference()))
 else:
 out['t5xxl_ids'] = comfy.conds.CONDRegular(t5xxl_ids)
 out['t5xxl_weights'] = comfy.conds.CONDRegular(t5xxl_weights)
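These BaseModel hunks replace the repeated manual-cast check with a single get_dtype_inference() helper. A minimal standalone sketch of the same selection logic, mirroring the method body shown above (the free-function form and the "model" name are just for illustration):

def pick_inference_dtype(model):
    # dtype the diffusion model weights are stored in
    dtype = model.get_dtype()
    # override when weights are cast on the fly for inference
    if model.manual_cast_dtype is not None:
        dtype = model.manual_cast_dtype
    return dtype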
@@ -406,13 +406,16 @@ class ModelPatcher:
 def memory_required(self, input_shape):
 return self.model.memory_required(input_shape=input_shape)

+def disable_model_cfg1_optimization(self):
+self.model_options["disable_cfg1_optimization"] = True
+
 def set_model_sampler_cfg_function(self, sampler_cfg_function, disable_cfg1_optimization=False):
 if len(inspect.signature(sampler_cfg_function).parameters) == 3:
 self.model_options["sampler_cfg_function"] = lambda args: sampler_cfg_function(args["cond"], args["uncond"], args["cond_scale"]) #Old way
 else:
 self.model_options["sampler_cfg_function"] = sampler_cfg_function
 if disable_cfg1_optimization:
-self.model_options["disable_cfg1_optimization"] = True
+self.disable_model_cfg1_optimization()

 def set_model_sampler_post_cfg_function(self, post_cfg_function, disable_cfg1_optimization=False):
 self.model_options = set_model_options_post_cfg_function(self.model_options, post_cfg_function, disable_cfg1_optimization)
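For context, set_model_sampler_cfg_function accepts two callback shapes: an old-style three-argument function (wrapped into the lambda shown in the hunk) or a new-style function taking the args dict. A hedged sketch, assuming only the keys ("cond", "uncond", "cond_scale") visible in the diff; the CFG formula itself is the standard one and is shown purely as an example:

def cfg_old_style(cond, uncond, cond_scale):
    # three parameters: detected by signature and wrapped as the "old way"
    return uncond + (cond - uncond) * cond_scale

def cfg_new_style(args):
    # single dict argument: stored and called as-is
    return args["uncond"] + (args["cond"] - args["uncond"]) * args["cond_scale"]

# patcher.set_model_sampler_cfg_function(cfg_new_style, disable_cfg1_optimization=True)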
@@ -21,7 +21,6 @@ import logging
 import comfy.model_management
 from comfy.cli_args import args, PerformanceFeature, enables_dynamic_vram
 import comfy.float
-import comfy.rmsnorm
 import json
 import comfy.memory_management
 import comfy.pinned_memory

@@ -463,7 +462,7 @@ class disable_weight_init:
 else:
 return super().forward(*args, **kwargs)

-class RMSNorm(comfy.rmsnorm.RMSNorm, CastWeightBiasOp):
+class RMSNorm(torch.nn.RMSNorm, CastWeightBiasOp):
 def reset_parameters(self):
 self.bias = None
 return None

@@ -475,8 +474,7 @@ class disable_weight_init:
 weight = None
 bias = None
 offload_stream = None
-x = comfy.rmsnorm.rms_norm(input, weight, self.eps) # TODO: switch to commented out line when old torch is deprecated
-# x = torch.nn.functional.rms_norm(input, self.normalized_shape, weight, self.eps)
+x = torch.nn.functional.rms_norm(input, self.normalized_shape, weight, self.eps)
 uncast_bias_weight(self, weight, bias, offload_stream)
 return x
@@ -1,57 +1,10 @@
 import torch
 import comfy.model_management
-import numbers
-import logging
-
-RMSNorm = None
-
-try:
-rms_norm_torch = torch.nn.functional.rms_norm
-RMSNorm = torch.nn.RMSNorm
-except:
-rms_norm_torch = None
-logging.warning("Please update pytorch to use native RMSNorm")

+RMSNorm = torch.nn.RMSNorm

 def rms_norm(x, weight=None, eps=1e-6):
-if rms_norm_torch is not None and not (torch.jit.is_tracing() or torch.jit.is_scripting()):
-if weight is None:
-return rms_norm_torch(x, (x.shape[-1],), eps=eps)
-else:
-return rms_norm_torch(x, weight.shape, weight=comfy.model_management.cast_to(weight, dtype=x.dtype, device=x.device), eps=eps)
+if weight is None:
+return torch.nn.functional.rms_norm(x, (x.shape[-1],), eps=eps)
 else:
-r = x * torch.rsqrt(torch.mean(x**2, dim=-1, keepdim=True) + eps)
-if weight is None:
-return r
-else:
-return r * comfy.model_management.cast_to(weight, dtype=x.dtype, device=x.device)
-
-
-if RMSNorm is None:
-class RMSNorm(torch.nn.Module):
-def __init__(
-self,
-normalized_shape,
-eps=1e-6,
-elementwise_affine=True,
-device=None,
-dtype=None,
-):
-factory_kwargs = {"device": device, "dtype": dtype}
-super().__init__()
-if isinstance(normalized_shape, numbers.Integral):
-# mypy error: incompatible types in assignment
-normalized_shape = (normalized_shape,) # type: ignore[assignment]
-self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type]
-self.eps = eps
-self.elementwise_affine = elementwise_affine
-if self.elementwise_affine:
-self.weight = torch.nn.Parameter(
-torch.empty(self.normalized_shape, **factory_kwargs)
-)
-else:
-self.register_parameter("weight", None)
-self.bias = None
-
-def forward(self, x):
-return rms_norm(x, self.weight, self.eps)
+return torch.nn.functional.rms_norm(x, weight.shape, weight=comfy.model_management.cast_to(weight, dtype=x.dtype, device=x.device), eps=eps)
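With the old-PyTorch fallback gone, rms_norm simply defers to torch.nn.functional.rms_norm. The manual formula being dropped is x * rsqrt(mean(x**2) + eps) scaled by the weight, so a quick equivalence check is easy to write; this sketch assumes a PyTorch build that ships F.rms_norm, which is exactly what the cleanup relies on:

import torch

# Sanity check: the removed manual fallback and the native op agree numerically.
x = torch.randn(2, 7, 64)
weight = torch.randn(64)

manual = x * torch.rsqrt(torch.mean(x ** 2, dim=-1, keepdim=True) + 1e-6) * weight
native = torch.nn.functional.rms_norm(x, (64,), weight=weight, eps=1e-6)

print(torch.allclose(manual, native, atol=1e-5))  # expected: True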
@@ -75,6 +75,12 @@ class NumberDisplay(str, Enum):
 slider = "slider"


+class ControlAfterGenerate(str, Enum):
+fixed = "fixed"
+increment = "increment"
+decrement = "decrement"
+randomize = "randomize"
+
 class _ComfyType(ABC):
 Type = Any
 io_type: str = None

@@ -263,7 +269,7 @@ class Int(ComfyTypeIO):
 class Input(WidgetInput):
 '''Integer input.'''
 def __init__(self, id: str, display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None,
-default: int=None, min: int=None, max: int=None, step: int=None, control_after_generate: bool=None,
+default: int=None, min: int=None, max: int=None, step: int=None, control_after_generate: bool | ControlAfterGenerate=None,
 display_mode: NumberDisplay=None, socketless: bool=None, force_input: bool=None, extra_dict=None, raw_link: bool=None, advanced: bool=None):
 super().__init__(id, display_name, optional, tooltip, lazy, default, socketless, None, force_input, extra_dict, raw_link, advanced)
 self.min = min

@@ -345,7 +351,7 @@ class Combo(ComfyTypeIO):
 tooltip: str=None,
 lazy: bool=None,
 default: str | int | Enum = None,
-control_after_generate: bool=None,
+control_after_generate: bool | ControlAfterGenerate=None,
 upload: UploadType=None,
 image_folder: FolderType=None,
 remote: RemoteOptions=None,

@@ -389,7 +395,7 @@ class MultiCombo(ComfyTypeI):
 Type = list[str]
 class Input(Combo.Input):
 def __init__(self, id: str, options: list[str], display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None,
-default: list[str]=None, placeholder: str=None, chip: bool=None, control_after_generate: bool=None,
+default: list[str]=None, placeholder: str=None, chip: bool=None, control_after_generate: bool | ControlAfterGenerate=None,
 socketless: bool=None, extra_dict=None, raw_link: bool=None, advanced: bool=None):
 super().__init__(id, options, display_name, optional, tooltip, lazy, default, control_after_generate, socketless=socketless, extra_dict=extra_dict, raw_link=raw_link, advanced=advanced)
 self.multiselect = True

@@ -1300,7 +1306,6 @@ class NodeInfoV1:
 name: str=None
 display_name: str=None
 description: str=None
-short_description: str=None
 python_module: Any=None
 category: str=None
 output_node: bool=None

@@ -1391,8 +1396,6 @@ class Schema:
 hidden: list[Hidden] = field(default_factory=list)
 description: str=""
 """Node description, shown as a tooltip when hovering over the node."""
-short_description: str=""
-"""Short node description, shown in the node list/search."""
 search_aliases: list[str] = field(default_factory=list)
 """Alternative names for search. Useful for synonyms, abbreviations, or old names after renaming."""
 is_input_list: bool = False

@@ -1531,7 +1534,6 @@ class Schema:
 display_name=self.display_name,
 category=self.category,
 description=self.description,
-short_description=self.short_description,
 output_node=self.is_output_node,
 deprecated=self.is_deprecated,
 experimental=self.is_experimental,

@@ -1775,14 +1777,6 @@ class _ComfyNodeBaseInternal(_ComfyNodeInternal):
 cls.GET_SCHEMA()
 return cls._DESCRIPTION

-_SHORT_DESCRIPTION = None
-@final
-@classproperty
-def SHORT_DESCRIPTION(cls): # noqa
-if cls._SHORT_DESCRIPTION is None:
-cls.GET_SCHEMA()
-return cls._SHORT_DESCRIPTION
-
 _CATEGORY = None
 @final
 @classproperty

@@ -1911,8 +1905,6 @@ class _ComfyNodeBaseInternal(_ComfyNodeInternal):
 schema.validate()
 if cls._DESCRIPTION is None:
 cls._DESCRIPTION = schema.description
-if cls._SHORT_DESCRIPTION is None:
-cls._SHORT_DESCRIPTION = schema.short_description
 if cls._CATEGORY is None:
 cls._CATEGORY = schema.category
 if cls._EXPERIMENTAL is None:

@@ -2111,6 +2103,7 @@ __all__ = [
 "UploadType",
 "RemoteOptions",
 "NumberDisplay",
+"ControlAfterGenerate",

 "comfytype",
 "Custom",
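The widget signatures above now accept either a bool or the new ControlAfterGenerate enum. A hedged illustration of how a node input might use it; the Int.Input signature and the enum come straight from the diff, while the "seed" id, range, and the node it would belong to are placeholder assumptions:

# Illustrative only: a seed widget asking the frontend to randomize after each run.
seed_input = IO.Int.Input(
    "seed",
    default=0,
    min=0,
    max=0xffffffffffffffff,
    control_after_generate=ControlAfterGenerate.randomize,
)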
@@ -198,11 +198,6 @@ dict_recraft_substyles_v3 = {
 }


-class RecraftModel(str, Enum):
-recraftv3 = 'recraftv3'
-recraftv2 = 'recraftv2'
-
-
 class RecraftImageSize(str, Enum):
 res_1024x1024 = '1024x1024'
 res_1365x1024 = '1365x1024'

@@ -221,6 +216,41 @@ class RecraftImageSize(str, Enum):
 res_1707x1024 = '1707x1024'


+RECRAFT_V4_SIZES = [
+"1024x1024",
+"1536x768",
+"768x1536",
+"1280x832",
+"832x1280",
+"1216x896",
+"896x1216",
+"1152x896",
+"896x1152",
+"832x1344",
+"1280x896",
+"896x1280",
+"1344x768",
+"768x1344",
+]
+
+RECRAFT_V4_PRO_SIZES = [
+"2048x2048",
+"3072x1536",
+"1536x3072",
+"2560x1664",
+"1664x2560",
+"2432x1792",
+"1792x2432",
+"2304x1792",
+"1792x2304",
+"1664x2688",
+"1434x1024",
+"1024x1434",
+"2560x1792",
+"1792x2560",
+]
+
+
 class RecraftColorObject(BaseModel):
 rgb: list[int] = Field(..., description='An array of 3 integer values in range of 0...255 defining RGB Color Model')

@@ -234,17 +264,16 @@ class RecraftControlsObject(BaseModel):

 class RecraftImageGenerationRequest(BaseModel):
 prompt: str = Field(..., description='The text prompt describing the image to generate')
-size: RecraftImageSize | None = Field(None, description='The size of the generated image (e.g., "1024x1024")')
+size: str | None = Field(None, description='The size of the generated image (e.g., "1024x1024")')
 n: int = Field(..., description='The number of images to generate')
 negative_prompt: str | None = Field(None, description='A text description of undesired elements on an image')
-model: RecraftModel | None = Field(RecraftModel.recraftv3, description='The model to use for generation (e.g., "recraftv3")')
+model: str = Field(...)
 style: str | None = Field(None, description='The style to apply to the generated image (e.g., "digital_illustration")')
 substyle: str | None = Field(None, description='The substyle to apply to the generated image, depending on the style input')
 controls: RecraftControlsObject | None = Field(None, description='A set of custom parameters to tweak generation process')
 style_id: str | None = Field(None, description='Use a previously uploaded style as a reference; UUID')
 strength: float | None = Field(None, description='Defines the difference with the original image, should lie in [0, 1], where 0 means almost identical, and 1 means miserable similarity')
 random_seed: int | None = Field(None, description="Seed for video generation")
 # text_layout


 class RecraftReturnedObject(BaseModel):
@@ -44,7 +44,6 @@ class FluxProUltraImageNode(IO.ComfyNode):
 display_name="Flux 1.1 [pro] Ultra Image",
 category="api node/image/BFL",
 description="Generates images using Flux Pro 1.1 Ultra via api based on prompt and resolution.",
-short_description="Generate images with Flux Pro 1.1 Ultra API.",
 inputs=[
 IO.String.Input(
 "prompt",

@@ -155,17 +154,13 @@ class FluxProUltraImageNode(IO.ComfyNode):

 class FluxKontextProImageNode(IO.ComfyNode):

-DESCRIPTION = "Edits images using Flux.1 Kontext [pro] via api based on prompt and aspect ratio."
-SHORT_DESCRIPTION = "Edit images with Flux.1 Kontext [pro] API."
-
 @classmethod
 def define_schema(cls) -> IO.Schema:
 return IO.Schema(
 node_id=cls.NODE_ID,
 display_name=cls.DISPLAY_NAME,
 category="api node/image/BFL",
-description=cls.DESCRIPTION,
-short_description=cls.SHORT_DESCRIPTION,
+description="Edits images using Flux.1 Kontext [pro] via api based on prompt and aspect ratio.",
 inputs=[
 IO.String.Input(
 "prompt",

@@ -273,7 +268,6 @@ class FluxKontextProImageNode(IO.ComfyNode):
 class FluxKontextMaxImageNode(FluxKontextProImageNode):

 DESCRIPTION = "Edits images using Flux.1 Kontext [max] via api based on prompt and aspect ratio."
-SHORT_DESCRIPTION = "Edit images with Flux.1 Kontext [max] API."
 BFL_PATH = "/proxy/bfl/flux-kontext-max/generate"
 NODE_ID = "FluxKontextMaxImageNode"
 DISPLAY_NAME = "Flux.1 Kontext [max] Image"

@@ -288,7 +282,6 @@ class FluxProExpandNode(IO.ComfyNode):
 display_name="Flux.1 Expand Image",
 category="api node/image/BFL",
 description="Outpaints image based on prompt.",
-short_description=None,
 inputs=[
 IO.Image.Input("image"),
 IO.String.Input(

@@ -425,7 +418,6 @@ class FluxProFillNode(IO.ComfyNode):
 display_name="Flux.1 Fill Image",
 category="api node/image/BFL",
 description="Inpaints image based on mask and prompt.",
-short_description=None,
 inputs=[
 IO.Image.Input("image"),
 IO.Mask.Input("mask"),

@@ -551,7 +543,6 @@ class Flux2ProImageNode(IO.ComfyNode):
 display_name=cls.DISPLAY_NAME,
 category="api node/image/BFL",
 description="Generates images synchronously based on prompt and resolution.",
-short_description=None,
 inputs=[
 IO.String.Input(
 "prompt",
@@ -33,7 +33,6 @@ class BriaImageEditNode(IO.ComfyNode):
 display_name="Bria FIBO Image Edit",
 category="api node/image/Bria",
 description="Edit images using Bria latest model",
-short_description=None,
 inputs=[
 IO.Combo.Input("model", options=["FIBO"]),
 IO.Image.Input("image"),
@@ -60,7 +60,6 @@ class ByteDanceImageNode(IO.ComfyNode):
 display_name="ByteDance Image",
 category="api node/image/ByteDance",
 description="Generate images using ByteDance models via api based on prompt",
-short_description=None,
 inputs=[
 IO.Combo.Input("model", options=["seedream-3-0-t2i-250415"]),
 IO.String.Input(

@@ -183,7 +182,6 @@ class ByteDanceSeedreamNode(IO.ComfyNode):
 display_name="ByteDance Seedream 4.5",
 category="api node/image/ByteDance",
 description="Unified text-to-image generation and precise single-sentence editing at up to 4K resolution.",
-short_description="Text-to-image generation and editing up to 4K.",
 inputs=[
 IO.Combo.Input(
 "model",

@@ -382,7 +380,6 @@ class ByteDanceTextToVideoNode(IO.ComfyNode):
 display_name="ByteDance Text to Video",
 category="api node/video/ByteDance",
 description="Generate video using ByteDance models via api based on prompt",
-short_description=None,
 inputs=[
 IO.Combo.Input(
 "model",

@@ -508,7 +505,6 @@ class ByteDanceImageToVideoNode(IO.ComfyNode):
 display_name="ByteDance Image to Video",
 category="api node/video/ByteDance",
 description="Generate video using ByteDance models via api based on image and prompt",
-short_description="Generate video from image and prompt via ByteDance API.",
 inputs=[
 IO.Combo.Input(
 "model",

@@ -643,7 +639,6 @@ class ByteDanceFirstLastFrameNode(IO.ComfyNode):
 display_name="ByteDance First-Last-Frame to Video",
 category="api node/video/ByteDance",
 description="Generate video using prompt and first and last frames.",
-short_description=None,
 inputs=[
 IO.Combo.Input(
 "model",

@@ -789,7 +784,6 @@ class ByteDanceImageReferenceNode(IO.ComfyNode):
 display_name="ByteDance Reference Images to Video",
 category="api node/video/ByteDance",
 description="Generate video using prompt and reference images.",
-short_description=None,
 inputs=[
 IO.Combo.Input(
 "model",
@@ -6,6 +6,7 @@ See: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/infer
 import base64
 import os
 from enum import Enum
+from fnmatch import fnmatch
 from io import BytesIO
 from typing import Literal

@@ -119,6 +120,13 @@ async def create_image_parts(
 return image_parts


+def _mime_matches(mime: GeminiMimeType | None, pattern: str) -> bool:
+"""Check if a MIME type matches a pattern. Supports fnmatch globs (e.g. 'image/*')."""
+if mime is None:
+return False
+return fnmatch(mime.value, pattern)
+
+
 def get_parts_by_type(response: GeminiGenerateContentResponse, part_type: Literal["text"] | str) -> list[GeminiPart]:
 """
 Filter response parts by their type.

@@ -151,9 +159,9 @@ def get_parts_by_type(response: GeminiGenerateContentResponse, part_type: Litera
 for part in candidate.content.parts:
 if part_type == "text" and part.text:
 parts.append(part)
-elif part.inlineData and part.inlineData.mimeType == part_type:
+elif part.inlineData and _mime_matches(part.inlineData.mimeType, part_type):
 parts.append(part)
-elif part.fileData and part.fileData.mimeType == part_type:
+elif part.fileData and _mime_matches(part.fileData.mimeType, part_type):
 parts.append(part)

 if not parts and blocked_reasons:

@@ -178,7 +186,7 @@ def get_text_from_response(response: GeminiGenerateContentResponse) -> str:

 async def get_image_from_response(response: GeminiGenerateContentResponse) -> Input.Image:
 image_tensors: list[Input.Image] = []
-parts = get_parts_by_type(response, "image/png")
+parts = get_parts_by_type(response, "image/*")
 for part in parts:
 if part.inlineData:
 image_data = base64.b64decode(part.inlineData.data)

@@ -254,7 +262,6 @@ class GeminiNode(IO.ComfyNode):
 description="Generate text responses with Google's Gemini AI model. "
 "You can provide multiple types of inputs (text, images, audio, video) "
 "as context for generating more relevant and meaningful responses.",
-short_description="Generate text responses with Google's Gemini AI.",
 inputs=[
 IO.String.Input(
 "prompt",

@@ -481,7 +488,6 @@ class GeminiInputFiles(IO.ComfyNode):
 "The files will be read by the Gemini model when generating a response. "
 "The contents of the text file count toward the token limit. "
 "🛈 TIP: Can be chained together with other Gemini Input File nodes.",
-short_description="Load and prepare input files for Gemini LLM nodes.",
 inputs=[
 IO.Combo.Input(
 "file",

@@ -536,7 +542,6 @@ class GeminiImage(IO.ComfyNode):
 display_name="Nano Banana (Google Gemini Image)",
 category="api node/image/Gemini",
 description="Edit images synchronously via Google API.",
-short_description=None,
 inputs=[
 IO.String.Input(
 "prompt",

@@ -668,7 +673,6 @@ class GeminiImage2(IO.ComfyNode):
 display_name="Nano Banana Pro (Google Gemini Image)",
 category="api node/image/Gemini",
 description="Generate or edit images synchronously via Google Vertex API.",
-short_description=None,
 inputs=[
 IO.String.Input(
 "prompt",
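The new _mime_matches helper leans on the stdlib fnmatch module, so the image filter above ("image/*") now tolerates whichever image MIME type the API returns instead of only "image/png". A standalone illustration of the glob behaviour; the MIME strings here are examples, not values taken from a real Gemini response:

from fnmatch import fnmatch

for mime in ("image/png", "image/jpeg", "video/mp4"):
    print(mime, fnmatch(mime, "image/*"))
# image/png True
# image/jpeg True
# video/mp4 False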
@@ -36,7 +36,6 @@ class GrokImageNode(IO.ComfyNode):
 display_name="Grok Image",
 category="api node/image/Grok",
 description="Generate images using Grok based on a text prompt",
-short_description=None,
 inputs=[
 IO.Combo.Input("model", options=["grok-imagine-image-beta"]),
 IO.String.Input(

@@ -138,7 +137,6 @@ class GrokImageEditNode(IO.ComfyNode):
 display_name="Grok Image Edit",
 category="api node/image/Grok",
 description="Modify an existing image based on a text prompt",
-short_description=None,
 inputs=[
 IO.Combo.Input("model", options=["grok-imagine-image-beta"]),
 IO.Image.Input("image"),

@@ -228,7 +226,6 @@ class GrokVideoNode(IO.ComfyNode):
 display_name="Grok Video",
 category="api node/video/Grok",
 description="Generate video from a prompt or an image",
-short_description=None,
 inputs=[
 IO.Combo.Input("model", options=["grok-imagine-video-beta"]),
 IO.String.Input(

@@ -337,7 +334,6 @@ class GrokVideoEditNode(IO.ComfyNode):
 display_name="Grok Video Edit",
 category="api node/video/Grok",
 description="Edit an existing video based on a text prompt.",
-short_description=None,
 inputs=[
 IO.Combo.Input("model", options=["grok-imagine-video-beta"]),
 IO.String.Input(
@@ -74,7 +74,6 @@ class HitPawGeneralImageEnhance(IO.ComfyNode):
 category="api node/image/HitPaw",
 description="Upscale low-resolution images to super-resolution, eliminate artifacts and noise. "
 f"Maximum output: {MAX_MP_GENERATIVE} megapixels.",
-short_description="Upscale images to super-resolution, removing artifacts and noise.",
 inputs=[
 IO.Combo.Input("model", options=["generative_portrait", "generative"]),
 IO.Image.Input("image"),

@@ -206,7 +205,6 @@ class HitPawVideoEnhance(IO.ComfyNode):
 category="api node/video/HitPaw",
 description="Upscale low-resolution videos to high resolution, eliminate artifacts and noise. "
 "Prices shown are per second of video.",
-short_description="Upscale videos to high resolution, removing artifacts and noise.",
 inputs=[
 IO.DynamicCombo.Input("model", options=model_options),
 IO.Video.Input("video"),
@@ -54,8 +54,6 @@ class TencentTextToModelNode(IO.ComfyNode):
 node_id="TencentTextToModelNode",
 display_name="Hunyuan3D: Text to Model",
 category="api node/3d/Tencent",
-description="Generate 3D models from text prompts using Hunyuan3D Pro with configurable face count and geometry options.",
-short_description="Generate 3D models from text using Hunyuan3D Pro.",
 inputs=[
 IO.Combo.Input(
 "model",

@@ -170,8 +168,6 @@ class TencentImageToModelNode(IO.ComfyNode):
 node_id="TencentImageToModelNode",
 display_name="Hunyuan3D: Image(s) to Model",
 category="api node/3d/Tencent",
-description="Generate 3D models from images using Hunyuan3D Pro with optional multi-view inputs and configurable geometry.",
-short_description="Generate 3D models from images using Hunyuan3D Pro.",
 inputs=[
 IO.Combo.Input(
 "model",
@@ -236,7 +236,6 @@ class IdeogramV1(IO.ComfyNode):
 display_name="Ideogram V1",
 category="api node/image/Ideogram",
 description="Generates images using the Ideogram V1 model.",
-short_description=None,
 inputs=[
 IO.String.Input(
 "prompt",

@@ -362,7 +361,6 @@ class IdeogramV2(IO.ComfyNode):
 display_name="Ideogram V2",
 category="api node/image/Ideogram",
 description="Generates images using the Ideogram V2 model.",
-short_description=None,
 inputs=[
 IO.String.Input(
 "prompt",

@@ -528,7 +526,6 @@ class IdeogramV3(IO.ComfyNode):
 category="api node/image/Ideogram",
 description="Generates images using the Ideogram V3 model. "
 "Supports both regular image generation from text prompts and image editing with mask.",
-short_description="Generate and edit images with Ideogram V3.",
 inputs=[
 IO.String.Input(
 "prompt",
@@ -642,7 +642,6 @@ class KlingCameraControls(IO.ComfyNode):
 display_name="Kling Camera Controls",
 category="api node/video/Kling",
 description="Allows specifying configuration options for Kling Camera Controls and motion control effects.",
-short_description="Configure Kling camera controls and motion effects.",
 inputs=[
 IO.Combo.Input("camera_control_type", options=KlingCameraControlType),
 IO.Float.Input(

@@ -763,7 +762,6 @@ class KlingTextToVideoNode(IO.ComfyNode):
 display_name="Kling Text to Video",
 category="api node/video/Kling",
 description="Kling Text to Video Node",
-short_description=None,
 inputs=[
 IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"),
 IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"),

@@ -851,7 +849,6 @@ class OmniProTextToVideoNode(IO.ComfyNode):
 display_name="Kling 3.0 Omni Text to Video",
 category="api node/video/Kling",
 description="Use text prompts to generate videos with the latest Kling model.",
-short_description=None,
 inputs=[
 IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]),
 IO.String.Input(

@@ -992,7 +989,6 @@ class OmniProFirstLastFrameNode(IO.ComfyNode):
 display_name="Kling 3.0 Omni First-Last-Frame to Video",
 category="api node/video/Kling",
 description="Use a start frame, an optional end frame, or reference images with the latest Kling model.",
-short_description="Generate video from start/end frames or reference images.",
 inputs=[
 IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]),
 IO.String.Input(

@@ -1191,7 +1187,6 @@ class OmniProImageToVideoNode(IO.ComfyNode):
 display_name="Kling 3.0 Omni Image to Video",
 category="api node/video/Kling",
 description="Use up to 7 reference images to generate a video with the latest Kling model.",
-short_description="Generate video from up to 7 reference images.",
 inputs=[
 IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]),
 IO.String.Input(

@@ -1352,7 +1347,6 @@ class OmniProVideoToVideoNode(IO.ComfyNode):
 display_name="Kling 3.0 Omni Video to Video",
 category="api node/video/Kling",
 description="Use a video and up to 4 reference images to generate a video with the latest Kling model.",
-short_description="Generate video from a video and reference images.",
 inputs=[
 IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]),
 IO.String.Input(

@@ -1464,7 +1458,6 @@ class OmniProEditVideoNode(IO.ComfyNode):
 display_name="Kling 3.0 Omni Edit Video",
 category="api node/video/Kling",
 description="Edit an existing video with the latest model from Kling.",
-short_description=None,
 inputs=[
 IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]),
 IO.String.Input(

@@ -1572,7 +1565,6 @@ class OmniProImageNode(IO.ComfyNode):
 display_name="Kling 3.0 Omni Image",
 category="api node/image/Kling",
 description="Create or edit images with the latest model from Kling.",
-short_description=None,
 inputs=[
 IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-image-o1"]),
 IO.String.Input(

@@ -1701,7 +1693,6 @@ class KlingCameraControlT2VNode(IO.ComfyNode):
 display_name="Kling Text to Video (Camera Control)",
 category="api node/video/Kling",
 description="Transform text into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original text.",
-short_description="Generate videos from text with camera movement controls.",
 inputs=[
 IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"),
 IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"),

@@ -1763,8 +1754,6 @@ class KlingImage2VideoNode(IO.ComfyNode):
 node_id="KlingImage2VideoNode",
 display_name="Kling Image(First Frame) to Video",
 category="api node/video/Kling",
-description="Generate a video from a first-frame image with configurable model, mode, aspect ratio, and duration settings.",
-short_description="Generate video from a first-frame reference image.",
 inputs=[
 IO.Image.Input("start_frame", tooltip="The reference image used to generate the video."),
 IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"),

@@ -1865,7 +1854,6 @@ class KlingCameraControlI2VNode(IO.ComfyNode):
 display_name="Kling Image to Video (Camera Control)",
 category="api node/video/Kling",
 description="Transform still images into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original image.",
-short_description="Generate videos from images with camera movement controls.",
 inputs=[
 IO.Image.Input(
 "start_frame",

@@ -1937,7 +1925,6 @@ class KlingStartEndFrameNode(IO.ComfyNode):
 display_name="Kling Start-End Frame to Video",
 category="api node/video/Kling",
 description="Generate a video sequence that transitions between your provided start and end images. The node creates all frames in between, producing a smooth transformation from the first frame to the last.",
-short_description="Generate video transitioning between start and end frame images.",
 inputs=[
 IO.Image.Input(
 "start_frame",

@@ -2032,7 +2019,6 @@ class KlingVideoExtendNode(IO.ComfyNode):
 display_name="Kling Video Extend",
 category="api node/video/Kling",
 description="Kling Video Extend Node. Extend videos made by other Kling nodes. The video_id is created by using other Kling Nodes.",
-short_description="Extend videos generated by other Kling nodes.",
 inputs=[
 IO.String.Input(
 "prompt",

@@ -2114,7 +2100,6 @@ class KlingDualCharacterVideoEffectNode(IO.ComfyNode):
 display_name="Kling Dual Character Video Effects",
 category="api node/video/Kling",
 description="Achieve different special effects when generating a video based on the effect_scene. First image will be positioned on left side, second on right side of the composite.",
-short_description="Apply dual-character video effects from two images.",
 inputs=[
 IO.Image.Input("image_left", tooltip="Left side image"),
 IO.Image.Input("image_right", tooltip="Right side image"),

@@ -2205,7 +2190,6 @@ class KlingSingleImageVideoEffectNode(IO.ComfyNode):
 display_name="Kling Video Effects",
 category="api node/video/Kling",
 description="Achieve different special effects when generating a video based on the effect_scene.",
-short_description="Apply special video effects to a single image.",
 inputs=[
 IO.Image.Input(
 "image",

@@ -2279,7 +2263,6 @@ class KlingLipSyncAudioToVideoNode(IO.ComfyNode):
 display_name="Kling Lip Sync Video with Audio",
 category="api node/video/Kling",
 description="Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file. When using, ensure that the audio contains clearly distinguishable vocals and that the video contains a distinct face. The audio file should not be larger than 5MB. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.",
-short_description="Sync video mouth movements to audio content.",
 inputs=[
 IO.Video.Input("video"),
 IO.Audio.Input("audio"),

@@ -2331,7 +2314,6 @@ class KlingLipSyncTextToVideoNode(IO.ComfyNode):
 display_name="Kling Lip Sync Video with Text",
 category="api node/video/Kling",
 description="Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.",
-short_description="Sync video mouth movements to a text prompt.",
 inputs=[
 IO.Video.Input("video"),
 IO.String.Input(

@@ -2399,7 +2381,6 @@ class KlingVirtualTryOnNode(IO.ComfyNode):
 display_name="Kling Virtual Try On",
 category="api node/image/Kling",
 description="Kling Virtual Try On Node. Input a human image and a cloth image to try on the cloth on the human. You can merge multiple clothing item pictures into one image with a white background.",
-short_description="Virtually try clothing onto a human image.",
 inputs=[
 IO.Image.Input("human_image"),
 IO.Image.Input("cloth_image"),

@@ -2467,7 +2448,6 @@ class KlingImageGenerationNode(IO.ComfyNode):
 display_name="Kling 3.0 Image",
 category="api node/image/Kling",
 description="Kling Image Generation Node. Generate an image from a text prompt with an optional reference image.",
-short_description="Generate images from text with optional reference image.",
 inputs=[
 IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"),
 IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"),

@@ -2601,8 +2581,6 @@ class TextToVideoWithAudio(IO.ComfyNode):
 node_id="KlingTextToVideoWithAudio",
 display_name="Kling 2.6 Text to Video with Audio",
 category="api node/video/Kling",
-description="Generate a video with synchronized audio from a text prompt using the Kling v2-6 model.",
-short_description="Generate video with audio from text using Kling v2-6.",
 inputs=[
 IO.Combo.Input("model_name", options=["kling-v2-6"]),
 IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt."),

@@ -2671,8 +2649,6 @@ class ImageToVideoWithAudio(IO.ComfyNode):
 node_id="KlingImageToVideoWithAudio",
 display_name="Kling 2.6 Image(First Frame) to Video with Audio",
 category="api node/video/Kling",
-description="Generate a video with synchronized audio from a first-frame image and text prompt using the Kling v2-6 model.",
-short_description="Generate video with audio from an image using Kling v2-6.",
 inputs=[
 IO.Combo.Input("model_name", options=["kling-v2-6"]),
 IO.Image.Input("start_frame"),

@@ -2743,8 +2719,6 @@ class MotionControl(IO.ComfyNode):
 node_id="KlingMotionControl",
 display_name="Kling Motion Control",
 category="api node/video/Kling",
-description="Drive character movement and expression in video using a reference image and motion reference video.",
-short_description="Control video character motion using reference image and video.",
 inputs=[
 IO.String.Input("prompt", multiline=True),
 IO.Image.Input("reference_image"),

@@ -2841,7 +2815,6 @@ class KlingVideoNode(IO.ComfyNode):
 category="api node/video/Kling",
 description="Generate videos with Kling V3. "
 "Supports text-to-video and image-to-video with optional storyboard multi-prompt and audio generation.",
-short_description="Generate videos with Kling V3 from text or images.",
 inputs=[
 IO.DynamicCombo.Input(
 "multi_shot",
@@ -52,7 +52,6 @@ class TextToVideoNode(IO.ComfyNode):
 display_name="LTXV Text To Video",
 category="api node/video/LTXV",
 description="Professional-quality videos with customizable duration and resolution.",
-short_description=None,
 inputs=[
 IO.Combo.Input("model", options=list(MODELS_MAP.keys())),
 IO.String.Input(

@@ -129,7 +128,6 @@ class ImageToVideoNode(IO.ComfyNode):
 display_name="LTXV Image To Video",
 category="api node/video/LTXV",
 description="Professional-quality videos with customizable duration and resolution based on start image.",
-short_description=None,
 inputs=[
 IO.Image.Input("image", tooltip="First frame to be used for the video."),
 IO.Combo.Input("model", options=list(MODELS_MAP.keys())),
@@ -46,7 +46,6 @@ class LumaReferenceNode(IO.ComfyNode):
 display_name="Luma Reference",
 category="api node/image/Luma",
 description="Holds an image and weight for use with Luma Generate Image node.",
-short_description="Image and weight input for Luma generation.",
 inputs=[
 IO.Image.Input(
 "image",

@@ -86,7 +85,6 @@ class LumaConceptsNode(IO.ComfyNode):
 display_name="Luma Concepts",
 category="api node/video/Luma",
 description="Camera Concepts for use with Luma Text to Video and Luma Image to Video nodes.",
-short_description="Camera concepts for Luma video generation nodes.",
 inputs=[
 IO.Combo.Input(
 "concept1",

@@ -136,7 +134,6 @@ class LumaImageGenerationNode(IO.ComfyNode):
 display_name="Luma Text to Image",
 category="api node/image/Luma",
 description="Generates images synchronously based on prompt and aspect ratio.",
-short_description=None,
 inputs=[
 IO.String.Input(
 "prompt",

@@ -281,7 +278,6 @@ class LumaImageModifyNode(IO.ComfyNode):
 display_name="Luma Image to Image",
 category="api node/image/Luma",
 description="Modifies images synchronously based on prompt and aspect ratio.",
-short_description=None,
 inputs=[
 IO.Image.Input(
 "image",

@@ -375,7 +371,6 @@ class LumaTextToVideoGenerationNode(IO.ComfyNode):
 display_name="Luma Text to Video",
 category="api node/video/Luma",
 description="Generates videos synchronously based on prompt and output_size.",
-short_description=None,
 inputs=[
 IO.String.Input(
 "prompt",

@@ -477,7 +472,6 @@ class LumaImageToVideoGenerationNode(IO.ComfyNode):
 display_name="Luma Image to Video",
 category="api node/video/Luma",
 description="Generates videos synchronously based on prompt, input images, and output_size.",
-short_description=None,
 inputs=[
 IO.String.Input(
 "prompt",
@@ -242,7 +242,6 @@ class MagnificImageUpscalerPreciseV2Node(IO.ComfyNode):
 category="api node/image/Magnific",
 description="High-fidelity upscaling with fine control over sharpness, grain, and detail. "
 "Maximum output: 10060×10060 pixels.",
-short_description="High-fidelity upscaling with sharpness, grain, and detail control.",
 inputs=[
 IO.Image.Input("image"),
 IO.Combo.Input("scale_factor", options=["2x", "4x", "8x", "16x"]),

@@ -402,7 +401,6 @@ class MagnificImageStyleTransferNode(IO.ComfyNode):
 display_name="Magnific Image Style Transfer",
 category="api node/image/Magnific",
 description="Transfer the style from a reference image to your input image.",
-short_description=None,
 inputs=[
 IO.Image.Input("image", tooltip="The image to apply style transfer to."),
 IO.Image.Input("reference_image", tooltip="The reference image to extract style from."),

@@ -551,7 +549,6 @@ class MagnificImageRelightNode(IO.ComfyNode):
 display_name="Magnific Image Relight",
 category="api node/image/Magnific",
 description="Relight an image with lighting adjustments and optional reference-based light transfer.",
-short_description=None,
 inputs=[
 IO.Image.Input("image", tooltip="The image to relight."),
 IO.String.Input(

@@ -790,7 +787,6 @@ class MagnificImageSkinEnhancerNode(IO.ComfyNode):
 display_name="Magnific Image Skin Enhancer",
 category="api node/image/Magnific",
 description="Skin enhancement for portraits with multiple processing modes.",
-short_description=None,
 inputs=[
 IO.Image.Input("image", tooltip="The portrait image to enhance."),
 IO.Int.Input(
@@ -34,8 +34,6 @@ class MeshyTextToModelNode(IO.ComfyNode):
 node_id="MeshyTextToModelNode",
 display_name="Meshy: Text to Model",
 category="api node/3d/Meshy",
-description="Generate a 3D model from a text prompt using the Meshy API.",
-short_description="Generate a 3D model from a text prompt.",
 inputs=[
 IO.Combo.Input("model", options=["latest"]),
 IO.String.Input("prompt", multiline=True, default=""),

@@ -148,7 +146,6 @@ class MeshyRefineNode(IO.ComfyNode):
 display_name="Meshy: Refine Draft Model",
 category="api node/3d/Meshy",
 description="Refine a previously created draft model.",
-short_description=None,
 inputs=[
 IO.Combo.Input("model", options=["latest"]),
 IO.Custom("MESHY_TASK_ID").Input("meshy_task_id"),

@@ -242,8 +239,6 @@ class MeshyImageToModelNode(IO.ComfyNode):
 node_id="MeshyImageToModelNode",
 display_name="Meshy: Image to Model",
 category="api node/3d/Meshy",
-description="Generate a 3D model from a single image using the Meshy API.",
-short_description="Generate a 3D model from an image.",
 inputs=[
 IO.Combo.Input("model", options=["latest"]),
 IO.Image.Input("image"),

@@ -408,7 +403,6 @@ class MeshyMultiImageToModelNode(IO.ComfyNode):
 node_id="MeshyMultiImageToModelNode",
 display_name="Meshy: Multi-Image to Model",
 category="api node/3d/Meshy",
-description="Generate a 3D model from multiple images using the Meshy API.",
 inputs=[
 IO.Combo.Input("model", options=["latest"]),
 IO.Autogrow.Input(

@@ -581,7 +575,6 @@ class MeshyRigModelNode(IO.ComfyNode):
 description="Provides a rigged character in standard formats. "
 "Auto-rigging is currently not suitable for untextured meshes, non-humanoid assets, "
 "or humanoid assets with unclear limb and body structure.",
-short_description="Rig a character model for animation.",
 inputs=[
 IO.Custom("MESHY_TASK_ID").Input("meshy_task_id"),
 IO.Float.Input(

@@ -661,7 +654,6 @@ class MeshyAnimateModelNode(IO.ComfyNode):
 display_name="Meshy: Animate Model",
 category="api node/3d/Meshy",
 description="Apply a specific animation action to a previously rigged character.",
-short_description=None,
 inputs=[
 IO.Custom("MESHY_RIGGED_TASK_ID").Input("rig_task_id"),
 IO.Int.Input(

@@ -727,7 +719,6 @@ class MeshyTextureNode(IO.ComfyNode):
 node_id="MeshyTextureNode",
 display_name="Meshy: Texture Model",
 category="api node/3d/Meshy",
-description="Apply textures to an existing 3D model using the Meshy API.",
 inputs=[
 IO.Combo.Input("model", options=["latest"]),
 IO.Custom("MESHY_TASK_ID").Input("meshy_task_id"),
@@ -103,7 +103,6 @@ class MinimaxTextToVideoNode(IO.ComfyNode):
 display_name="MiniMax Text to Video",
 category="api node/video/MiniMax",
 description="Generates videos synchronously based on a prompt, and optional parameters.",
-short_description=None,
 inputs=[
 IO.String.Input(
 "prompt_text",

@@ -166,7 +165,6 @@ class MinimaxImageToVideoNode(IO.ComfyNode):
 display_name="MiniMax Image to Video",
 category="api node/video/MiniMax",
 description="Generates videos synchronously based on an image and prompt, and optional parameters.",
-short_description="Generate videos from an image, prompt, and optional parameters.",
 inputs=[
 IO.Image.Input(
 "image",

@@ -234,7 +232,6 @@ class MinimaxSubjectToVideoNode(IO.ComfyNode):
 display_name="MiniMax Subject to Video",
 category="api node/video/MiniMax",
 description="Generates videos synchronously based on an image and prompt, and optional parameters.",
-short_description="Subject-driven video generation from image and prompt.",
 inputs=[
 IO.Image.Input(
 "subject",

@@ -299,7 +296,6 @@ class MinimaxHailuoVideoNode(IO.ComfyNode):
 display_name="MiniMax Hailuo Video",
 category="api node/video/MiniMax",
 description="Generates videos from prompt, with optional start frame using the new MiniMax Hailuo-02 model.",
-short_description="Generate videos with optional start frame using Hailuo-02.",
 inputs=[
 IO.String.Input(
 "prompt_text",
@@ -166,7 +166,6 @@ class MoonvalleyImg2VideoNode(IO.ComfyNode):
 display_name="Moonvalley Marey Image to Video",
 category="api node/video/Moonvalley Marey",
 description="Moonvalley Marey Image to Video Node",
-short_description=None,
 inputs=[
 IO.Image.Input(
 "image",

@@ -291,8 +290,7 @@ class MoonvalleyVideo2VideoNode(IO.ComfyNode):
 node_id="MoonvalleyVideo2VideoNode",
 display_name="Moonvalley Marey Video to Video",
 category="api node/video/Moonvalley Marey",
-description="Transform an input video into a new video using a text prompt and motion or pose control.",
-short_description="Transform video using text prompt with motion or pose control.",
+description="",
 inputs=[
 IO.String.Input(
 "prompt",

@@ -417,8 +415,7 @@ class MoonvalleyTxt2VideoNode(IO.ComfyNode):
 node_id="MoonvalleyTxt2VideoNode",
 display_name="Moonvalley Marey Text to Video",
 category="api node/video/Moonvalley Marey",
-description="Generate a video from a text prompt using the Moonvalley Marey model.",
-short_description="Generate video from a text prompt using Moonvalley Marey.",
+description="",
 inputs=[
 IO.String.Input(
 "prompt",
@@ -98,7 +98,6 @@ class OpenAIDalle2(IO.ComfyNode):
 display_name="OpenAI DALL·E 2",
 category="api node/image/OpenAI",
 description="Generates images synchronously via OpenAI's DALL·E 2 endpoint.",
-short_description=None,
 inputs=[
 IO.String.Input(
 "prompt",

@@ -249,7 +248,6 @@ class OpenAIDalle3(IO.ComfyNode):
 display_name="OpenAI DALL·E 3",
 category="api node/image/OpenAI",
 description="Generates images synchronously via OpenAI's DALL·E 3 endpoint.",
-short_description=None,
 inputs=[
 IO.String.Input(
 "prompt",

@@ -368,7 +366,6 @@ class OpenAIGPTImage1(IO.ComfyNode):
 display_name="OpenAI GPT Image 1.5",
 category="api node/image/OpenAI",
 description="Generates images synchronously via OpenAI's GPT Image endpoint.",
-short_description=None,
 inputs=[
 IO.String.Input(
 "prompt",

@@ -579,7 +576,6 @@ class OpenAIChatNode(IO.ComfyNode):
 display_name="OpenAI ChatGPT",
 category="api node/text/OpenAI",
 description="Generate text responses from an OpenAI model.",
-short_description=None,
 inputs=[
 IO.String.Input(
 "prompt",

@@ -807,7 +803,6 @@ class OpenAIInputFiles(IO.ComfyNode):
 display_name="OpenAI ChatGPT Input Files",
 category="api node/text/OpenAI",
 description="Loads and prepares input files (text, pdf, etc.) to include as inputs for the OpenAI Chat Node. The files will be read by the OpenAI model when generating a response. 🛈 TIP: Can be chained together with other OpenAI Input File nodes.",
-short_description="Load and prepare input files for OpenAI Chat.",
 inputs=[
 IO.Combo.Input(
 "file",

@@ -855,7 +850,6 @@ class OpenAIChatConfig(IO.ComfyNode):
 display_name="OpenAI ChatGPT Advanced Options",
 category="api node/text/OpenAI",
 description="Allows specifying advanced configuration options for the OpenAI Chat Nodes.",
-short_description=None,
 inputs=[
 IO.Combo.Input(
 "truncation",
@@ -54,8 +54,6 @@ class PixverseTemplateNode(IO.ComfyNode):
node_id="PixverseTemplateNode",
display_name="PixVerse Template",
category="api node/video/PixVerse",
description="Select a style template for PixVerse video generation.",
short_description=None,
inputs=[
IO.Combo.Input("template", options=list(pixverse_templates.keys())),
],
@@ -78,7 +76,6 @@ class PixverseTextToVideoNode(IO.ComfyNode):
display_name="PixVerse Text to Video",
category="api node/video/PixVerse",
description="Generates videos based on prompt and output_size.",
short_description=None,
inputs=[
IO.String.Input(
"prompt",
@@ -197,7 +194,6 @@ class PixverseImageToVideoNode(IO.ComfyNode):
display_name="PixVerse Image to Video",
category="api node/video/PixVerse",
description="Generates videos based on prompt and output_size.",
short_description=None,
inputs=[
IO.Image.Input("image"),
IO.String.Input(
@@ -316,7 +312,6 @@ class PixverseTransitionVideoNode(IO.ComfyNode):
display_name="PixVerse Transition Video",
category="api node/video/PixVerse",
description="Generates videos based on prompt and output_size.",
short_description=None,
inputs=[
IO.Image.Input("first_frame"),
IO.Image.Input("last_frame"),

@@ -1,5 +1,4 @@
from io import BytesIO
from typing import Optional, Union

import aiohttp
import torch
@@ -9,6 +8,8 @@ from typing_extensions import override
from comfy.utils import ProgressBar
from comfy_api.latest import IO, ComfyExtension
from comfy_api_nodes.apis.recraft import (
RECRAFT_V4_PRO_SIZES,
RECRAFT_V4_SIZES,
RecraftColor,
RecraftColorChain,
RecraftControls,
@@ -18,7 +19,6 @@ from comfy_api_nodes.apis.recraft import (
RecraftImageGenerationResponse,
RecraftImageSize,
RecraftIO,
RecraftModel,
RecraftStyle,
RecraftStyleV3,
get_v3_substyles,
@@ -39,7 +39,7 @@ async def handle_recraft_file_request(
cls: type[IO.ComfyNode],
image: torch.Tensor,
path: str,
mask: Optional[torch.Tensor] = None,
mask: torch.Tensor | None = None,
total_pixels: int = 4096 * 4096,
timeout: int = 1024,
request=None,
@@ -73,11 +73,11 @@ async def handle_recraft_file_request(
def recraft_multipart_parser(
data,
parent_key=None,
formatter: Optional[type[callable]] = None,
converted_to_check: Optional[list[list]] = None,
formatter: type[callable] | None = None,
converted_to_check: list[list] | None = None,
is_list: bool = False,
return_mode: str = "formdata", # "dict" | "formdata"
) -> Union[dict, aiohttp.FormData]:
) -> dict | aiohttp.FormData:
"""
Formats data such that multipart/form-data will work with aiohttp library when both files and data are present.
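The signature changes in the two hunks above swap typing.Optional/Union annotations for PEP 604 unions. A minimal sketch of the same pattern, with illustrative names not taken from this file (requires Python 3.10+ for the X | Y annotation syntax):

import aiohttp
import torch

# Before: Optional[torch.Tensor] and Union[dict, aiohttp.FormData]
# After (PEP 604): torch.Tensor | None and dict | aiohttp.FormData
def build_form(data: dict, mask: torch.Tensor | None = None) -> dict | aiohttp.FormData:
    # Return the plain dict when no file-like payload is attached, else pack into FormData.
    if mask is None:
        return data
    form = aiohttp.FormData()
    for key, value in data.items():
        form.add_field(key, str(value))
    return form
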
@@ -180,7 +180,6 @@ class RecraftColorRGBNode(IO.ComfyNode):
display_name="Recraft Color RGB",
category="api node/image/Recraft",
description="Create Recraft Color by choosing specific RGB values.",
short_description=None,
inputs=[
IO.Int.Input("r", default=0, min=0, max=255, tooltip="Red value of color."),
IO.Int.Input("g", default=0, min=0, max=255, tooltip="Green value of color."),
@@ -207,7 +206,6 @@ class RecraftControlsNode(IO.ComfyNode):
display_name="Recraft Controls",
category="api node/image/Recraft",
description="Create Recraft Controls for customizing Recraft generation.",
short_description=None,
inputs=[
IO.Custom(RecraftIO.COLOR).Input("colors", optional=True),
IO.Custom(RecraftIO.COLOR).Input("background_color", optional=True),
@@ -232,7 +230,6 @@ class RecraftStyleV3RealisticImageNode(IO.ComfyNode):
display_name="Recraft Style - Realistic Image",
category="api node/image/Recraft",
description="Select realistic_image style and optional substyle.",
short_description=None,
inputs=[
IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE)),
],
@@ -257,8 +254,7 @@ class RecraftStyleV3DigitalIllustrationNode(RecraftStyleV3RealisticImageNode):
node_id="RecraftStyleV3DigitalIllustration",
display_name="Recraft Style - Digital Illustration",
category="api node/image/Recraft",
description="Select digital_illustration style and optional substyle.",
short_description=None,
description="Select realistic_image style and optional substyle.",
inputs=[
IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE)),
],
@@ -275,10 +271,9 @@ class RecraftStyleV3VectorIllustrationNode(RecraftStyleV3RealisticImageNode):
def define_schema(cls):
return IO.Schema(
node_id="RecraftStyleV3VectorIllustrationNode",
display_name="Recraft Style - Vector Illustration",
display_name="Recraft Style - Realistic Image",
category="api node/image/Recraft",
description="Select vector_illustration style and optional substyle.",
short_description=None,
description="Select realistic_image style and optional substyle.",
inputs=[
IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE)),
],
@@ -297,8 +292,7 @@ class RecraftStyleV3LogoRasterNode(RecraftStyleV3RealisticImageNode):
node_id="RecraftStyleV3LogoRaster",
display_name="Recraft Style - Logo Raster",
category="api node/image/Recraft",
description="Select logo_raster style and optional substyle.",
short_description=None,
description="Select realistic_image style and optional substyle.",
inputs=[
IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE, include_none=False)),
],
@@ -315,8 +309,7 @@ class RecraftStyleInfiniteStyleLibrary(IO.ComfyNode):
node_id="RecraftStyleV3InfiniteStyleLibrary",
display_name="Recraft Style - Infinite Style Library",
category="api node/image/Recraft",
description="Select style based on preexisting UUID from Recraft's Infinite Style Library.",
short_description=None,
description="Choose style based on preexisting UUID from Recraft's Infinite Style Library.",
inputs=[
IO.String.Input("style_id", default="", tooltip="UUID of style from Infinite Style Library."),
],
@@ -342,7 +335,6 @@ class RecraftCreateStyleNode(IO.ComfyNode):
description="Create a custom style from reference images. "
"Upload 1-5 images to use as style references. "
"Total size of all images is limited to 5 MB.",
short_description="Create a custom style from 1-5 reference images.",
inputs=[
IO.Combo.Input(
"style",
@@ -410,7 +402,6 @@ class RecraftTextToImageNode(IO.ComfyNode):
display_name="Recraft Text to Image",
category="api node/image/Recraft",
description="Generates images synchronously based on prompt and resolution.",
short_description=None,
inputs=[
IO.String.Input("prompt", multiline=True, default="", tooltip="Prompt for the image generation."),
IO.Combo.Input(
@@ -494,7 +485,7 @@ class RecraftTextToImageNode(IO.ComfyNode):
data=RecraftImageGenerationRequest(
prompt=prompt,
negative_prompt=negative_prompt,
model=RecraftModel.recraftv3,
model="recraftv3",
size=size,
n=n,
style=recraft_style.style,
@@ -523,7 +514,6 @@ class RecraftImageToImageNode(IO.ComfyNode):
display_name="Recraft Image to Image",
category="api node/image/Recraft",
description="Modify image based on prompt and strength.",
short_description=None,
inputs=[
IO.Image.Input("image"),
IO.String.Input("prompt", multiline=True, default="", tooltip="Prompt for the image generation."),
@@ -608,7 +598,7 @@ class RecraftImageToImageNode(IO.ComfyNode):
request = RecraftImageGenerationRequest(
prompt=prompt,
negative_prompt=negative_prompt,
model=RecraftModel.recraftv3,
model="recraftv3",
n=n,
strength=round(strength, 2),
style=recraft_style.style,
@@ -642,7 +632,6 @@ class RecraftImageInpaintingNode(IO.ComfyNode):
display_name="Recraft Image Inpainting",
category="api node/image/Recraft",
description="Modify image based on prompt and mask.",
short_description=None,
inputs=[
IO.Image.Input("image"),
IO.Mask.Input("mask"),
@@ -709,7 +698,7 @@ class RecraftImageInpaintingNode(IO.ComfyNode):
request = RecraftImageGenerationRequest(
prompt=prompt,
negative_prompt=negative_prompt,
model=RecraftModel.recraftv3,
model="recraftv3",
n=n,
style=recraft_style.style,
substyle=recraft_style.substyle,
@@ -745,7 +734,6 @@ class RecraftTextToVectorNode(IO.ComfyNode):
display_name="Recraft Text to Vector",
category="api node/image/Recraft",
description="Generates SVG synchronously based on prompt and resolution.",
short_description=None,
inputs=[
IO.String.Input("prompt", default="", tooltip="Prompt for the image generation.", multiline=True),
IO.Combo.Input("substyle", options=get_v3_substyles(RecraftStyleV3.vector_illustration)),
@@ -822,7 +810,7 @@ class RecraftTextToVectorNode(IO.ComfyNode):
data=RecraftImageGenerationRequest(
prompt=prompt,
negative_prompt=negative_prompt,
model=RecraftModel.recraftv3,
model="recraftv3",
size=size,
n=n,
style=recraft_style.style,
@@ -846,7 +834,6 @@ class RecraftVectorizeImageNode(IO.ComfyNode):
display_name="Recraft Vectorize Image",
category="api node/image/Recraft",
description="Generates SVG synchronously from an input image.",
short_description=None,
inputs=[
IO.Image.Input("image"),
],
@@ -890,7 +877,6 @@ class RecraftReplaceBackgroundNode(IO.ComfyNode):
display_name="Recraft Replace Background",
category="api node/image/Recraft",
description="Replace background on image, based on provided prompt.",
short_description=None,
inputs=[
IO.Image.Input("image"),
IO.String.Input("prompt", tooltip="Prompt for the image generation.", default="", multiline=True),
@@ -947,7 +933,7 @@ class RecraftReplaceBackgroundNode(IO.ComfyNode):
request = RecraftImageGenerationRequest(
prompt=prompt,
negative_prompt=negative_prompt,
model=RecraftModel.recraftv3,
model="recraftv3",
n=n,
style=recraft_style.style,
substyle=recraft_style.substyle,
@@ -978,7 +964,6 @@ class RecraftRemoveBackgroundNode(IO.ComfyNode):
display_name="Recraft Remove Background",
category="api node/image/Recraft",
description="Remove background from image, and return processed image and mask.",
short_description=None,
inputs=[
IO.Image.Input("image"),
],
@@ -1027,9 +1012,8 @@ class RecraftCrispUpscaleNode(IO.ComfyNode):
display_name="Recraft Crisp Upscale Image",
category="api node/image/Recraft",
description="Upscale image synchronously.\n"
"Enhances a given raster image using 'crisp upscale' tool, "
"Enhances a given raster image using ‘crisp upscale’ tool, "
"increasing image resolution, making the image sharper and cleaner.",
short_description="Crisp upscale to sharpen and increase image resolution.",
inputs=[
IO.Image.Input("image"),
],
@@ -1074,9 +1058,8 @@ class RecraftCreativeUpscaleNode(RecraftCrispUpscaleNode):
display_name="Recraft Creative Upscale Image",
category="api node/image/Recraft",
description="Upscale image synchronously.\n"
"Enhances a given raster image using 'creative upscale' tool, "
"Enhances a given raster image using ‘creative upscale’ tool, "
"boosting resolution with a focus on refining small details and faces.",
short_description="Creative upscale focusing on small details and faces.",
inputs=[
IO.Image.Input("image"),
],
@@ -1095,6 +1078,252 @@ class RecraftCreativeUpscaleNode(RecraftCrispUpscaleNode):
)


class RecraftV4TextToImageNode(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="RecraftV4TextToImageNode",
display_name="Recraft V4 Text to Image",
category="api node/image/Recraft",
description="Generates images using Recraft V4 or V4 Pro models.",
inputs=[
IO.String.Input(
"prompt",
multiline=True,
tooltip="Prompt for the image generation. Maximum 10,000 characters.",
),
IO.String.Input(
"negative_prompt",
multiline=True,
tooltip="An optional text description of undesired elements on an image.",
),
IO.DynamicCombo.Input(
"model",
options=[
IO.DynamicCombo.Option(
"recraftv4",
[
IO.Combo.Input(
"size",
options=RECRAFT_V4_SIZES,
default="1024x1024",
tooltip="The size of the generated image.",
),
],
),
IO.DynamicCombo.Option(
"recraftv4_pro",
[
IO.Combo.Input(
"size",
options=RECRAFT_V4_PRO_SIZES,
default="2048x2048",
tooltip="The size of the generated image.",
),
],
),
],
tooltip="The model to use for generation.",
),
IO.Int.Input(
"n",
default=1,
min=1,
max=6,
tooltip="The number of images to generate.",
),
IO.Int.Input(
"seed",
default=0,
min=0,
max=0xFFFFFFFFFFFFFFFF,
control_after_generate=True,
tooltip="Seed to determine if node should re-run; "
"actual results are nondeterministic regardless of seed.",
),
IO.Custom(RecraftIO.CONTROLS).Input(
"recraft_controls",
tooltip="Optional additional controls over the generation via the Recraft Controls node.",
optional=True,
),
],
outputs=[
IO.Image.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["model", "n"]),
expr="""
(
$prices := {"recraftv4": 0.04, "recraftv4_pro": 0.25};
{"type":"usd","usd": $lookup($prices, widgets.model) * widgets.n}
)
""",
),
)

@classmethod
async def execute(
cls,
prompt: str,
negative_prompt: str,
model: dict,
n: int,
seed: int,
recraft_controls: RecraftControls | None = None,
) -> IO.NodeOutput:
validate_string(prompt, strip_whitespace=False, min_length=1, max_length=10000)
response = await sync_op(
cls,
ApiEndpoint(path="/proxy/recraft/image_generation", method="POST"),
response_model=RecraftImageGenerationResponse,
data=RecraftImageGenerationRequest(
prompt=prompt,
negative_prompt=negative_prompt if negative_prompt else None,
model=model["model"],
size=model["size"],
n=n,
controls=recraft_controls.create_api_model() if recraft_controls else None,
),
max_retries=1,
)
images = []
for data in response.data:
with handle_recraft_image_output():
image = bytesio_to_image_tensor(await download_url_as_bytesio(data.url, timeout=1024))
if len(image.shape) < 4:
image = image.unsqueeze(0)
images.append(image)
return IO.NodeOutput(torch.cat(images, dim=0))
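The price_badge expression above is written in a JSONata-style syntax evaluated by the frontend. As a rough Python sketch of what that expression computes for this node (illustrative only; the assumed per-image prices come from the expression itself, and actual billing is resolved server-side):

# Hypothetical helper mirroring the recraftv4 price badge logic.
PRICES = {"recraftv4": 0.04, "recraftv4_pro": 0.25}

def estimate_usd(model: str, n: int) -> float:
    # Cost scales linearly with the number of requested images.
    return PRICES[model] * n
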
class RecraftV4TextToVectorNode(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="RecraftV4TextToVectorNode",
display_name="Recraft V4 Text to Vector",
category="api node/image/Recraft",
description="Generates SVG using Recraft V4 or V4 Pro models.",
inputs=[
IO.String.Input(
"prompt",
multiline=True,
tooltip="Prompt for the image generation. Maximum 10,000 characters.",
),
IO.String.Input(
"negative_prompt",
multiline=True,
tooltip="An optional text description of undesired elements on an image.",
),
IO.DynamicCombo.Input(
"model",
options=[
IO.DynamicCombo.Option(
"recraftv4",
[
IO.Combo.Input(
"size",
options=RECRAFT_V4_SIZES,
default="1024x1024",
tooltip="The size of the generated image.",
),
],
),
IO.DynamicCombo.Option(
"recraftv4_pro",
[
IO.Combo.Input(
"size",
options=RECRAFT_V4_PRO_SIZES,
default="2048x2048",
tooltip="The size of the generated image.",
),
],
),
],
tooltip="The model to use for generation.",
),
IO.Int.Input(
"n",
default=1,
min=1,
max=6,
tooltip="The number of images to generate.",
),
IO.Int.Input(
"seed",
default=0,
min=0,
max=0xFFFFFFFFFFFFFFFF,
control_after_generate=True,
tooltip="Seed to determine if node should re-run; "
"actual results are nondeterministic regardless of seed.",
),
IO.Custom(RecraftIO.CONTROLS).Input(
"recraft_controls",
tooltip="Optional additional controls over the generation via the Recraft Controls node.",
optional=True,
),
],
outputs=[
IO.SVG.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["model", "n"]),
expr="""
(
$prices := {"recraftv4": 0.08, "recraftv4_pro": 0.30};
{"type":"usd","usd": $lookup($prices, widgets.model) * widgets.n}
)
""",
),
)

@classmethod
async def execute(
cls,
prompt: str,
negative_prompt: str,
model: dict,
n: int,
seed: int,
recraft_controls: RecraftControls | None = None,
) -> IO.NodeOutput:
validate_string(prompt, strip_whitespace=False, min_length=1, max_length=10000)
response = await sync_op(
cls,
ApiEndpoint(path="/proxy/recraft/image_generation", method="POST"),
response_model=RecraftImageGenerationResponse,
data=RecraftImageGenerationRequest(
prompt=prompt,
negative_prompt=negative_prompt if negative_prompt else None,
model=model["model"],
size=model["size"],
n=n,
style="vector_illustration",
substyle=None,
controls=recraft_controls.create_api_model() if recraft_controls else None,
),
max_retries=1,
)
svg_data = []
for data in response.data:
svg_data.append(await download_url_as_bytesio(data.url, timeout=1024))
return IO.NodeOutput(SVG(svg_data))


class RecraftExtension(ComfyExtension):
@override
async def get_node_list(self) -> list[type[IO.ComfyNode]]:
@@ -1115,6 +1344,8 @@ class RecraftExtension(ComfyExtension):
RecraftCreateStyleNode,
RecraftColorRGBNode,
RecraftControlsNode,
RecraftV4TextToImageNode,
RecraftV4TextToVectorNode,
]


@@ -238,7 +238,6 @@ class Rodin3D_Regular(IO.ComfyNode):
|
||||
display_name="Rodin 3D Generate - Regular Generate",
|
||||
category="api node/3d/Rodin",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("Images"),
|
||||
*COMMON_PARAMETERS,
|
||||
@@ -298,7 +297,6 @@ class Rodin3D_Detail(IO.ComfyNode):
|
||||
display_name="Rodin 3D Generate - Detail Generate",
|
||||
category="api node/3d/Rodin",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("Images"),
|
||||
*COMMON_PARAMETERS,
|
||||
@@ -358,7 +356,6 @@ class Rodin3D_Smooth(IO.ComfyNode):
|
||||
display_name="Rodin 3D Generate - Smooth Generate",
|
||||
category="api node/3d/Rodin",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("Images"),
|
||||
*COMMON_PARAMETERS,
|
||||
@@ -417,7 +414,6 @@ class Rodin3D_Sketch(IO.ComfyNode):
|
||||
display_name="Rodin 3D Generate - Sketch Generate",
|
||||
category="api node/3d/Rodin",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("Images"),
|
||||
IO.Int.Input(
|
||||
@@ -480,7 +476,6 @@ class Rodin3D_Gen2(IO.ComfyNode):
|
||||
display_name="Rodin 3D Generate - Gen-2 Generate",
|
||||
category="api node/3d/Rodin",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("Images"),
|
||||
IO.Int.Input(
|
||||
|
||||
@@ -145,7 +145,6 @@ class RunwayImageToVideoNodeGen3a(IO.ComfyNode):
|
||||
"Before diving in, review these best practices to ensure that "
|
||||
"your input selections will set your generation up for success: "
|
||||
"https://help.runwayml.com/hc/en-us/articles/33927968552339-Creating-with-Act-One-on-Gen-3-Alpha-and-Turbo.",
|
||||
short_description="Generate video from a starting frame using Gen3a Turbo.",
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -240,7 +239,6 @@ class RunwayImageToVideoNodeGen4(IO.ComfyNode):
|
||||
"Before diving in, review these best practices to ensure that "
|
||||
"your input selections will set your generation up for success: "
|
||||
"https://help.runwayml.com/hc/en-us/articles/37327109429011-Creating-with-Gen-4-Video.",
|
||||
short_description="Generate video from a starting frame using Gen4 Turbo.",
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -339,7 +337,6 @@ class RunwayFirstLastFrameNode(IO.ComfyNode):
|
||||
"Before diving in, review these best practices to ensure that your input selections "
|
||||
"will set your generation up for success: "
|
||||
"https://help.runwayml.com/hc/en-us/articles/34170748696595-Creating-with-Keyframes-on-Gen-3.",
|
||||
short_description="Generate video from first and last keyframes with a prompt.",
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -446,7 +443,6 @@ class RunwayTextToImageNode(IO.ComfyNode):
|
||||
category="api node/image/Runway",
|
||||
description="Generate an image from a text prompt using Runway's Gen 4 model. "
|
||||
"You can also include reference image to guide the generation.",
|
||||
short_description="Generate an image from text using Runway Gen 4.",
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
|
||||
@@ -36,7 +36,6 @@ class OpenAIVideoSora2(IO.ComfyNode):
|
||||
display_name="OpenAI Sora - Video",
|
||||
category="api node/video/Sora",
|
||||
description="OpenAI video and audio generation.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"model",
|
||||
|
||||
@@ -64,7 +64,6 @@ class StabilityStableImageUltraNode(IO.ComfyNode):
|
||||
display_name="Stability AI Stable Image Ultra",
|
||||
category="api node/image/Stability AI",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -198,7 +197,6 @@ class StabilityStableImageSD_3_5Node(IO.ComfyNode):
|
||||
display_name="Stability AI Stable Diffusion 3.5 Image",
|
||||
category="api node/image/Stability AI",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -354,7 +352,6 @@ class StabilityUpscaleConservativeNode(IO.ComfyNode):
|
||||
display_name="Stability AI Upscale Conservative",
|
||||
category="api node/image/Stability AI",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.String.Input(
|
||||
@@ -457,7 +454,6 @@ class StabilityUpscaleCreativeNode(IO.ComfyNode):
|
||||
display_name="Stability AI Upscale Creative",
|
||||
category="api node/image/Stability AI",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.String.Input(
|
||||
@@ -577,7 +573,6 @@ class StabilityUpscaleFastNode(IO.ComfyNode):
|
||||
display_name="Stability AI Upscale Fast",
|
||||
category="api node/image/Stability AI",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description="Quickly upscale an image to 4x its original size.",
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
],
|
||||
@@ -630,7 +625,6 @@ class StabilityTextToAudio(IO.ComfyNode):
|
||||
display_name="Stability AI Text To Audio",
|
||||
category="api node/audio/Stability AI",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"model",
|
||||
@@ -707,7 +701,6 @@ class StabilityAudioToAudio(IO.ComfyNode):
|
||||
display_name="Stability AI Audio To Audio",
|
||||
category="api node/audio/Stability AI",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"model",
|
||||
@@ -801,7 +794,6 @@ class StabilityAudioInpaint(IO.ComfyNode):
|
||||
display_name="Stability AI Audio Inpaint",
|
||||
category="api node/audio/Stability AI",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"model",
|
||||
|
||||
@@ -49,7 +49,6 @@ class TopazImageEnhance(IO.ComfyNode):
|
||||
display_name="Topaz Image Enhance",
|
||||
category="api node/image/Topaz",
|
||||
description="Industry-standard upscaling and image enhancement.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["Reimagine"]),
|
||||
IO.Image.Input("image"),
|
||||
@@ -224,7 +223,6 @@ class TopazVideoEnhance(IO.ComfyNode):
|
||||
display_name="Topaz Video Enhance",
|
||||
category="api node/video/Topaz",
|
||||
description="Breathe new life into video with powerful upscaling and recovery technology.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Video.Input("video"),
|
||||
IO.Boolean.Input("upscaler_enabled", default=True),
|
||||
|
||||
@@ -80,7 +80,6 @@ class TripoTextToModelNode(IO.ComfyNode):
|
||||
node_id="TripoTextToModelNode",
|
||||
display_name="Tripo: Text to Model",
|
||||
category="api node/3d/Tripo",
|
||||
description="Generate a 3D model from a text prompt using Tripo's API.",
|
||||
inputs=[
|
||||
IO.String.Input("prompt", multiline=True),
|
||||
IO.String.Input("negative_prompt", multiline=True, optional=True),
|
||||
@@ -200,7 +199,6 @@ class TripoImageToModelNode(IO.ComfyNode):
|
||||
node_id="TripoImageToModelNode",
|
||||
display_name="Tripo: Image to Model",
|
||||
category="api node/3d/Tripo",
|
||||
description="Generate a 3D model from a single image using Tripo's API.",
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.Combo.Input(
|
||||
@@ -333,7 +331,6 @@ class TripoMultiviewToModelNode(IO.ComfyNode):
|
||||
node_id="TripoMultiviewToModelNode",
|
||||
display_name="Tripo: Multiview to Model",
|
||||
category="api node/3d/Tripo",
|
||||
description="Generate a 3D model from multiple view images using Tripo's API.",
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.Image.Input("image_left", optional=True),
|
||||
@@ -473,7 +470,6 @@ class TripoTextureNode(IO.ComfyNode):
|
||||
node_id="TripoTextureNode",
|
||||
display_name="Tripo: Texture model",
|
||||
category="api node/3d/Tripo",
|
||||
description="Apply textures to an existing 3D model using Tripo's API.",
|
||||
inputs=[
|
||||
IO.Custom("MODEL_TASK_ID").Input("model_task_id"),
|
||||
IO.Boolean.Input("texture", default=True, optional=True),
|
||||
@@ -542,7 +538,6 @@ class TripoRefineNode(IO.ComfyNode):
|
||||
display_name="Tripo: Refine Draft model",
|
||||
category="api node/3d/Tripo",
|
||||
description="Refine a draft model created by v1.4 Tripo models only.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Custom("MODEL_TASK_ID").Input("model_task_id", tooltip="Must be a v1.4 Tripo model"),
|
||||
],
|
||||
@@ -582,8 +577,6 @@ class TripoRigNode(IO.ComfyNode):
|
||||
node_id="TripoRigNode",
|
||||
display_name="Tripo: Rig model",
|
||||
category="api node/3d/Tripo",
|
||||
description="Add a skeleton rig to an existing 3D model using Tripo's API.",
|
||||
short_description="Add a skeleton rig to a 3D model.",
|
||||
inputs=[IO.Custom("MODEL_TASK_ID").Input("original_model_task_id")],
|
||||
outputs=[
|
||||
IO.String.Output(display_name="model_file"), # for backward compatibility only
|
||||
@@ -621,8 +614,6 @@ class TripoRetargetNode(IO.ComfyNode):
|
||||
node_id="TripoRetargetNode",
|
||||
display_name="Tripo: Retarget rigged model",
|
||||
category="api node/3d/Tripo",
|
||||
description="Apply a preset animation to a rigged 3D model using Tripo's API.",
|
||||
short_description="Apply a preset animation to a rigged model.",
|
||||
inputs=[
|
||||
IO.Custom("RIG_TASK_ID").Input("original_model_task_id"),
|
||||
IO.Combo.Input(
|
||||
@@ -688,8 +679,6 @@ class TripoConversionNode(IO.ComfyNode):
|
||||
node_id="TripoConversionNode",
|
||||
display_name="Tripo: Convert model",
|
||||
category="api node/3d/Tripo",
|
||||
description="Convert a 3D model to different formats with optional post-processing using Tripo's API.",
|
||||
short_description="Convert a 3D model to different formats.",
|
||||
inputs=[
|
||||
IO.Custom("MODEL_TASK_ID,RIG_TASK_ID,RETARGET_TASK_ID").Input("original_model_task_id"),
|
||||
IO.Combo.Input("format", options=["GLTF", "USDZ", "FBX", "OBJ", "STL", "3MF"]),
|
||||
|
||||
@@ -46,7 +46,6 @@ class VeoVideoGenerationNode(IO.ComfyNode):
|
||||
display_name="Google Veo 2 Video Generation",
|
||||
category="api node/video/Veo",
|
||||
description="Generates videos from text prompts using Google's Veo 2 API",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -265,7 +264,6 @@ class Veo3VideoGenerationNode(VeoVideoGenerationNode):
|
||||
display_name="Google Veo 3 Video Generation",
|
||||
category="api node/video/Veo",
|
||||
description="Generates videos from text prompts using Google's Veo 3 API",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -379,7 +377,6 @@ class Veo3FirstLastFrameNode(IO.ComfyNode):
|
||||
display_name="Google Veo 3 First-Last-Frame to Video",
|
||||
category="api node/video/Veo",
|
||||
description="Generate video using prompt and first and last frames.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
|
||||
@@ -54,6 +54,7 @@ async def execute_task(
|
||||
response_model=TaskStatusResponse,
|
||||
status_extractor=lambda r: r.state,
|
||||
progress_extractor=lambda r: r.progress,
|
||||
price_extractor=lambda r: r.credits * 0.005 if r.credits is not None else None,
|
||||
max_poll_attempts=max_poll_attempts,
|
||||
)
|
||||
if not response.creations:
|
||||
@@ -72,7 +73,6 @@ class ViduTextToVideoNode(IO.ComfyNode):
|
||||
display_name="Vidu Text To Video Generation",
|
||||
category="api node/video/Vidu",
|
||||
description="Generate video from a text prompt",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"),
|
||||
IO.String.Input(
|
||||
@@ -169,7 +169,6 @@ class ViduImageToVideoNode(IO.ComfyNode):
|
||||
display_name="Vidu Image To Video Generation",
|
||||
category="api node/video/Vidu",
|
||||
description="Generate video from image and optional prompt",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"),
|
||||
IO.Image.Input(
|
||||
@@ -272,7 +271,6 @@ class ViduReferenceVideoNode(IO.ComfyNode):
|
||||
display_name="Vidu Reference To Video Generation",
|
||||
category="api node/video/Vidu",
|
||||
description="Generate video from multiple images and a prompt",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"),
|
||||
IO.Image.Input(
|
||||
@@ -386,7 +384,6 @@ class ViduStartEndToVideoNode(IO.ComfyNode):
|
||||
display_name="Vidu Start End To Video Generation",
|
||||
category="api node/video/Vidu",
|
||||
description="Generate a video from start and end frames and a prompt",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"),
|
||||
IO.Image.Input(
|
||||
@@ -489,7 +486,6 @@ class Vidu2TextToVideoNode(IO.ComfyNode):
|
||||
display_name="Vidu2 Text-to-Video Generation",
|
||||
category="api node/video/Vidu",
|
||||
description="Generate video from a text prompt",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["viduq2"]),
|
||||
IO.String.Input(
|
||||
@@ -581,7 +577,6 @@ class Vidu2ImageToVideoNode(IO.ComfyNode):
|
||||
display_name="Vidu2 Image-to-Video Generation",
|
||||
category="api node/video/Vidu",
|
||||
description="Generate a video from an image and an optional prompt.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["viduq2-pro-fast", "viduq2-pro", "viduq2-turbo"]),
|
||||
IO.Image.Input(
|
||||
@@ -710,7 +705,6 @@ class Vidu2ReferenceVideoNode(IO.ComfyNode):
|
||||
display_name="Vidu2 Reference-to-Video Generation",
|
||||
category="api node/video/Vidu",
|
||||
description="Generate a video from multiple reference images and a prompt.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["viduq2"]),
|
||||
IO.Autogrow.Input(
|
||||
@@ -844,7 +838,6 @@ class Vidu2StartEndToVideoNode(IO.ComfyNode):
|
||||
display_name="Vidu2 Start/End Frame-to-Video Generation",
|
||||
category="api node/video/Vidu",
|
||||
description="Generate a video from a start frame, an end frame, and a prompt.",
|
||||
short_description="Generate video from start frame, end frame, and prompt.",
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["viduq2-pro-fast", "viduq2-pro", "viduq2-turbo"]),
|
||||
IO.Image.Input("first_frame"),
|
||||
@@ -964,7 +957,6 @@ class ViduExtendVideoNode(IO.ComfyNode):
|
||||
display_name="Vidu Video Extension",
|
||||
category="api node/video/Vidu",
|
||||
description="Extend an existing video by generating additional frames.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.DynamicCombo.Input(
|
||||
"model",
|
||||
@@ -1135,7 +1127,6 @@ class ViduMultiFrameVideoNode(IO.ComfyNode):
|
||||
display_name="Vidu Multi-Frame Video Generation",
|
||||
category="api node/video/Vidu",
|
||||
description="Generate a video with multiple keyframe transitions.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["viduq2-pro", "viduq2-turbo"]),
|
||||
IO.Image.Input(
|
||||
@@ -1282,7 +1273,6 @@ class Vidu3TextToVideoNode(IO.ComfyNode):
|
||||
display_name="Vidu Q3 Text-to-Video Generation",
|
||||
category="api node/video/Vidu",
|
||||
description="Generate video from a text prompt.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.DynamicCombo.Input(
|
||||
"model",
|
||||
@@ -1317,6 +1307,36 @@ class Vidu3TextToVideoNode(IO.ComfyNode):
|
||||
),
|
||||
],
|
||||
),
|
||||
IO.DynamicCombo.Option(
|
||||
"viduq3-turbo",
|
||||
[
|
||||
IO.Combo.Input(
|
||||
"aspect_ratio",
|
||||
options=["16:9", "9:16", "3:4", "4:3", "1:1"],
|
||||
tooltip="The aspect ratio of the output video.",
|
||||
),
|
||||
IO.Combo.Input(
|
||||
"resolution",
|
||||
options=["720p", "1080p"],
|
||||
tooltip="Resolution of the output video.",
|
||||
),
|
||||
IO.Int.Input(
|
||||
"duration",
|
||||
default=5,
|
||||
min=1,
|
||||
max=16,
|
||||
step=1,
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
tooltip="Duration of the output video in seconds.",
|
||||
),
|
||||
IO.Boolean.Input(
|
||||
"audio",
|
||||
default=False,
|
||||
tooltip="When enabled, outputs video with sound "
|
||||
"(including dialogue and sound effects).",
|
||||
),
|
||||
],
|
||||
),
|
||||
],
|
||||
tooltip="Model to use for video generation.",
|
||||
),
|
||||
@@ -1345,13 +1365,20 @@ class Vidu3TextToVideoNode(IO.ComfyNode):
|
||||
],
|
||||
is_api_node=True,
|
||||
price_badge=IO.PriceBadge(
|
||||
depends_on=IO.PriceBadgeDepends(widgets=["model.duration", "model.resolution"]),
|
||||
depends_on=IO.PriceBadgeDepends(widgets=["model", "model.duration", "model.resolution"]),
|
||||
expr="""
|
||||
(
|
||||
$res := $lookup(widgets, "model.resolution");
|
||||
$base := $lookup({"720p": 0.075, "1080p": 0.1}, $res);
|
||||
$perSec := $lookup({"720p": 0.025, "1080p": 0.05}, $res);
|
||||
{"type":"usd","usd": $base + $perSec * ($lookup(widgets, "model.duration") - 1)}
|
||||
$d := $lookup(widgets, "model.duration");
|
||||
$contains(widgets.model, "turbo")
|
||||
? (
|
||||
$rate := $lookup({"720p": 0.06, "1080p": 0.08}, $res);
|
||||
{"type":"usd","usd": $rate * $d}
|
||||
)
|
||||
: (
|
||||
$rate := $lookup({"720p": 0.15, "1080p": 0.16}, $res);
|
||||
{"type":"usd","usd": $rate * $d}
|
||||
)
|
||||
)
|
||||
""",
|
||||
),
|
||||
@@ -1391,7 +1418,6 @@ class Vidu3ImageToVideoNode(IO.ComfyNode):
|
||||
display_name="Vidu Q3 Image-to-Video Generation",
|
||||
category="api node/video/Vidu",
|
||||
description="Generate a video from an image and an optional prompt.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.DynamicCombo.Input(
|
||||
"model",
|
||||
@@ -1421,6 +1447,31 @@ class Vidu3ImageToVideoNode(IO.ComfyNode):
|
||||
),
|
||||
],
|
||||
),
|
||||
IO.DynamicCombo.Option(
|
||||
"viduq3-turbo",
|
||||
[
|
||||
IO.Combo.Input(
|
||||
"resolution",
|
||||
options=["720p", "1080p"],
|
||||
tooltip="Resolution of the output video.",
|
||||
),
|
||||
IO.Int.Input(
|
||||
"duration",
|
||||
default=5,
|
||||
min=1,
|
||||
max=16,
|
||||
step=1,
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
tooltip="Duration of the output video in seconds.",
|
||||
),
|
||||
IO.Boolean.Input(
|
||||
"audio",
|
||||
default=False,
|
||||
tooltip="When enabled, outputs video with sound "
|
||||
"(including dialogue and sound effects).",
|
||||
),
|
||||
],
|
||||
),
|
||||
],
|
||||
tooltip="Model to use for video generation.",
|
||||
),
|
||||
@@ -1454,13 +1505,20 @@ class Vidu3ImageToVideoNode(IO.ComfyNode):
|
||||
],
|
||||
is_api_node=True,
|
||||
price_badge=IO.PriceBadge(
|
||||
depends_on=IO.PriceBadgeDepends(widgets=["model.duration", "model.resolution"]),
|
||||
depends_on=IO.PriceBadgeDepends(widgets=["model", "model.duration", "model.resolution"]),
|
||||
expr="""
|
||||
(
|
||||
$res := $lookup(widgets, "model.resolution");
|
||||
$base := $lookup({"720p": 0.075, "1080p": 0.275, "2k": 0.35}, $res);
|
||||
$perSec := $lookup({"720p": 0.05, "1080p": 0.075, "2k": 0.075}, $res);
|
||||
{"type":"usd","usd": $base + $perSec * ($lookup(widgets, "model.duration") - 1)}
|
||||
$d := $lookup(widgets, "model.duration");
|
||||
$contains(widgets.model, "turbo")
|
||||
? (
|
||||
$rate := $lookup({"720p": 0.06, "1080p": 0.08}, $res);
|
||||
{"type":"usd","usd": $rate * $d}
|
||||
)
|
||||
: (
|
||||
$rate := $lookup({"720p": 0.15, "1080p": 0.16, "2k": 0.2}, $res);
|
||||
{"type":"usd","usd": $rate * $d}
|
||||
)
|
||||
)
|
||||
""",
|
||||
),
|
||||
@@ -1493,6 +1551,145 @@ class Vidu3ImageToVideoNode(IO.ComfyNode):
|
||||
return IO.NodeOutput(await download_url_to_video_output(results[0].url))
|
||||
|
||||
|
||||
class Vidu3StartEndToVideoNode(IO.ComfyNode):
|
||||
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
return IO.Schema(
|
||||
node_id="Vidu3StartEndToVideoNode",
|
||||
display_name="Vidu Q3 Start/End Frame-to-Video Generation",
|
||||
category="api node/video/Vidu",
|
||||
description="Generate a video from a start frame, an end frame, and a prompt.",
|
||||
inputs=[
|
||||
IO.DynamicCombo.Input(
|
||||
"model",
|
||||
options=[
|
||||
IO.DynamicCombo.Option(
|
||||
"viduq3-pro",
|
||||
[
|
||||
IO.Combo.Input(
|
||||
"resolution",
|
||||
options=["720p", "1080p"],
|
||||
tooltip="Resolution of the output video.",
|
||||
),
|
||||
IO.Int.Input(
|
||||
"duration",
|
||||
default=5,
|
||||
min=1,
|
||||
max=16,
|
||||
step=1,
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
tooltip="Duration of the output video in seconds.",
|
||||
),
|
||||
IO.Boolean.Input(
|
||||
"audio",
|
||||
default=False,
|
||||
tooltip="When enabled, outputs video with sound "
|
||||
"(including dialogue and sound effects).",
|
||||
),
|
||||
],
|
||||
),
|
||||
IO.DynamicCombo.Option(
|
||||
"viduq3-turbo",
|
||||
[
|
||||
IO.Combo.Input(
|
||||
"resolution",
|
||||
options=["720p", "1080p"],
|
||||
tooltip="Resolution of the output video.",
|
||||
),
|
||||
IO.Int.Input(
|
||||
"duration",
|
||||
default=5,
|
||||
min=1,
|
||||
max=16,
|
||||
step=1,
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
tooltip="Duration of the output video in seconds.",
|
||||
),
|
||||
IO.Boolean.Input(
|
||||
"audio",
|
||||
default=False,
|
||||
tooltip="When enabled, outputs video with sound "
|
||||
"(including dialogue and sound effects).",
|
||||
),
|
||||
],
|
||||
),
|
||||
],
|
||||
tooltip="Model to use for video generation.",
|
||||
),
|
||||
IO.Image.Input("first_frame"),
|
||||
IO.Image.Input("end_frame"),
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
multiline=True,
|
||||
tooltip="Prompt description (max 2000 characters).",
|
||||
),
|
||||
IO.Int.Input(
|
||||
"seed",
|
||||
default=1,
|
||||
min=0,
|
||||
max=2147483647,
|
||||
step=1,
|
||||
display_mode=IO.NumberDisplay.number,
|
||||
control_after_generate=True,
|
||||
),
|
||||
],
|
||||
outputs=[
|
||||
IO.Video.Output(),
|
||||
],
|
||||
hidden=[
|
||||
IO.Hidden.auth_token_comfy_org,
|
||||
IO.Hidden.api_key_comfy_org,
|
||||
IO.Hidden.unique_id,
|
||||
],
|
||||
is_api_node=True,
|
||||
price_badge=IO.PriceBadge(
|
||||
depends_on=IO.PriceBadgeDepends(widgets=["model", "model.duration", "model.resolution"]),
|
||||
expr="""
|
||||
(
|
||||
$res := $lookup(widgets, "model.resolution");
|
||||
$d := $lookup(widgets, "model.duration");
|
||||
$contains(widgets.model, "turbo")
|
||||
? (
|
||||
$rate := $lookup({"720p": 0.06, "1080p": 0.08}, $res);
|
||||
{"type":"usd","usd": $rate * $d}
|
||||
)
|
||||
: (
|
||||
$rate := $lookup({"720p": 0.15, "1080p": 0.16}, $res);
|
||||
{"type":"usd","usd": $rate * $d}
|
||||
)
|
||||
)
|
||||
""",
|
||||
),
|
||||
)
|
||||
|
||||
@classmethod
|
||||
async def execute(
|
||||
cls,
|
||||
model: dict,
|
||||
first_frame: Input.Image,
|
||||
end_frame: Input.Image,
|
||||
prompt: str,
|
||||
seed: int,
|
||||
) -> IO.NodeOutput:
|
||||
validate_string(prompt, max_length=2000)
|
||||
validate_images_aspect_ratio_closeness(first_frame, end_frame, min_rel=0.8, max_rel=1.25, strict=False)
|
||||
payload = TaskCreationRequest(
|
||||
model=model["model"],
|
||||
prompt=prompt,
|
||||
duration=model["duration"],
|
||||
seed=seed,
|
||||
resolution=model["resolution"],
|
||||
audio=model["audio"],
|
||||
images=[
|
||||
(await upload_images_to_comfyapi(cls, frame, max_images=1, mime_type="image/png"))[0]
|
||||
for frame in (first_frame, end_frame)
|
||||
],
|
||||
)
|
||||
results = await execute_task(cls, VIDU_START_END_VIDEO, payload)
|
||||
return IO.NodeOutput(await download_url_to_video_output(results[0].url))
|
||||
|
||||
|
||||
class ViduExtension(ComfyExtension):
|
||||
@override
|
||||
async def get_node_list(self) -> list[type[IO.ComfyNode]]:
|
||||
@@ -1509,6 +1706,7 @@ class ViduExtension(ComfyExtension):
|
||||
ViduMultiFrameVideoNode,
|
||||
Vidu3TextToVideoNode,
|
||||
Vidu3ImageToVideoNode,
|
||||
Vidu3StartEndToVideoNode,
|
||||
]
|
||||
|
||||
|
||||
|
||||
@@ -175,7 +175,6 @@ class WanTextToImageApi(IO.ComfyNode):
|
||||
display_name="Wan Text to Image",
|
||||
category="api node/image/Wan",
|
||||
description="Generates an image based on a text prompt.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"model",
|
||||
@@ -299,7 +298,6 @@ class WanImageToImageApi(IO.ComfyNode):
|
||||
category="api node/image/Wan",
|
||||
description="Generates an image from one or two input images and a text prompt. "
|
||||
"The output image is currently fixed at 1.6 MP, and its aspect ratio matches the input image(s).",
|
||||
short_description="Generate an image from input images and a text prompt.",
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"model",
|
||||
@@ -426,7 +424,6 @@ class WanTextToVideoApi(IO.ComfyNode):
|
||||
display_name="Wan Text to Video",
|
||||
category="api node/video/Wan",
|
||||
description="Generates a video based on a text prompt.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"model",
|
||||
@@ -606,7 +603,6 @@ class WanImageToVideoApi(IO.ComfyNode):
|
||||
display_name="Wan Image to Video",
|
||||
category="api node/video/Wan",
|
||||
description="Generates a video from the first frame and a text prompt.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"model",
|
||||
@@ -783,7 +779,6 @@ class WanReferenceVideoApi(IO.ComfyNode):
|
||||
category="api node/video/Wan",
|
||||
description="Use the character and voice from input videos, combined with a prompt, "
|
||||
"to generate a new video that maintains character consistency.",
|
||||
short_description="Generate character-consistent video from reference videos and prompt.",
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["wan2.6-r2v"]),
|
||||
IO.String.Input(
|
||||
|
||||
@@ -30,7 +30,6 @@ class WavespeedFlashVSRNode(IO.ComfyNode):
|
||||
category="api node/video/WaveSpeed",
|
||||
description="Fast, high-quality video upscaler that "
|
||||
"boosts resolution and restores clarity for low-resolution or blurry footage.",
|
||||
short_description="Fast video upscaler that boosts resolution and restores clarity.",
|
||||
inputs=[
|
||||
IO.Video.Input("video"),
|
||||
IO.Combo.Input("target_resolution", options=["720p", "1080p", "2K", "4K"]),
|
||||
@@ -102,7 +101,6 @@ class WavespeedImageUpscaleNode(IO.ComfyNode):
|
||||
display_name="WaveSpeed Image Upscale",
|
||||
category="api node/image/WaveSpeed",
|
||||
description="Boost image resolution and quality, upscaling photos to 4K or 8K for sharp, detailed results.",
|
||||
short_description="Upscale images to 4K or 8K with enhanced quality.",
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["SeedVR2", "Ultimate"]),
|
||||
IO.Image.Input("image"),
|
||||
|
||||
@@ -12,8 +12,6 @@ class TextEncodeAceStepAudio(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="TextEncodeAceStepAudio",
|
||||
category="conditioning",
|
||||
description="Encodes tags and lyrics into conditioning for ACE-Step 1.0 audio generation with adjustable lyrics strength.",
|
||||
short_description="Encodes tags and lyrics for ACE-Step 1.0 audio.",
|
||||
inputs=[
|
||||
io.Clip.Input("clip"),
|
||||
io.String.Input("tags", multiline=True, dynamic_prompts=True),
|
||||
@@ -36,8 +34,6 @@ class TextEncodeAceStepAudio15(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="TextEncodeAceStepAudio1.5",
|
||||
category="conditioning",
|
||||
description="Encodes tags, lyrics, and music parameters like BPM, key, and language into conditioning for ACE-Step 1.5 audio generation.",
|
||||
short_description="Encodes text and music parameters for ACE-Step 1.5.",
|
||||
inputs=[
|
||||
io.Clip.Input("clip"),
|
||||
io.String.Input("tags", multiline=True, dynamic_prompts=True),
|
||||
@@ -72,8 +68,6 @@ class EmptyAceStepLatentAudio(io.ComfyNode):
|
||||
node_id="EmptyAceStepLatentAudio",
|
||||
display_name="Empty Ace Step 1.0 Latent Audio",
|
||||
category="latent/audio",
|
||||
description="Creates an empty latent audio tensor for ACE-Step 1.0 with a specified duration and batch size.",
|
||||
short_description="Creates an empty ACE-Step 1.0 audio latent.",
|
||||
inputs=[
|
||||
io.Float.Input("seconds", default=120.0, min=1.0, max=1000.0, step=0.1),
|
||||
io.Int.Input(
|
||||
@@ -97,8 +91,6 @@ class EmptyAceStep15LatentAudio(io.ComfyNode):
|
||||
node_id="EmptyAceStep1.5LatentAudio",
|
||||
display_name="Empty Ace Step 1.5 Latent Audio",
|
||||
category="latent/audio",
|
||||
description="Creates an empty latent audio tensor for ACE-Step 1.5 with a specified duration and batch size.",
|
||||
short_description="Creates an empty ACE-Step 1.5 audio latent.",
|
||||
inputs=[
|
||||
io.Float.Input("seconds", default=120.0, min=1.0, max=1000.0, step=0.01),
|
||||
io.Int.Input(
|
||||
@@ -123,7 +115,6 @@ class ReferenceAudio(io.ComfyNode):
|
||||
category="advanced/conditioning/audio",
|
||||
is_experimental=True,
|
||||
description="This node sets the reference audio for ace step 1.5",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
io.Conditioning.Input("conditioning"),
|
||||
io.Latent.Input("latent", optional=True),
|
||||
|
||||
@@ -46,8 +46,6 @@ class SamplerLCMUpscale(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SamplerLCMUpscale",
|
||||
category="sampling/custom_sampling/samplers",
|
||||
description="Sampler that progressively upscales the latent during LCM sampling steps, combining denoising with gradual resolution increase.",
|
||||
short_description="LCM sampler with progressive latent upscaling.",
|
||||
inputs=[
|
||||
io.Float.Input("scale_ratio", default=1.0, min=0.1, max=20.0, step=0.01),
|
||||
io.Int.Input("scale_steps", default=-1, min=-1, max=1000, step=1),
|
||||
@@ -95,8 +93,6 @@ class SamplerEulerCFGpp(io.ComfyNode):
|
||||
node_id="SamplerEulerCFGpp",
|
||||
display_name="SamplerEulerCFG++",
|
||||
category="_for_testing", # "sampling/custom_sampling/samplers"
|
||||
description="Euler sampler variant using the CFG++ formulation, which modifies the denoising direction using unconditional predictions for improved guidance.",
|
||||
short_description="Euler sampler using CFG++ guidance formulation.",
|
||||
inputs=[
|
||||
io.Combo.Input("version", options=["regular", "alternative"]),
|
||||
],
|
||||
|
||||
@@ -30,8 +30,6 @@ class AlignYourStepsScheduler(io.ComfyNode):
|
||||
node_id="AlignYourStepsScheduler",
|
||||
search_aliases=["AYS scheduler"],
|
||||
category="sampling/custom_sampling/schedulers",
|
||||
description="Generates an optimized noise schedule using the Align Your Steps method with log-linear interpolation.",
|
||||
short_description="Optimized noise schedule using Align Your Steps.",
|
||||
inputs=[
|
||||
io.Combo.Input("model_type", options=["SD1", "SDXL", "SVD"]),
|
||||
io.Int.Input("steps", default=10, min=1, max=10000),
|
||||
|
||||
@@ -17,8 +17,6 @@ class APG(io.ComfyNode):
|
||||
node_id="APG",
|
||||
display_name="Adaptive Projected Guidance",
|
||||
category="sampling/custom_sampling",
|
||||
description="Applies Adaptive Projected Guidance to a model, decomposing CFG guidance into parallel and orthogonal components with optional momentum and norm thresholding for improved sampling quality.",
|
||||
short_description="Decomposes CFG guidance with projection and normalization.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Float.Input(
|
||||
|
||||
@@ -26,8 +26,6 @@ class UNetSelfAttentionMultiply(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="UNetSelfAttentionMultiply",
|
||||
category="_for_testing/attention_experiments",
|
||||
description="Scales the query, key, value, and output weights of UNet self-attention layers by specified multipliers to experiment with attention behavior.",
|
||||
short_description="Scale UNet self-attention Q/K/V/Out weights.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01),
|
||||
@@ -51,8 +49,6 @@ class UNetCrossAttentionMultiply(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="UNetCrossAttentionMultiply",
|
||||
category="_for_testing/attention_experiments",
|
||||
description="Scales the query, key, value, and output weights of UNet cross-attention layers by specified multipliers to experiment with text-to-image attention.",
|
||||
short_description="Scale UNet cross-attention Q/K/V/Out weights.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01),
|
||||
@@ -77,8 +73,6 @@ class CLIPAttentionMultiply(io.ComfyNode):
|
||||
node_id="CLIPAttentionMultiply",
|
||||
search_aliases=["clip attention scale", "text encoder attention"],
|
||||
category="_for_testing/attention_experiments",
|
||||
description="Scales the query, key, value, and output projection weights of CLIP text encoder self-attention layers by specified multipliers.",
|
||||
short_description="Scale CLIP text encoder attention weights.",
|
||||
inputs=[
|
||||
io.Clip.Input("clip"),
|
||||
io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01),
|
||||
@@ -113,8 +107,6 @@ class UNetTemporalAttentionMultiply(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="UNetTemporalAttentionMultiply",
|
||||
category="_for_testing/attention_experiments",
|
||||
description="Scales the output weights of UNet temporal and structural attention layers independently, allowing fine-grained control over video model attention behavior.",
|
||||
short_description="Scale UNet temporal and structural attention weights.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Float.Input("self_structural", default=1.0, min=0.0, max=10.0, step=0.01),
|
||||
|
||||
@@ -19,8 +19,6 @@ class EmptyLatentAudio(IO.ComfyNode):
|
||||
node_id="EmptyLatentAudio",
|
||||
display_name="Empty Latent Audio",
|
||||
category="latent/audio",
|
||||
description="Creates an empty latent audio tensor with a specified duration and batch size for Stable Audio generation.",
|
||||
short_description="Creates an empty latent audio tensor.",
|
||||
inputs=[
|
||||
IO.Float.Input("seconds", default=47.6, min=1.0, max=1000.0, step=0.1),
|
||||
IO.Int.Input(
|
||||
@@ -45,8 +43,6 @@ class ConditioningStableAudio(IO.ComfyNode):
|
||||
return IO.Schema(
|
||||
node_id="ConditioningStableAudio",
|
||||
category="conditioning",
|
||||
description="Sets the start time and total duration on Stable Audio positive and negative conditioning.",
|
||||
short_description="Sets timing parameters on Stable Audio conditioning.",
|
||||
inputs=[
|
||||
IO.Conditioning.Input("positive"),
|
||||
IO.Conditioning.Input("negative"),
|
||||
@@ -76,8 +72,6 @@ class VAEEncodeAudio(IO.ComfyNode):
|
||||
search_aliases=["audio to latent"],
|
||||
display_name="VAE Encode Audio",
|
||||
category="latent/audio",
|
||||
description="Encodes an audio waveform into a latent representation using a VAE, resampling if needed.",
|
||||
short_description="Encodes audio into latent via VAE.",
|
||||
inputs=[
|
||||
IO.Audio.Input("audio"),
|
||||
IO.Vae.Input("vae"),
|
||||
@@ -121,8 +115,6 @@ class VAEDecodeAudio(IO.ComfyNode):
|
||||
search_aliases=["latent to audio"],
|
||||
display_name="VAE Decode Audio",
|
||||
category="latent/audio",
|
||||
description="Decodes a latent representation back into an audio waveform using a VAE.",
|
||||
short_description="Decodes latent into audio via VAE.",
|
||||
inputs=[
|
||||
IO.Latent.Input("samples"),
|
||||
IO.Vae.Input("vae"),
|
||||
@@ -145,8 +137,6 @@ class VAEDecodeAudioTiled(IO.ComfyNode):
|
||||
search_aliases=["latent to audio"],
|
||||
display_name="VAE Decode Audio (Tiled)",
|
||||
category="latent/audio",
|
||||
description="Decodes a latent representation into audio using tiled VAE decoding to reduce memory usage.",
|
||||
short_description="Tiled VAE decoding of latent into audio.",
|
||||
inputs=[
|
||||
IO.Latent.Input("samples"),
|
||||
IO.Vae.Input("vae"),
|
||||
@@ -169,8 +159,6 @@ class SaveAudio(IO.ComfyNode):
|
||||
search_aliases=["export flac"],
|
||||
display_name="Save Audio (FLAC)",
|
||||
category="audio",
|
||||
description="Saves audio to disk in FLAC format with a configurable filename prefix.",
|
||||
short_description="Saves audio to disk in FLAC format.",
|
||||
inputs=[
|
||||
IO.Audio.Input("audio"),
|
||||
IO.String.Input("filename_prefix", default="audio/ComfyUI"),
|
||||
@@ -196,8 +184,6 @@ class SaveAudioMP3(IO.ComfyNode):
|
||||
search_aliases=["export mp3"],
|
||||
display_name="Save Audio (MP3)",
|
||||
category="audio",
|
||||
description="Saves audio to disk in MP3 format with configurable quality and filename prefix.",
|
||||
short_description="Saves audio to disk in MP3 format.",
|
||||
inputs=[
|
||||
IO.Audio.Input("audio"),
|
||||
IO.String.Input("filename_prefix", default="audio/ComfyUI"),
|
||||
@@ -226,8 +212,6 @@ class SaveAudioOpus(IO.ComfyNode):
|
||||
search_aliases=["export opus"],
|
||||
display_name="Save Audio (Opus)",
|
||||
category="audio",
|
||||
description="Saves audio to disk in Opus format with configurable quality and filename prefix.",
|
||||
short_description="Saves audio to disk in Opus format.",
|
||||
inputs=[
|
||||
IO.Audio.Input("audio"),
|
||||
IO.String.Input("filename_prefix", default="audio/ComfyUI"),
|
||||
@@ -256,8 +240,6 @@ class PreviewAudio(IO.ComfyNode):
|
||||
search_aliases=["play audio"],
|
||||
display_name="Preview Audio",
|
||||
category="audio",
|
||||
description="Plays back audio in the UI for previewing.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Audio.Input("audio"),
|
||||
],
|
||||
@@ -318,8 +300,6 @@ class LoadAudio(IO.ComfyNode):
|
||||
search_aliases=["import audio", "open audio", "audio file"],
|
||||
display_name="Load Audio",
|
||||
category="audio",
|
||||
description="Loads an audio or video file from disk and outputs the audio as a single Audio output.",
|
||||
short_description="Loads an audio file from disk.",
|
||||
inputs=[
|
||||
IO.Combo.Input("audio", upload=IO.UploadType.audio, options=sorted(files)),
|
||||
],
|
||||
@@ -358,7 +338,6 @@ class RecordAudio(IO.ComfyNode):
|
||||
search_aliases=["microphone input", "audio capture", "voice input"],
|
||||
display_name="Record Audio",
|
||||
category="audio",
|
||||
description="Records audio from a microphone input and outputs the captured audio.",
|
||||
inputs=[
|
||||
IO.Custom("AUDIO_RECORD").Input("audio"),
|
||||
],
|
||||
@@ -384,7 +363,6 @@ class TrimAudioDuration(IO.ComfyNode):
|
||||
search_aliases=["cut audio", "audio clip", "shorten audio"],
|
||||
display_name="Trim Audio Duration",
|
||||
description="Trim audio tensor into chosen time range.",
|
||||
short_description=None,
|
||||
category="audio",
|
||||
inputs=[
|
||||
IO.Audio.Input("audio"),
|
||||
@@ -438,7 +416,6 @@ class SplitAudioChannels(IO.ComfyNode):
|
||||
search_aliases=["stereo to mono"],
|
||||
display_name="Split Audio Channels",
|
||||
description="Separates the audio into left and right channels.",
|
||||
short_description=None,
|
||||
category="audio",
|
||||
inputs=[
|
||||
IO.Audio.Input("audio"),
|
||||
@@ -471,7 +448,6 @@ class JoinAudioChannels(IO.ComfyNode):
|
||||
node_id="JoinAudioChannels",
|
||||
display_name="Join Audio Channels",
|
||||
description="Joins left and right mono audio channels into a stereo audio.",
|
||||
short_description=None,
|
||||
category="audio",
|
||||
inputs=[
|
||||
IO.Audio.Input("audio_left"),
|
||||
@@ -541,7 +517,6 @@ class AudioConcat(IO.ComfyNode):
|
||||
search_aliases=["join audio", "combine audio", "append audio"],
|
||||
display_name="Audio Concat",
|
||||
description="Concatenates the audio1 to audio2 in the specified direction.",
|
||||
short_description=None,
|
||||
category="audio",
|
||||
inputs=[
|
||||
IO.Audio.Input("audio1"),
|
||||
@@ -590,7 +565,6 @@ class AudioMerge(IO.ComfyNode):
|
||||
search_aliases=["mix audio", "overlay audio", "layer audio"],
|
||||
display_name="Audio Merge",
|
||||
description="Combine two audio tracks by overlaying their waveforms.",
|
||||
short_description=None,
|
||||
category="audio",
|
||||
inputs=[
|
||||
IO.Audio.Input("audio1"),
|
||||
@@ -652,8 +626,6 @@ class AudioAdjustVolume(IO.ComfyNode):
|
||||
search_aliases=["audio gain", "loudness", "audio level"],
|
||||
display_name="Audio Adjust Volume",
|
||||
category="audio",
|
||||
description="Adjusts audio volume by a specified number of decibels.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Audio.Input("audio"),
|
||||
IO.Int.Input(
|
||||
@@ -690,8 +662,6 @@ class EmptyAudio(IO.ComfyNode):
|
||||
search_aliases=["blank audio"],
|
||||
display_name="Empty Audio",
|
||||
category="audio",
|
||||
description="Creates a silent audio clip with configurable duration, sample rate, and channel count.",
|
||||
short_description="Creates a silent audio clip.",
|
||||
inputs=[
|
||||
IO.Float.Input(
|
||||
"duration",
|
||||
|
||||
@@ -11,8 +11,6 @@ class AudioEncoderLoader(io.ComfyNode):
return io.Schema(
node_id="AudioEncoderLoader",
category="loaders",
description="Loads an audio encoder model from a checkpoint file for encoding audio into embeddings.",
short_description="Loads an audio encoder model from a checkpoint.",
inputs=[
io.Combo.Input(
"audio_encoder_name",
@@ -38,8 +36,6 @@ class AudioEncoderEncode(io.ComfyNode):
return io.Schema(
node_id="AudioEncoderEncode",
category="conditioning",
description="Encodes audio into embeddings using a loaded audio encoder model.",
short_description=None,
inputs=[
io.AudioEncoder.Input("audio_encoder"),
io.Audio.Input("audio"),

@@ -154,8 +154,6 @@ class WanCameraEmbedding(io.ComfyNode):
return io.Schema(
node_id="WanCameraEmbedding",
category="camera",
description="Generates Plucker camera embeddings from a selected camera motion trajectory for Wan video generation.",
short_description="Generates camera embeddings for Wan video generation.",
inputs=[
io.Combo.Input(
"camera_pose",

@@ -12,8 +12,6 @@ class Canny(io.ComfyNode):
node_id="Canny",
search_aliases=["edge detection", "outline", "contour detection", "line art"],
category="image/preprocessors",
description="Detects edges in an image using the Canny edge detection algorithm with configurable low and high thresholds.",
short_description="Canny edge detection on images.",
inputs=[
io.Image.Input("image"),
io.Float.Input("low_threshold", default=0.4, min=0.01, max=0.99, step=0.01),

@@ -27,8 +27,6 @@ class CFGZeroStar(io.ComfyNode):
return io.Schema(
node_id="CFGZeroStar",
category="advanced/guidance",
description="Applies CFG-Zero* post-CFG correction that computes an optimal scaling factor between conditional and unconditional predictions to reduce CFG artifacts.",
short_description="CFG-Zero* guidance correction to reduce artifacts.",
inputs=[
io.Model.Input("model"),
],
@@ -56,8 +54,6 @@ class CFGNorm(io.ComfyNode):
return io.Schema(
node_id="CFGNorm",
category="advanced/guidance",
description="Constrains the CFG-guided prediction norm to not exceed the conditional prediction norm, helping to prevent oversaturation at high CFG scales.",
short_description="Constrain CFG output norm to conditional prediction norm.",
inputs=[
io.Model.Input("model"),
io.Float.Input("strength", default=1.0, min=0.0, max=100.0, step=0.01),

@@ -14,8 +14,6 @@ class EmptyChromaRadianceLatentImage(io.ComfyNode):
return io.Schema(
node_id="EmptyChromaRadianceLatentImage",
category="latent/chroma_radiance",
description="Creates an empty Chroma Radiance latent image tensor with the specified width, height, and batch size.",
short_description="Creates an empty Chroma Radiance latent image.",
inputs=[
io.Int.Input(id="width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
io.Int.Input(id="height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
@@ -37,7 +35,6 @@ class ChromaRadianceOptions(io.ComfyNode):
node_id="ChromaRadianceOptions",
category="model_patches/chroma_radiance",
description="Allows setting advanced options for the Chroma Radiance model.",
short_description=None,
inputs=[
io.Model.Input(id="model"),
io.Boolean.Input(

@@ -10,8 +10,6 @@ class CLIPTextEncodeSDXLRefiner(io.ComfyNode):
return io.Schema(
node_id="CLIPTextEncodeSDXLRefiner",
category="advanced/conditioning",
description="Encodes text for SDXL refiner models with aesthetic score and resolution conditioning parameters.",
short_description="Encodes text for SDXL refiner models.",
inputs=[
io.Float.Input("ascore", default=6.0, min=0.0, max=1000.0, step=0.01),
io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION),
@@ -33,8 +31,6 @@ class CLIPTextEncodeSDXL(io.ComfyNode):
return io.Schema(
node_id="CLIPTextEncodeSDXL",
category="advanced/conditioning",
description="Encodes separate G and L text prompts for SDXL models with resolution and crop conditioning parameters.",
short_description="Encodes dual text prompts for SDXL models.",
inputs=[
io.Clip.Input("clip"),
io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION),

@@ -10,7 +10,6 @@ class ColorToRGBInt(io.ComfyNode):
|
||||
display_name="Color to RGB Int",
|
||||
category="utils",
|
||||
description="Convert a color to a RGB integer value.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
io.Color.Input("color"),
|
||||
],
|
||||
|
||||
@@ -112,8 +112,6 @@ class PorterDuffImageComposite(io.ComfyNode):
|
||||
search_aliases=["alpha composite", "blend modes", "layer blend", "transparency blend"],
|
||||
display_name="Porter-Duff Image Composite",
|
||||
category="mask/compositing",
|
||||
description="Composites two images with alpha masks using Porter-Duff blend modes.",
|
||||
short_description="",
|
||||
inputs=[
|
||||
io.Image.Input("source"),
|
||||
io.Mask.Input("source_alpha"),
|
||||
@@ -171,8 +169,6 @@ class SplitImageWithAlpha(io.ComfyNode):
|
||||
search_aliases=["extract alpha", "separate transparency", "remove alpha"],
|
||||
display_name="Split Image with Alpha",
|
||||
category="mask/compositing",
|
||||
description="Separates an RGBA image into its RGB color channels and an alpha transparency mask.",
|
||||
short_description="Split RGBA image into RGB and alpha mask.",
|
||||
inputs=[
|
||||
io.Image.Input("image"),
|
||||
],
|
||||
@@ -197,8 +193,6 @@ class JoinImageWithAlpha(io.ComfyNode):
|
||||
search_aliases=["add transparency", "apply alpha", "composite alpha", "RGBA"],
|
||||
display_name="Join Image with Alpha",
|
||||
category="mask/compositing",
|
||||
description="Combines an RGB image with an alpha mask to produce an RGBA image with transparency.",
|
||||
short_description="Combine RGB image and alpha into RGBA.",
|
||||
inputs=[
|
||||
io.Image.Input("image"),
|
||||
io.Mask.Input("alpha"),
|
||||
|
||||
@@ -9,8 +9,6 @@ class CLIPTextEncodeControlnet(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="CLIPTextEncodeControlnet",
|
||||
category="_for_testing/conditioning",
|
||||
description="Encodes text with CLIP and attaches the result as cross-attention controlnet conditioning to existing conditioning data.",
|
||||
short_description="CLIP text encode for controlnet cross-attention conditioning.",
|
||||
inputs=[
|
||||
io.Clip.Input("clip"),
|
||||
io.Conditioning.Input("conditioning"),
|
||||
@@ -38,8 +36,6 @@ class T5TokenizerOptions(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="T5TokenizerOptions",
|
||||
category="_for_testing/conditioning",
|
||||
description="Configures minimum padding and length options for T5-family tokenizers used in CLIP text encoding.",
|
||||
short_description="Set T5 tokenizer padding and length options.",
|
||||
inputs=[
|
||||
io.Clip.Input("clip"),
|
||||
io.Int.Input("min_padding", default=0, min=0, max=10000, step=1),
|
||||
|
||||
@@ -12,7 +12,6 @@ class ContextWindowsManualNode(io.ComfyNode):
|
||||
display_name="Context Windows (Manual)",
|
||||
category="context",
|
||||
description="Manually set context windows.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
io.Model.Input("model", tooltip="The model to apply context windows to during sampling."),
|
||||
io.Int.Input("context_length", min=1, default=16, tooltip="The length of the context window."),
|
||||
@@ -66,7 +65,6 @@ class WanContextWindowsManualNode(ContextWindowsManualNode):
|
||||
schema.node_id = "WanContextWindowsManual"
|
||||
schema.display_name = "WAN Context Windows (Manual)"
|
||||
schema.description = "Manually set context windows for WAN-like models (dim=2)."
|
||||
schema.short_description = None
|
||||
schema.inputs = [
|
||||
io.Model.Input("model", tooltip="The model to apply context windows to during sampling."),
|
||||
io.Int.Input("context_length", min=1, max=nodes.MAX_RESOLUTION, step=4, default=81, tooltip="The length of the context window."),
|
||||
|
||||
@@ -10,8 +10,6 @@ class SetUnionControlNetType(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SetUnionControlNetType",
|
||||
category="conditioning/controlnet",
|
||||
description="Sets the control type for a Union ControlNet, selecting which conditioning mode to use.",
|
||||
short_description="Select control mode for Union ControlNet.",
|
||||
inputs=[
|
||||
io.ControlNet.Input("control_net"),
|
||||
io.Combo.Input("type", options=["auto"] + list(UNION_CONTROLNET_TYPES.keys())),
|
||||
@@ -42,8 +40,6 @@ class ControlNetInpaintingAliMamaApply(io.ComfyNode):
|
||||
node_id="ControlNetInpaintingAliMamaApply",
|
||||
search_aliases=["masked controlnet"],
|
||||
category="conditioning/controlnet",
|
||||
description="Applies an AliMama inpainting ControlNet to positive and negative conditioning using an image and mask with VAE encoding.",
|
||||
short_description="Applies AliMama inpainting ControlNet with mask.",
|
||||
inputs=[
|
||||
io.Conditioning.Input("positive"),
|
||||
io.Conditioning.Input("negative"),
|
||||
|
||||
@@ -14,7 +14,6 @@ class EmptyCosmosLatentVideo(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="EmptyCosmosLatentVideo",
|
||||
category="latent/video",
|
||||
description="Creates an empty latent tensor sized for Cosmos video generation.",
|
||||
inputs=[
|
||||
io.Int.Input("width", default=1280, min=16, max=nodes.MAX_RESOLUTION, step=16),
|
||||
io.Int.Input("height", default=704, min=16, max=nodes.MAX_RESOLUTION, step=16),
|
||||
@@ -47,8 +46,6 @@ class CosmosImageToVideoLatent(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="CosmosImageToVideoLatent",
|
||||
category="conditioning/inpaint",
|
||||
description="Creates an inpainting video latent for Cosmos by encoding optional start and end images with a noise mask.",
|
||||
short_description="Cosmos inpainting video latent from start/end images.",
|
||||
inputs=[
|
||||
io.Vae.Input("vae"),
|
||||
io.Int.Input("width", default=1280, min=16, max=nodes.MAX_RESOLUTION, step=16),
|
||||
@@ -92,8 +89,6 @@ class CosmosPredict2ImageToVideoLatent(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="CosmosPredict2ImageToVideoLatent",
|
||||
category="conditioning/inpaint",
|
||||
description="Creates an inpainting video latent for Cosmos Predict2 by encoding optional start and end images with Wan latent format processing.",
|
||||
short_description="Cosmos Predict2 inpainting video latent from images.",
|
||||
inputs=[
|
||||
io.Vae.Input("vae"),
|
||||
io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16),
|
||||
|
||||
@@ -18,8 +18,6 @@ class BasicScheduler(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="BasicScheduler",
|
||||
category="sampling/custom_sampling/schedulers",
|
||||
description="Generates a sigma schedule from a model using a selected scheduler algorithm, step count, and denoise strength.",
|
||||
short_description="Generate sigma schedule from model and scheduler.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Combo.Input("scheduler", options=comfy.samplers.SCHEDULER_NAMES),
|
||||
@@ -50,8 +48,6 @@ class KarrasScheduler(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="KarrasScheduler",
|
||||
category="sampling/custom_sampling/schedulers",
|
||||
description="Generates a sigma schedule using the Karras noise schedule with configurable sigma range and rho parameter.",
|
||||
short_description="Generate sigmas using Karras noise schedule.",
|
||||
inputs=[
|
||||
io.Int.Input("steps", default=20, min=1, max=10000),
|
||||
io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False),
|
||||
@@ -74,8 +70,6 @@ class ExponentialScheduler(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="ExponentialScheduler",
|
||||
category="sampling/custom_sampling/schedulers",
|
||||
description="Generates a sigma schedule using an exponential noise schedule with configurable sigma range.",
|
||||
short_description="Generate sigmas using exponential noise schedule.",
|
||||
inputs=[
|
||||
io.Int.Input("steps", default=20, min=1, max=10000),
|
||||
io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False),
|
||||
@@ -97,8 +91,6 @@ class PolyexponentialScheduler(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="PolyexponentialScheduler",
|
||||
category="sampling/custom_sampling/schedulers",
|
||||
description="Generates a sigma schedule using a polyexponential noise schedule with configurable sigma range and rho parameter.",
|
||||
short_description="Generate sigmas using polyexponential noise schedule.",
|
||||
inputs=[
|
||||
io.Int.Input("steps", default=20, min=1, max=10000),
|
||||
io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False),
|
||||
@@ -121,8 +113,6 @@ class LaplaceScheduler(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="LaplaceScheduler",
|
||||
category="sampling/custom_sampling/schedulers",
|
||||
description="Generates a sigma schedule using a Laplace distribution-based noise schedule with configurable mu and beta parameters.",
|
||||
short_description="Generate sigmas using Laplace distribution schedule.",
|
||||
inputs=[
|
||||
io.Int.Input("steps", default=20, min=1, max=10000),
|
||||
io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False),
|
||||
@@ -147,8 +137,6 @@ class SDTurboScheduler(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SDTurboScheduler",
|
||||
category="sampling/custom_sampling/schedulers",
|
||||
description="Generates a sigma schedule optimized for SD Turbo models with very few steps and adjustable denoise strength.",
|
||||
short_description="Generate sigma schedule for SD Turbo models.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Int.Input("steps", default=1, min=1, max=10),
|
||||
@@ -173,8 +161,6 @@ class BetaSamplingScheduler(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="BetaSamplingScheduler",
|
||||
category="sampling/custom_sampling/schedulers",
|
||||
description="Generates a sigma schedule using a beta distribution with configurable alpha and beta shape parameters.",
|
||||
short_description="Generate sigmas using beta distribution schedule.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Int.Input("steps", default=20, min=1, max=10000),
|
||||
@@ -197,8 +183,6 @@ class VPScheduler(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="VPScheduler",
|
||||
category="sampling/custom_sampling/schedulers",
|
||||
description="Generates a sigma schedule using the Variance Preserving (VP) SDE formulation with configurable beta and epsilon parameters.",
|
||||
short_description="Generate sigmas using VP SDE schedule.",
|
||||
inputs=[
|
||||
io.Int.Input("steps", default=20, min=1, max=10000),
|
||||
io.Float.Input("beta_d", default=19.9, min=0.0, max=5000.0, step=0.01, round=False), #TODO: fix default values
|
||||
@@ -221,8 +205,6 @@ class SplitSigmas(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SplitSigmas",
|
||||
category="sampling/custom_sampling/sigmas",
|
||||
description="Splits a sigma sequence into high and low portions at a specified step index for multi-pass sampling.",
|
||||
short_description="Split sigmas into high and low at a step.",
|
||||
inputs=[
|
||||
io.Sigmas.Input("sigmas"),
|
||||
io.Int.Input("step", default=0, min=0, max=10000),
|
||||
@@ -247,8 +229,6 @@ class SplitSigmasDenoise(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SplitSigmasDenoise",
|
||||
category="sampling/custom_sampling/sigmas",
|
||||
description="Splits a sigma sequence into high and low portions based on a denoise ratio for multi-pass sampling workflows.",
|
||||
short_description="Split sigmas by denoise ratio.",
|
||||
inputs=[
|
||||
io.Sigmas.Input("sigmas"),
|
||||
io.Float.Input("denoise", default=1.0, min=0.0, max=1.0, step=0.01),
|
||||
@@ -275,8 +255,6 @@ class FlipSigmas(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="FlipSigmas",
|
||||
category="sampling/custom_sampling/sigmas",
|
||||
description="Reverses the order of a sigma sequence, useful for converting between ascending and descending noise schedules.",
|
||||
short_description="Reverse the order of a sigma sequence.",
|
||||
inputs=[io.Sigmas.Input("sigmas")],
|
||||
outputs=[io.Sigmas.Output()]
|
||||
)
|
||||
@@ -299,8 +277,6 @@ class SetFirstSigma(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SetFirstSigma",
|
||||
category="sampling/custom_sampling/sigmas",
|
||||
description="Overrides the first sigma value in a sequence with a custom value, allowing manual control of the initial noise level.",
|
||||
short_description="Override the first sigma value in a sequence.",
|
||||
inputs=[
|
||||
io.Sigmas.Input("sigmas"),
|
||||
io.Float.Input("sigma", default=136.0, min=0.0, max=20000.0, step=0.001, round=False),
|
||||
@@ -323,8 +299,6 @@ class ExtendIntermediateSigmas(io.ComfyNode):
|
||||
node_id="ExtendIntermediateSigmas",
|
||||
search_aliases=["interpolate sigmas"],
|
||||
category="sampling/custom_sampling/sigmas",
|
||||
description="Interpolates additional intermediate sigma values between existing steps using selectable spacing within a specified sigma range.",
|
||||
short_description="Interpolate additional sigma steps between existing values.",
|
||||
inputs=[
|
||||
io.Sigmas.Input("sigmas"),
|
||||
io.Int.Input("steps", default=2, min=1, max=100),
|
||||
@@ -378,8 +352,6 @@ class SamplingPercentToSigma(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SamplingPercentToSigma",
|
||||
category="sampling/custom_sampling/sigmas",
|
||||
description="Converts a sampling percentage (0.0 to 1.0) to the corresponding sigma value using a model's noise schedule.",
|
||||
short_description="Convert sampling percentage to sigma value.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Float.Input("sampling_percent", default=0.0, min=0.0, max=1.0, step=0.0001),
|
||||
@@ -408,8 +380,6 @@ class KSamplerSelect(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="KSamplerSelect",
|
||||
category="sampling/custom_sampling/samplers",
|
||||
description="Selects a sampler algorithm by name from the list of available samplers and outputs the sampler object.",
|
||||
short_description="Select a sampler algorithm by name.",
|
||||
inputs=[io.Combo.Input("sampler_name", options=comfy.samplers.SAMPLER_NAMES)],
|
||||
outputs=[io.Sampler.Output()]
|
||||
)
|
||||
@@ -427,8 +397,6 @@ class SamplerDPMPP_3M_SDE(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SamplerDPMPP_3M_SDE",
|
||||
category="sampling/custom_sampling/samplers",
|
||||
description="Creates a DPM++ 3M SDE sampler with configurable eta, noise scale, and GPU or CPU noise generation.",
|
||||
short_description="Create a DPM++ 3M SDE sampler.",
|
||||
inputs=[
|
||||
io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
|
||||
io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
|
||||
@@ -454,8 +422,6 @@ class SamplerDPMPP_2M_SDE(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SamplerDPMPP_2M_SDE",
|
||||
category="sampling/custom_sampling/samplers",
|
||||
description="Creates a DPM++ 2M SDE sampler with configurable solver type, eta, noise scale, and noise device.",
|
||||
short_description="Create a DPM++ 2M SDE sampler.",
|
||||
inputs=[
|
||||
io.Combo.Input("solver_type", options=['midpoint', 'heun']),
|
||||
io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
|
||||
@@ -483,8 +449,6 @@ class SamplerDPMPP_SDE(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SamplerDPMPP_SDE",
|
||||
category="sampling/custom_sampling/samplers",
|
||||
description="Creates a DPM++ SDE sampler with configurable eta, noise scale, r parameter, and noise device.",
|
||||
short_description="Create a DPM++ SDE sampler.",
|
||||
inputs=[
|
||||
io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
|
||||
io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
|
||||
@@ -511,8 +475,6 @@ class SamplerDPMPP_2S_Ancestral(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SamplerDPMPP_2S_Ancestral",
|
||||
category="sampling/custom_sampling/samplers",
|
||||
description="Creates a DPM++ 2S Ancestral sampler with configurable eta and noise scale parameters.",
|
||||
short_description="Create a DPM++ 2S Ancestral sampler.",
|
||||
inputs=[
|
||||
io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
|
||||
io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
|
||||
@@ -533,8 +495,6 @@ class SamplerEulerAncestral(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SamplerEulerAncestral",
|
||||
category="sampling/custom_sampling/samplers",
|
||||
description="Creates an Euler Ancestral sampler with configurable eta and noise scale for stochastic sampling.",
|
||||
short_description="Create an Euler Ancestral stochastic sampler.",
|
||||
inputs=[
|
||||
io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
|
||||
io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
|
||||
@@ -556,8 +516,6 @@ class SamplerEulerAncestralCFGPP(io.ComfyNode):
|
||||
node_id="SamplerEulerAncestralCFGPP",
|
||||
display_name="SamplerEulerAncestralCFG++",
|
||||
category="sampling/custom_sampling/samplers",
|
||||
description="Creates an Euler Ancestral CFG++ sampler that applies classifier-free guidance with improved stability.",
|
||||
short_description="Create an Euler Ancestral CFG++ sampler.",
|
||||
inputs=[
|
||||
io.Float.Input("eta", default=1.0, min=0.0, max=1.0, step=0.01, round=False),
|
||||
io.Float.Input("s_noise", default=1.0, min=0.0, max=10.0, step=0.01, round=False),
|
||||
@@ -580,8 +538,6 @@ class SamplerLMS(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SamplerLMS",
|
||||
category="sampling/custom_sampling/samplers",
|
||||
description="Creates a Linear Multi-Step (LMS) sampler with a configurable order parameter.",
|
||||
short_description="Create a Linear Multi-Step (LMS) sampler.",
|
||||
inputs=[io.Int.Input("order", default=4, min=1, max=100)],
|
||||
outputs=[io.Sampler.Output()]
|
||||
)
|
||||
@@ -599,8 +555,6 @@ class SamplerDPMAdaptative(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SamplerDPMAdaptative",
|
||||
category="sampling/custom_sampling/samplers",
|
||||
description="Creates a DPM Adaptive sampler with configurable order, tolerances, PID coefficients, and stochastic noise parameters for adaptive step-size sampling.",
|
||||
short_description="Create a DPM Adaptive step-size sampler.",
|
||||
inputs=[
|
||||
io.Int.Input("order", default=3, min=2, max=3),
|
||||
io.Float.Input("rtol", default=0.05, min=0.0, max=100.0, step=0.01, round=False),
|
||||
@@ -632,8 +586,6 @@ class SamplerER_SDE(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SamplerER_SDE",
|
||||
category="sampling/custom_sampling/samplers",
|
||||
description="Creates an ER-SDE sampler supporting ER-SDE, reverse-time SDE, and ODE solver types with configurable stochastic strength and staging.",
|
||||
short_description="Create an ER-SDE, reverse-time SDE, or ODE sampler.",
|
||||
inputs=[
|
||||
io.Combo.Input("solver_type", options=["ER-SDE", "Reverse-time SDE", "ODE"]),
|
||||
io.Int.Input("max_stage", default=3, min=1, max=3),
|
||||
@@ -672,8 +624,6 @@ class SamplerSASolver(io.ComfyNode):
|
||||
node_id="SamplerSASolver",
|
||||
search_aliases=["sde"],
|
||||
category="sampling/custom_sampling/samplers",
|
||||
description="Creates an SA-Solver sampler with configurable predictor/corrector orders, SDE region, and PECE mode for high-order diffusion sampling.",
|
||||
short_description="Create an SA-Solver high-order diffusion sampler.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Float.Input("eta", default=1.0, min=0.0, max=10.0, step=0.01, round=False),
|
||||
@@ -734,8 +684,7 @@ class SamplerSEEDS2(io.ComfyNode):
|
||||
"- solver_type=phi_2, r=1.0, eta=0.0\n\n"
|
||||
"exp_heun_2_x0_sde\n"
|
||||
"- solver_type=phi_2, r=1.0, eta=1.0, s_noise=1.0"
|
||||
),
|
||||
short_description="SEEDS2 sampler with configurable solver and SDE settings.",
|
||||
)
|
||||
)
|
||||
|
||||
@classmethod
|
||||
@@ -779,8 +728,6 @@ class SamplerCustom(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SamplerCustom",
|
||||
category="sampling/custom_sampling",
|
||||
description="Runs a complete custom sampling pass by combining a model, sampler, sigmas, and conditioning with optional noise injection.",
|
||||
short_description="Run custom sampling with manual sampler and sigmas.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Boolean.Input("add_noise", default=True),
|
||||
@@ -847,8 +794,6 @@ class BasicGuider(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="BasicGuider",
|
||||
category="sampling/custom_sampling/guiders",
|
||||
description="Creates a basic guider that applies a single conditioning input to guide the diffusion model without classifier-free guidance.",
|
||||
short_description="Create a single-conditioning guider without CFG.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Conditioning.Input("conditioning"),
|
||||
@@ -870,8 +815,6 @@ class CFGGuider(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="CFGGuider",
|
||||
category="sampling/custom_sampling/guiders",
|
||||
description="Creates a classifier-free guidance guider that combines positive and negative conditioning with an adjustable CFG scale.",
|
||||
short_description="Create a CFG guider with positive/negative conditioning.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Conditioning.Input("positive"),
|
||||
@@ -926,8 +869,6 @@ class DualCFGGuider(io.ComfyNode):
|
||||
node_id="DualCFGGuider",
|
||||
search_aliases=["dual prompt guidance"],
|
||||
category="sampling/custom_sampling/guiders",
|
||||
description="Creates a dual classifier-free guidance guider that blends two conditioning inputs against a negative with independent CFG scales and regular or nested styles.",
|
||||
short_description="Create a dual CFG guider with two conditionings.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Conditioning.Input("cond1"),
|
||||
@@ -956,8 +897,6 @@ class DisableNoise(io.ComfyNode):
|
||||
node_id="DisableNoise",
|
||||
search_aliases=["zero noise"],
|
||||
category="sampling/custom_sampling/noise",
|
||||
description="Produces a zero-noise source that disables noise injection, useful for deterministic sampling or img2img without added noise.",
|
||||
short_description="Produce zero noise to disable noise injection.",
|
||||
inputs=[],
|
||||
outputs=[io.Noise.Output()]
|
||||
)
|
||||
@@ -975,8 +914,6 @@ class RandomNoise(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="RandomNoise",
|
||||
category="sampling/custom_sampling/noise",
|
||||
description="Produces a random noise source from a seed value for use in custom sampling workflows.",
|
||||
short_description="Produce seeded random noise for sampling.",
|
||||
inputs=[io.Int.Input("noise_seed", default=0, min=0, max=0xffffffffffffffff, control_after_generate=True)],
|
||||
outputs=[io.Noise.Output()]
|
||||
)
|
||||
@@ -994,8 +931,6 @@ class SamplerCustomAdvanced(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="SamplerCustomAdvanced",
|
||||
category="sampling/custom_sampling",
|
||||
description="Runs an advanced custom sampling pass using separate noise, guider, sampler, and sigmas inputs for maximum control over the diffusion process.",
|
||||
short_description="Run advanced custom sampling with separate components.",
|
||||
inputs=[
|
||||
io.Noise.Input("noise"),
|
||||
io.Guider.Input("guider"),
|
||||
@@ -1050,8 +985,6 @@ class AddNoise(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="AddNoise",
|
||||
category="_for_testing/custom_sampling/noise",
|
||||
description="Adds scaled noise to a latent image using the model's noise schedule and sigma values for manual noise injection.",
|
||||
short_description="Add scaled noise to a latent image.",
|
||||
is_experimental=True,
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
@@ -1102,8 +1035,6 @@ class ManualSigmas(io.ComfyNode):
|
||||
node_id="ManualSigmas",
|
||||
search_aliases=["custom noise schedule", "define sigmas"],
|
||||
category="_for_testing/custom_sampling",
|
||||
description="Defines a custom sigma sequence by manually entering comma-separated numeric values as a text string.",
|
||||
short_description="Define custom sigmas from comma-separated values.",
|
||||
is_experimental=True,
|
||||
inputs=[
|
||||
io.String.Input("sigmas", default="1, 0.5", multiline=False)
|
||||
|
||||
@@ -49,8 +49,6 @@ class LoadImageDataSetFromFolderNode(io.ComfyNode):
|
||||
node_id="LoadImageDataSetFromFolder",
|
||||
display_name="Load Image Dataset from Folder",
|
||||
category="dataset",
|
||||
description="Loads all images from a selected input subfolder and outputs them as a list of image tensors.",
|
||||
short_description="Loads images from a folder as a list.",
|
||||
is_experimental=True,
|
||||
inputs=[
|
||||
io.Combo.Input(
|
||||
@@ -88,8 +86,6 @@ class LoadImageTextDataSetFromFolderNode(io.ComfyNode):
|
||||
node_id="LoadImageTextDataSetFromFolder",
|
||||
display_name="Load Image and Text Dataset from Folder",
|
||||
category="dataset",
|
||||
description="Loads paired images and text captions from a folder, matching each image with its corresponding text file.",
|
||||
short_description="Loads paired images and text captions from folder.",
|
||||
is_experimental=True,
|
||||
inputs=[
|
||||
io.Combo.Input(
|
||||
@@ -212,8 +208,6 @@ class SaveImageDataSetToFolderNode(io.ComfyNode):
|
||||
node_id="SaveImageDataSetToFolder",
|
||||
display_name="Save Image Dataset to Folder",
|
||||
category="dataset",
|
||||
description="Saves a list of images to a named folder in the output directory with configurable filename prefix.",
|
||||
short_description="Saves image list to an output folder.",
|
||||
is_experimental=True,
|
||||
is_output_node=True,
|
||||
is_input_list=True, # Receive images as list
|
||||
@@ -253,8 +247,6 @@ class SaveImageTextDataSetToFolderNode(io.ComfyNode):
|
||||
node_id="SaveImageTextDataSetToFolder",
|
||||
display_name="Save Image and Text Dataset to Folder",
|
||||
category="dataset",
|
||||
description="Saves paired images and text captions to a named folder in the output directory with configurable filename prefix.",
|
||||
short_description="Saves paired images and text to output folder.",
|
||||
is_experimental=True,
|
||||
is_output_node=True,
|
||||
is_input_list=True, # Receive both images and texts as lists
|
||||
@@ -409,8 +401,6 @@ class ImageProcessingNode(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id=cls.node_id,
|
||||
display_name=cls.display_name or cls.node_id,
|
||||
description=getattr(cls, 'description', ''),
|
||||
short_description=getattr(cls, 'short_description', ''),
|
||||
category="dataset/image",
|
||||
is_experimental=True,
|
||||
is_input_list=is_group, # True for group, False for individual
|
||||
@@ -560,8 +550,6 @@ class TextProcessingNode(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id=cls.node_id,
|
||||
display_name=cls.display_name or cls.node_id,
|
||||
description=getattr(cls, 'description', ''),
|
||||
short_description=getattr(cls, 'short_description', ''),
|
||||
category="dataset/text",
|
||||
is_experimental=True,
|
||||
is_input_list=is_group, # True for group, False for individual
|
||||
@@ -639,7 +627,6 @@ class ResizeImagesByShorterEdgeNode(ImageProcessingNode):
|
||||
node_id = "ResizeImagesByShorterEdge"
|
||||
display_name = "Resize Images by Shorter Edge"
|
||||
description = "Resize images so that the shorter edge matches the specified length while preserving aspect ratio."
|
||||
short_description = "Resizes images by shorter edge preserving aspect ratio."
|
||||
extra_inputs = [
|
||||
io.Int.Input(
|
||||
"shorter_edge",
|
||||
@@ -668,7 +655,6 @@ class ResizeImagesByLongerEdgeNode(ImageProcessingNode):
|
||||
node_id = "ResizeImagesByLongerEdge"
|
||||
display_name = "Resize Images by Longer Edge"
|
||||
description = "Resize images so that the longer edge matches the specified length while preserving aspect ratio."
|
||||
short_description = "Resizes images by longer edge preserving aspect ratio."
|
||||
extra_inputs = [
|
||||
io.Int.Input(
|
||||
"longer_edge",
|
||||
@@ -700,7 +686,6 @@ class CenterCropImagesNode(ImageProcessingNode):
|
||||
node_id = "CenterCropImages"
|
||||
display_name = "Center Crop Images"
|
||||
description = "Center crop all images to the specified dimensions."
|
||||
short_description = None
|
||||
extra_inputs = [
|
||||
io.Int.Input("width", default=512, min=1, max=8192, tooltip="Crop width."),
|
||||
io.Int.Input("height", default=512, min=1, max=8192, tooltip="Crop height."),
|
||||
@@ -723,7 +708,6 @@ class RandomCropImagesNode(ImageProcessingNode):
|
||||
description = (
|
||||
"Randomly crop all images to the specified dimensions (for data augmentation)."
|
||||
)
|
||||
short_description = None
|
||||
extra_inputs = [
|
||||
io.Int.Input("width", default=512, min=1, max=8192, tooltip="Crop width."),
|
||||
io.Int.Input("height", default=512, min=1, max=8192, tooltip="Crop height."),
|
||||
@@ -750,7 +734,6 @@ class NormalizeImagesNode(ImageProcessingNode):
|
||||
node_id = "NormalizeImages"
|
||||
display_name = "Normalize Images"
|
||||
description = "Normalize images using mean and standard deviation."
|
||||
short_description = None
|
||||
extra_inputs = [
|
||||
io.Float.Input(
|
||||
"mean",
|
||||
@@ -777,7 +760,6 @@ class AdjustBrightnessNode(ImageProcessingNode):
|
||||
node_id = "AdjustBrightness"
|
||||
display_name = "Adjust Brightness"
|
||||
description = "Adjust brightness of all images."
|
||||
short_description = None
|
||||
extra_inputs = [
|
||||
io.Float.Input(
|
||||
"factor",
|
||||
@@ -797,7 +779,6 @@ class AdjustContrastNode(ImageProcessingNode):
|
||||
node_id = "AdjustContrast"
|
||||
display_name = "Adjust Contrast"
|
||||
description = "Adjust contrast of all images."
|
||||
short_description = None
|
||||
extra_inputs = [
|
||||
io.Float.Input(
|
||||
"factor",
|
||||
@@ -817,7 +798,6 @@ class ShuffleDatasetNode(ImageProcessingNode):
|
||||
node_id = "ShuffleDataset"
|
||||
display_name = "Shuffle Image Dataset"
|
||||
description = "Randomly shuffle the order of images in the dataset."
|
||||
short_description = None
|
||||
is_group_process = True # Requires full list to shuffle
|
||||
extra_inputs = [
|
||||
io.Int.Input(
|
||||
@@ -841,8 +821,6 @@ class ShuffleImageTextDatasetNode(io.ComfyNode):
|
||||
node_id="ShuffleImageTextDataset",
|
||||
display_name="Shuffle Image-Text Dataset",
|
||||
category="dataset/image",
|
||||
description="Randomly shuffles paired image and text lists together using a seed, preserving their correspondence.",
|
||||
short_description="Shuffles paired image-text lists together.",
|
||||
is_experimental=True,
|
||||
is_input_list=True,
|
||||
inputs=[
|
||||
@@ -885,7 +863,6 @@ class TextToLowercaseNode(TextProcessingNode):
|
||||
node_id = "TextToLowercase"
|
||||
display_name = "Text to Lowercase"
|
||||
description = "Convert all texts to lowercase."
|
||||
short_description = None
|
||||
|
||||
@classmethod
|
||||
def _process(cls, text):
|
||||
@@ -896,7 +873,6 @@ class TextToUppercaseNode(TextProcessingNode):
|
||||
node_id = "TextToUppercase"
|
||||
display_name = "Text to Uppercase"
|
||||
description = "Convert all texts to uppercase."
|
||||
short_description = None
|
||||
|
||||
@classmethod
|
||||
def _process(cls, text):
|
||||
@@ -907,7 +883,6 @@ class TruncateTextNode(TextProcessingNode):
|
||||
node_id = "TruncateText"
|
||||
display_name = "Truncate Text"
|
||||
description = "Truncate all texts to a maximum length."
|
||||
short_description = None
|
||||
extra_inputs = [
|
||||
io.Int.Input(
|
||||
"max_length", default=77, min=1, max=10000, tooltip="Maximum text length."
|
||||
@@ -923,7 +898,6 @@ class AddTextPrefixNode(TextProcessingNode):
|
||||
node_id = "AddTextPrefix"
|
||||
display_name = "Add Text Prefix"
|
||||
description = "Add a prefix to all texts."
|
||||
short_description = None
|
||||
extra_inputs = [
|
||||
io.String.Input("prefix", default="", tooltip="Prefix to add."),
|
||||
]
|
||||
@@ -937,7 +911,6 @@ class AddTextSuffixNode(TextProcessingNode):
|
||||
node_id = "AddTextSuffix"
|
||||
display_name = "Add Text Suffix"
|
||||
description = "Add a suffix to all texts."
|
||||
short_description = None
|
||||
extra_inputs = [
|
||||
io.String.Input("suffix", default="", tooltip="Suffix to add."),
|
||||
]
|
||||
@@ -951,7 +924,6 @@ class ReplaceTextNode(TextProcessingNode):
|
||||
node_id = "ReplaceText"
|
||||
display_name = "Replace Text"
|
||||
description = "Replace text in all texts."
|
||||
short_description = None
|
||||
extra_inputs = [
|
||||
io.String.Input("find", default="", tooltip="Text to find."),
|
||||
io.String.Input("replace", default="", tooltip="Text to replace with."),
|
||||
@@ -966,7 +938,6 @@ class StripWhitespaceNode(TextProcessingNode):
|
||||
node_id = "StripWhitespace"
|
||||
display_name = "Strip Whitespace"
|
||||
description = "Strip leading and trailing whitespace from all texts."
|
||||
short_description = None
|
||||
|
||||
@classmethod
|
||||
def _process(cls, text):
|
||||
@@ -982,7 +953,6 @@ class ImageDeduplicationNode(ImageProcessingNode):
|
||||
node_id = "ImageDeduplication"
|
||||
display_name = "Image Deduplication"
|
||||
description = "Remove duplicate or very similar images from the dataset."
|
||||
short_description = None
|
||||
is_group_process = True # Requires full list to compare images
|
||||
extra_inputs = [
|
||||
io.Float.Input(
|
||||
@@ -1053,7 +1023,6 @@ class ImageGridNode(ImageProcessingNode):
|
||||
node_id = "ImageGrid"
|
||||
display_name = "Image Grid"
|
||||
description = "Arrange multiple images into a grid layout."
|
||||
short_description = None
|
||||
is_group_process = True # Requires full list to create grid
|
||||
is_output_list = False # Outputs single grid image
|
||||
extra_inputs = [
|
||||
@@ -1128,7 +1097,6 @@ class MergeImageListsNode(ImageProcessingNode):
|
||||
node_id = "MergeImageLists"
|
||||
display_name = "Merge Image Lists"
|
||||
description = "Concatenate multiple image lists into one."
|
||||
short_description = None
|
||||
is_group_process = True # Receives images as list
|
||||
|
||||
@classmethod
|
||||
@@ -1146,7 +1114,6 @@ class MergeTextListsNode(TextProcessingNode):
|
||||
node_id = "MergeTextLists"
|
||||
display_name = "Merge Text Lists"
|
||||
description = "Concatenate multiple text lists into one."
|
||||
short_description = None
|
||||
is_group_process = True # Receives texts as list
|
||||
|
||||
@classmethod
|
||||
@@ -1170,8 +1137,6 @@ class ResolutionBucket(io.ComfyNode):
|
||||
node_id="ResolutionBucket",
|
||||
display_name="Resolution Bucket",
|
||||
category="dataset",
|
||||
description="Groups latents and conditioning by resolution into batched buckets for efficient training with mixed aspect ratios.",
|
||||
short_description="Groups latents by resolution into training buckets.",
|
||||
is_experimental=True,
|
||||
is_input_list=True,
|
||||
inputs=[
|
||||
@@ -1265,8 +1230,6 @@ class MakeTrainingDataset(io.ComfyNode):
|
||||
search_aliases=["encode dataset"],
|
||||
display_name="Make Training Dataset",
|
||||
category="dataset",
|
||||
description="Encodes images with a VAE and text captions with CLIP to create paired latent and conditioning training data.",
|
||||
short_description="Encodes images and text into training data.",
|
||||
is_experimental=True,
|
||||
is_input_list=True, # images and texts as lists
|
||||
inputs=[
|
||||
@@ -1353,8 +1316,6 @@ class SaveTrainingDataset(io.ComfyNode):
|
||||
search_aliases=["export training data"],
|
||||
display_name="Save Training Dataset",
|
||||
category="dataset",
|
||||
description="Saves encoded latent and conditioning training data to disk in sharded files with configurable shard size.",
|
||||
short_description="Saves encoded training data to sharded files.",
|
||||
is_experimental=True,
|
||||
is_output_node=True,
|
||||
is_input_list=True, # Receive lists
|
||||
@@ -1456,8 +1417,6 @@ class LoadTrainingDataset(io.ComfyNode):
|
||||
search_aliases=["import dataset", "training data"],
|
||||
display_name="Load Training Dataset",
|
||||
category="dataset",
|
||||
description="Loads a previously saved training dataset of latents and conditioning from sharded files on disk.",
|
||||
short_description="Loads saved training dataset from disk.",
|
||||
is_experimental=True,
|
||||
inputs=[
|
||||
io.String.Input(
|
||||
|
||||
@@ -14,8 +14,6 @@ class DifferentialDiffusion(io.ComfyNode):
|
||||
search_aliases=["inpaint gradient", "variable denoise strength"],
|
||||
display_name="Differential Diffusion",
|
||||
category="_for_testing",
|
||||
description="Enables per-pixel variable denoise strength using a mask, where mask intensity controls how much each region is denoised during sampling.",
|
||||
short_description="Per-pixel variable denoise strength via mask.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Float.Input(
|
||||
|
||||
@@ -363,7 +363,6 @@ class EasyCacheNode(io.ComfyNode):
|
||||
node_id="EasyCache",
|
||||
display_name="EasyCache",
|
||||
description="Native EasyCache implementation.",
|
||||
short_description=None,
|
||||
category="advanced/debug/model",
|
||||
is_experimental=True,
|
||||
inputs=[
|
||||
@@ -497,7 +496,6 @@ class LazyCacheNode(io.ComfyNode):
|
||||
node_id="LazyCache",
|
||||
display_name="LazyCache",
|
||||
description="A homebrew version of EasyCache - even 'easier' version of EasyCache to implement. Overall works worse than EasyCache, but better in some rare cases AND universal compatibility with everything in ComfyUI.",
|
||||
short_description="Simpler EasyCache alternative with universal ComfyUI compatibility.",
|
||||
category="advanced/debug/model",
|
||||
is_experimental=True,
|
||||
inputs=[
|
||||
|
||||
@@ -10,7 +10,6 @@ class ReferenceLatent(io.ComfyNode):
|
||||
node_id="ReferenceLatent",
|
||||
category="advanced/conditioning/edit_models",
|
||||
description="This node sets the guiding latent for an edit model. If the model supports it you can chain multiple to set multiple reference images.",
|
||||
short_description="Sets guiding latent for edit models with chaining support.",
|
||||
inputs=[
|
||||
io.Conditioning.Input("conditioning"),
|
||||
io.Latent.Input("latent", optional=True),
|
||||
|
||||
@@ -19,8 +19,6 @@ class EpsilonScaling(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="Epsilon Scaling",
|
||||
category="model_patches/unet",
|
||||
description="Applies epsilon scaling to mitigate exposure bias in diffusion models by scaling the predicted noise after CFG, improving sample quality.",
|
||||
short_description="Scale predicted noise to reduce exposure bias.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Float.Input(
|
||||
@@ -123,7 +121,6 @@ class TemporalScoreRescaling(io.ComfyNode):
|
||||
"TSR - Temporal Score Rescaling (2510.01184)\n\n"
|
||||
"Rescaling the model's score or noise to steer the sampling diversity.\n"
|
||||
),
|
||||
short_description="Rescales temporal scores to control sampling diversity.",
|
||||
)
|
||||
|
||||
@classmethod
|
||||
|
||||
@@ -13,8 +13,6 @@ class CLIPTextEncodeFlux(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="CLIPTextEncodeFlux",
|
||||
category="advanced/conditioning/flux",
|
||||
description="Encodes separate CLIP-L and T5-XXL text prompts with a guidance value into Flux conditioning.",
|
||||
short_description="Encodes CLIP-L and T5-XXL prompts for Flux.",
|
||||
inputs=[
|
||||
io.Clip.Input("clip"),
|
||||
io.String.Input("clip_l", multiline=True, dynamic_prompts=True),
|
||||
@@ -42,8 +40,6 @@ class EmptyFlux2LatentImage(io.ComfyNode):
|
||||
node_id="EmptyFlux2LatentImage",
|
||||
display_name="Empty Flux 2 Latent",
|
||||
category="latent",
|
||||
description="Creates an empty Flux 2 latent image tensor with the specified width, height, and batch size.",
|
||||
short_description="Creates an empty Flux 2 latent image tensor.",
|
||||
inputs=[
|
||||
io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
|
||||
io.Int.Input("height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
|
||||
@@ -65,8 +61,6 @@ class FluxGuidance(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="FluxGuidance",
|
||||
category="advanced/conditioning/flux",
|
||||
description="Sets the guidance strength value on Flux conditioning to control how closely generation follows the prompt.",
|
||||
short_description="Sets guidance strength on Flux conditioning.",
|
||||
inputs=[
|
||||
io.Conditioning.Input("conditioning"),
|
||||
io.Float.Input("guidance", default=3.5, min=0.0, max=100.0, step=0.1),
|
||||
@@ -91,7 +85,6 @@ class FluxDisableGuidance(io.ComfyNode):
|
||||
node_id="FluxDisableGuidance",
|
||||
category="advanced/conditioning/flux",
|
||||
description="This node completely disables the guidance embed on Flux and Flux like models",
|
||||
short_description="Disables guidance embed on Flux and Flux-like models.",
|
||||
inputs=[
|
||||
io.Conditioning.Input("conditioning"),
|
||||
],
|
||||
@@ -136,7 +129,6 @@ class FluxKontextImageScale(io.ComfyNode):
|
||||
node_id="FluxKontextImageScale",
|
||||
category="advanced/conditioning/flux",
|
||||
description="This node resizes the image to one that is more optimal for flux kontext.",
|
||||
short_description="Resizes images to optimal dimensions for Flux Kontext.",
|
||||
inputs=[
|
||||
io.Image.Input("image"),
|
||||
],
|
||||
@@ -164,8 +156,6 @@ class FluxKontextMultiReferenceLatentMethod(io.ComfyNode):
|
||||
node_id="FluxKontextMultiReferenceLatentMethod",
|
||||
display_name="Edit Model Reference Method",
|
||||
category="advanced/conditioning/flux",
|
||||
description="Selects the method used for handling multiple reference latents in Flux Kontext edit models.",
|
||||
short_description="Selects reference latent method for Flux Kontext.",
|
||||
inputs=[
|
||||
io.Conditioning.Input("conditioning"),
|
||||
io.Combo.Input(
|
||||
@@ -224,8 +214,6 @@ class Flux2Scheduler(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="Flux2Scheduler",
|
||||
category="sampling/custom_sampling/schedulers",
|
||||
description="Generates a sigma schedule for Flux 2 sampling based on step count and image resolution.",
|
||||
short_description="Generates a sigma schedule for Flux 2 sampling.",
|
||||
inputs=[
|
||||
io.Int.Input("steps", default=20, min=1, max=4096),
|
||||
io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=1),
|
||||
|
||||
@@ -30,8 +30,6 @@ class FreeU(IO.ComfyNode):
|
||||
return IO.Schema(
|
||||
node_id="FreeU",
|
||||
category="model_patches/unet",
|
||||
description="Applies FreeU v1 to a UNet model, boosting backbone features and filtering skip connections using Fourier transforms for improved quality.",
|
||||
short_description="Applies FreeU v1 backbone boost and skip filtering.",
|
||||
inputs=[
|
||||
IO.Model.Input("model"),
|
||||
IO.Float.Input("b1", default=1.1, min=0.0, max=10.0, step=0.01),
|
||||
@@ -79,8 +77,6 @@ class FreeU_V2(IO.ComfyNode):
|
||||
return IO.Schema(
|
||||
node_id="FreeU_V2",
|
||||
category="model_patches/unet",
|
||||
description="Applies FreeU v2 to a UNet model with adaptive backbone scaling based on hidden state magnitude and Fourier skip filtering.",
|
||||
short_description="Applies FreeU v2 with adaptive scaling.",
|
||||
inputs=[
|
||||
IO.Model.Input("model"),
|
||||
IO.Float.Input("b1", default=1.3, min=0.0, max=10.0, step=0.01),
|
||||
|
||||
@@ -62,7 +62,6 @@ class FreSca(io.ComfyNode):
|
||||
display_name="FreSca",
|
||||
category="_for_testing",
|
||||
description="Applies frequency-dependent scaling to the guidance",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Float.Input("scale_low", default=1.0, min=0, max=10, step=0.01,
|
||||
|
||||
@@ -341,8 +341,6 @@ class GITSScheduler(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="GITSScheduler",
|
||||
category="sampling/custom_sampling/schedulers",
|
||||
description="Generates a noise schedule using the GITS method with precomputed optimal sigma levels and configurable coefficient.",
|
||||
short_description="Generates a GITS noise schedule with optimal sigma levels.",
|
||||
inputs=[
|
||||
io.Float.Input("coeff", default=1.20, min=0.80, max=1.50, step=0.05),
|
||||
io.Int.Input("steps", default=10, min=2, max=1000),
|
||||
|
||||
@@ -13,7 +13,6 @@ class QuadrupleCLIPLoader(io.ComfyNode):
|
||||
node_id="QuadrupleCLIPLoader",
|
||||
category="advanced/loaders",
|
||||
description="[Recipes]\n\nhidream: long clip-l, long clip-g, t5xxl, llama_8b_3.1_instruct",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
io.Combo.Input("clip_name1", options=folder_paths.get_filename_list("text_encoders")),
|
||||
io.Combo.Input("clip_name2", options=folder_paths.get_filename_list("text_encoders")),
|
||||
@@ -41,8 +40,6 @@ class CLIPTextEncodeHiDream(io.ComfyNode):
|
||||
node_id="CLIPTextEncodeHiDream",
|
||||
search_aliases=["hidream prompt"],
|
||||
category="advanced/conditioning",
|
||||
description="Encodes separate CLIP-L, CLIP-G, T5-XXL, and Llama text prompts into HiDream conditioning.",
|
||||
short_description="Encodes multi-encoder text prompts for HiDream.",
|
||||
inputs=[
|
||||
io.Clip.Input("clip"),
|
||||
io.String.Input("clip_l", multiline=True, dynamic_prompts=True),
|
||||
|
||||
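The hooks-related hunks below use the older class-attribute node style (RETURN_TYPES, RETURN_NAMES, CATEGORY, DESCRIPTION, SHORT_DESCRIPTION, FUNCTION) rather than the io.Schema pattern seen above. As a point of reference, a minimal sketch of that pattern is shown here; the node class, its INPUT_TYPES body, and the registration mapping are hypothetical and are not part of this diff.

```python
# Illustrative sketch only -- a minimal class-attribute style node. The specific
# node, its INPUT_TYPES contents, and the NODE_CLASS_MAPPINGS entry are
# assumptions for the sketch, not taken from this diff.
class ExampleCombineConditioning:
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "conditioning_A": ("CONDITIONING",),
                "conditioning_B": ("CONDITIONING",),
            }
        }

    RETURN_TYPES = ("CONDITIONING",)
    CATEGORY = "advanced/hooks"
    DESCRIPTION = "Combine two conditioning inputs into one."
    FUNCTION = "combine"

    def combine(self, conditioning_A, conditioning_B):
        # Conditioning values are lists of (tensor, dict) pairs; concatenating
        # the two lists is the simplest way to combine them.
        return (conditioning_A + conditioning_B,)


NODE_CLASS_MAPPINGS = {"ExampleCombineConditioning": ExampleCombineConditioning}
```
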
@@ -38,8 +38,6 @@ class PairConditioningSetProperties:
RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
RETURN_NAMES = ("positive", "negative")
CATEGORY = "advanced/hooks/cond pair"
DESCRIPTION = "Set properties like strength, mask, hooks, and timesteps on a positive/negative conditioning pair."
SHORT_DESCRIPTION = "Set properties on a positive/negative conditioning pair."
FUNCTION = "set_properties"

def set_properties(self, positive_NEW, negative_NEW,
@@ -75,8 +73,6 @@ class PairConditioningSetPropertiesAndCombine:
RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
RETURN_NAMES = ("positive", "negative")
CATEGORY = "advanced/hooks/cond pair"
DESCRIPTION = "Set properties on new conditioning pair and combine with existing positive/negative conditioning."
SHORT_DESCRIPTION = "Set properties on new cond pair, combine with existing."
FUNCTION = "set_properties"

def set_properties(self, positive, negative, positive_NEW, negative_NEW,
@@ -108,8 +104,6 @@ class ConditioningSetProperties:
EXPERIMENTAL = True
RETURN_TYPES = ("CONDITIONING",)
CATEGORY = "advanced/hooks/cond single"
DESCRIPTION = "Set properties like strength, mask, hooks, and timesteps on a single conditioning input."
SHORT_DESCRIPTION = "Set properties on a single conditioning input."
FUNCTION = "set_properties"

def set_properties(self, cond_NEW,
@@ -142,8 +136,6 @@ class ConditioningSetPropertiesAndCombine:
EXPERIMENTAL = True
RETURN_TYPES = ("CONDITIONING",)
CATEGORY = "advanced/hooks/cond single"
DESCRIPTION = "Set properties on new conditioning and combine it with an existing conditioning input."
SHORT_DESCRIPTION = "Set properties on new conditioning, combine with existing."
FUNCTION = "set_properties"

def set_properties(self, cond, cond_NEW,
@@ -172,8 +164,6 @@ class PairConditioningCombine:
RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
RETURN_NAMES = ("positive", "negative")
CATEGORY = "advanced/hooks/cond pair"
DESCRIPTION = "Combine two positive/negative conditioning pairs into a single pair."
SHORT_DESCRIPTION = None
FUNCTION = "combine"

def combine(self, positive_A, negative_A, positive_B, negative_B):
@@ -201,8 +191,6 @@ class PairConditioningSetDefaultAndCombine:
RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
RETURN_NAMES = ("positive", "negative")
CATEGORY = "advanced/hooks/cond pair"
DESCRIPTION = "Set default conditioning pair and combine it with existing positive/negative conditioning and optional hooks."
SHORT_DESCRIPTION = "Set default cond pair and combine with existing."
FUNCTION = "set_default_and_combine"

def set_default_and_combine(self, positive, negative, positive_DEFAULT, negative_DEFAULT,
@@ -229,8 +217,6 @@ class ConditioningSetDefaultAndCombine:
EXPERIMENTAL = True
RETURN_TYPES = ("CONDITIONING",)
CATEGORY = "advanced/hooks/cond single"
DESCRIPTION = "Set default conditioning and combine it with existing conditioning input and optional hooks."
SHORT_DESCRIPTION = "Set default conditioning and combine with existing."
FUNCTION = "set_default_and_combine"

def set_default_and_combine(self, cond, cond_DEFAULT,
@@ -258,8 +244,6 @@ class SetClipHooks:
EXPERIMENTAL = True
RETURN_TYPES = ("CLIP",)
CATEGORY = "advanced/hooks/clip"
DESCRIPTION = "Apply hooks to a CLIP model, optionally propagating them to conditioning outputs and enabling CLIP scheduling."
SHORT_DESCRIPTION = "Apply hooks to a CLIP model with scheduling options."
FUNCTION = "apply_hooks"

def apply_hooks(self, clip: CLIP, schedule_clip: bool, apply_to_conds: bool, hooks: comfy.hooks.HookGroup=None):
@@ -291,8 +275,6 @@ class ConditioningTimestepsRange:
RETURN_TYPES = ("TIMESTEPS_RANGE", "TIMESTEPS_RANGE", "TIMESTEPS_RANGE")
RETURN_NAMES = ("TIMESTEPS_RANGE", "BEFORE_RANGE", "AFTER_RANGE")
CATEGORY = "advanced/hooks"
DESCRIPTION = "Define a timestep percentage range and output the range plus its complement before and after segments."
SHORT_DESCRIPTION = "Define a timestep range with before/after complements."
FUNCTION = "create_range"

def create_range(self, start_percent: float, end_percent: float):
@@ -326,8 +308,6 @@ class CreateHookLora:
EXPERIMENTAL = True
RETURN_TYPES = ("HOOKS",)
CATEGORY = "advanced/hooks/create"
DESCRIPTION = "Create a LoRA hook with separate model and CLIP strength that can be scheduled on conditioning."
SHORT_DESCRIPTION = "Create a LoRA hook with model and CLIP strength."
FUNCTION = "create_hook"

def create_hook(self, lora_name: str, strength_model: float, strength_clip: float, prev_hooks: comfy.hooks.HookGroup=None):
@@ -373,8 +353,6 @@ class CreateHookLoraModelOnly(CreateHookLora):
EXPERIMENTAL = True
RETURN_TYPES = ("HOOKS",)
CATEGORY = "advanced/hooks/create"
DESCRIPTION = "Create a LoRA hook that only affects the model (not CLIP) for scheduling on conditioning."
SHORT_DESCRIPTION = "Create a model-only LoRA hook."
FUNCTION = "create_hook_model_only"

def create_hook_model_only(self, lora_name: str, strength_model: float, prev_hooks: comfy.hooks.HookGroup=None):
@@ -405,8 +383,6 @@ class CreateHookModelAsLora:
EXPERIMENTAL = True
RETURN_TYPES = ("HOOKS",)
CATEGORY = "advanced/hooks/create"
DESCRIPTION = "Create a hook from a full checkpoint treated as a LoRA, with separate model and CLIP strength controls."
SHORT_DESCRIPTION = "Create a hook from a checkpoint treated as LoRA."
FUNCTION = "create_hook"

def create_hook(self, ckpt_name: str, strength_model: float, strength_clip: float,
@@ -455,8 +431,6 @@ class CreateHookModelAsLoraModelOnly(CreateHookModelAsLora):
EXPERIMENTAL = True
RETURN_TYPES = ("HOOKS",)
CATEGORY = "advanced/hooks/create"
DESCRIPTION = "Create a model-only hook from a full checkpoint treated as a LoRA, without affecting CLIP."
SHORT_DESCRIPTION = "Create a model-only hook from a checkpoint as LoRA."
FUNCTION = "create_hook_model_only"

def create_hook_model_only(self, ckpt_name: str, strength_model: float,
@@ -486,8 +460,6 @@ class SetHookKeyframes:
EXPERIMENTAL = True
RETURN_TYPES = ("HOOKS",)
CATEGORY = "advanced/hooks/scheduling"
DESCRIPTION = "Assign keyframe schedules to hooks for controlling their strength over time during sampling."
SHORT_DESCRIPTION = "Assign keyframe schedules to hooks over time."
FUNCTION = "set_hook_keyframes"

def set_hook_keyframes(self, hooks: comfy.hooks.HookGroup, hook_kf: comfy.hooks.HookKeyframeGroup=None):
@@ -516,8 +488,6 @@ class CreateHookKeyframe:
RETURN_TYPES = ("HOOK_KEYFRAMES",)
RETURN_NAMES = ("HOOK_KF",)
CATEGORY = "advanced/hooks/scheduling"
DESCRIPTION = "Create a single hook keyframe with a strength multiplier at a specific timestep percentage."
SHORT_DESCRIPTION = "Create a hook keyframe at a specific timestep."
FUNCTION = "create_hook_keyframe"

def create_hook_keyframe(self, strength_mult: float, start_percent: float, prev_hook_kf: comfy.hooks.HookKeyframeGroup=None):
@@ -553,8 +523,6 @@ class CreateHookKeyframesInterpolated:
RETURN_TYPES = ("HOOK_KEYFRAMES",)
RETURN_NAMES = ("HOOK_KF",)
CATEGORY = "advanced/hooks/scheduling"
DESCRIPTION = "Generate multiple interpolated hook keyframes between start and end strength values over a timestep range."
SHORT_DESCRIPTION = "Generate interpolated hook keyframes over a timestep range."
FUNCTION = "create_hook_keyframes"

def create_hook_keyframes(self, strength_start: float, strength_end: float, interpolation: str,
@@ -600,8 +568,6 @@ class CreateHookKeyframesFromFloats:
RETURN_TYPES = ("HOOK_KEYFRAMES",)
RETURN_NAMES = ("HOOK_KF",)
CATEGORY = "advanced/hooks/scheduling"
DESCRIPTION = "Create hook keyframes from a list of float values distributed evenly across a timestep percentage range."
SHORT_DESCRIPTION = "Create hook keyframes from a list of float values."
FUNCTION = "create_hook_keyframes"

def create_hook_keyframes(self, floats_strength: Union[float, list[float]],
@@ -673,8 +639,6 @@ class CombineHooks:
EXPERIMENTAL = True
RETURN_TYPES = ("HOOKS",)
CATEGORY = "advanced/hooks/combine"
DESCRIPTION = "Combine two hook groups into one."
SHORT_DESCRIPTION = None
FUNCTION = "combine_hooks"

def combine_hooks(self,
@@ -702,8 +666,6 @@ class CombineHooksFour:
EXPERIMENTAL = True
RETURN_TYPES = ("HOOKS",)
CATEGORY = "advanced/hooks/combine"
DESCRIPTION = "Combine up to four hook groups into one."
SHORT_DESCRIPTION = None
FUNCTION = "combine_hooks"

def combine_hooks(self,
@@ -737,8 +699,6 @@ class CombineHooksEight:
EXPERIMENTAL = True
RETURN_TYPES = ("HOOKS",)
CATEGORY = "advanced/hooks/combine"
DESCRIPTION = "Combine up to eight hook groups into one."
SHORT_DESCRIPTION = None
FUNCTION = "combine_hooks"

def combine_hooks(self,

@@ -15,8 +15,6 @@ class CLIPTextEncodeHunyuanDiT(io.ComfyNode):
return io.Schema(
node_id="CLIPTextEncodeHunyuanDiT",
category="advanced/conditioning",
description="Encodes text using both BERT and mT5-XL tokenizers for Hunyuan DiT conditioning.",
short_description="Dual-tokenizer text encoding for Hunyuan DiT.",
inputs=[
io.Clip.Input("clip"),
io.String.Input("bert", multiline=True, dynamic_prompts=True),
@@ -44,8 +42,6 @@ class EmptyHunyuanLatentVideo(io.ComfyNode):
node_id="EmptyHunyuanLatentVideo",
display_name="Empty HunyuanVideo 1.0 Latent",
category="latent/video",
description="Creates an empty latent tensor sized for HunyuanVideo 1.0 video generation.",
short_description="Empty latent for HunyuanVideo 1.0 generation.",
inputs=[
io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16),
io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
@@ -71,8 +67,6 @@ class EmptyHunyuanVideo15Latent(EmptyHunyuanLatentVideo):
schema = super().define_schema()
schema.node_id = "EmptyHunyuanVideo15Latent"
schema.display_name = "Empty HunyuanVideo 1.5 Latent"
schema.description = "Creates an empty latent tensor sized for HunyuanVideo 1.5 video generation with 16x spatial downscale."
schema.short_description = "Empty latent for HunyuanVideo 1.5 generation."
return schema

@classmethod
@@ -88,8 +82,6 @@ class HunyuanVideo15ImageToVideo(io.ComfyNode):
return io.Schema(
node_id="HunyuanVideo15ImageToVideo",
category="conditioning/video_models",
description="Prepares conditioning and latent for HunyuanVideo 1.5 image-to-video generation with start image and CLIP vision support.",
short_description="HunyuanVideo 1.5 image-to-video conditioning setup.",
inputs=[
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),
@@ -139,9 +131,6 @@ class HunyuanVideo15SuperResolution(io.ComfyNode):
def define_schema(cls):
return io.Schema(
node_id="HunyuanVideo15SuperResolution",
category="conditioning/video_models",
description="Sets up conditioning for HunyuanVideo 1.5 super-resolution upscaling of a latent with noise augmentation and optional image guidance.",
short_description="HunyuanVideo 1.5 super-resolution latent conditioning.",
inputs=[
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),
@@ -188,8 +177,6 @@ class LatentUpscaleModelLoader(io.ComfyNode):
node_id="LatentUpscaleModelLoader",
display_name="Load Latent Upscale Model",
category="loaders",
description="Loads a latent upscale model from disk, supporting HunyuanVideo 720p, 1080p, and other latent upsampler architectures.",
short_description="Load a latent upscale model from file.",
inputs=[
io.Combo.Input("model_name", options=folder_paths.get_filename_list("latent_upscale_models")),
],
@@ -239,8 +226,6 @@ class HunyuanVideo15LatentUpscaleWithModel(io.ComfyNode):
node_id="HunyuanVideo15LatentUpscaleWithModel",
display_name="Hunyuan Video 15 Latent Upscale With Model",
category="latent",
description="Upscales a video latent to a target resolution using a loaded latent upscale model and configurable upscale method.",
short_description="Upscale video latent using a latent upscale model.",
inputs=[
io.LatentUpscaleModel.Input("model"),
io.Latent.Input("samples"),
@@ -290,8 +275,6 @@ class TextEncodeHunyuanVideo_ImageToVideo(io.ComfyNode):
return io.Schema(
node_id="TextEncodeHunyuanVideo_ImageToVideo",
category="advanced/conditioning",
description="Encodes text with CLIP vision image embeddings for HunyuanVideo image-to-video conditioning using an interleaved template.",
short_description="Text and image encoding for HunyuanVideo image-to-video.",
inputs=[
io.Clip.Input("clip"),
io.ClipVisionOutput.Input("clip_vision_output"),
@@ -323,8 +306,6 @@ class HunyuanImageToVideo(io.ComfyNode):
return io.Schema(
node_id="HunyuanImageToVideo",
category="conditioning/video_models",
description="Prepares conditioning and latent for Hunyuan image-to-video generation with selectable guidance type.",
short_description="Hunyuan image-to-video conditioning with guidance options.",
inputs=[
io.Conditioning.Input("positive"),
io.Vae.Input("vae"),
@@ -376,8 +357,6 @@ class EmptyHunyuanImageLatent(io.ComfyNode):
return io.Schema(
node_id="EmptyHunyuanImageLatent",
category="latent",
description="Creates an empty latent tensor sized for Hunyuan image generation.",
short_description="Empty latent for Hunyuan image generation.",
inputs=[
io.Int.Input("width", default=2048, min=64, max=nodes.MAX_RESOLUTION, step=32),
io.Int.Input("height", default=2048, min=64, max=nodes.MAX_RESOLUTION, step=32),
@@ -401,9 +380,6 @@ class HunyuanRefinerLatent(io.ComfyNode):
def define_schema(cls):
return io.Schema(
node_id="HunyuanRefinerLatent",
category="conditioning/video_models",
description="Prepares conditioning for a Hunyuan refiner pass by concatenating the input latent with noise augmentation settings.",
short_description="Hunyuan refiner conditioning with noise augmentation.",
inputs=[
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),

@@ -18,8 +18,6 @@ class EmptyLatentHunyuan3Dv2(IO.ComfyNode):
return IO.Schema(
node_id="EmptyLatentHunyuan3Dv2",
category="latent/3d",
description="Creates an empty latent tensor for Hunyuan 3D v2 generation with configurable resolution and batch size.",
short_description="Empty latent for Hunyuan 3D v2 generation.",
inputs=[
IO.Int.Input("resolution", default=3072, min=1, max=8192),
IO.Int.Input("batch_size", default=1, min=1, max=4096, tooltip="The number of latent images in the batch."),
@@ -43,8 +41,6 @@ class Hunyuan3Dv2Conditioning(IO.ComfyNode):
return IO.Schema(
node_id="Hunyuan3Dv2Conditioning",
category="conditioning/video_models",
description="Creates positive and negative conditioning for Hunyuan 3D v2 from a CLIP vision output embedding.",
short_description="Conditioning from CLIP vision for Hunyuan 3D v2.",
inputs=[
IO.ClipVisionOutput.Input("clip_vision_output"),
],
@@ -70,8 +66,6 @@ class Hunyuan3Dv2ConditioningMultiView(IO.ComfyNode):
return IO.Schema(
node_id="Hunyuan3Dv2ConditioningMultiView",
category="conditioning/video_models",
description="Creates multi-view conditioning for Hunyuan 3D v2 from up to four directional CLIP vision outputs with positional encoding.",
short_description="Multi-view conditioning for Hunyuan 3D v2.",
inputs=[
IO.ClipVisionOutput.Input("front", optional=True),
IO.ClipVisionOutput.Input("left", optional=True),
@@ -109,8 +103,6 @@ class VAEDecodeHunyuan3D(IO.ComfyNode):
return IO.Schema(
node_id="VAEDecodeHunyuan3D",
category="latent/3d",
description="Decodes a Hunyuan 3D latent into a voxel grid using a VAE with configurable chunk size and octree resolution.",
short_description="Decodes Hunyuan 3D latent into voxels.",
inputs=[
IO.Latent.Input("samples"),
IO.Vae.Input("vae"),
@@ -433,8 +425,6 @@ class VoxelToMeshBasic(IO.ComfyNode):
return IO.Schema(
node_id="VoxelToMeshBasic",
category="3d",
description="Converts a voxel grid to a 3D mesh using basic cube-based surface extraction with adjustable threshold.",
short_description="Converts voxels to mesh using basic extraction.",
inputs=[
IO.Voxel.Input("voxel"),
IO.Float.Input("threshold", default=0.6, min=-1.0, max=1.0, step=0.01),
@@ -464,8 +454,6 @@ class VoxelToMesh(IO.ComfyNode):
return IO.Schema(
node_id="VoxelToMesh",
category="3d",
description="Converts a voxel grid to a 3D mesh using selectable surface net or basic algorithm with adjustable threshold.",
short_description="Converts voxels to mesh with algorithm selection.",
inputs=[
IO.Voxel.Input("voxel"),
IO.Combo.Input("algorithm", options=["surface net", "basic"]),
@@ -633,8 +621,6 @@ class SaveGLB(IO.ComfyNode):
display_name="Save 3D Model",
search_aliases=["export 3d model", "save mesh"],
category="3d",
description="Saves a 3D mesh or model file to disk in GLB format with optional workflow metadata embedding.",
short_description="Saves 3D mesh or model to GLB file.",
is_output_node=True,
inputs=[
IO.MultiType.Input(

@@ -103,8 +103,6 @@ class HypernetworkLoader(IO.ComfyNode):
return IO.Schema(
node_id="HypernetworkLoader",
category="loaders",
description="Loads a hypernetwork and patches it onto a diffusion model's attention layers with adjustable strength.",
short_description="Loads and applies a hypernetwork to a model.",
inputs=[
IO.Model.Input("model"),
IO.Combo.Input("hypernetwork_name", options=folder_paths.get_filename_list("hypernetworks")),

@@ -28,8 +28,6 @@ class HyperTile(io.ComfyNode):
return io.Schema(
node_id="HyperTile",
category="model_patches/unet",
description="Patches the model to split self-attention into smaller tiles during inference, reducing memory usage and speeding up generation at higher resolutions.",
short_description="Tile self-attention for faster high-res generation.",
inputs=[
io.Model.Input("model"),
io.Int.Input("tile_size", default=256, min=1, max=2048),

@@ -13,7 +13,6 @@ class ImageCompare(IO.ComfyNode):
node_id="ImageCompare",
display_name="Image Compare",
description="Compares two images side by side with a slider.",
short_description=None,
category="image",
is_experimental=True,
is_output_node=True,

@@ -25,8 +25,6 @@ class ImageCrop(IO.ComfyNode):
search_aliases=["trim"],
display_name="Image Crop",
category="image/transform",
description="Crops a rectangular region from an image at the specified position and dimensions.",
short_description="Crops a region from an image.",
inputs=[
IO.Image.Input("image"),
IO.Int.Input("width", default=512, min=1, max=nodes.MAX_RESOLUTION, step=1),
@@ -56,8 +54,6 @@ class RepeatImageBatch(IO.ComfyNode):
node_id="RepeatImageBatch",
search_aliases=["duplicate image", "clone image"],
category="image/batch",
description="Repeats an image a specified number of times to create a batch of identical images.",
short_description="Repeats an image to create a batch.",
inputs=[
IO.Image.Input("image"),
IO.Int.Input("amount", default=1, min=1, max=4096),
@@ -80,8 +76,6 @@ class ImageFromBatch(IO.ComfyNode):
node_id="ImageFromBatch",
search_aliases=["select image", "pick from batch", "extract image"],
category="image/batch",
description="Selects a contiguous range of images from a batch starting at a given index.",
short_description="Selects images from a batch by index.",
inputs=[
IO.Image.Input("image"),
IO.Int.Input("batch_index", default=0, min=0, max=4095),
@@ -108,8 +102,6 @@ class ImageAddNoise(IO.ComfyNode):
node_id="ImageAddNoise",
search_aliases=["film grain"],
category="image",
description="Adds random noise to an image with adjustable strength, useful for film grain effects.",
short_description="Adds random noise to an image.",
inputs=[
IO.Image.Input("image"),
IO.Int.Input(
@@ -142,8 +134,6 @@ class SaveAnimatedWEBP(IO.ComfyNode):
return IO.Schema(
node_id="SaveAnimatedWEBP",
category="image/animation",
description="Saves a sequence of images as an animated WEBP file with configurable FPS, quality, and compression.",
short_description="Saves images as an animated WEBP file.",
inputs=[
IO.Image.Input("images"),
IO.String.Input("filename_prefix", default="ComfyUI"),
@@ -181,8 +171,6 @@ class SaveAnimatedPNG(IO.ComfyNode):
return IO.Schema(
node_id="SaveAnimatedPNG",
category="image/animation",
description="Saves a sequence of images as an animated PNG (APNG) file with configurable FPS and compression level.",
short_description="Saves images as an animated PNG file.",
inputs=[
IO.Image.Input("images"),
IO.String.Input("filename_prefix", default="ComfyUI"),
@@ -219,7 +207,6 @@ class ImageStitch(IO.ComfyNode):
description="Stitches image2 to image1 in the specified direction.\n"
"If image2 is not provided, returns image1 unchanged.\n"
"Optional spacing can be added between images.",
short_description="Joins two images together in a specified direction.",
category="image/transform",
inputs=[
IO.Image.Input("image1"),
@@ -392,8 +379,6 @@ class ResizeAndPadImage(IO.ComfyNode):
node_id="ResizeAndPadImage",
search_aliases=["fit to size"],
category="image/transform",
description="Resizes an image to fit within target dimensions while preserving aspect ratio, then pads with a solid color to fill the target size.",
short_description="Resizes an image to fit and pads the remainder.",
inputs=[
IO.Image.Input("image"),
IO.Int.Input("target_width", default=512, min=1, max=nodes.MAX_RESOLUTION, step=1),
@@ -445,7 +430,6 @@ class SaveSVGNode(IO.ComfyNode):
node_id="SaveSVGNode",
search_aliases=["export vector", "save vector graphics"],
description="Save SVG files on disk.",
short_description=None,
category="image/save",
inputs=[
IO.SVG.Input("svg"),
@@ -518,7 +502,7 @@ class GetImageSize(IO.ComfyNode):
node_id="GetImageSize",
search_aliases=["dimensions", "resolution", "image info"],
display_name="Get Image Size",
description="Returns the width, height, and batch size of an image.",
description="Returns width and height of the image, and passes it through unchanged.",
category="image",
inputs=[
IO.Image.Input("image"),
@@ -553,8 +537,6 @@ class ImageRotate(IO.ComfyNode):
node_id="ImageRotate",
search_aliases=["turn", "flip orientation"],
category="image/transform",
description="Rotates an image by 90, 180, or 270 degrees.",
short_description=None,
inputs=[
IO.Image.Input("image"),
IO.Combo.Input("rotation", options=["none", "90 degrees", "180 degrees", "270 degrees"]),
@@ -585,8 +567,6 @@ class ImageFlip(IO.ComfyNode):
node_id="ImageFlip",
search_aliases=["mirror", "reflect"],
category="image/transform",
description="Flips an image horizontally or vertically.",
short_description=None,
inputs=[
IO.Image.Input("image"),
IO.Combo.Input("flip_method", options=["x-axis: vertically", "y-axis: horizontally"]),
@@ -613,8 +593,6 @@ class ImageScaleToMaxDimension(IO.ComfyNode):
return IO.Schema(
node_id="ImageScaleToMaxDimension",
category="image/upscaling",
description="Scales an image so its largest dimension matches the specified size while preserving aspect ratio.",
short_description="Scales image to a target max dimension size.",
inputs=[
IO.Image.Input("image"),
IO.Combo.Input(

@@ -10,8 +10,6 @@ class InstructPixToPixConditioning(io.ComfyNode):
return io.Schema(
node_id="InstructPixToPixConditioning",
category="conditioning/instructpix2pix",
description="Prepares conditioning for InstructPix2Pix image editing by encoding the input image through a VAE and attaching it as concat latent to both positive and negative conditioning.",
short_description="Prepare conditioning for InstructPix2Pix editing.",
inputs=[
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),

@@ -14,8 +14,6 @@ class Kandinsky5ImageToVideo(io.ComfyNode):
return io.Schema(
node_id="Kandinsky5ImageToVideo",
category="conditioning/video_models",
description="Sets up Kandinsky 5 image-to-video generation by creating an empty video latent and optionally encoding a start image for conditioning.",
short_description="Sets up Kandinsky 5 image-to-video conditioning.",
inputs=[
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),
@@ -75,7 +73,6 @@ class NormalizeVideoLatentStart(io.ComfyNode):
node_id="NormalizeVideoLatentStart",
category="conditioning/video_models",
description="Normalizes the initial frames of a video latent to match the mean and standard deviation of subsequent reference frames. Helps reduce differences between the starting frames and the rest of the video.",
short_description="Normalizes initial video latent frames to match reference frames.",
inputs=[
io.Latent.Input("latent"),
io.Int.Input("start_frame_count", default=4, min=1, max=nodes.MAX_RESOLUTION, step=1, tooltip="Number of latent frames to normalize, counted from the start"),
@@ -109,8 +106,6 @@ class CLIPTextEncodeKandinsky5(io.ComfyNode):
node_id="CLIPTextEncodeKandinsky5",
search_aliases=["kandinsky prompt"],
category="advanced/conditioning/kandinsky5",
description="Encodes separate CLIP-L and Qwen 2.5 7B text prompts into Kandinsky 5 conditioning.",
short_description="Encodes CLIP-L and Qwen prompts for Kandinsky 5.",
inputs=[
io.Clip.Input("clip"),
io.String.Input("clip_l", multiline=True, dynamic_prompts=True),

@@ -23,8 +23,6 @@ class LatentAdd(io.ComfyNode):
node_id="LatentAdd",
search_aliases=["combine latents", "sum latents"],
category="latent/advanced",
description="Adds two latent tensors element-wise, automatically resizing the second to match the first.",
short_description="Add two latent tensors element-wise.",
inputs=[
io.Latent.Input("samples1"),
io.Latent.Input("samples2"),
@@ -52,8 +50,6 @@ class LatentSubtract(io.ComfyNode):
node_id="LatentSubtract",
search_aliases=["difference latent", "remove features"],
category="latent/advanced",
description="Subtracts one latent tensor from another element-wise, automatically resizing the second to match the first.",
short_description="Subtract one latent tensor from another.",
inputs=[
io.Latent.Input("samples1"),
io.Latent.Input("samples2"),
@@ -81,8 +77,6 @@ class LatentMultiply(io.ComfyNode):
node_id="LatentMultiply",
search_aliases=["scale latent", "amplify latent", "latent gain"],
category="latent/advanced",
description="Multiplies a latent tensor by a scalar value to scale its magnitude up or down.",
short_description="Scale a latent tensor by a multiplier.",
inputs=[
io.Latent.Input("samples"),
io.Float.Input("multiplier", default=1.0, min=-10.0, max=10.0, step=0.01),
@@ -107,8 +101,6 @@ class LatentInterpolate(io.ComfyNode):
node_id="LatentInterpolate",
search_aliases=["blend latent", "mix latent", "lerp latent", "transition"],
category="latent/advanced",
description="Interpolates between two latent tensors using a ratio, preserving magnitude for smoother blending than linear interpolation.",
short_description="Interpolate between two latent tensors.",
inputs=[
io.Latent.Input("samples1"),
io.Latent.Input("samples2"),
@@ -148,8 +140,6 @@ class LatentConcat(io.ComfyNode):
node_id="LatentConcat",
search_aliases=["join latents", "stitch latents"],
category="latent/advanced",
description="Concatenates two latent tensors along a chosen spatial or temporal dimension (x, y, or t) with optional reversal.",
short_description="Concatenate two latents along a chosen dimension.",
inputs=[
io.Latent.Input("samples1"),
io.Latent.Input("samples2"),
@@ -190,8 +180,6 @@ class LatentCut(io.ComfyNode):
node_id="LatentCut",
search_aliases=["crop latent", "slice latent", "extract region"],
category="latent/advanced",
description="Extracts a contiguous slice from a latent tensor along a chosen spatial or temporal dimension at a specified index and size.",
short_description="Extract a slice from a latent along a dimension.",
inputs=[
io.Latent.Input("samples"),
io.Combo.Input("dim", options=["x", "y", "t"]),
@@ -233,8 +221,6 @@ class LatentCutToBatch(io.ComfyNode):
node_id="LatentCutToBatch",
search_aliases=["slice to batch", "split latent", "tile latent"],
category="latent/advanced",
description="Slices a latent tensor along a chosen dimension into equal-sized chunks and reshapes them into the batch dimension.",
short_description="Slice latent along a dimension into batch chunks.",
inputs=[
io.Latent.Input("samples"),
io.Combo.Input("dim", options=["t", "x", "y"]),
@@ -277,8 +263,6 @@ class LatentBatch(io.ComfyNode):
node_id="LatentBatch",
search_aliases=["combine latents", "merge latents", "join latents"],
category="latent/batch",
description="Concatenates two latent tensors along the batch dimension, preserving batch index metadata.",
short_description="Concatenate two latents along the batch dimension.",
is_deprecated=True,
inputs=[
io.Latent.Input("samples1"),
@@ -307,8 +291,6 @@ class LatentBatchSeedBehavior(io.ComfyNode):
return io.Schema(
node_id="LatentBatchSeedBehavior",
category="latent/advanced",
description="Controls whether each item in a latent batch receives a random or fixed noise seed during sampling.",
short_description="Set random or fixed seed behavior for batches.",
inputs=[
io.Latent.Input("samples"),
io.Combo.Input("seed_behavior", options=["random", "fixed"], default="fixed"),
@@ -338,8 +320,6 @@ class LatentApplyOperation(io.ComfyNode):
node_id="LatentApplyOperation",
search_aliases=["transform latent"],
category="latent/advanced/operations",
description="Applies a latent operation (such as tonemap or sharpen) directly to a latent tensor.",
short_description="Apply a latent operation to a latent tensor.",
is_experimental=True,
inputs=[
io.Latent.Input("samples"),
@@ -364,8 +344,6 @@ class LatentApplyOperationCFG(io.ComfyNode):
return io.Schema(
node_id="LatentApplyOperationCFG",
category="latent/advanced/operations",
description="Applies a latent operation during the CFG pre-processing stage of sampling, modifying the model's prediction before guidance is applied.",
short_description="Apply a latent operation during CFG pre-processing.",
is_experimental=True,
inputs=[
io.Model.Input("model"),
@@ -398,8 +376,6 @@ class LatentOperationTonemapReinhard(io.ComfyNode):
node_id="LatentOperationTonemapReinhard",
search_aliases=["hdr latent"],
category="latent/advanced/operations",
description="Creates a Reinhard tonemapping operation that compresses high-magnitude latent values to reduce blown-out artifacts.",
short_description="Create a Reinhard tonemapping latent operation.",
is_experimental=True,
inputs=[
io.Float.Input("multiplier", default=1.0, min=0.0, max=100.0, step=0.01),
@@ -435,8 +411,6 @@ class LatentOperationSharpen(io.ComfyNode):
return io.Schema(
node_id="LatentOperationSharpen",
category="latent/advanced/operations",
description="Creates a sharpening operation that enhances detail in latent space using a Gaussian-based unsharp mask with configurable radius, sigma, and strength.",
short_description="Create a Gaussian-based latent sharpening operation.",
is_experimental=True,
inputs=[
io.Int.Input("sharpen_radius", default=9, min=1, max=31, step=1),
@@ -474,8 +448,6 @@ class ReplaceVideoLatentFrames(io.ComfyNode):
return io.Schema(
node_id="ReplaceVideoLatentFrames",
category="latent/batch",
description="Replaces a range of frames in a destination video latent with frames from a source latent at a specified index.",
short_description="Replace video latent frames at a given index.",
inputs=[
io.Latent.Input("destination", tooltip="The destination latent where frames will be replaced."),
io.Latent.Input("source", optional=True, tooltip="The source latent providing frames to insert into the destination latent. If not provided, the destination latent is returned unchanged."),

@@ -31,8 +31,6 @@ class Load3D(IO.ComfyNode):
node_id="Load3D",
display_name="Load 3D & Animation",
category="3d",
description="Loads a 3D model file and renders it to produce an image, mask, normal map, camera info, recording video, and 3D file output.",
short_description="Loads and renders a 3D model file.",
is_experimental=True,
inputs=[
IO.Combo.Input("model_file", options=sorted(files), upload=IO.UploadType.model),
@@ -83,8 +81,6 @@ class Preview3D(IO.ComfyNode):
search_aliases=["view mesh", "3d viewer"],
display_name="Preview 3D & Animation",
category="3d",
description="Previews a 3D model or file in the UI with optional camera info and background image overlay.",
short_description="Previews a 3D model in the UI.",
is_experimental=True,
is_output_node=True,
inputs=[

@@ -16,8 +16,6 @@ class SwitchNode(io.ComfyNode):
node_id="ComfySwitchNode",
display_name="Switch",
category="logic",
description="Routes one of two inputs to the output based on a boolean switch value, evaluating only the selected branch lazily.",
short_description="Route one of two inputs based on a boolean.",
is_experimental=True,
inputs=[
io.Boolean.Input("switch"),
@@ -49,8 +47,6 @@ class SoftSwitchNode(io.ComfyNode):
node_id="ComfySoftSwitchNode",
display_name="Soft Switch",
category="logic",
description="Routes one of two optional inputs to the output based on a boolean, falling back to whichever input is connected if only one is provided.",
short_description="Switch with optional fallback to connected input.",
is_experimental=True,
inputs=[
io.Boolean.Input("switch"),
@@ -106,8 +102,6 @@ class CustomComboNode(io.ComfyNode):
node_id="CustomCombo",
display_name="Custom Combo",
category="utils",
description="Provides a user-defined dropdown combo box where options are written by the user, outputting the selected string and its index.",
short_description="User-defined dropdown outputting string and index.",
is_experimental=True,
inputs=[io.Combo.Input("choice", options=[])],
outputs=[
@@ -143,8 +137,6 @@ class DCTestNode(io.ComfyNode):
node_id="DCTestNode",
display_name="DCTest",
category="logic",
description="Test node demonstrating DynamicCombo inputs with nested sub-options that conditionally show different input types.",
short_description="Test node for DynamicCombo nested inputs.",
is_output_node=True,
inputs=[io.DynamicCombo.Input("combo", options=[
io.DynamicCombo.Option("option1", [io.String.Input("string")]),
@@ -183,8 +175,6 @@ class AutogrowNamesTestNode(io.ComfyNode):
node_id="AutogrowNamesTestNode",
display_name="AutogrowNamesTest",
category="logic",
description="Test node demonstrating Autogrow inputs with named template slots that dynamically add float inputs.",
short_description="Test node for Autogrow named template inputs.",
inputs=[
_io.Autogrow.Input("autogrow", template=template)
],
@@ -205,8 +195,6 @@ class AutogrowPrefixTestNode(io.ComfyNode):
node_id="AutogrowPrefixTestNode",
display_name="AutogrowPrefixTest",
category="logic",
description="Test node demonstrating Autogrow inputs with prefix-based template slots that dynamically add numbered float inputs.",
short_description="Test node for Autogrow prefix template inputs.",
inputs=[
_io.Autogrow.Input("autogrow", template=template)
],
@@ -226,8 +214,6 @@ class ComboOutputTestNode(io.ComfyNode):
node_id="ComboOptionTestNode",
display_name="ComboOptionTest",
category="logic",
description="Test node demonstrating combo output types by passing two selected combo values through as outputs.",
short_description="Test node for combo output passthrough.",
inputs=[io.Combo.Input("combo", options=["option1", "option2", "option3"]),
io.Combo.Input("combo2", options=["option4", "option5", "option6"])],
outputs=[io.Combo.Output(), io.Combo.Output()],
@@ -245,8 +231,6 @@ class ConvertStringToComboNode(io.ComfyNode):
search_aliases=["string to dropdown", "text to combo"],
display_name="Convert String to Combo",
category="logic",
description="Converts a string value into a combo type output so it can be used as a dropdown selection in downstream nodes.",
short_description="Convert a string to a combo type output.",
inputs=[io.String.Input("string")],
outputs=[io.Combo.Output()],
)
@@ -263,8 +247,6 @@ class InvertBooleanNode(io.ComfyNode):
search_aliases=["not", "toggle", "negate", "flip boolean"],
display_name="Invert Boolean",
category="logic",
description="Inverts a boolean value, outputting true when input is false and vice versa.",
short_description="Invert a boolean value.",
inputs=[io.Boolean.Input("boolean")],
outputs=[io.Boolean.Output()],
)

@@ -32,7 +32,6 @@ class LoraLoaderBypass:

CATEGORY = "loaders"
DESCRIPTION = "Apply LoRA in bypass mode. Unlike regular LoRA, this doesn't modify model weights - instead it injects the LoRA computation during forward pass. Useful for training scenarios."
SHORT_DESCRIPTION = "Applies LoRA via forward pass injection, not weight modification."
EXPERIMENTAL = True

def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
@@ -63,8 +62,6 @@ class LoraLoaderBypassModelOnly(LoraLoaderBypass):
"strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}),
}}
RETURN_TYPES = ("MODEL",)
DESCRIPTION = "Apply LoRA in bypass mode to only the diffusion model without modifying base weights or affecting CLIP."
SHORT_DESCRIPTION = "Apply bypass LoRA to model only, no CLIP."
FUNCTION = "load_lora_model_only"

def load_lora_model_only(self, model, lora_name, strength_model):

@@ -92,8 +92,6 @@ class LoraSave(io.ComfyNode):
search_aliases=["export lora"],
display_name="Extract and Save Lora",
category="_for_testing",
description="Extracts LoRA weights from a model or text encoder diff using SVD decomposition and saves them as a safetensors file, supporting standard and full diff modes.",
short_description="Extract and save LoRA from model diff.",
inputs=[
io.String.Input("filename_prefix", default="loras/ComfyUI_extracted_lora"),
io.Int.Input("rank", default=8, min=1, max=4096, step=1),

@@ -11,8 +11,6 @@ class LotusConditioning(io.ComfyNode):
return io.Schema(
node_id="LotusConditioning",
category="conditioning/lotus",
description="Provides precomputed null conditioning embeddings for the Lotus depth/normal estimation model, avoiding the need for a separate text encoder.",
short_description="Precomputed null conditioning for Lotus model.",
inputs=[],
outputs=[io.Conditioning.Output(display_name="conditioning")],
)

@@ -18,8 +18,6 @@ class EmptyLTXVLatentVideo(io.ComfyNode):
return io.Schema(
node_id="EmptyLTXVLatentVideo",
category="latent/video/ltxv",
description="Creates an empty LTXV video latent tensor with the specified dimensions and batch size.",
short_description="Creates an empty LTXV video latent tensor.",
inputs=[
io.Int.Input("width", default=768, min=64, max=nodes.MAX_RESOLUTION, step=32),
io.Int.Input("height", default=512, min=64, max=nodes.MAX_RESOLUTION, step=32),
@@ -44,8 +42,6 @@ class LTXVImgToVideo(io.ComfyNode):
return io.Schema(
node_id="LTXVImgToVideo",
category="conditioning/video_models",
description="Encodes an image through a VAE and sets up conditioning for LTXV image-to-video generation with adjustable strength.",
short_description="Sets up LTXV image-to-video conditioning.",
inputs=[
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),
@@ -91,8 +87,6 @@ class LTXVImgToVideoInplace(io.ComfyNode):
return io.Schema(
node_id="LTXVImgToVideoInplace",
category="conditioning/video_models",
description="Encodes an image through a VAE and injects it into an existing latent for in-place LTXV image-to-video conditioning.",
short_description="In-place LTXV image-to-video latent conditioning.",
inputs=[
io.Vae.Input("vae"),
io.Image.Input("image"),
@@ -177,8 +171,6 @@ class LTXVAddGuide(io.ComfyNode):
return io.Schema(
node_id="LTXVAddGuide",
category="conditioning/video_models",
description="Adds a guiding image or video to LTXV conditioning at a specified frame index to control video generation.",
short_description="Adds a guiding image or video to LTXV conditioning.",
inputs=[
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),
@@ -343,8 +335,6 @@ class LTXVCropGuides(io.ComfyNode):
return io.Schema(
node_id="LTXVCropGuides",
category="conditioning/video_models",
description="Removes appended keyframe guide latents from an LTXV latent and resets keyframe indices in the conditioning.",
short_description="Removes keyframe guide latents from LTXV conditioning.",
inputs=[
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),
@@ -383,8 +373,6 @@ class LTXVConditioning(io.ComfyNode):
return io.Schema(
node_id="LTXVConditioning",
category="conditioning/video_models",
description="Sets the frame rate on LTXV positive and negative conditioning for video generation.",
short_description="Sets frame rate on LTXV conditioning.",
inputs=[
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),
@@ -409,8 +397,6 @@ class ModelSamplingLTXV(io.ComfyNode):
return io.Schema(
node_id="ModelSamplingLTXV",
category="advanced/model",
description="Configures LTXV model sampling by computing a shift parameter from max_shift, base_shift, and latent token count.",
short_description="Configures LTXV model sampling shift parameters.",
inputs=[
io.Model.Input("model"),
io.Float.Input("max_shift", default=2.05, min=0.0, max=100.0, step=0.01),
@@ -456,8 +442,6 @@ class LTXVScheduler(io.ComfyNode):
return io.Schema(
node_id="LTXVScheduler",
category="sampling/custom_sampling/schedulers",
description="Generates a sigma schedule for LTXV sampling with configurable shift parameters, stretch, and terminal value.",
short_description="Generates a sigma schedule for LTXV sampling.",
inputs=[
io.Int.Input("steps", default=20, min=1, max=10000),
io.Float.Input("max_shift", default=2.05, min=0.0, max=100.0, step=0.01),
@@ -562,8 +546,6 @@ class LTXVPreprocess(io.ComfyNode):
return io.Schema(
node_id="LTXVPreprocess",
category="image",
description="Applies H.264 video compression preprocessing to images to improve LTXV generation quality.",
short_description="Applies video compression preprocessing for LTXV.",
inputs=[
io.Image.Input("image"),
io.Int.Input(
@@ -592,8 +574,6 @@ class LTXVConcatAVLatent(io.ComfyNode):
return io.Schema(
node_id="LTXVConcatAVLatent",
category="latent/video/ltxv",
description="Concatenates separate video and audio latents into a combined audio-video latent for LTXV processing.",
short_description="Concatenates video and audio latents for LTXV.",
inputs=[
io.Latent.Input("video_latent"),
io.Latent.Input("audio_latent"),
@@ -629,8 +609,7 @@ class LTXVSeparateAVLatent(io.ComfyNode):
return io.Schema(
node_id="LTXVSeparateAVLatent",
category="latent/video/ltxv",
description="Separates a combined audio-video latent into individual video and audio latents.",
short_description=None,
description="LTXV Separate AV Latent",
inputs=[
io.Latent.Input("av_latent"),
],

@@ -14,8 +14,6 @@ class LTXVAudioVAELoader(io.ComfyNode):
node_id="LTXVAudioVAELoader",
display_name="LTXV Audio VAE Loader",
category="audio",
description="Loads an LTXV Audio VAE model from a checkpoint file for audio encoding and decoding.",
short_description="Loads an LTXV Audio VAE model checkpoint.",
inputs=[
io.Combo.Input(
"ckpt_name",
@@ -40,7 +38,6 @@ class LTXVAudioVAEEncode(io.ComfyNode):
node_id="LTXVAudioVAEEncode",
display_name="LTXV Audio VAE Encode",
category="audio",
description="Encodes audio into latent representations using the LTXV Audio VAE model.",
inputs=[
io.Audio.Input("audio", tooltip="The audio to be encoded."),
io.Vae.Input(
@@ -71,8 +68,6 @@ class LTXVAudioVAEDecode(io.ComfyNode):
node_id="LTXVAudioVAEDecode",
display_name="LTXV Audio VAE Decode",
category="audio",
description="Decodes latent representations back into audio using the LTXV Audio VAE model.",
short_description="Decodes latents back to audio via LTXV Audio VAE.",
inputs=[
io.Latent.Input("samples", tooltip="The latent to be decoded."),
io.Vae.Input(
@@ -106,8 +101,6 @@ class LTXVEmptyLatentAudio(io.ComfyNode):
node_id="LTXVEmptyLatentAudio",
display_name="LTXV Empty Latent Audio",
category="latent/audio",
description="Creates an empty LTXV audio latent tensor sized according to the frame count, frame rate, and Audio VAE configuration.",
short_description="Creates an empty LTXV audio latent tensor.",
inputs=[
io.Int.Input(
"frames_number",
@@ -184,7 +177,6 @@ class LTXAVTextEncoderLoader(io.ComfyNode):
display_name="LTXV Audio Text Encoder Loader",
category="advanced/loaders",
description="[Recipes]\n\nltxav: gemma 3 12B",
short_description=None,
inputs=[
io.Combo.Input(
"text_encoder",

@@ -19,8 +19,6 @@ class LTXVLatentUpsampler:
RETURN_TYPES = ("LATENT",)
FUNCTION = "upsample_latent"
CATEGORY = "latent/video"
DESCRIPTION = "Upsample an LTXV video latent by a factor of 2 using a dedicated latent upscale model."
SHORT_DESCRIPTION = "Upsample an LTXV video latent by 2x."
EXPERIMENTAL = True

def upsample_latent(

@@ -10,8 +10,6 @@ class RenormCFG(io.ComfyNode):
return io.Schema(
node_id="RenormCFG",
category="advanced/model",
description="Applies renormalized classifier-free guidance with configurable truncation threshold and renormalization strength to control CFG output magnitude.",
short_description="Applies renormalized classifier-free guidance with truncation.",
inputs=[
io.Model.Input("model"),
io.Float.Input("cfg_trunc", default=100, min=0.0, max=100.0, step=0.01),
@@ -86,7 +84,6 @@ class CLIPTextEncodeLumina2(io.ComfyNode):
category="conditioning",
description="Encodes a system prompt and a user prompt using a CLIP model into an embedding "
"that can be used to guide the diffusion model towards generating specific images.",
short_description="Encodes system and user prompts via CLIP for Lumina2.",
inputs=[
io.Combo.Input(
"system_prompt",

@@ -13,7 +13,6 @@ class Mahiro(io.ComfyNode):
display_name="Mahiro CFG",
category="_for_testing",
description="Modify the guidance to scale more on the 'direction' of the positive prompt rather than the difference between the negative prompt.",
short_description="Scales guidance toward positive prompt direction over negative difference.",
inputs=[
io.Model.Input("model"),
],

@@ -52,8 +52,6 @@ class LatentCompositeMasked(IO.ComfyNode):
node_id="LatentCompositeMasked",
search_aliases=["overlay latent", "layer latent", "paste latent", "inpaint latent"],
category="latent",
description="Composites a source latent onto a destination latent at a specified position with optional mask and resize support.",
short_description="Composites one latent onto another with masking.",
inputs=[
IO.Latent.Input("destination"),
IO.Latent.Input("source"),
@@ -83,8 +81,6 @@ class ImageCompositeMasked(IO.ComfyNode):
node_id="ImageCompositeMasked",
search_aliases=["paste image", "overlay", "layer"],
category="image",
description="Composites a source image onto a destination image at a specified position with optional mask and resize support.",
short_description="Composites one image onto another with masking.",
inputs=[
IO.Image.Input("destination"),
IO.Image.Input("source"),
@@ -114,8 +110,6 @@ class MaskToImage(IO.ComfyNode):
search_aliases=["convert mask"],
display_name="Convert Mask to Image",
category="mask",
description="Converts a single-channel mask into a three-channel grayscale image.",
short_description=None,
inputs=[
IO.Mask.Input("mask"),
],
@@ -138,7 +132,6 @@ class ImageToMask(IO.ComfyNode):
search_aliases=["extract channel", "channel to mask"],
display_name="Convert Image to Mask",
category="mask",
description="Extracts a selected color channel from an image as a mask.",
inputs=[
IO.Image.Input("image"),
IO.Combo.Input("channel", options=["red", "green", "blue", "alpha"]),
@@ -162,8 +155,6 @@ class ImageColorToMask(IO.ComfyNode):
node_id="ImageColorToMask",
search_aliases=["color keying", "chroma key"],
category="mask",
description="Creates a mask from an image where pixels matching a specified RGB color value become white.",
short_description="Creates a mask from pixels matching a color.",
inputs=[
IO.Image.Input("image"),
IO.Int.Input("color", default=0, min=0, max=0xFFFFFF, step=1, display_mode=IO.NumberDisplay.number),
@@ -187,8 +178,6 @@ class SolidMask(IO.ComfyNode):
return IO.Schema(
node_id="SolidMask",
category="mask",
description="Creates a uniform solid mask filled with a single value at the specified dimensions.",
short_description="Creates a solid mask with a uniform value.",
inputs=[
IO.Float.Input("value", default=1.0, min=0.0, max=1.0, step=0.01),
IO.Int.Input("width", default=512, min=1, max=nodes.MAX_RESOLUTION, step=1),
@@ -212,8 +201,6 @@ class InvertMask(IO.ComfyNode):
node_id="InvertMask",
search_aliases=["reverse mask", "flip mask"],
category="mask",
description="Inverts a mask so white becomes black and vice versa.",
short_description=None,
inputs=[
IO.Mask.Input("mask"),
],
@@ -235,8 +222,6 @@ class CropMask(IO.ComfyNode):
node_id="CropMask",
search_aliases=["cut mask", "extract mask region", "mask slice"],
category="mask",
description="Crops a rectangular region from a mask at the specified position and dimensions.",
short_description="Crops a rectangular region from a mask.",
inputs=[
IO.Mask.Input("mask"),
IO.Int.Input("x", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1),
@@ -263,8 +248,6 @@ class MaskComposite(IO.ComfyNode):
node_id="MaskComposite",
search_aliases=["combine masks", "blend masks", "layer masks"],
category="mask",
description="Composites a source mask onto a destination mask at a specified position using selectable blend operations.",
short_description="Composites masks with selectable blend operations.",
inputs=[
IO.Mask.Input("destination"),
IO.Mask.Input("source"),
@@ -314,8 +297,6 @@ class FeatherMask(IO.ComfyNode):
node_id="FeatherMask",
search_aliases=["soft edge mask", "blur mask edges", "gradient mask edge"],
category="mask",
description="Applies a soft gradient feather to the edges of a mask with independent control for each side.",
short_description="Feathers mask edges with per-side control.",
inputs=[
IO.Mask.Input("mask"),
IO.Int.Input("left", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1),
@@ -364,8 +345,6 @@ class GrowMask(IO.ComfyNode):
search_aliases=["expand mask", "shrink mask"],
display_name="Grow Mask",
category="mask",
description="Expands or shrinks a mask by a specified number of pixels using morphological dilation or erosion with optional tapered corners.",
short_description="Expands or shrinks a mask by pixel amount.",
inputs=[
IO.Mask.Input("mask"),
IO.Int.Input("expand", default=0, min=-nodes.MAX_RESOLUTION, max=nodes.MAX_RESOLUTION, step=1),
@@ -403,8 +382,6 @@ class ThresholdMask(IO.ComfyNode):
node_id="ThresholdMask",
search_aliases=["binary mask"],
category="mask",
description="Converts a mask to binary by setting pixels above a threshold to white and below to black.",
short_description="Converts a mask to binary using a threshold.",
inputs=[
IO.Mask.Input("mask"),
IO.Float.Input("value", default=0.5, min=0.0, max=1.0, step=0.01),
@@ -431,8 +408,7 @@ class MaskPreview(IO.ComfyNode):
search_aliases=["show mask", "view mask", "inspect mask", "debug mask"],
display_name="Preview Mask",
category="mask",
description="Previews a mask in the UI by rendering it as a grayscale image.",
short_description="Previews a mask as a grayscale image.",
description="Saves the input images to your ComfyUI output directory.",
inputs=[
IO.Mask.Input("mask"),
],

@@ -11,8 +11,6 @@ class EmptyMochiLatentVideo(io.ComfyNode):
return io.Schema(
node_id="EmptyMochiLatentVideo",
category="latent/video",
description="Creates an empty latent tensor sized for Mochi video generation with configurable width, height, frame length, and batch size.",
short_description="Create empty latent for Mochi video generation.",
inputs=[
io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16),
io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),

@@ -60,8 +60,6 @@ class ModelSamplingDiscrete:
FUNCTION = "patch"

CATEGORY = "advanced/model"
DESCRIPTION = "Override the model's sampling method to use a discrete noise schedule with a selectable prediction type."
SHORT_DESCRIPTION = "Override model sampling to a discrete noise schedule."

def patch(self, model, sampling, zsnr):
m = model.clone()
@@ -98,8 +96,6 @@ class ModelSamplingStableCascade:
FUNCTION = "patch"

CATEGORY = "advanced/model"
DESCRIPTION = "Override the model's sampling to use Stable Cascade noise scheduling with an adjustable shift parameter."
SHORT_DESCRIPTION = "Override sampling to Stable Cascade noise scheduling."

def patch(self, model, shift):
m = model.clone()
@@ -126,8 +122,6 @@ class ModelSamplingSD3:
FUNCTION = "patch"

CATEGORY = "advanced/model"
DESCRIPTION = "Override the model's sampling to use the SD3 discrete flow noise schedule with an adjustable shift parameter."
SHORT_DESCRIPTION = "Override sampling to SD3 discrete flow schedule."

def patch(self, model, shift, multiplier=1000):
m = model.clone()
@@ -150,8 +144,6 @@ class ModelSamplingAuraFlow(ModelSamplingSD3):
"shift": ("FLOAT", {"default": 1.73, "min": 0.0, "max": 100.0, "step":0.01}),
}}

DESCRIPTION = "Override the model's sampling to use the AuraFlow discrete flow noise schedule with an adjustable shift."
SHORT_DESCRIPTION = "Override sampling to AuraFlow discrete flow schedule."
FUNCTION = "patch_aura"

def patch_aura(self, model, shift):
@@ -171,8 +163,6 @@ class ModelSamplingFlux:
FUNCTION = "patch"

CATEGORY = "advanced/model"
DESCRIPTION = "Override the model's sampling to use the Flux flow schedule with resolution-dependent shift computed from base and max shift values."
SHORT_DESCRIPTION = "Override sampling to Flux flow schedule with resolution shift."

def patch(self, model, max_shift, base_shift, width, height):
m = model.clone()
@@ -208,8 +198,6 @@ class ModelSamplingContinuousEDM:
|
||||
FUNCTION = "patch"
|
||||
|
||||
CATEGORY = "advanced/model"
|
||||
DESCRIPTION = "Override the model's sampling to use a continuous EDM noise schedule with configurable sigma range and prediction type."
|
||||
SHORT_DESCRIPTION = "Override sampling to continuous EDM noise schedule."
|
||||
|
||||
def patch(self, model, sampling, sigma_max, sigma_min):
|
||||
m = model.clone()
|
||||
@@ -255,8 +243,6 @@ class ModelSamplingContinuousV:
|
||||
FUNCTION = "patch"
|
||||
|
||||
CATEGORY = "advanced/model"
|
||||
DESCRIPTION = "Override the model's sampling to use a continuous V-prediction noise schedule with configurable sigma range."
|
||||
SHORT_DESCRIPTION = "Override sampling to continuous V-prediction schedule."
|
||||
|
||||
def patch(self, model, sampling, sigma_max, sigma_min):
|
||||
m = model.clone()
|
||||
@@ -283,8 +269,6 @@ class RescaleCFG:
|
||||
FUNCTION = "patch"
|
||||
|
||||
CATEGORY = "advanced/model"
|
||||
DESCRIPTION = "Apply Rescale CFG to the model, which normalizes the CFG output to match the standard deviation of the positive conditioning prediction."
|
||||
SHORT_DESCRIPTION = "Normalize CFG output to match positive conditioning std."
|
||||
|
||||
def patch(self, model, multiplier):
|
||||
def rescale_cfg(args):
|
||||
@@ -326,7 +310,6 @@ class ModelComputeDtype:
|
||||
FUNCTION = "patch"
|
||||
|
||||
CATEGORY = "advanced/debug/model"
|
||||
DESCRIPTION = "Override the compute dtype used by the model during inference."
|
||||
|
||||
def patch(self, model, dtype):
|
||||
m = model.clone()
|
||||
|
||||
@@ -11,8 +11,6 @@ class PatchModelAddDownscale(io.ComfyNode):
node_id="PatchModelAddDownscale",
display_name="PatchModelAddDownscale (Kohya Deep Shrink)",
category="model_patches/unet",
description="Patches the UNet to downscale internal feature maps at a specified block during a configurable sigma range, then upscale on output, implementing the Kohya Deep Shrink technique for faster generation.",
short_description="Kohya Deep Shrink: downscale UNet internals for speed.",
inputs=[
io.Model.Input("model"),
io.Int.Input("block_number", default=3, min=1, max=32, step=1),
@@ -22,8 +22,6 @@ class ModelMergeSimple:
FUNCTION = "merge"

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Merge two diffusion models using a simple ratio to blend all weights uniformly."
SHORT_DESCRIPTION = "Merge two models with a uniform blend ratio."

def merge(self, model1, model2, ratio):
m = model1.clone()
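Conceptually, the uniform-ratio merge described by ModelMergeSimple above is a per-parameter linear blend of the two checkpoints. A minimal sketch of that idea over plain state dicts (illustrative only; the real node applies the same blend through ComfyUI's model patch system rather than materializing merged tensors, and merge_state_dicts is a hypothetical helper name):

import torch

def merge_state_dicts(sd1: dict, sd2: dict, ratio: float) -> dict:
    # ratio = 0.0 keeps model1 unchanged, ratio = 1.0 takes model2 entirely.
    merged = {}
    for key, w1 in sd1.items():
        w2 = sd2.get(key)
        if w2 is None or w2.shape != w1.shape:
            merged[key] = w1.clone()  # keep weights the other model does not share
        else:
            merged[key] = torch.lerp(w1.float(), w2.float(), ratio).to(w1.dtype)
    return merged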
@@ -43,8 +41,6 @@ class ModelSubtract:
FUNCTION = "merge"

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Subtract one diffusion model's weights from another with an adjustable multiplier for extracting differences."
SHORT_DESCRIPTION = "Subtract model weights with adjustable multiplier."

def merge(self, model1, model2, multiplier):
m = model1.clone()

@@ -63,8 +59,6 @@ class ModelAdd:
FUNCTION = "merge"

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Add the weights of one diffusion model on top of another."
SHORT_DESCRIPTION = None

def merge(self, model1, model2):
m = model1.clone()

@@ -85,8 +79,6 @@ class CLIPMergeSimple:
FUNCTION = "merge"

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Merge two CLIP text encoder models using a simple ratio to blend all weights uniformly."
SHORT_DESCRIPTION = "Merge two CLIP models with a uniform blend ratio."

def merge(self, clip1, clip2, ratio):
m = clip1.clone()

@@ -110,8 +102,6 @@ class CLIPSubtract:
FUNCTION = "merge"

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Subtract one CLIP model's weights from another with an adjustable multiplier for extracting differences."
SHORT_DESCRIPTION = "Subtract CLIP weights with adjustable multiplier."

def merge(self, clip1, clip2, multiplier):
m = clip1.clone()

@@ -134,8 +124,6 @@ class CLIPAdd:
FUNCTION = "merge"

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Add the weights of one CLIP model on top of another."
SHORT_DESCRIPTION = None

def merge(self, clip1, clip2):
m = clip1.clone()

@@ -160,8 +148,6 @@ class ModelMergeBlocks:
FUNCTION = "merge"

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Merge two diffusion models with separate blend ratios for input, middle, and output blocks."
SHORT_DESCRIPTION = "Merge two models with per-block blend ratios."

def merge(self, model1, model2, **kwargs):
m = model1.clone()

@@ -242,8 +228,6 @@ def save_checkpoint(model, clip=None, vae=None, clip_vision=None, filename_prefi

class CheckpointSave:
SEARCH_ALIASES = ["save model", "export checkpoint", "merge save"]
DESCRIPTION = "Saves a model, CLIP, and VAE as a combined checkpoint file in safetensors format with optional workflow metadata."
SHORT_DESCRIPTION = "Saves model, CLIP, and VAE as a checkpoint."
def __init__(self):
self.output_dir = folder_paths.get_output_directory()

@@ -278,8 +262,6 @@ class CLIPSave:
OUTPUT_NODE = True

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Save a CLIP text encoder model to safetensors files, splitting by model component."
SHORT_DESCRIPTION = "Save a CLIP model to safetensors files."

def save(self, clip, filename_prefix, prompt=None, extra_pnginfo=None):
prompt_info = ""

@@ -337,8 +319,6 @@ class VAESave:
OUTPUT_NODE = True

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Save a VAE model to a safetensors file."
SHORT_DESCRIPTION = None

def save(self, vae, filename_prefix, prompt=None, extra_pnginfo=None):
full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)

@@ -374,8 +354,6 @@ class ModelSave:
OUTPUT_NODE = True

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Save a diffusion model to a safetensors file."
SHORT_DESCRIPTION = None

def save(self, model, filename_prefix, prompt=None, extra_pnginfo=None):
save_checkpoint(model, filename_prefix=filename_prefix, output_dir=self.output_dir, prompt=prompt, extra_pnginfo=extra_pnginfo)
@@ -2,8 +2,6 @@ import comfy_extras.nodes_model_merging

class ModelMergeSD1(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two SD1 models with per-block weight control over input, middle, and output blocks."
SHORT_DESCRIPTION = "Merge two SD1 models with per-block control."
@classmethod
def INPUT_TYPES(s):
arg_dict = { "model1": ("MODEL",),

@@ -28,15 +26,8 @@ class ModelMergeSD1(comfy_extras.nodes_model_merging.ModelMergeBlocks):
return {"required": arg_dict}


class ModelMergeSD2(ModelMergeSD1):
DESCRIPTION = "Merge two SD2 models with per-block weight control over input, middle, and output blocks."
SHORT_DESCRIPTION = "Merge two SD2 models with per-block control."


class ModelMergeSDXL(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two SDXL models with per-block weight control over input, middle, and output blocks."
SHORT_DESCRIPTION = "Merge two SDXL models with per-block control."

@classmethod
def INPUT_TYPES(s):

@@ -63,8 +54,6 @@ class ModelMergeSDXL(comfy_extras.nodes_model_merging.ModelMergeBlocks):

class ModelMergeSD3_2B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two SD3 2B models with per-block weight control over 24 joint blocks and embedders."
SHORT_DESCRIPTION = "Merge two SD3 2B models with per-block control."

@classmethod
def INPUT_TYPES(s):

@@ -89,8 +78,6 @@ class ModelMergeSD3_2B(comfy_extras.nodes_model_merging.ModelMergeBlocks):

class ModelMergeAuraflow(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two AuraFlow models with per-block weight control over double and single layers."
SHORT_DESCRIPTION = "Merge two AuraFlow models with per-block control."

@classmethod
def INPUT_TYPES(s):

@@ -118,8 +105,6 @@ class ModelMergeAuraflow(comfy_extras.nodes_model_merging.ModelMergeBlocks):

class ModelMergeFlux1(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Flux1 models with per-block weight control over 19 double blocks and 38 single blocks."
SHORT_DESCRIPTION = "Merge two Flux1 models with per-block control."

@classmethod
def INPUT_TYPES(s):

@@ -146,8 +131,6 @@ class ModelMergeFlux1(comfy_extras.nodes_model_merging.ModelMergeBlocks):

class ModelMergeSD35_Large(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two SD3.5 Large models with per-block weight control over 38 joint blocks and embedders."
SHORT_DESCRIPTION = "Merge two SD3.5 Large models with per-block control."

@classmethod
def INPUT_TYPES(s):

@@ -171,8 +154,6 @@ class ModelMergeSD35_Large(comfy_extras.nodes_model_merging.ModelMergeBlocks):

class ModelMergeMochiPreview(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Mochi Preview models with per-block weight control over 48 blocks and embedders."
SHORT_DESCRIPTION = "Merge two Mochi Preview models with per-block control."

@classmethod
def INPUT_TYPES(s):

@@ -195,8 +176,6 @@ class ModelMergeMochiPreview(comfy_extras.nodes_model_merging.ModelMergeBlocks):

class ModelMergeLTXV(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two LTXV models with per-block weight control over 28 transformer blocks."
SHORT_DESCRIPTION = "Merge two LTXV models with per-block control."

@classmethod
def INPUT_TYPES(s):

@@ -219,8 +198,6 @@ class ModelMergeLTXV(comfy_extras.nodes_model_merging.ModelMergeBlocks):

class ModelMergeCosmos7B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Cosmos 7B models with per-block weight control over 28 blocks and embedders."
SHORT_DESCRIPTION = "Merge two Cosmos 7B models with per-block control."

@classmethod
def INPUT_TYPES(s):

@@ -245,8 +222,6 @@ class ModelMergeCosmos7B(comfy_extras.nodes_model_merging.ModelMergeBlocks):

class ModelMergeCosmos14B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Cosmos 14B models with per-block weight control over 36 blocks and embedders."
SHORT_DESCRIPTION = "Merge two Cosmos 14B models with per-block control."

@classmethod
def INPUT_TYPES(s):

@@ -272,7 +247,6 @@ class ModelMergeCosmos14B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
class ModelMergeWAN2_1(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "1.3B model has 30 blocks, 14B model has 40 blocks. Image to video model has the extra img_emb."
SHORT_DESCRIPTION = "WAN 2.1 model merging with block-level control."

@classmethod
def INPUT_TYPES(s):

@@ -296,8 +270,6 @@ class ModelMergeWAN2_1(comfy_extras.nodes_model_merging.ModelMergeBlocks):

class ModelMergeCosmosPredict2_2B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Cosmos Predict2 2B models with per-block weight control over 28 blocks and embedders."
SHORT_DESCRIPTION = "Merge two Cosmos Predict2 2B models with per-block control."

@classmethod
def INPUT_TYPES(s):

@@ -321,8 +293,6 @@ class ModelMergeCosmosPredict2_2B(comfy_extras.nodes_model_merging.ModelMergeBlo

class ModelMergeCosmosPredict2_14B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Cosmos Predict2 14B models with per-block weight control over 36 blocks and embedders."
SHORT_DESCRIPTION = "Merge two Cosmos Predict2 14B models with per-block control."

@classmethod
def INPUT_TYPES(s):

@@ -346,8 +316,6 @@ class ModelMergeCosmosPredict2_14B(comfy_extras.nodes_model_merging.ModelMergeBl

class ModelMergeQwenImage(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Qwen Image models with per-block weight control over 60 transformer blocks."
SHORT_DESCRIPTION = "Merge two Qwen Image models with per-block control."

@classmethod
def INPUT_TYPES(s):

@@ -371,7 +339,7 @@ class ModelMergeQwenImage(comfy_extras.nodes_model_merging.ModelMergeBlocks):

NODE_CLASS_MAPPINGS = {
"ModelMergeSD1": ModelMergeSD1,
"ModelMergeSD2": ModelMergeSD2, #SD1 and SD2 have the same blocks
"ModelMergeSD2": ModelMergeSD1, #SD1 and SD2 have the same blocks
"ModelMergeSDXL": ModelMergeSDXL,
"ModelMergeSD3_2B": ModelMergeSD3_2B,
"ModelMergeAuraflow": ModelMergeAuraflow,
@@ -230,8 +230,6 @@ class ModelPatchLoader:
EXPERIMENTAL = True

CATEGORY = "advanced/loaders"
DESCRIPTION = "Load a model patch file such as a controlnet or style reference patch for use with compatible model nodes."
SHORT_DESCRIPTION = "Load a model patch file for controlnet or style."

def load_model_patch(self, name):
model_patch_path = folder_paths.get_full_path_or_raise("model_patches", name)

@@ -458,8 +456,6 @@ class QwenImageDiffsynthControlnet:
EXPERIMENTAL = True

CATEGORY = "advanced/loaders/qwen"
DESCRIPTION = "Apply a DiffSynth-style controlnet patch to a Qwen Image model using a VAE-encoded control image."
SHORT_DESCRIPTION = "Apply DiffSynth controlnet to a Qwen Image model."

def diffsynth_controlnet(self, model, model_patch, vae, image=None, strength=1.0, inpaint_image=None, mask=None):
model_patched = model.clone()

@@ -493,8 +489,6 @@ class ZImageFunControlnet(QwenImageDiffsynthControlnet):
"optional": {"image": ("IMAGE",), "inpaint_image": ("IMAGE",), "mask": ("MASK",)}}

CATEGORY = "advanced/loaders/zimage"
DESCRIPTION = "Apply a Z-Image Fun controlnet patch to a model with optional control image, inpaint image, and mask inputs."
SHORT_DESCRIPTION = "Apply Z-Image Fun controlnet with optional inpainting."

class UsoStyleProjectorPatch:
def __init__(self, model_patch, encoded_image):

@@ -531,8 +525,6 @@ class USOStyleReference:
EXPERIMENTAL = True

CATEGORY = "advanced/model_patches/flux"
DESCRIPTION = "Apply a USO style reference patch to a Flux model using multi-layer SigLIP features from CLIP vision output."
SHORT_DESCRIPTION = "Apply USO style reference to a Flux model."

def apply_patch(self, model, model_patch, clip_vision_output):
encoded_image = torch.stack((clip_vision_output.all_hidden_states[:, -20], clip_vision_output.all_hidden_states[:, -11], clip_vision_output.penultimate_hidden_states))
@@ -15,8 +15,6 @@ class Morphology(io.ComfyNode):
search_aliases=["erode", "dilate"],
display_name="ImageMorphology",
category="image/postprocessing",
description="Applies morphological operations to an image using a configurable kernel size.",
short_description="",
inputs=[
io.Image.Input("image"),
io.Combo.Input(

@@ -62,8 +60,6 @@ class ImageRGBToYUV(io.ComfyNode):
node_id="ImageRGBToYUV",
search_aliases=["color space conversion"],
category="image/batch",
description="Converts an RGB image to YUV (YCbCr) color space, outputting separate Y, U, and V channel images.",
short_description="Convert RGB image to YUV color space.",
inputs=[
io.Image.Input("image"),
],

@@ -86,8 +82,6 @@ class ImageYUVToRGB(io.ComfyNode):
node_id="ImageYUVToRGB",
search_aliases=["color space conversion"],
category="image/batch",
description="Converts separate Y, U, and V (YCbCr) channel images back into a single RGB image.",
short_description="Convert YUV channels back to RGB image.",
inputs=[
io.Image.Input("Y"),
io.Image.Input("U"),
99
comfy_extras/nodes_nag.py
Normal file
@@ -0,0 +1,99 @@
import torch
from comfy_api.latest import ComfyExtension, io
from typing_extensions import override


class NAGuidance(io.ComfyNode):
    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="NAGuidance",
            display_name="Normalized Attention Guidance",
            description="Applies Normalized Attention Guidance to models, enabling negative prompts on distilled/schnell models.",
            category="",
            is_experimental=True,
            inputs=[
                io.Model.Input("model", tooltip="The model to apply NAG to."),
                io.Float.Input("nag_scale", min=0.0, default=5.0, max=50.0, step=0.1, tooltip="The guidance scale factor. Higher values push further from the negative prompt."),
                io.Float.Input("nag_alpha", min=0.0, default=0.5, max=1.0, step=0.01, tooltip="Blending factor for the normalized attention. 1.0 is full replacement, 0.0 is no effect."),
                io.Float.Input("nag_tau", min=1.0, default=1.5, max=10.0, step=0.01),
                # io.Float.Input("start_percent", min=0.0, default=0.0, max=1.0, step=0.01, tooltip="The relative sampling step to begin applying NAG."),
                # io.Float.Input("end_percent", min=0.0, default=1.0, max=1.0, step=0.01, tooltip="The relative sampling step to stop applying NAG."),
            ],
            outputs=[
                io.Model.Output(tooltip="The patched model with NAG enabled."),
            ],
        )

    @classmethod
    def execute(cls, model: io.Model.Type, nag_scale: float, nag_alpha: float, nag_tau: float) -> io.NodeOutput:
        m = model.clone()

        # sigma_start = m.get_model_object("model_sampling").percent_to_sigma(start_percent)
        # sigma_end = m.get_model_object("model_sampling").percent_to_sigma(end_percent)

        def nag_attention_output_patch(out, extra_options):
            cond_or_uncond = extra_options.get("cond_or_uncond", None)
            if cond_or_uncond is None:
                return out

            if not (1 in cond_or_uncond and 0 in cond_or_uncond):
                return out

            # sigma = extra_options.get("sigmas", None)
            # if sigma is not None and len(sigma) > 0:
            #     sigma = sigma[0].item()
            #     if sigma > sigma_start or sigma < sigma_end:
            #         return out

            img_slice = extra_options.get("img_slice", None)

            if img_slice is not None:
                orig_out = out
                out = out[:, img_slice[0]:img_slice[1]]  # only apply on img part

            batch_size = out.shape[0]
            half_size = batch_size // len(cond_or_uncond)

            ind_neg = cond_or_uncond.index(1)
            ind_pos = cond_or_uncond.index(0)
            z_pos = out[half_size * ind_pos:half_size * (ind_pos + 1)]
            z_neg = out[half_size * ind_neg:half_size * (ind_neg + 1)]

            guided = z_pos * nag_scale - z_neg * (nag_scale - 1.0)

            eps = 1e-6
            norm_pos = torch.norm(z_pos, p=1, dim=-1, keepdim=True).clamp_min(eps)
            norm_guided = torch.norm(guided, p=1, dim=-1, keepdim=True).clamp_min(eps)

            ratio = norm_guided / norm_pos
            scale_factor = torch.minimum(ratio, torch.full_like(ratio, nag_tau)) / ratio

            guided_normalized = guided * scale_factor

            z_final = guided_normalized * nag_alpha + z_pos * (1.0 - nag_alpha)

            if img_slice is not None:
                orig_out[half_size * ind_neg:half_size * (ind_neg + 1), img_slice[0]:img_slice[1]] = z_final
                orig_out[half_size * ind_pos:half_size * (ind_pos + 1), img_slice[0]:img_slice[1]] = z_final
                return orig_out
            else:
                out[half_size * ind_pos:half_size * (ind_pos + 1)] = z_final
                return out

        m.set_model_attn1_output_patch(nag_attention_output_patch)
        m.disable_model_cfg1_optimization()

        return io.NodeOutput(m)


class NagExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        return [
            NAGuidance,
        ]


async def comfy_entrypoint() -> NagExtension:
    return NagExtension()
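The core of the new NAGuidance node is the normalization performed inside nag_attention_output_patch above: the positive branch's attention output is extrapolated away from the negative branch, the guided result's per-token L1 norm is capped at nag_tau times the positive norm, and the normalized result is blended back with nag_alpha. A standalone sketch of that combination on two already-separated tensors (illustrative only; the node itself works in place on the batched attention output as shown in the listing):

import torch

def nag_combine(z_pos, z_neg, nag_scale=5.0, nag_alpha=0.5, nag_tau=1.5, eps=1e-6):
    # Extrapolate away from the negative branch, CFG-style.
    guided = z_pos * nag_scale - z_neg * (nag_scale - 1.0)
    # Cap the per-token L1 norm of the guided output at nag_tau times the positive norm.
    norm_pos = torch.norm(z_pos, p=1, dim=-1, keepdim=True).clamp_min(eps)
    norm_guided = torch.norm(guided, p=1, dim=-1, keepdim=True).clamp_min(eps)
    ratio = norm_guided / norm_pos
    guided = guided * (torch.minimum(ratio, torch.full_like(ratio, nag_tau)) / ratio)
    # Blend the normalized result back toward the positive output.
    return guided * nag_alpha + z_pos * (1.0 - nag_alpha)

Because the patch is registered through set_model_attn1_output_patch and cfg1 optimization is disabled, both conditionings reach the attention layers even at CFG 1.0, which appears to be what makes negative prompts effective on distilled models.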
@@ -14,7 +14,6 @@ class wanBlockSwap(io.ComfyNode):
node_id="wanBlockSwap",
category="",
description="NOP",
short_description=None,
inputs=[
io.Model.Input("model"),
],

@@ -32,8 +32,6 @@ class OptimalStepsScheduler(io.ComfyNode):
return io.Schema(
node_id="OptimalStepsScheduler",
category="sampling/custom_sampling/schedulers",
description="Generates an optimized noise schedule with precomputed optimal sigma levels using log-linear interpolation.",
short_description="Optimal noise schedule with precomputed sigma levels.",
inputs=[
io.Combo.Input("model_type", options=["FLUX", "Wan", "Chroma"]),
io.Int.Input("steps", default=20, min=3, max=1000),
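The OptimalStepsScheduler above describes a schedule built by log-linear interpolation over precomputed optimal sigma levels. A hedged sketch of what such an interpolation can look like, resampling a fixed sigma table to an arbitrary step count (the NOISE_LEVELS table and its values here are placeholders, not the repository's actual numbers):

import numpy as np

def loglinear_interp(t_steps, num_steps):
    # Interpolate a decreasing sigma table in log space, then map back.
    xs = np.linspace(0.0, 1.0, len(t_steps))
    ys = np.log(np.asarray(t_steps)[::-1])
    new_xs = np.linspace(0.0, 1.0, num_steps)
    new_ys = np.interp(new_xs, xs, ys)
    return np.exp(new_ys)[::-1].copy()

NOISE_LEVELS = {"FLUX": [1.0, 0.75, 0.5, 0.25, 0.03]}  # placeholder sigma anchors

steps = 20
sigmas = loglinear_interp(NOISE_LEVELS["FLUX"], steps + 1)
sigmas[-1] = 0.0  # end the schedule at zero noise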
Some files were not shown because too many files have changed in this diff.