wip: add AI-generated descriptions to all nodes

This commit is contained in:
pythongosssss
2026-02-16 14:02:17 -08:00
parent 88e6370527
commit ecec1310b2
119 changed files with 1059 additions and 15 deletions

View File

@@ -1300,6 +1300,7 @@ class NodeInfoV1:
name: str=None name: str=None
display_name: str=None display_name: str=None
description: str=None description: str=None
short_description: str=None
python_module: Any=None python_module: Any=None
category: str=None category: str=None
output_node: bool=None output_node: bool=None
@@ -1390,6 +1391,8 @@ class Schema:
hidden: list[Hidden] = field(default_factory=list) hidden: list[Hidden] = field(default_factory=list)
description: str="" description: str=""
"""Node description, shown as a tooltip when hovering over the node.""" """Node description, shown as a tooltip when hovering over the node."""
short_description: str=""
"""Short node description, shown in the node list/search."""
search_aliases: list[str] = field(default_factory=list) search_aliases: list[str] = field(default_factory=list)
"""Alternative names for search. Useful for synonyms, abbreviations, or old names after renaming.""" """Alternative names for search. Useful for synonyms, abbreviations, or old names after renaming."""
is_input_list: bool = False is_input_list: bool = False
@@ -1528,6 +1531,7 @@ class Schema:
display_name=self.display_name, display_name=self.display_name,
category=self.category, category=self.category,
description=self.description, description=self.description,
short_description=self.short_description,
output_node=self.is_output_node, output_node=self.is_output_node,
deprecated=self.is_deprecated, deprecated=self.is_deprecated,
experimental=self.is_experimental, experimental=self.is_experimental,
@@ -1771,6 +1775,14 @@ class _ComfyNodeBaseInternal(_ComfyNodeInternal):
cls.GET_SCHEMA() cls.GET_SCHEMA()
return cls._DESCRIPTION return cls._DESCRIPTION
_SHORT_DESCRIPTION = None
@final
@classproperty
def SHORT_DESCRIPTION(cls): # noqa
if cls._SHORT_DESCRIPTION is None:
cls.GET_SCHEMA()
return cls._SHORT_DESCRIPTION
_CATEGORY = None _CATEGORY = None
@final @final
@classproperty @classproperty
@@ -1899,6 +1911,8 @@ class _ComfyNodeBaseInternal(_ComfyNodeInternal):
schema.validate() schema.validate()
if cls._DESCRIPTION is None: if cls._DESCRIPTION is None:
cls._DESCRIPTION = schema.description cls._DESCRIPTION = schema.description
if cls._SHORT_DESCRIPTION is None:
cls._SHORT_DESCRIPTION = schema.short_description
if cls._CATEGORY is None: if cls._CATEGORY is None:
cls._CATEGORY = schema.category cls._CATEGORY = schema.category
if cls._EXPERIMENTAL is None: if cls._EXPERIMENTAL is None:

View File

@@ -44,6 +44,7 @@ class FluxProUltraImageNode(IO.ComfyNode):
display_name="Flux 1.1 [pro] Ultra Image", display_name="Flux 1.1 [pro] Ultra Image",
category="api node/image/BFL", category="api node/image/BFL",
description="Generates images using Flux Pro 1.1 Ultra via api based on prompt and resolution.", description="Generates images using Flux Pro 1.1 Ultra via api based on prompt and resolution.",
short_description="Generate images with Flux Pro 1.1 Ultra API.",
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",
@@ -154,13 +155,17 @@ class FluxProUltraImageNode(IO.ComfyNode):
class FluxKontextProImageNode(IO.ComfyNode): class FluxKontextProImageNode(IO.ComfyNode):
DESCRIPTION = "Edits images using Flux.1 Kontext [pro] via api based on prompt and aspect ratio."
SHORT_DESCRIPTION = "Edit images with Flux.1 Kontext [pro] API."
@classmethod @classmethod
def define_schema(cls) -> IO.Schema: def define_schema(cls) -> IO.Schema:
return IO.Schema( return IO.Schema(
node_id=cls.NODE_ID, node_id=cls.NODE_ID,
display_name=cls.DISPLAY_NAME, display_name=cls.DISPLAY_NAME,
category="api node/image/BFL", category="api node/image/BFL",
description="Edits images using Flux.1 Kontext [pro] via api based on prompt and aspect ratio.", description=cls.DESCRIPTION,
short_description=cls.SHORT_DESCRIPTION,
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",
@@ -268,6 +273,7 @@ class FluxKontextProImageNode(IO.ComfyNode):
class FluxKontextMaxImageNode(FluxKontextProImageNode): class FluxKontextMaxImageNode(FluxKontextProImageNode):
DESCRIPTION = "Edits images using Flux.1 Kontext [max] via api based on prompt and aspect ratio." DESCRIPTION = "Edits images using Flux.1 Kontext [max] via api based on prompt and aspect ratio."
SHORT_DESCRIPTION = "Edit images with Flux.1 Kontext [max] API."
BFL_PATH = "/proxy/bfl/flux-kontext-max/generate" BFL_PATH = "/proxy/bfl/flux-kontext-max/generate"
NODE_ID = "FluxKontextMaxImageNode" NODE_ID = "FluxKontextMaxImageNode"
DISPLAY_NAME = "Flux.1 Kontext [max] Image" DISPLAY_NAME = "Flux.1 Kontext [max] Image"
@@ -282,6 +288,7 @@ class FluxProExpandNode(IO.ComfyNode):
display_name="Flux.1 Expand Image", display_name="Flux.1 Expand Image",
category="api node/image/BFL", category="api node/image/BFL",
description="Outpaints image based on prompt.", description="Outpaints image based on prompt.",
short_description=None,
inputs=[ inputs=[
IO.Image.Input("image"), IO.Image.Input("image"),
IO.String.Input( IO.String.Input(
@@ -418,6 +425,7 @@ class FluxProFillNode(IO.ComfyNode):
display_name="Flux.1 Fill Image", display_name="Flux.1 Fill Image",
category="api node/image/BFL", category="api node/image/BFL",
description="Inpaints image based on mask and prompt.", description="Inpaints image based on mask and prompt.",
short_description=None,
inputs=[ inputs=[
IO.Image.Input("image"), IO.Image.Input("image"),
IO.Mask.Input("mask"), IO.Mask.Input("mask"),
@@ -543,6 +551,7 @@ class Flux2ProImageNode(IO.ComfyNode):
display_name=cls.DISPLAY_NAME, display_name=cls.DISPLAY_NAME,
category="api node/image/BFL", category="api node/image/BFL",
description="Generates images synchronously based on prompt and resolution.", description="Generates images synchronously based on prompt and resolution.",
short_description=None,
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",

View File

@@ -33,6 +33,7 @@ class BriaImageEditNode(IO.ComfyNode):
display_name="Bria FIBO Image Edit", display_name="Bria FIBO Image Edit",
category="api node/image/Bria", category="api node/image/Bria",
description="Edit images using Bria latest model", description="Edit images using Bria latest model",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input("model", options=["FIBO"]), IO.Combo.Input("model", options=["FIBO"]),
IO.Image.Input("image"), IO.Image.Input("image"),

View File

@@ -60,6 +60,7 @@ class ByteDanceImageNode(IO.ComfyNode):
display_name="ByteDance Image", display_name="ByteDance Image",
category="api node/image/ByteDance", category="api node/image/ByteDance",
description="Generate images using ByteDance models via api based on prompt", description="Generate images using ByteDance models via api based on prompt",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input("model", options=["seedream-3-0-t2i-250415"]), IO.Combo.Input("model", options=["seedream-3-0-t2i-250415"]),
IO.String.Input( IO.String.Input(
@@ -182,6 +183,7 @@ class ByteDanceSeedreamNode(IO.ComfyNode):
display_name="ByteDance Seedream 4.5", display_name="ByteDance Seedream 4.5",
category="api node/image/ByteDance", category="api node/image/ByteDance",
description="Unified text-to-image generation and precise single-sentence editing at up to 4K resolution.", description="Unified text-to-image generation and precise single-sentence editing at up to 4K resolution.",
short_description="Text-to-image generation and editing up to 4K.",
inputs=[ inputs=[
IO.Combo.Input( IO.Combo.Input(
"model", "model",
@@ -380,6 +382,7 @@ class ByteDanceTextToVideoNode(IO.ComfyNode):
display_name="ByteDance Text to Video", display_name="ByteDance Text to Video",
category="api node/video/ByteDance", category="api node/video/ByteDance",
description="Generate video using ByteDance models via api based on prompt", description="Generate video using ByteDance models via api based on prompt",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input( IO.Combo.Input(
"model", "model",
@@ -505,6 +508,7 @@ class ByteDanceImageToVideoNode(IO.ComfyNode):
display_name="ByteDance Image to Video", display_name="ByteDance Image to Video",
category="api node/video/ByteDance", category="api node/video/ByteDance",
description="Generate video using ByteDance models via api based on image and prompt", description="Generate video using ByteDance models via api based on image and prompt",
short_description="Generate video from image and prompt via ByteDance API.",
inputs=[ inputs=[
IO.Combo.Input( IO.Combo.Input(
"model", "model",
@@ -639,6 +643,7 @@ class ByteDanceFirstLastFrameNode(IO.ComfyNode):
display_name="ByteDance First-Last-Frame to Video", display_name="ByteDance First-Last-Frame to Video",
category="api node/video/ByteDance", category="api node/video/ByteDance",
description="Generate video using prompt and first and last frames.", description="Generate video using prompt and first and last frames.",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input( IO.Combo.Input(
"model", "model",
@@ -784,6 +789,7 @@ class ByteDanceImageReferenceNode(IO.ComfyNode):
display_name="ByteDance Reference Images to Video", display_name="ByteDance Reference Images to Video",
category="api node/video/ByteDance", category="api node/video/ByteDance",
description="Generate video using prompt and reference images.", description="Generate video using prompt and reference images.",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input( IO.Combo.Input(
"model", "model",

View File

@@ -254,6 +254,7 @@ class GeminiNode(IO.ComfyNode):
description="Generate text responses with Google's Gemini AI model. " description="Generate text responses with Google's Gemini AI model. "
"You can provide multiple types of inputs (text, images, audio, video) " "You can provide multiple types of inputs (text, images, audio, video) "
"as context for generating more relevant and meaningful responses.", "as context for generating more relevant and meaningful responses.",
short_description="Generate text responses with Google's Gemini AI.",
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",
@@ -480,6 +481,7 @@ class GeminiInputFiles(IO.ComfyNode):
"The files will be read by the Gemini model when generating a response. " "The files will be read by the Gemini model when generating a response. "
"The contents of the text file count toward the token limit. " "The contents of the text file count toward the token limit. "
"🛈 TIP: Can be chained together with other Gemini Input File nodes.", "🛈 TIP: Can be chained together with other Gemini Input File nodes.",
short_description="Load and prepare input files for Gemini LLM nodes.",
inputs=[ inputs=[
IO.Combo.Input( IO.Combo.Input(
"file", "file",
@@ -534,6 +536,7 @@ class GeminiImage(IO.ComfyNode):
display_name="Nano Banana (Google Gemini Image)", display_name="Nano Banana (Google Gemini Image)",
category="api node/image/Gemini", category="api node/image/Gemini",
description="Edit images synchronously via Google API.", description="Edit images synchronously via Google API.",
short_description=None,
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",
@@ -665,6 +668,7 @@ class GeminiImage2(IO.ComfyNode):
display_name="Nano Banana Pro (Google Gemini Image)", display_name="Nano Banana Pro (Google Gemini Image)",
category="api node/image/Gemini", category="api node/image/Gemini",
description="Generate or edit images synchronously via Google Vertex API.", description="Generate or edit images synchronously via Google Vertex API.",
short_description=None,
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",

View File

@@ -36,6 +36,7 @@ class GrokImageNode(IO.ComfyNode):
display_name="Grok Image", display_name="Grok Image",
category="api node/image/Grok", category="api node/image/Grok",
description="Generate images using Grok based on a text prompt", description="Generate images using Grok based on a text prompt",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input("model", options=["grok-imagine-image-beta"]), IO.Combo.Input("model", options=["grok-imagine-image-beta"]),
IO.String.Input( IO.String.Input(
@@ -137,6 +138,7 @@ class GrokImageEditNode(IO.ComfyNode):
display_name="Grok Image Edit", display_name="Grok Image Edit",
category="api node/image/Grok", category="api node/image/Grok",
description="Modify an existing image based on a text prompt", description="Modify an existing image based on a text prompt",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input("model", options=["grok-imagine-image-beta"]), IO.Combo.Input("model", options=["grok-imagine-image-beta"]),
IO.Image.Input("image"), IO.Image.Input("image"),
@@ -226,6 +228,7 @@ class GrokVideoNode(IO.ComfyNode):
display_name="Grok Video", display_name="Grok Video",
category="api node/video/Grok", category="api node/video/Grok",
description="Generate video from a prompt or an image", description="Generate video from a prompt or an image",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input("model", options=["grok-imagine-video-beta"]), IO.Combo.Input("model", options=["grok-imagine-video-beta"]),
IO.String.Input( IO.String.Input(
@@ -334,6 +337,7 @@ class GrokVideoEditNode(IO.ComfyNode):
display_name="Grok Video Edit", display_name="Grok Video Edit",
category="api node/video/Grok", category="api node/video/Grok",
description="Edit an existing video based on a text prompt.", description="Edit an existing video based on a text prompt.",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input("model", options=["grok-imagine-video-beta"]), IO.Combo.Input("model", options=["grok-imagine-video-beta"]),
IO.String.Input( IO.String.Input(

View File

@@ -74,6 +74,7 @@ class HitPawGeneralImageEnhance(IO.ComfyNode):
category="api node/image/HitPaw", category="api node/image/HitPaw",
description="Upscale low-resolution images to super-resolution, eliminate artifacts and noise. " description="Upscale low-resolution images to super-resolution, eliminate artifacts and noise. "
f"Maximum output: {MAX_MP_GENERATIVE} megapixels.", f"Maximum output: {MAX_MP_GENERATIVE} megapixels.",
short_description="Upscale images to super-resolution, removing artifacts and noise.",
inputs=[ inputs=[
IO.Combo.Input("model", options=["generative_portrait", "generative"]), IO.Combo.Input("model", options=["generative_portrait", "generative"]),
IO.Image.Input("image"), IO.Image.Input("image"),
@@ -205,6 +206,7 @@ class HitPawVideoEnhance(IO.ComfyNode):
category="api node/video/HitPaw", category="api node/video/HitPaw",
description="Upscale low-resolution videos to high resolution, eliminate artifacts and noise. " description="Upscale low-resolution videos to high resolution, eliminate artifacts and noise. "
"Prices shown are per second of video.", "Prices shown are per second of video.",
short_description="Upscale videos to high resolution, removing artifacts and noise.",
inputs=[ inputs=[
IO.DynamicCombo.Input("model", options=model_options), IO.DynamicCombo.Input("model", options=model_options),
IO.Video.Input("video"), IO.Video.Input("video"),

View File

@@ -54,6 +54,8 @@ class TencentTextToModelNode(IO.ComfyNode):
node_id="TencentTextToModelNode", node_id="TencentTextToModelNode",
display_name="Hunyuan3D: Text to Model", display_name="Hunyuan3D: Text to Model",
category="api node/3d/Tencent", category="api node/3d/Tencent",
description="Generate 3D models from text prompts using Hunyuan3D Pro with configurable face count and geometry options.",
short_description="Generate 3D models from text using Hunyuan3D Pro.",
inputs=[ inputs=[
IO.Combo.Input( IO.Combo.Input(
"model", "model",
@@ -168,6 +170,8 @@ class TencentImageToModelNode(IO.ComfyNode):
node_id="TencentImageToModelNode", node_id="TencentImageToModelNode",
display_name="Hunyuan3D: Image(s) to Model", display_name="Hunyuan3D: Image(s) to Model",
category="api node/3d/Tencent", category="api node/3d/Tencent",
description="Generate 3D models from images using Hunyuan3D Pro with optional multi-view inputs and configurable geometry.",
short_description="Generate 3D models from images using Hunyuan3D Pro.",
inputs=[ inputs=[
IO.Combo.Input( IO.Combo.Input(
"model", "model",

View File

@@ -236,6 +236,7 @@ class IdeogramV1(IO.ComfyNode):
display_name="Ideogram V1", display_name="Ideogram V1",
category="api node/image/Ideogram", category="api node/image/Ideogram",
description="Generates images using the Ideogram V1 model.", description="Generates images using the Ideogram V1 model.",
short_description=None,
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",
@@ -361,6 +362,7 @@ class IdeogramV2(IO.ComfyNode):
display_name="Ideogram V2", display_name="Ideogram V2",
category="api node/image/Ideogram", category="api node/image/Ideogram",
description="Generates images using the Ideogram V2 model.", description="Generates images using the Ideogram V2 model.",
short_description=None,
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",
@@ -526,6 +528,7 @@ class IdeogramV3(IO.ComfyNode):
category="api node/image/Ideogram", category="api node/image/Ideogram",
description="Generates images using the Ideogram V3 model. " description="Generates images using the Ideogram V3 model. "
"Supports both regular image generation from text prompts and image editing with mask.", "Supports both regular image generation from text prompts and image editing with mask.",
short_description="Generate and edit images with Ideogram V3.",
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",

View File

@@ -642,6 +642,7 @@ class KlingCameraControls(IO.ComfyNode):
display_name="Kling Camera Controls", display_name="Kling Camera Controls",
category="api node/video/Kling", category="api node/video/Kling",
description="Allows specifying configuration options for Kling Camera Controls and motion control effects.", description="Allows specifying configuration options for Kling Camera Controls and motion control effects.",
short_description="Configure Kling camera controls and motion effects.",
inputs=[ inputs=[
IO.Combo.Input("camera_control_type", options=KlingCameraControlType), IO.Combo.Input("camera_control_type", options=KlingCameraControlType),
IO.Float.Input( IO.Float.Input(
@@ -762,6 +763,7 @@ class KlingTextToVideoNode(IO.ComfyNode):
display_name="Kling Text to Video", display_name="Kling Text to Video",
category="api node/video/Kling", category="api node/video/Kling",
description="Kling Text to Video Node", description="Kling Text to Video Node",
short_description=None,
inputs=[ inputs=[
IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"),
IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"),
@@ -849,6 +851,7 @@ class OmniProTextToVideoNode(IO.ComfyNode):
display_name="Kling 3.0 Omni Text to Video", display_name="Kling 3.0 Omni Text to Video",
category="api node/video/Kling", category="api node/video/Kling",
description="Use text prompts to generate videos with the latest Kling model.", description="Use text prompts to generate videos with the latest Kling model.",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]), IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]),
IO.String.Input( IO.String.Input(
@@ -989,6 +992,7 @@ class OmniProFirstLastFrameNode(IO.ComfyNode):
display_name="Kling 3.0 Omni First-Last-Frame to Video", display_name="Kling 3.0 Omni First-Last-Frame to Video",
category="api node/video/Kling", category="api node/video/Kling",
description="Use a start frame, an optional end frame, or reference images with the latest Kling model.", description="Use a start frame, an optional end frame, or reference images with the latest Kling model.",
short_description="Generate video from start/end frames or reference images.",
inputs=[ inputs=[
IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]), IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]),
IO.String.Input( IO.String.Input(
@@ -1187,6 +1191,7 @@ class OmniProImageToVideoNode(IO.ComfyNode):
display_name="Kling 3.0 Omni Image to Video", display_name="Kling 3.0 Omni Image to Video",
category="api node/video/Kling", category="api node/video/Kling",
description="Use up to 7 reference images to generate a video with the latest Kling model.", description="Use up to 7 reference images to generate a video with the latest Kling model.",
short_description="Generate video from up to 7 reference images.",
inputs=[ inputs=[
IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]), IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]),
IO.String.Input( IO.String.Input(
@@ -1347,6 +1352,7 @@ class OmniProVideoToVideoNode(IO.ComfyNode):
display_name="Kling 3.0 Omni Video to Video", display_name="Kling 3.0 Omni Video to Video",
category="api node/video/Kling", category="api node/video/Kling",
description="Use a video and up to 4 reference images to generate a video with the latest Kling model.", description="Use a video and up to 4 reference images to generate a video with the latest Kling model.",
short_description="Generate video from a video and reference images.",
inputs=[ inputs=[
IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]), IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]),
IO.String.Input( IO.String.Input(
@@ -1458,6 +1464,7 @@ class OmniProEditVideoNode(IO.ComfyNode):
display_name="Kling 3.0 Omni Edit Video", display_name="Kling 3.0 Omni Edit Video",
category="api node/video/Kling", category="api node/video/Kling",
description="Edit an existing video with the latest model from Kling.", description="Edit an existing video with the latest model from Kling.",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]), IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]),
IO.String.Input( IO.String.Input(
@@ -1565,6 +1572,7 @@ class OmniProImageNode(IO.ComfyNode):
display_name="Kling 3.0 Omni Image", display_name="Kling 3.0 Omni Image",
category="api node/image/Kling", category="api node/image/Kling",
description="Create or edit images with the latest model from Kling.", description="Create or edit images with the latest model from Kling.",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-image-o1"]), IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-image-o1"]),
IO.String.Input( IO.String.Input(
@@ -1693,6 +1701,7 @@ class KlingCameraControlT2VNode(IO.ComfyNode):
display_name="Kling Text to Video (Camera Control)", display_name="Kling Text to Video (Camera Control)",
category="api node/video/Kling", category="api node/video/Kling",
description="Transform text into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original text.", description="Transform text into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original text.",
short_description="Generate videos from text with camera movement controls.",
inputs=[ inputs=[
IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"),
IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"),
@@ -1754,6 +1763,8 @@ class KlingImage2VideoNode(IO.ComfyNode):
node_id="KlingImage2VideoNode", node_id="KlingImage2VideoNode",
display_name="Kling Image(First Frame) to Video", display_name="Kling Image(First Frame) to Video",
category="api node/video/Kling", category="api node/video/Kling",
description="Generate a video from a first-frame image with configurable model, mode, aspect ratio, and duration settings.",
short_description="Generate video from a first-frame reference image.",
inputs=[ inputs=[
IO.Image.Input("start_frame", tooltip="The reference image used to generate the video."), IO.Image.Input("start_frame", tooltip="The reference image used to generate the video."),
IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"),
@@ -1854,6 +1865,7 @@ class KlingCameraControlI2VNode(IO.ComfyNode):
display_name="Kling Image to Video (Camera Control)", display_name="Kling Image to Video (Camera Control)",
category="api node/video/Kling", category="api node/video/Kling",
description="Transform still images into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original image.", description="Transform still images into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original image.",
short_description="Generate videos from images with camera movement controls.",
inputs=[ inputs=[
IO.Image.Input( IO.Image.Input(
"start_frame", "start_frame",
@@ -1925,6 +1937,7 @@ class KlingStartEndFrameNode(IO.ComfyNode):
display_name="Kling Start-End Frame to Video", display_name="Kling Start-End Frame to Video",
category="api node/video/Kling", category="api node/video/Kling",
description="Generate a video sequence that transitions between your provided start and end images. The node creates all frames in between, producing a smooth transformation from the first frame to the last.", description="Generate a video sequence that transitions between your provided start and end images. The node creates all frames in between, producing a smooth transformation from the first frame to the last.",
short_description="Generate video transitioning between start and end frame images.",
inputs=[ inputs=[
IO.Image.Input( IO.Image.Input(
"start_frame", "start_frame",
@@ -2019,6 +2032,7 @@ class KlingVideoExtendNode(IO.ComfyNode):
display_name="Kling Video Extend", display_name="Kling Video Extend",
category="api node/video/Kling", category="api node/video/Kling",
description="Kling Video Extend Node. Extend videos made by other Kling nodes. The video_id is created by using other Kling Nodes.", description="Kling Video Extend Node. Extend videos made by other Kling nodes. The video_id is created by using other Kling Nodes.",
short_description="Extend videos generated by other Kling nodes.",
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",
@@ -2100,6 +2114,7 @@ class KlingDualCharacterVideoEffectNode(IO.ComfyNode):
display_name="Kling Dual Character Video Effects", display_name="Kling Dual Character Video Effects",
category="api node/video/Kling", category="api node/video/Kling",
description="Achieve different special effects when generating a video based on the effect_scene. First image will be positioned on left side, second on right side of the composite.", description="Achieve different special effects when generating a video based on the effect_scene. First image will be positioned on left side, second on right side of the composite.",
short_description="Apply dual-character video effects from two images.",
inputs=[ inputs=[
IO.Image.Input("image_left", tooltip="Left side image"), IO.Image.Input("image_left", tooltip="Left side image"),
IO.Image.Input("image_right", tooltip="Right side image"), IO.Image.Input("image_right", tooltip="Right side image"),
@@ -2190,6 +2205,7 @@ class KlingSingleImageVideoEffectNode(IO.ComfyNode):
display_name="Kling Video Effects", display_name="Kling Video Effects",
category="api node/video/Kling", category="api node/video/Kling",
description="Achieve different special effects when generating a video based on the effect_scene.", description="Achieve different special effects when generating a video based on the effect_scene.",
short_description="Apply special video effects to a single image.",
inputs=[ inputs=[
IO.Image.Input( IO.Image.Input(
"image", "image",
@@ -2263,6 +2279,7 @@ class KlingLipSyncAudioToVideoNode(IO.ComfyNode):
display_name="Kling Lip Sync Video with Audio", display_name="Kling Lip Sync Video with Audio",
category="api node/video/Kling", category="api node/video/Kling",
description="Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file. When using, ensure that the audio contains clearly distinguishable vocals and that the video contains a distinct face. The audio file should not be larger than 5MB. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.", description="Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file. When using, ensure that the audio contains clearly distinguishable vocals and that the video contains a distinct face. The audio file should not be larger than 5MB. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.",
short_description="Sync video mouth movements to audio content.",
inputs=[ inputs=[
IO.Video.Input("video"), IO.Video.Input("video"),
IO.Audio.Input("audio"), IO.Audio.Input("audio"),
@@ -2314,6 +2331,7 @@ class KlingLipSyncTextToVideoNode(IO.ComfyNode):
display_name="Kling Lip Sync Video with Text", display_name="Kling Lip Sync Video with Text",
category="api node/video/Kling", category="api node/video/Kling",
description="Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.", description="Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.",
short_description="Sync video mouth movements to a text prompt.",
inputs=[ inputs=[
IO.Video.Input("video"), IO.Video.Input("video"),
IO.String.Input( IO.String.Input(
@@ -2381,6 +2399,7 @@ class KlingVirtualTryOnNode(IO.ComfyNode):
display_name="Kling Virtual Try On", display_name="Kling Virtual Try On",
category="api node/image/Kling", category="api node/image/Kling",
description="Kling Virtual Try On Node. Input a human image and a cloth image to try on the cloth on the human. You can merge multiple clothing item pictures into one image with a white background.", description="Kling Virtual Try On Node. Input a human image and a cloth image to try on the cloth on the human. You can merge multiple clothing item pictures into one image with a white background.",
short_description="Virtually try clothing onto a human image.",
inputs=[ inputs=[
IO.Image.Input("human_image"), IO.Image.Input("human_image"),
IO.Image.Input("cloth_image"), IO.Image.Input("cloth_image"),
@@ -2448,6 +2467,7 @@ class KlingImageGenerationNode(IO.ComfyNode):
display_name="Kling 3.0 Image", display_name="Kling 3.0 Image",
category="api node/image/Kling", category="api node/image/Kling",
description="Kling Image Generation Node. Generate an image from a text prompt with an optional reference image.", description="Kling Image Generation Node. Generate an image from a text prompt with an optional reference image.",
short_description="Generate images from text with optional reference image.",
inputs=[ inputs=[
IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"),
IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"),
@@ -2581,6 +2601,8 @@ class TextToVideoWithAudio(IO.ComfyNode):
node_id="KlingTextToVideoWithAudio", node_id="KlingTextToVideoWithAudio",
display_name="Kling 2.6 Text to Video with Audio", display_name="Kling 2.6 Text to Video with Audio",
category="api node/video/Kling", category="api node/video/Kling",
description="Generate a video with synchronized audio from a text prompt using the Kling v2-6 model.",
short_description="Generate video with audio from text using Kling v2-6.",
inputs=[ inputs=[
IO.Combo.Input("model_name", options=["kling-v2-6"]), IO.Combo.Input("model_name", options=["kling-v2-6"]),
IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt."), IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt."),
@@ -2649,6 +2671,8 @@ class ImageToVideoWithAudio(IO.ComfyNode):
node_id="KlingImageToVideoWithAudio", node_id="KlingImageToVideoWithAudio",
display_name="Kling 2.6 Image(First Frame) to Video with Audio", display_name="Kling 2.6 Image(First Frame) to Video with Audio",
category="api node/video/Kling", category="api node/video/Kling",
description="Generate a video with synchronized audio from a first-frame image and text prompt using the Kling v2-6 model.",
short_description="Generate video with audio from an image using Kling v2-6.",
inputs=[ inputs=[
IO.Combo.Input("model_name", options=["kling-v2-6"]), IO.Combo.Input("model_name", options=["kling-v2-6"]),
IO.Image.Input("start_frame"), IO.Image.Input("start_frame"),
@@ -2719,6 +2743,8 @@ class MotionControl(IO.ComfyNode):
node_id="KlingMotionControl", node_id="KlingMotionControl",
display_name="Kling Motion Control", display_name="Kling Motion Control",
category="api node/video/Kling", category="api node/video/Kling",
description="Drive character movement and expression in video using a reference image and motion reference video.",
short_description="Control video character motion using reference image and video.",
inputs=[ inputs=[
IO.String.Input("prompt", multiline=True), IO.String.Input("prompt", multiline=True),
IO.Image.Input("reference_image"), IO.Image.Input("reference_image"),
@@ -2815,6 +2841,7 @@ class KlingVideoNode(IO.ComfyNode):
category="api node/video/Kling", category="api node/video/Kling",
description="Generate videos with Kling V3. " description="Generate videos with Kling V3. "
"Supports text-to-video and image-to-video with optional storyboard multi-prompt and audio generation.", "Supports text-to-video and image-to-video with optional storyboard multi-prompt and audio generation.",
short_description="Generate videos with Kling V3 from text or images.",
inputs=[ inputs=[
IO.DynamicCombo.Input( IO.DynamicCombo.Input(
"multi_shot", "multi_shot",

View File

@@ -52,6 +52,7 @@ class TextToVideoNode(IO.ComfyNode):
display_name="LTXV Text To Video", display_name="LTXV Text To Video",
category="api node/video/LTXV", category="api node/video/LTXV",
description="Professional-quality videos with customizable duration and resolution.", description="Professional-quality videos with customizable duration and resolution.",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input("model", options=list(MODELS_MAP.keys())), IO.Combo.Input("model", options=list(MODELS_MAP.keys())),
IO.String.Input( IO.String.Input(
@@ -128,6 +129,7 @@ class ImageToVideoNode(IO.ComfyNode):
display_name="LTXV Image To Video", display_name="LTXV Image To Video",
category="api node/video/LTXV", category="api node/video/LTXV",
description="Professional-quality videos with customizable duration and resolution based on start image.", description="Professional-quality videos with customizable duration and resolution based on start image.",
short_description=None,
inputs=[ inputs=[
IO.Image.Input("image", tooltip="First frame to be used for the video."), IO.Image.Input("image", tooltip="First frame to be used for the video."),
IO.Combo.Input("model", options=list(MODELS_MAP.keys())), IO.Combo.Input("model", options=list(MODELS_MAP.keys())),

View File

@@ -46,6 +46,7 @@ class LumaReferenceNode(IO.ComfyNode):
display_name="Luma Reference", display_name="Luma Reference",
category="api node/image/Luma", category="api node/image/Luma",
description="Holds an image and weight for use with Luma Generate Image node.", description="Holds an image and weight for use with Luma Generate Image node.",
short_description="Image and weight input for Luma generation.",
inputs=[ inputs=[
IO.Image.Input( IO.Image.Input(
"image", "image",
@@ -85,6 +86,7 @@ class LumaConceptsNode(IO.ComfyNode):
display_name="Luma Concepts", display_name="Luma Concepts",
category="api node/video/Luma", category="api node/video/Luma",
description="Camera Concepts for use with Luma Text to Video and Luma Image to Video nodes.", description="Camera Concepts for use with Luma Text to Video and Luma Image to Video nodes.",
short_description="Camera concepts for Luma video generation nodes.",
inputs=[ inputs=[
IO.Combo.Input( IO.Combo.Input(
"concept1", "concept1",
@@ -134,6 +136,7 @@ class LumaImageGenerationNode(IO.ComfyNode):
display_name="Luma Text to Image", display_name="Luma Text to Image",
category="api node/image/Luma", category="api node/image/Luma",
description="Generates images synchronously based on prompt and aspect ratio.", description="Generates images synchronously based on prompt and aspect ratio.",
short_description=None,
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",
@@ -278,6 +281,7 @@ class LumaImageModifyNode(IO.ComfyNode):
display_name="Luma Image to Image", display_name="Luma Image to Image",
category="api node/image/Luma", category="api node/image/Luma",
description="Modifies images synchronously based on prompt and aspect ratio.", description="Modifies images synchronously based on prompt and aspect ratio.",
short_description=None,
inputs=[ inputs=[
IO.Image.Input( IO.Image.Input(
"image", "image",
@@ -371,6 +375,7 @@ class LumaTextToVideoGenerationNode(IO.ComfyNode):
display_name="Luma Text to Video", display_name="Luma Text to Video",
category="api node/video/Luma", category="api node/video/Luma",
description="Generates videos synchronously based on prompt and output_size.", description="Generates videos synchronously based on prompt and output_size.",
short_description=None,
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",
@@ -472,6 +477,7 @@ class LumaImageToVideoGenerationNode(IO.ComfyNode):
display_name="Luma Image to Video", display_name="Luma Image to Video",
category="api node/video/Luma", category="api node/video/Luma",
description="Generates videos synchronously based on prompt, input images, and output_size.", description="Generates videos synchronously based on prompt, input images, and output_size.",
short_description=None,
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",

View File

@@ -242,6 +242,7 @@ class MagnificImageUpscalerPreciseV2Node(IO.ComfyNode):
category="api node/image/Magnific", category="api node/image/Magnific",
description="High-fidelity upscaling with fine control over sharpness, grain, and detail. " description="High-fidelity upscaling with fine control over sharpness, grain, and detail. "
"Maximum output: 10060×10060 pixels.", "Maximum output: 10060×10060 pixels.",
short_description="High-fidelity upscaling with sharpness, grain, and detail control.",
inputs=[ inputs=[
IO.Image.Input("image"), IO.Image.Input("image"),
IO.Combo.Input("scale_factor", options=["2x", "4x", "8x", "16x"]), IO.Combo.Input("scale_factor", options=["2x", "4x", "8x", "16x"]),
@@ -401,6 +402,7 @@ class MagnificImageStyleTransferNode(IO.ComfyNode):
display_name="Magnific Image Style Transfer", display_name="Magnific Image Style Transfer",
category="api node/image/Magnific", category="api node/image/Magnific",
description="Transfer the style from a reference image to your input image.", description="Transfer the style from a reference image to your input image.",
short_description=None,
inputs=[ inputs=[
IO.Image.Input("image", tooltip="The image to apply style transfer to."), IO.Image.Input("image", tooltip="The image to apply style transfer to."),
IO.Image.Input("reference_image", tooltip="The reference image to extract style from."), IO.Image.Input("reference_image", tooltip="The reference image to extract style from."),
@@ -549,6 +551,7 @@ class MagnificImageRelightNode(IO.ComfyNode):
display_name="Magnific Image Relight", display_name="Magnific Image Relight",
category="api node/image/Magnific", category="api node/image/Magnific",
description="Relight an image with lighting adjustments and optional reference-based light transfer.", description="Relight an image with lighting adjustments and optional reference-based light transfer.",
short_description=None,
inputs=[ inputs=[
IO.Image.Input("image", tooltip="The image to relight."), IO.Image.Input("image", tooltip="The image to relight."),
IO.String.Input( IO.String.Input(
@@ -787,6 +790,7 @@ class MagnificImageSkinEnhancerNode(IO.ComfyNode):
display_name="Magnific Image Skin Enhancer", display_name="Magnific Image Skin Enhancer",
category="api node/image/Magnific", category="api node/image/Magnific",
description="Skin enhancement for portraits with multiple processing modes.", description="Skin enhancement for portraits with multiple processing modes.",
short_description=None,
inputs=[ inputs=[
IO.Image.Input("image", tooltip="The portrait image to enhance."), IO.Image.Input("image", tooltip="The portrait image to enhance."),
IO.Int.Input( IO.Int.Input(

View File

@@ -34,6 +34,8 @@ class MeshyTextToModelNode(IO.ComfyNode):
node_id="MeshyTextToModelNode", node_id="MeshyTextToModelNode",
display_name="Meshy: Text to Model", display_name="Meshy: Text to Model",
category="api node/3d/Meshy", category="api node/3d/Meshy",
description="Generate a 3D model from a text prompt using the Meshy API.",
short_description="Generate a 3D model from a text prompt.",
inputs=[ inputs=[
IO.Combo.Input("model", options=["latest"]), IO.Combo.Input("model", options=["latest"]),
IO.String.Input("prompt", multiline=True, default=""), IO.String.Input("prompt", multiline=True, default=""),
@@ -146,6 +148,7 @@ class MeshyRefineNode(IO.ComfyNode):
display_name="Meshy: Refine Draft Model", display_name="Meshy: Refine Draft Model",
category="api node/3d/Meshy", category="api node/3d/Meshy",
description="Refine a previously created draft model.", description="Refine a previously created draft model.",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input("model", options=["latest"]), IO.Combo.Input("model", options=["latest"]),
IO.Custom("MESHY_TASK_ID").Input("meshy_task_id"), IO.Custom("MESHY_TASK_ID").Input("meshy_task_id"),
@@ -239,6 +242,8 @@ class MeshyImageToModelNode(IO.ComfyNode):
node_id="MeshyImageToModelNode", node_id="MeshyImageToModelNode",
display_name="Meshy: Image to Model", display_name="Meshy: Image to Model",
category="api node/3d/Meshy", category="api node/3d/Meshy",
description="Generate a 3D model from a single image using the Meshy API.",
short_description="Generate a 3D model from an image.",
inputs=[ inputs=[
IO.Combo.Input("model", options=["latest"]), IO.Combo.Input("model", options=["latest"]),
IO.Image.Input("image"), IO.Image.Input("image"),
@@ -403,6 +408,7 @@ class MeshyMultiImageToModelNode(IO.ComfyNode):
node_id="MeshyMultiImageToModelNode", node_id="MeshyMultiImageToModelNode",
display_name="Meshy: Multi-Image to Model", display_name="Meshy: Multi-Image to Model",
category="api node/3d/Meshy", category="api node/3d/Meshy",
description="Generate a 3D model from multiple images using the Meshy API.",
inputs=[ inputs=[
IO.Combo.Input("model", options=["latest"]), IO.Combo.Input("model", options=["latest"]),
IO.Autogrow.Input( IO.Autogrow.Input(
@@ -575,6 +581,7 @@ class MeshyRigModelNode(IO.ComfyNode):
description="Provides a rigged character in standard formats. " description="Provides a rigged character in standard formats. "
"Auto-rigging is currently not suitable for untextured meshes, non-humanoid assets, " "Auto-rigging is currently not suitable for untextured meshes, non-humanoid assets, "
"or humanoid assets with unclear limb and body structure.", "or humanoid assets with unclear limb and body structure.",
short_description="Rig a character model for animation.",
inputs=[ inputs=[
IO.Custom("MESHY_TASK_ID").Input("meshy_task_id"), IO.Custom("MESHY_TASK_ID").Input("meshy_task_id"),
IO.Float.Input( IO.Float.Input(
@@ -654,6 +661,7 @@ class MeshyAnimateModelNode(IO.ComfyNode):
display_name="Meshy: Animate Model", display_name="Meshy: Animate Model",
category="api node/3d/Meshy", category="api node/3d/Meshy",
description="Apply a specific animation action to a previously rigged character.", description="Apply a specific animation action to a previously rigged character.",
short_description=None,
inputs=[ inputs=[
IO.Custom("MESHY_RIGGED_TASK_ID").Input("rig_task_id"), IO.Custom("MESHY_RIGGED_TASK_ID").Input("rig_task_id"),
IO.Int.Input( IO.Int.Input(
@@ -719,6 +727,7 @@ class MeshyTextureNode(IO.ComfyNode):
node_id="MeshyTextureNode", node_id="MeshyTextureNode",
display_name="Meshy: Texture Model", display_name="Meshy: Texture Model",
category="api node/3d/Meshy", category="api node/3d/Meshy",
description="Apply textures to an existing 3D model using the Meshy API.",
inputs=[ inputs=[
IO.Combo.Input("model", options=["latest"]), IO.Combo.Input("model", options=["latest"]),
IO.Custom("MESHY_TASK_ID").Input("meshy_task_id"), IO.Custom("MESHY_TASK_ID").Input("meshy_task_id"),

View File

@@ -103,6 +103,7 @@ class MinimaxTextToVideoNode(IO.ComfyNode):
display_name="MiniMax Text to Video", display_name="MiniMax Text to Video",
category="api node/video/MiniMax", category="api node/video/MiniMax",
description="Generates videos synchronously based on a prompt, and optional parameters.", description="Generates videos synchronously based on a prompt, and optional parameters.",
short_description=None,
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt_text", "prompt_text",
@@ -165,6 +166,7 @@ class MinimaxImageToVideoNode(IO.ComfyNode):
display_name="MiniMax Image to Video", display_name="MiniMax Image to Video",
category="api node/video/MiniMax", category="api node/video/MiniMax",
description="Generates videos synchronously based on an image and prompt, and optional parameters.", description="Generates videos synchronously based on an image and prompt, and optional parameters.",
short_description="Generate videos from an image, prompt, and optional parameters.",
inputs=[ inputs=[
IO.Image.Input( IO.Image.Input(
"image", "image",
@@ -232,6 +234,7 @@ class MinimaxSubjectToVideoNode(IO.ComfyNode):
display_name="MiniMax Subject to Video", display_name="MiniMax Subject to Video",
category="api node/video/MiniMax", category="api node/video/MiniMax",
description="Generates videos synchronously based on an image and prompt, and optional parameters.", description="Generates videos synchronously based on an image and prompt, and optional parameters.",
short_description="Subject-driven video generation from image and prompt.",
inputs=[ inputs=[
IO.Image.Input( IO.Image.Input(
"subject", "subject",
@@ -296,6 +299,7 @@ class MinimaxHailuoVideoNode(IO.ComfyNode):
display_name="MiniMax Hailuo Video", display_name="MiniMax Hailuo Video",
category="api node/video/MiniMax", category="api node/video/MiniMax",
description="Generates videos from prompt, with optional start frame using the new MiniMax Hailuo-02 model.", description="Generates videos from prompt, with optional start frame using the new MiniMax Hailuo-02 model.",
short_description="Generate videos with optional start frame using Hailuo-02.",
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt_text", "prompt_text",

View File

@@ -166,6 +166,7 @@ class MoonvalleyImg2VideoNode(IO.ComfyNode):
display_name="Moonvalley Marey Image to Video", display_name="Moonvalley Marey Image to Video",
category="api node/video/Moonvalley Marey", category="api node/video/Moonvalley Marey",
description="Moonvalley Marey Image to Video Node", description="Moonvalley Marey Image to Video Node",
short_description=None,
inputs=[ inputs=[
IO.Image.Input( IO.Image.Input(
"image", "image",
@@ -290,7 +291,8 @@ class MoonvalleyVideo2VideoNode(IO.ComfyNode):
node_id="MoonvalleyVideo2VideoNode", node_id="MoonvalleyVideo2VideoNode",
display_name="Moonvalley Marey Video to Video", display_name="Moonvalley Marey Video to Video",
category="api node/video/Moonvalley Marey", category="api node/video/Moonvalley Marey",
description="", description="Transform an input video into a new video using a text prompt and motion or pose control.",
short_description="Transform video using text prompt with motion or pose control.",
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",
@@ -415,7 +417,8 @@ class MoonvalleyTxt2VideoNode(IO.ComfyNode):
node_id="MoonvalleyTxt2VideoNode", node_id="MoonvalleyTxt2VideoNode",
display_name="Moonvalley Marey Text to Video", display_name="Moonvalley Marey Text to Video",
category="api node/video/Moonvalley Marey", category="api node/video/Moonvalley Marey",
description="", description="Generate a video from a text prompt using the Moonvalley Marey model.",
short_description="Generate video from a text prompt using Moonvalley Marey.",
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",

View File

@@ -98,6 +98,7 @@ class OpenAIDalle2(IO.ComfyNode):
display_name="OpenAI DALL·E 2", display_name="OpenAI DALL·E 2",
category="api node/image/OpenAI", category="api node/image/OpenAI",
description="Generates images synchronously via OpenAI's DALL·E 2 endpoint.", description="Generates images synchronously via OpenAI's DALL·E 2 endpoint.",
short_description=None,
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",
@@ -248,6 +249,7 @@ class OpenAIDalle3(IO.ComfyNode):
display_name="OpenAI DALL·E 3", display_name="OpenAI DALL·E 3",
category="api node/image/OpenAI", category="api node/image/OpenAI",
description="Generates images synchronously via OpenAI's DALL·E 3 endpoint.", description="Generates images synchronously via OpenAI's DALL·E 3 endpoint.",
short_description=None,
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",
@@ -366,6 +368,7 @@ class OpenAIGPTImage1(IO.ComfyNode):
display_name="OpenAI GPT Image 1.5", display_name="OpenAI GPT Image 1.5",
category="api node/image/OpenAI", category="api node/image/OpenAI",
description="Generates images synchronously via OpenAI's GPT Image endpoint.", description="Generates images synchronously via OpenAI's GPT Image endpoint.",
short_description=None,
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",
@@ -576,6 +579,7 @@ class OpenAIChatNode(IO.ComfyNode):
display_name="OpenAI ChatGPT", display_name="OpenAI ChatGPT",
category="api node/text/OpenAI", category="api node/text/OpenAI",
description="Generate text responses from an OpenAI model.", description="Generate text responses from an OpenAI model.",
short_description=None,
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",
@@ -803,6 +807,7 @@ class OpenAIInputFiles(IO.ComfyNode):
display_name="OpenAI ChatGPT Input Files", display_name="OpenAI ChatGPT Input Files",
category="api node/text/OpenAI", category="api node/text/OpenAI",
description="Loads and prepares input files (text, pdf, etc.) to include as inputs for the OpenAI Chat Node. The files will be read by the OpenAI model when generating a response. 🛈 TIP: Can be chained together with other OpenAI Input File nodes.", description="Loads and prepares input files (text, pdf, etc.) to include as inputs for the OpenAI Chat Node. The files will be read by the OpenAI model when generating a response. 🛈 TIP: Can be chained together with other OpenAI Input File nodes.",
short_description="Load and prepare input files for OpenAI Chat.",
inputs=[ inputs=[
IO.Combo.Input( IO.Combo.Input(
"file", "file",
@@ -850,6 +855,7 @@ class OpenAIChatConfig(IO.ComfyNode):
display_name="OpenAI ChatGPT Advanced Options", display_name="OpenAI ChatGPT Advanced Options",
category="api node/text/OpenAI", category="api node/text/OpenAI",
description="Allows specifying advanced configuration options for the OpenAI Chat Nodes.", description="Allows specifying advanced configuration options for the OpenAI Chat Nodes.",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input( IO.Combo.Input(
"truncation", "truncation",

View File

@@ -54,6 +54,8 @@ class PixverseTemplateNode(IO.ComfyNode):
node_id="PixverseTemplateNode", node_id="PixverseTemplateNode",
display_name="PixVerse Template", display_name="PixVerse Template",
category="api node/video/PixVerse", category="api node/video/PixVerse",
description="Select a style template for PixVerse video generation.",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input("template", options=list(pixverse_templates.keys())), IO.Combo.Input("template", options=list(pixverse_templates.keys())),
], ],
@@ -76,6 +78,7 @@ class PixverseTextToVideoNode(IO.ComfyNode):
display_name="PixVerse Text to Video", display_name="PixVerse Text to Video",
category="api node/video/PixVerse", category="api node/video/PixVerse",
description="Generates videos based on prompt and output_size.", description="Generates videos based on prompt and output_size.",
short_description=None,
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",
@@ -194,6 +197,7 @@ class PixverseImageToVideoNode(IO.ComfyNode):
display_name="PixVerse Image to Video", display_name="PixVerse Image to Video",
category="api node/video/PixVerse", category="api node/video/PixVerse",
description="Generates videos based on prompt and output_size.", description="Generates videos based on prompt and output_size.",
short_description=None,
inputs=[ inputs=[
IO.Image.Input("image"), IO.Image.Input("image"),
IO.String.Input( IO.String.Input(
@@ -312,6 +316,7 @@ class PixverseTransitionVideoNode(IO.ComfyNode):
display_name="PixVerse Transition Video", display_name="PixVerse Transition Video",
category="api node/video/PixVerse", category="api node/video/PixVerse",
description="Generates videos based on prompt and output_size.", description="Generates videos based on prompt and output_size.",
short_description=None,
inputs=[ inputs=[
IO.Image.Input("first_frame"), IO.Image.Input("first_frame"),
IO.Image.Input("last_frame"), IO.Image.Input("last_frame"),

View File

@@ -180,6 +180,7 @@ class RecraftColorRGBNode(IO.ComfyNode):
display_name="Recraft Color RGB", display_name="Recraft Color RGB",
category="api node/image/Recraft", category="api node/image/Recraft",
description="Create Recraft Color by choosing specific RGB values.", description="Create Recraft Color by choosing specific RGB values.",
short_description=None,
inputs=[ inputs=[
IO.Int.Input("r", default=0, min=0, max=255, tooltip="Red value of color."), IO.Int.Input("r", default=0, min=0, max=255, tooltip="Red value of color."),
IO.Int.Input("g", default=0, min=0, max=255, tooltip="Green value of color."), IO.Int.Input("g", default=0, min=0, max=255, tooltip="Green value of color."),
@@ -206,6 +207,7 @@ class RecraftControlsNode(IO.ComfyNode):
display_name="Recraft Controls", display_name="Recraft Controls",
category="api node/image/Recraft", category="api node/image/Recraft",
description="Create Recraft Controls for customizing Recraft generation.", description="Create Recraft Controls for customizing Recraft generation.",
short_description=None,
inputs=[ inputs=[
IO.Custom(RecraftIO.COLOR).Input("colors", optional=True), IO.Custom(RecraftIO.COLOR).Input("colors", optional=True),
IO.Custom(RecraftIO.COLOR).Input("background_color", optional=True), IO.Custom(RecraftIO.COLOR).Input("background_color", optional=True),
@@ -230,6 +232,7 @@ class RecraftStyleV3RealisticImageNode(IO.ComfyNode):
display_name="Recraft Style - Realistic Image", display_name="Recraft Style - Realistic Image",
category="api node/image/Recraft", category="api node/image/Recraft",
description="Select realistic_image style and optional substyle.", description="Select realistic_image style and optional substyle.",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE)), IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE)),
], ],
@@ -254,7 +257,8 @@ class RecraftStyleV3DigitalIllustrationNode(RecraftStyleV3RealisticImageNode):
node_id="RecraftStyleV3DigitalIllustration", node_id="RecraftStyleV3DigitalIllustration",
display_name="Recraft Style - Digital Illustration", display_name="Recraft Style - Digital Illustration",
category="api node/image/Recraft", category="api node/image/Recraft",
description="Select realistic_image style and optional substyle.", description="Select digital_illustration style and optional substyle.",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE)), IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE)),
], ],
@@ -271,9 +275,10 @@ class RecraftStyleV3VectorIllustrationNode(RecraftStyleV3RealisticImageNode):
def define_schema(cls): def define_schema(cls):
return IO.Schema( return IO.Schema(
node_id="RecraftStyleV3VectorIllustrationNode", node_id="RecraftStyleV3VectorIllustrationNode",
display_name="Recraft Style - Realistic Image", display_name="Recraft Style - Vector Illustration",
category="api node/image/Recraft", category="api node/image/Recraft",
description="Select realistic_image style and optional substyle.", description="Select vector_illustration style and optional substyle.",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE)), IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE)),
], ],
@@ -292,7 +297,8 @@ class RecraftStyleV3LogoRasterNode(RecraftStyleV3RealisticImageNode):
node_id="RecraftStyleV3LogoRaster", node_id="RecraftStyleV3LogoRaster",
display_name="Recraft Style - Logo Raster", display_name="Recraft Style - Logo Raster",
category="api node/image/Recraft", category="api node/image/Recraft",
description="Select realistic_image style and optional substyle.", description="Select logo_raster style and optional substyle.",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE, include_none=False)), IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE, include_none=False)),
], ],
@@ -310,6 +316,7 @@ class RecraftStyleInfiniteStyleLibrary(IO.ComfyNode):
display_name="Recraft Style - Infinite Style Library", display_name="Recraft Style - Infinite Style Library",
category="api node/image/Recraft", category="api node/image/Recraft",
description="Select style based on preexisting UUID from Recraft's Infinite Style Library.", description="Select style based on preexisting UUID from Recraft's Infinite Style Library.",
short_description=None,
inputs=[ inputs=[
IO.String.Input("style_id", default="", tooltip="UUID of style from Infinite Style Library."), IO.String.Input("style_id", default="", tooltip="UUID of style from Infinite Style Library."),
], ],
@@ -335,6 +342,7 @@ class RecraftCreateStyleNode(IO.ComfyNode):
description="Create a custom style from reference images. " description="Create a custom style from reference images. "
"Upload 1-5 images to use as style references. " "Upload 1-5 images to use as style references. "
"Total size of all images is limited to 5 MB.", "Total size of all images is limited to 5 MB.",
short_description="Create a custom style from 1-5 reference images.",
inputs=[ inputs=[
IO.Combo.Input( IO.Combo.Input(
"style", "style",
@@ -402,6 +410,7 @@ class RecraftTextToImageNode(IO.ComfyNode):
display_name="Recraft Text to Image", display_name="Recraft Text to Image",
category="api node/image/Recraft", category="api node/image/Recraft",
description="Generates images synchronously based on prompt and resolution.", description="Generates images synchronously based on prompt and resolution.",
short_description=None,
inputs=[ inputs=[
IO.String.Input("prompt", multiline=True, default="", tooltip="Prompt for the image generation."), IO.String.Input("prompt", multiline=True, default="", tooltip="Prompt for the image generation."),
IO.Combo.Input( IO.Combo.Input(
@@ -514,6 +523,7 @@ class RecraftImageToImageNode(IO.ComfyNode):
display_name="Recraft Image to Image", display_name="Recraft Image to Image",
category="api node/image/Recraft", category="api node/image/Recraft",
description="Modify image based on prompt and strength.", description="Modify image based on prompt and strength.",
short_description=None,
inputs=[ inputs=[
IO.Image.Input("image"), IO.Image.Input("image"),
IO.String.Input("prompt", multiline=True, default="", tooltip="Prompt for the image generation."), IO.String.Input("prompt", multiline=True, default="", tooltip="Prompt for the image generation."),
@@ -632,6 +642,7 @@ class RecraftImageInpaintingNode(IO.ComfyNode):
display_name="Recraft Image Inpainting", display_name="Recraft Image Inpainting",
category="api node/image/Recraft", category="api node/image/Recraft",
description="Modify image based on prompt and mask.", description="Modify image based on prompt and mask.",
short_description=None,
inputs=[ inputs=[
IO.Image.Input("image"), IO.Image.Input("image"),
IO.Mask.Input("mask"), IO.Mask.Input("mask"),
@@ -734,6 +745,7 @@ class RecraftTextToVectorNode(IO.ComfyNode):
display_name="Recraft Text to Vector", display_name="Recraft Text to Vector",
category="api node/image/Recraft", category="api node/image/Recraft",
description="Generates SVG synchronously based on prompt and resolution.", description="Generates SVG synchronously based on prompt and resolution.",
short_description=None,
inputs=[ inputs=[
IO.String.Input("prompt", default="", tooltip="Prompt for the image generation.", multiline=True), IO.String.Input("prompt", default="", tooltip="Prompt for the image generation.", multiline=True),
IO.Combo.Input("substyle", options=get_v3_substyles(RecraftStyleV3.vector_illustration)), IO.Combo.Input("substyle", options=get_v3_substyles(RecraftStyleV3.vector_illustration)),
@@ -834,6 +846,7 @@ class RecraftVectorizeImageNode(IO.ComfyNode):
display_name="Recraft Vectorize Image", display_name="Recraft Vectorize Image",
category="api node/image/Recraft", category="api node/image/Recraft",
description="Generates SVG synchronously from an input image.", description="Generates SVG synchronously from an input image.",
short_description=None,
inputs=[ inputs=[
IO.Image.Input("image"), IO.Image.Input("image"),
], ],
@@ -877,6 +890,7 @@ class RecraftReplaceBackgroundNode(IO.ComfyNode):
display_name="Recraft Replace Background", display_name="Recraft Replace Background",
category="api node/image/Recraft", category="api node/image/Recraft",
description="Replace background on image, based on provided prompt.", description="Replace background on image, based on provided prompt.",
short_description=None,
inputs=[ inputs=[
IO.Image.Input("image"), IO.Image.Input("image"),
IO.String.Input("prompt", tooltip="Prompt for the image generation.", default="", multiline=True), IO.String.Input("prompt", tooltip="Prompt for the image generation.", default="", multiline=True),
@@ -964,6 +978,7 @@ class RecraftRemoveBackgroundNode(IO.ComfyNode):
display_name="Recraft Remove Background", display_name="Recraft Remove Background",
category="api node/image/Recraft", category="api node/image/Recraft",
description="Remove background from image, and return processed image and mask.", description="Remove background from image, and return processed image and mask.",
short_description=None,
inputs=[ inputs=[
IO.Image.Input("image"), IO.Image.Input("image"),
], ],
@@ -1012,8 +1027,9 @@ class RecraftCrispUpscaleNode(IO.ComfyNode):
display_name="Recraft Crisp Upscale Image", display_name="Recraft Crisp Upscale Image",
category="api node/image/Recraft", category="api node/image/Recraft",
description="Upscale image synchronously.\n" description="Upscale image synchronously.\n"
"Enhances a given raster image using crisp upscale tool, " "Enhances a given raster image using 'crisp upscale' tool, "
"increasing image resolution, making the image sharper and cleaner.", "increasing image resolution, making the image sharper and cleaner.",
short_description="Crisp upscale to sharpen and increase image resolution.",
inputs=[ inputs=[
IO.Image.Input("image"), IO.Image.Input("image"),
], ],
@@ -1058,8 +1074,9 @@ class RecraftCreativeUpscaleNode(RecraftCrispUpscaleNode):
display_name="Recraft Creative Upscale Image", display_name="Recraft Creative Upscale Image",
category="api node/image/Recraft", category="api node/image/Recraft",
description="Upscale image synchronously.\n" description="Upscale image synchronously.\n"
"Enhances a given raster image using creative upscale tool, " "Enhances a given raster image using 'creative upscale' tool, "
"boosting resolution with a focus on refining small details and faces.", "boosting resolution with a focus on refining small details and faces.",
short_description="Creative upscale focusing on small details and faces.",
inputs=[ inputs=[
IO.Image.Input("image"), IO.Image.Input("image"),
], ],

View File

@@ -238,6 +238,7 @@ class Rodin3D_Regular(IO.ComfyNode):
display_name="Rodin 3D Generate - Regular Generate", display_name="Rodin 3D Generate - Regular Generate",
category="api node/3d/Rodin", category="api node/3d/Rodin",
description=cleandoc(cls.__doc__ or ""), description=cleandoc(cls.__doc__ or ""),
short_description=None,
inputs=[ inputs=[
IO.Image.Input("Images"), IO.Image.Input("Images"),
*COMMON_PARAMETERS, *COMMON_PARAMETERS,
@@ -297,6 +298,7 @@ class Rodin3D_Detail(IO.ComfyNode):
display_name="Rodin 3D Generate - Detail Generate", display_name="Rodin 3D Generate - Detail Generate",
category="api node/3d/Rodin", category="api node/3d/Rodin",
description=cleandoc(cls.__doc__ or ""), description=cleandoc(cls.__doc__ or ""),
short_description=None,
inputs=[ inputs=[
IO.Image.Input("Images"), IO.Image.Input("Images"),
*COMMON_PARAMETERS, *COMMON_PARAMETERS,
@@ -356,6 +358,7 @@ class Rodin3D_Smooth(IO.ComfyNode):
display_name="Rodin 3D Generate - Smooth Generate", display_name="Rodin 3D Generate - Smooth Generate",
category="api node/3d/Rodin", category="api node/3d/Rodin",
description=cleandoc(cls.__doc__ or ""), description=cleandoc(cls.__doc__ or ""),
short_description=None,
inputs=[ inputs=[
IO.Image.Input("Images"), IO.Image.Input("Images"),
*COMMON_PARAMETERS, *COMMON_PARAMETERS,
@@ -414,6 +417,7 @@ class Rodin3D_Sketch(IO.ComfyNode):
display_name="Rodin 3D Generate - Sketch Generate", display_name="Rodin 3D Generate - Sketch Generate",
category="api node/3d/Rodin", category="api node/3d/Rodin",
description=cleandoc(cls.__doc__ or ""), description=cleandoc(cls.__doc__ or ""),
short_description=None,
inputs=[ inputs=[
IO.Image.Input("Images"), IO.Image.Input("Images"),
IO.Int.Input( IO.Int.Input(
@@ -476,6 +480,7 @@ class Rodin3D_Gen2(IO.ComfyNode):
display_name="Rodin 3D Generate - Gen-2 Generate", display_name="Rodin 3D Generate - Gen-2 Generate",
category="api node/3d/Rodin", category="api node/3d/Rodin",
description=cleandoc(cls.__doc__ or ""), description=cleandoc(cls.__doc__ or ""),
short_description=None,
inputs=[ inputs=[
IO.Image.Input("Images"), IO.Image.Input("Images"),
IO.Int.Input( IO.Int.Input(

View File

@@ -145,6 +145,7 @@ class RunwayImageToVideoNodeGen3a(IO.ComfyNode):
"Before diving in, review these best practices to ensure that " "Before diving in, review these best practices to ensure that "
"your input selections will set your generation up for success: " "your input selections will set your generation up for success: "
"https://help.runwayml.com/hc/en-us/articles/33927968552339-Creating-with-Act-One-on-Gen-3-Alpha-and-Turbo.", "https://help.runwayml.com/hc/en-us/articles/33927968552339-Creating-with-Act-One-on-Gen-3-Alpha-and-Turbo.",
short_description="Generate video from a starting frame using Gen3a Turbo.",
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",
@@ -239,6 +240,7 @@ class RunwayImageToVideoNodeGen4(IO.ComfyNode):
"Before diving in, review these best practices to ensure that " "Before diving in, review these best practices to ensure that "
"your input selections will set your generation up for success: " "your input selections will set your generation up for success: "
"https://help.runwayml.com/hc/en-us/articles/37327109429011-Creating-with-Gen-4-Video.", "https://help.runwayml.com/hc/en-us/articles/37327109429011-Creating-with-Gen-4-Video.",
short_description="Generate video from a starting frame using Gen4 Turbo.",
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",
@@ -337,6 +339,7 @@ class RunwayFirstLastFrameNode(IO.ComfyNode):
"Before diving in, review these best practices to ensure that your input selections " "Before diving in, review these best practices to ensure that your input selections "
"will set your generation up for success: " "will set your generation up for success: "
"https://help.runwayml.com/hc/en-us/articles/34170748696595-Creating-with-Keyframes-on-Gen-3.", "https://help.runwayml.com/hc/en-us/articles/34170748696595-Creating-with-Keyframes-on-Gen-3.",
short_description="Generate video from first and last keyframes with a prompt.",
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",
@@ -443,6 +446,7 @@ class RunwayTextToImageNode(IO.ComfyNode):
category="api node/image/Runway", category="api node/image/Runway",
description="Generate an image from a text prompt using Runway's Gen 4 model. " description="Generate an image from a text prompt using Runway's Gen 4 model. "
"You can also include reference image to guide the generation.", "You can also include reference image to guide the generation.",
short_description="Generate an image from text using Runway Gen 4.",
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",

View File

@@ -36,6 +36,7 @@ class OpenAIVideoSora2(IO.ComfyNode):
display_name="OpenAI Sora - Video", display_name="OpenAI Sora - Video",
category="api node/video/Sora", category="api node/video/Sora",
description="OpenAI video and audio generation.", description="OpenAI video and audio generation.",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input( IO.Combo.Input(
"model", "model",

View File

@@ -64,6 +64,7 @@ class StabilityStableImageUltraNode(IO.ComfyNode):
display_name="Stability AI Stable Image Ultra", display_name="Stability AI Stable Image Ultra",
category="api node/image/Stability AI", category="api node/image/Stability AI",
description=cleandoc(cls.__doc__ or ""), description=cleandoc(cls.__doc__ or ""),
short_description=None,
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",
@@ -197,6 +198,7 @@ class StabilityStableImageSD_3_5Node(IO.ComfyNode):
display_name="Stability AI Stable Diffusion 3.5 Image", display_name="Stability AI Stable Diffusion 3.5 Image",
category="api node/image/Stability AI", category="api node/image/Stability AI",
description=cleandoc(cls.__doc__ or ""), description=cleandoc(cls.__doc__ or ""),
short_description=None,
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",
@@ -352,6 +354,7 @@ class StabilityUpscaleConservativeNode(IO.ComfyNode):
display_name="Stability AI Upscale Conservative", display_name="Stability AI Upscale Conservative",
category="api node/image/Stability AI", category="api node/image/Stability AI",
description=cleandoc(cls.__doc__ or ""), description=cleandoc(cls.__doc__ or ""),
short_description=None,
inputs=[ inputs=[
IO.Image.Input("image"), IO.Image.Input("image"),
IO.String.Input( IO.String.Input(
@@ -454,6 +457,7 @@ class StabilityUpscaleCreativeNode(IO.ComfyNode):
display_name="Stability AI Upscale Creative", display_name="Stability AI Upscale Creative",
category="api node/image/Stability AI", category="api node/image/Stability AI",
description=cleandoc(cls.__doc__ or ""), description=cleandoc(cls.__doc__ or ""),
short_description=None,
inputs=[ inputs=[
IO.Image.Input("image"), IO.Image.Input("image"),
IO.String.Input( IO.String.Input(
@@ -573,6 +577,7 @@ class StabilityUpscaleFastNode(IO.ComfyNode):
display_name="Stability AI Upscale Fast", display_name="Stability AI Upscale Fast",
category="api node/image/Stability AI", category="api node/image/Stability AI",
description=cleandoc(cls.__doc__ or ""), description=cleandoc(cls.__doc__ or ""),
short_description="Quickly upscale an image to 4x its original size.",
inputs=[ inputs=[
IO.Image.Input("image"), IO.Image.Input("image"),
], ],
@@ -625,6 +630,7 @@ class StabilityTextToAudio(IO.ComfyNode):
display_name="Stability AI Text To Audio", display_name="Stability AI Text To Audio",
category="api node/audio/Stability AI", category="api node/audio/Stability AI",
description=cleandoc(cls.__doc__ or ""), description=cleandoc(cls.__doc__ or ""),
short_description=None,
inputs=[ inputs=[
IO.Combo.Input( IO.Combo.Input(
"model", "model",
@@ -701,6 +707,7 @@ class StabilityAudioToAudio(IO.ComfyNode):
display_name="Stability AI Audio To Audio", display_name="Stability AI Audio To Audio",
category="api node/audio/Stability AI", category="api node/audio/Stability AI",
description=cleandoc(cls.__doc__ or ""), description=cleandoc(cls.__doc__ or ""),
short_description=None,
inputs=[ inputs=[
IO.Combo.Input( IO.Combo.Input(
"model", "model",
@@ -794,6 +801,7 @@ class StabilityAudioInpaint(IO.ComfyNode):
display_name="Stability AI Audio Inpaint", display_name="Stability AI Audio Inpaint",
category="api node/audio/Stability AI", category="api node/audio/Stability AI",
description=cleandoc(cls.__doc__ or ""), description=cleandoc(cls.__doc__ or ""),
short_description=None,
inputs=[ inputs=[
IO.Combo.Input( IO.Combo.Input(
"model", "model",

View File

@@ -49,6 +49,7 @@ class TopazImageEnhance(IO.ComfyNode):
display_name="Topaz Image Enhance", display_name="Topaz Image Enhance",
category="api node/image/Topaz", category="api node/image/Topaz",
description="Industry-standard upscaling and image enhancement.", description="Industry-standard upscaling and image enhancement.",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input("model", options=["Reimagine"]), IO.Combo.Input("model", options=["Reimagine"]),
IO.Image.Input("image"), IO.Image.Input("image"),
@@ -223,6 +224,7 @@ class TopazVideoEnhance(IO.ComfyNode):
display_name="Topaz Video Enhance", display_name="Topaz Video Enhance",
category="api node/video/Topaz", category="api node/video/Topaz",
description="Breathe new life into video with powerful upscaling and recovery technology.", description="Breathe new life into video with powerful upscaling and recovery technology.",
short_description=None,
inputs=[ inputs=[
IO.Video.Input("video"), IO.Video.Input("video"),
IO.Boolean.Input("upscaler_enabled", default=True), IO.Boolean.Input("upscaler_enabled", default=True),

View File

@@ -80,6 +80,7 @@ class TripoTextToModelNode(IO.ComfyNode):
node_id="TripoTextToModelNode", node_id="TripoTextToModelNode",
display_name="Tripo: Text to Model", display_name="Tripo: Text to Model",
category="api node/3d/Tripo", category="api node/3d/Tripo",
description="Generate a 3D model from a text prompt using Tripo's API.",
inputs=[ inputs=[
IO.String.Input("prompt", multiline=True), IO.String.Input("prompt", multiline=True),
IO.String.Input("negative_prompt", multiline=True, optional=True), IO.String.Input("negative_prompt", multiline=True, optional=True),
@@ -199,6 +200,7 @@ class TripoImageToModelNode(IO.ComfyNode):
node_id="TripoImageToModelNode", node_id="TripoImageToModelNode",
display_name="Tripo: Image to Model", display_name="Tripo: Image to Model",
category="api node/3d/Tripo", category="api node/3d/Tripo",
description="Generate a 3D model from a single image using Tripo's API.",
inputs=[ inputs=[
IO.Image.Input("image"), IO.Image.Input("image"),
IO.Combo.Input( IO.Combo.Input(
@@ -331,6 +333,7 @@ class TripoMultiviewToModelNode(IO.ComfyNode):
node_id="TripoMultiviewToModelNode", node_id="TripoMultiviewToModelNode",
display_name="Tripo: Multiview to Model", display_name="Tripo: Multiview to Model",
category="api node/3d/Tripo", category="api node/3d/Tripo",
description="Generate a 3D model from multiple view images using Tripo's API.",
inputs=[ inputs=[
IO.Image.Input("image"), IO.Image.Input("image"),
IO.Image.Input("image_left", optional=True), IO.Image.Input("image_left", optional=True),
@@ -470,6 +473,7 @@ class TripoTextureNode(IO.ComfyNode):
node_id="TripoTextureNode", node_id="TripoTextureNode",
display_name="Tripo: Texture model", display_name="Tripo: Texture model",
category="api node/3d/Tripo", category="api node/3d/Tripo",
description="Apply textures to an existing 3D model using Tripo's API.",
inputs=[ inputs=[
IO.Custom("MODEL_TASK_ID").Input("model_task_id"), IO.Custom("MODEL_TASK_ID").Input("model_task_id"),
IO.Boolean.Input("texture", default=True, optional=True), IO.Boolean.Input("texture", default=True, optional=True),
@@ -538,6 +542,7 @@ class TripoRefineNode(IO.ComfyNode):
display_name="Tripo: Refine Draft model", display_name="Tripo: Refine Draft model",
category="api node/3d/Tripo", category="api node/3d/Tripo",
description="Refine a draft model created by v1.4 Tripo models only.", description="Refine a draft model created by v1.4 Tripo models only.",
short_description=None,
inputs=[ inputs=[
IO.Custom("MODEL_TASK_ID").Input("model_task_id", tooltip="Must be a v1.4 Tripo model"), IO.Custom("MODEL_TASK_ID").Input("model_task_id", tooltip="Must be a v1.4 Tripo model"),
], ],
@@ -577,6 +582,8 @@ class TripoRigNode(IO.ComfyNode):
node_id="TripoRigNode", node_id="TripoRigNode",
display_name="Tripo: Rig model", display_name="Tripo: Rig model",
category="api node/3d/Tripo", category="api node/3d/Tripo",
description="Add a skeleton rig to an existing 3D model using Tripo's API.",
short_description="Add a skeleton rig to a 3D model.",
inputs=[IO.Custom("MODEL_TASK_ID").Input("original_model_task_id")], inputs=[IO.Custom("MODEL_TASK_ID").Input("original_model_task_id")],
outputs=[ outputs=[
IO.String.Output(display_name="model_file"), # for backward compatibility only IO.String.Output(display_name="model_file"), # for backward compatibility only
@@ -614,6 +621,8 @@ class TripoRetargetNode(IO.ComfyNode):
node_id="TripoRetargetNode", node_id="TripoRetargetNode",
display_name="Tripo: Retarget rigged model", display_name="Tripo: Retarget rigged model",
category="api node/3d/Tripo", category="api node/3d/Tripo",
description="Apply a preset animation to a rigged 3D model using Tripo's API.",
short_description="Apply a preset animation to a rigged model.",
inputs=[ inputs=[
IO.Custom("RIG_TASK_ID").Input("original_model_task_id"), IO.Custom("RIG_TASK_ID").Input("original_model_task_id"),
IO.Combo.Input( IO.Combo.Input(
@@ -679,6 +688,8 @@ class TripoConversionNode(IO.ComfyNode):
node_id="TripoConversionNode", node_id="TripoConversionNode",
display_name="Tripo: Convert model", display_name="Tripo: Convert model",
category="api node/3d/Tripo", category="api node/3d/Tripo",
description="Convert a 3D model to different formats with optional post-processing using Tripo's API.",
short_description="Convert a 3D model to different formats.",
inputs=[ inputs=[
IO.Custom("MODEL_TASK_ID,RIG_TASK_ID,RETARGET_TASK_ID").Input("original_model_task_id"), IO.Custom("MODEL_TASK_ID,RIG_TASK_ID,RETARGET_TASK_ID").Input("original_model_task_id"),
IO.Combo.Input("format", options=["GLTF", "USDZ", "FBX", "OBJ", "STL", "3MF"]), IO.Combo.Input("format", options=["GLTF", "USDZ", "FBX", "OBJ", "STL", "3MF"]),

View File

@@ -46,6 +46,7 @@ class VeoVideoGenerationNode(IO.ComfyNode):
display_name="Google Veo 2 Video Generation", display_name="Google Veo 2 Video Generation",
category="api node/video/Veo", category="api node/video/Veo",
description="Generates videos from text prompts using Google's Veo 2 API", description="Generates videos from text prompts using Google's Veo 2 API",
short_description=None,
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",
@@ -264,6 +265,7 @@ class Veo3VideoGenerationNode(VeoVideoGenerationNode):
display_name="Google Veo 3 Video Generation", display_name="Google Veo 3 Video Generation",
category="api node/video/Veo", category="api node/video/Veo",
description="Generates videos from text prompts using Google's Veo 3 API", description="Generates videos from text prompts using Google's Veo 3 API",
short_description=None,
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",
@@ -377,6 +379,7 @@ class Veo3FirstLastFrameNode(IO.ComfyNode):
display_name="Google Veo 3 First-Last-Frame to Video", display_name="Google Veo 3 First-Last-Frame to Video",
category="api node/video/Veo", category="api node/video/Veo",
description="Generate video using prompt and first and last frames.", description="Generate video using prompt and first and last frames.",
short_description=None,
inputs=[ inputs=[
IO.String.Input( IO.String.Input(
"prompt", "prompt",

View File

@@ -72,6 +72,7 @@ class ViduTextToVideoNode(IO.ComfyNode):
display_name="Vidu Text To Video Generation", display_name="Vidu Text To Video Generation",
category="api node/video/Vidu", category="api node/video/Vidu",
description="Generate video from a text prompt", description="Generate video from a text prompt",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"), IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"),
IO.String.Input( IO.String.Input(
@@ -168,6 +169,7 @@ class ViduImageToVideoNode(IO.ComfyNode):
display_name="Vidu Image To Video Generation", display_name="Vidu Image To Video Generation",
category="api node/video/Vidu", category="api node/video/Vidu",
description="Generate video from image and optional prompt", description="Generate video from image and optional prompt",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"), IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"),
IO.Image.Input( IO.Image.Input(
@@ -270,6 +272,7 @@ class ViduReferenceVideoNode(IO.ComfyNode):
display_name="Vidu Reference To Video Generation", display_name="Vidu Reference To Video Generation",
category="api node/video/Vidu", category="api node/video/Vidu",
description="Generate video from multiple images and a prompt", description="Generate video from multiple images and a prompt",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"), IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"),
IO.Image.Input( IO.Image.Input(
@@ -383,6 +386,7 @@ class ViduStartEndToVideoNode(IO.ComfyNode):
display_name="Vidu Start End To Video Generation", display_name="Vidu Start End To Video Generation",
category="api node/video/Vidu", category="api node/video/Vidu",
description="Generate a video from start and end frames and a prompt", description="Generate a video from start and end frames and a prompt",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"), IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"),
IO.Image.Input( IO.Image.Input(
@@ -485,6 +489,7 @@ class Vidu2TextToVideoNode(IO.ComfyNode):
display_name="Vidu2 Text-to-Video Generation", display_name="Vidu2 Text-to-Video Generation",
category="api node/video/Vidu", category="api node/video/Vidu",
description="Generate video from a text prompt", description="Generate video from a text prompt",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input("model", options=["viduq2"]), IO.Combo.Input("model", options=["viduq2"]),
IO.String.Input( IO.String.Input(
@@ -576,6 +581,7 @@ class Vidu2ImageToVideoNode(IO.ComfyNode):
display_name="Vidu2 Image-to-Video Generation", display_name="Vidu2 Image-to-Video Generation",
category="api node/video/Vidu", category="api node/video/Vidu",
description="Generate a video from an image and an optional prompt.", description="Generate a video from an image and an optional prompt.",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input("model", options=["viduq2-pro-fast", "viduq2-pro", "viduq2-turbo"]), IO.Combo.Input("model", options=["viduq2-pro-fast", "viduq2-pro", "viduq2-turbo"]),
IO.Image.Input( IO.Image.Input(
@@ -704,6 +710,7 @@ class Vidu2ReferenceVideoNode(IO.ComfyNode):
display_name="Vidu2 Reference-to-Video Generation", display_name="Vidu2 Reference-to-Video Generation",
category="api node/video/Vidu", category="api node/video/Vidu",
description="Generate a video from multiple reference images and a prompt.", description="Generate a video from multiple reference images and a prompt.",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input("model", options=["viduq2"]), IO.Combo.Input("model", options=["viduq2"]),
IO.Autogrow.Input( IO.Autogrow.Input(
@@ -837,6 +844,7 @@ class Vidu2StartEndToVideoNode(IO.ComfyNode):
display_name="Vidu2 Start/End Frame-to-Video Generation", display_name="Vidu2 Start/End Frame-to-Video Generation",
category="api node/video/Vidu", category="api node/video/Vidu",
description="Generate a video from a start frame, an end frame, and a prompt.", description="Generate a video from a start frame, an end frame, and a prompt.",
short_description="Generate video from start frame, end frame, and prompt.",
inputs=[ inputs=[
IO.Combo.Input("model", options=["viduq2-pro-fast", "viduq2-pro", "viduq2-turbo"]), IO.Combo.Input("model", options=["viduq2-pro-fast", "viduq2-pro", "viduq2-turbo"]),
IO.Image.Input("first_frame"), IO.Image.Input("first_frame"),
@@ -956,6 +964,7 @@ class ViduExtendVideoNode(IO.ComfyNode):
display_name="Vidu Video Extension", display_name="Vidu Video Extension",
category="api node/video/Vidu", category="api node/video/Vidu",
description="Extend an existing video by generating additional frames.", description="Extend an existing video by generating additional frames.",
short_description=None,
inputs=[ inputs=[
IO.DynamicCombo.Input( IO.DynamicCombo.Input(
"model", "model",
@@ -1126,6 +1135,7 @@ class ViduMultiFrameVideoNode(IO.ComfyNode):
display_name="Vidu Multi-Frame Video Generation", display_name="Vidu Multi-Frame Video Generation",
category="api node/video/Vidu", category="api node/video/Vidu",
description="Generate a video with multiple keyframe transitions.", description="Generate a video with multiple keyframe transitions.",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input("model", options=["viduq2-pro", "viduq2-turbo"]), IO.Combo.Input("model", options=["viduq2-pro", "viduq2-turbo"]),
IO.Image.Input( IO.Image.Input(
@@ -1272,6 +1282,7 @@ class Vidu3TextToVideoNode(IO.ComfyNode):
display_name="Vidu Q3 Text-to-Video Generation", display_name="Vidu Q3 Text-to-Video Generation",
category="api node/video/Vidu", category="api node/video/Vidu",
description="Generate video from a text prompt.", description="Generate video from a text prompt.",
short_description=None,
inputs=[ inputs=[
IO.DynamicCombo.Input( IO.DynamicCombo.Input(
"model", "model",
@@ -1380,6 +1391,7 @@ class Vidu3ImageToVideoNode(IO.ComfyNode):
display_name="Vidu Q3 Image-to-Video Generation", display_name="Vidu Q3 Image-to-Video Generation",
category="api node/video/Vidu", category="api node/video/Vidu",
description="Generate a video from an image and an optional prompt.", description="Generate a video from an image and an optional prompt.",
short_description=None,
inputs=[ inputs=[
IO.DynamicCombo.Input( IO.DynamicCombo.Input(
"model", "model",

View File

@@ -175,6 +175,7 @@ class WanTextToImageApi(IO.ComfyNode):
display_name="Wan Text to Image", display_name="Wan Text to Image",
category="api node/image/Wan", category="api node/image/Wan",
description="Generates an image based on a text prompt.", description="Generates an image based on a text prompt.",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input( IO.Combo.Input(
"model", "model",
@@ -298,6 +299,7 @@ class WanImageToImageApi(IO.ComfyNode):
category="api node/image/Wan", category="api node/image/Wan",
description="Generates an image from one or two input images and a text prompt. " description="Generates an image from one or two input images and a text prompt. "
"The output image is currently fixed at 1.6 MP, and its aspect ratio matches the input image(s).", "The output image is currently fixed at 1.6 MP, and its aspect ratio matches the input image(s).",
short_description="Generate an image from input images and a text prompt.",
inputs=[ inputs=[
IO.Combo.Input( IO.Combo.Input(
"model", "model",
@@ -424,6 +426,7 @@ class WanTextToVideoApi(IO.ComfyNode):
display_name="Wan Text to Video", display_name="Wan Text to Video",
category="api node/video/Wan", category="api node/video/Wan",
description="Generates a video based on a text prompt.", description="Generates a video based on a text prompt.",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input( IO.Combo.Input(
"model", "model",
@@ -603,6 +606,7 @@ class WanImageToVideoApi(IO.ComfyNode):
display_name="Wan Image to Video", display_name="Wan Image to Video",
category="api node/video/Wan", category="api node/video/Wan",
description="Generates a video from the first frame and a text prompt.", description="Generates a video from the first frame and a text prompt.",
short_description=None,
inputs=[ inputs=[
IO.Combo.Input( IO.Combo.Input(
"model", "model",
@@ -779,6 +783,7 @@ class WanReferenceVideoApi(IO.ComfyNode):
category="api node/video/Wan", category="api node/video/Wan",
description="Use the character and voice from input videos, combined with a prompt, " description="Use the character and voice from input videos, combined with a prompt, "
"to generate a new video that maintains character consistency.", "to generate a new video that maintains character consistency.",
short_description="Generate character-consistent video from reference videos and prompt.",
inputs=[ inputs=[
IO.Combo.Input("model", options=["wan2.6-r2v"]), IO.Combo.Input("model", options=["wan2.6-r2v"]),
IO.String.Input( IO.String.Input(

View File

@@ -30,6 +30,7 @@ class WavespeedFlashVSRNode(IO.ComfyNode):
category="api node/video/WaveSpeed", category="api node/video/WaveSpeed",
description="Fast, high-quality video upscaler that " description="Fast, high-quality video upscaler that "
"boosts resolution and restores clarity for low-resolution or blurry footage.", "boosts resolution and restores clarity for low-resolution or blurry footage.",
short_description="Fast video upscaler that boosts resolution and restores clarity.",
inputs=[ inputs=[
IO.Video.Input("video"), IO.Video.Input("video"),
IO.Combo.Input("target_resolution", options=["720p", "1080p", "2K", "4K"]), IO.Combo.Input("target_resolution", options=["720p", "1080p", "2K", "4K"]),
@@ -101,6 +102,7 @@ class WavespeedImageUpscaleNode(IO.ComfyNode):
display_name="WaveSpeed Image Upscale", display_name="WaveSpeed Image Upscale",
category="api node/image/WaveSpeed", category="api node/image/WaveSpeed",
description="Boost image resolution and quality, upscaling photos to 4K or 8K for sharp, detailed results.", description="Boost image resolution and quality, upscaling photos to 4K or 8K for sharp, detailed results.",
short_description="Upscale images to 4K or 8K with enhanced quality.",
inputs=[ inputs=[
IO.Combo.Input("model", options=["SeedVR2", "Ultimate"]), IO.Combo.Input("model", options=["SeedVR2", "Ultimate"]),
IO.Image.Input("image"), IO.Image.Input("image"),

View File

@@ -12,6 +12,8 @@ class TextEncodeAceStepAudio(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="TextEncodeAceStepAudio", node_id="TextEncodeAceStepAudio",
category="conditioning", category="conditioning",
description="Encodes tags and lyrics into conditioning for ACE-Step 1.0 audio generation with adjustable lyrics strength.",
short_description="Encodes tags and lyrics for ACE-Step 1.0 audio.",
inputs=[ inputs=[
io.Clip.Input("clip"), io.Clip.Input("clip"),
io.String.Input("tags", multiline=True, dynamic_prompts=True), io.String.Input("tags", multiline=True, dynamic_prompts=True),
@@ -34,6 +36,8 @@ class TextEncodeAceStepAudio15(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="TextEncodeAceStepAudio1.5", node_id="TextEncodeAceStepAudio1.5",
category="conditioning", category="conditioning",
description="Encodes tags, lyrics, and music parameters like BPM, key, and language into conditioning for ACE-Step 1.5 audio generation.",
short_description="Encodes text and music parameters for ACE-Step 1.5.",
inputs=[ inputs=[
io.Clip.Input("clip"), io.Clip.Input("clip"),
io.String.Input("tags", multiline=True, dynamic_prompts=True), io.String.Input("tags", multiline=True, dynamic_prompts=True),
@@ -68,6 +72,8 @@ class EmptyAceStepLatentAudio(io.ComfyNode):
node_id="EmptyAceStepLatentAudio", node_id="EmptyAceStepLatentAudio",
display_name="Empty Ace Step 1.0 Latent Audio", display_name="Empty Ace Step 1.0 Latent Audio",
category="latent/audio", category="latent/audio",
description="Creates an empty latent audio tensor for ACE-Step 1.0 with a specified duration and batch size.",
short_description="Creates an empty ACE-Step 1.0 audio latent.",
inputs=[ inputs=[
io.Float.Input("seconds", default=120.0, min=1.0, max=1000.0, step=0.1), io.Float.Input("seconds", default=120.0, min=1.0, max=1000.0, step=0.1),
io.Int.Input( io.Int.Input(
@@ -91,6 +97,8 @@ class EmptyAceStep15LatentAudio(io.ComfyNode):
node_id="EmptyAceStep1.5LatentAudio", node_id="EmptyAceStep1.5LatentAudio",
display_name="Empty Ace Step 1.5 Latent Audio", display_name="Empty Ace Step 1.5 Latent Audio",
category="latent/audio", category="latent/audio",
description="Creates an empty latent audio tensor for ACE-Step 1.5 with a specified duration and batch size.",
short_description="Creates an empty ACE-Step 1.5 audio latent.",
inputs=[ inputs=[
io.Float.Input("seconds", default=120.0, min=1.0, max=1000.0, step=0.01), io.Float.Input("seconds", default=120.0, min=1.0, max=1000.0, step=0.01),
io.Int.Input( io.Int.Input(
@@ -115,6 +123,7 @@ class ReferenceAudio(io.ComfyNode):
category="advanced/conditioning/audio", category="advanced/conditioning/audio",
is_experimental=True, is_experimental=True,
description="This node sets the reference audio for ace step 1.5", description="This node sets the reference audio for ace step 1.5",
short_description=None,
inputs=[ inputs=[
io.Conditioning.Input("conditioning"), io.Conditioning.Input("conditioning"),
io.Latent.Input("latent", optional=True), io.Latent.Input("latent", optional=True),

View File

@@ -46,6 +46,8 @@ class SamplerLCMUpscale(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="SamplerLCMUpscale", node_id="SamplerLCMUpscale",
category="sampling/custom_sampling/samplers", category="sampling/custom_sampling/samplers",
description="Sampler that progressively upscales the latent during LCM sampling steps, combining denoising with gradual resolution increase.",
short_description="LCM sampler with progressive latent upscaling.",
inputs=[ inputs=[
io.Float.Input("scale_ratio", default=1.0, min=0.1, max=20.0, step=0.01), io.Float.Input("scale_ratio", default=1.0, min=0.1, max=20.0, step=0.01),
io.Int.Input("scale_steps", default=-1, min=-1, max=1000, step=1), io.Int.Input("scale_steps", default=-1, min=-1, max=1000, step=1),
@@ -93,6 +95,8 @@ class SamplerEulerCFGpp(io.ComfyNode):
node_id="SamplerEulerCFGpp", node_id="SamplerEulerCFGpp",
display_name="SamplerEulerCFG++", display_name="SamplerEulerCFG++",
category="_for_testing", # "sampling/custom_sampling/samplers" category="_for_testing", # "sampling/custom_sampling/samplers"
description="Euler sampler variant using the CFG++ formulation, which modifies the denoising direction using unconditional predictions for improved guidance.",
short_description="Euler sampler using CFG++ guidance formulation.",
inputs=[ inputs=[
io.Combo.Input("version", options=["regular", "alternative"]), io.Combo.Input("version", options=["regular", "alternative"]),
], ],

View File

@@ -30,6 +30,8 @@ class AlignYourStepsScheduler(io.ComfyNode):
node_id="AlignYourStepsScheduler", node_id="AlignYourStepsScheduler",
search_aliases=["AYS scheduler"], search_aliases=["AYS scheduler"],
category="sampling/custom_sampling/schedulers", category="sampling/custom_sampling/schedulers",
description="Generates an optimized noise schedule using the Align Your Steps method with log-linear interpolation.",
short_description="Optimized noise schedule using Align Your Steps.",
inputs=[ inputs=[
io.Combo.Input("model_type", options=["SD1", "SDXL", "SVD"]), io.Combo.Input("model_type", options=["SD1", "SDXL", "SVD"]),
io.Int.Input("steps", default=10, min=1, max=10000), io.Int.Input("steps", default=10, min=1, max=10000),

View File

@@ -17,6 +17,8 @@ class APG(io.ComfyNode):
node_id="APG", node_id="APG",
display_name="Adaptive Projected Guidance", display_name="Adaptive Projected Guidance",
category="sampling/custom_sampling", category="sampling/custom_sampling",
description="Applies Adaptive Projected Guidance to a model, decomposing CFG guidance into parallel and orthogonal components with optional momentum and norm thresholding for improved sampling quality.",
short_description="Decomposes CFG guidance with projection and normalization.",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
io.Float.Input( io.Float.Input(

View File

@@ -26,6 +26,8 @@ class UNetSelfAttentionMultiply(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="UNetSelfAttentionMultiply", node_id="UNetSelfAttentionMultiply",
category="_for_testing/attention_experiments", category="_for_testing/attention_experiments",
description="Scales the query, key, value, and output weights of UNet self-attention layers by specified multipliers to experiment with attention behavior.",
short_description="Scale UNet self-attention Q/K/V/Out weights.",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01), io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01),
@@ -49,6 +51,8 @@ class UNetCrossAttentionMultiply(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="UNetCrossAttentionMultiply", node_id="UNetCrossAttentionMultiply",
category="_for_testing/attention_experiments", category="_for_testing/attention_experiments",
description="Scales the query, key, value, and output weights of UNet cross-attention layers by specified multipliers to experiment with text-to-image attention.",
short_description="Scale UNet cross-attention Q/K/V/Out weights.",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01), io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01),
@@ -73,6 +77,8 @@ class CLIPAttentionMultiply(io.ComfyNode):
node_id="CLIPAttentionMultiply", node_id="CLIPAttentionMultiply",
search_aliases=["clip attention scale", "text encoder attention"], search_aliases=["clip attention scale", "text encoder attention"],
category="_for_testing/attention_experiments", category="_for_testing/attention_experiments",
description="Scales the query, key, value, and output projection weights of CLIP text encoder self-attention layers by specified multipliers.",
short_description="Scale CLIP text encoder attention weights.",
inputs=[ inputs=[
io.Clip.Input("clip"), io.Clip.Input("clip"),
io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01), io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01),
@@ -107,6 +113,8 @@ class UNetTemporalAttentionMultiply(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="UNetTemporalAttentionMultiply", node_id="UNetTemporalAttentionMultiply",
category="_for_testing/attention_experiments", category="_for_testing/attention_experiments",
description="Scales the output weights of UNet temporal and structural attention layers independently, allowing fine-grained control over video model attention behavior.",
short_description="Scale UNet temporal and structural attention weights.",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
io.Float.Input("self_structural", default=1.0, min=0.0, max=10.0, step=0.01), io.Float.Input("self_structural", default=1.0, min=0.0, max=10.0, step=0.01),

View File

@@ -19,6 +19,8 @@ class EmptyLatentAudio(IO.ComfyNode):
node_id="EmptyLatentAudio", node_id="EmptyLatentAudio",
display_name="Empty Latent Audio", display_name="Empty Latent Audio",
category="latent/audio", category="latent/audio",
description="Creates an empty latent audio tensor with a specified duration and batch size for Stable Audio generation.",
short_description="Creates an empty latent audio tensor.",
inputs=[ inputs=[
IO.Float.Input("seconds", default=47.6, min=1.0, max=1000.0, step=0.1), IO.Float.Input("seconds", default=47.6, min=1.0, max=1000.0, step=0.1),
IO.Int.Input( IO.Int.Input(
@@ -43,6 +45,8 @@ class ConditioningStableAudio(IO.ComfyNode):
return IO.Schema( return IO.Schema(
node_id="ConditioningStableAudio", node_id="ConditioningStableAudio",
category="conditioning", category="conditioning",
description="Sets the start time and total duration on Stable Audio positive and negative conditioning.",
short_description="Sets timing parameters on Stable Audio conditioning.",
inputs=[ inputs=[
IO.Conditioning.Input("positive"), IO.Conditioning.Input("positive"),
IO.Conditioning.Input("negative"), IO.Conditioning.Input("negative"),
@@ -72,6 +76,8 @@ class VAEEncodeAudio(IO.ComfyNode):
search_aliases=["audio to latent"], search_aliases=["audio to latent"],
display_name="VAE Encode Audio", display_name="VAE Encode Audio",
category="latent/audio", category="latent/audio",
description="Encodes an audio waveform into a latent representation using a VAE, resampling if needed.",
short_description="Encodes audio into latent via VAE.",
inputs=[ inputs=[
IO.Audio.Input("audio"), IO.Audio.Input("audio"),
IO.Vae.Input("vae"), IO.Vae.Input("vae"),
@@ -115,6 +121,8 @@ class VAEDecodeAudio(IO.ComfyNode):
search_aliases=["latent to audio"], search_aliases=["latent to audio"],
display_name="VAE Decode Audio", display_name="VAE Decode Audio",
category="latent/audio", category="latent/audio",
description="Decodes a latent representation back into an audio waveform using a VAE.",
short_description="Decodes latent into audio via VAE.",
inputs=[ inputs=[
IO.Latent.Input("samples"), IO.Latent.Input("samples"),
IO.Vae.Input("vae"), IO.Vae.Input("vae"),
@@ -137,6 +145,8 @@ class VAEDecodeAudioTiled(IO.ComfyNode):
search_aliases=["latent to audio"], search_aliases=["latent to audio"],
display_name="VAE Decode Audio (Tiled)", display_name="VAE Decode Audio (Tiled)",
category="latent/audio", category="latent/audio",
description="Decodes a latent representation into audio using tiled VAE decoding to reduce memory usage.",
short_description="Tiled VAE decoding of latent into audio.",
inputs=[ inputs=[
IO.Latent.Input("samples"), IO.Latent.Input("samples"),
IO.Vae.Input("vae"), IO.Vae.Input("vae"),
@@ -159,6 +169,8 @@ class SaveAudio(IO.ComfyNode):
search_aliases=["export flac"], search_aliases=["export flac"],
display_name="Save Audio (FLAC)", display_name="Save Audio (FLAC)",
category="audio", category="audio",
description="Saves audio to disk in FLAC format with a configurable filename prefix.",
short_description="Saves audio to disk in FLAC format.",
inputs=[ inputs=[
IO.Audio.Input("audio"), IO.Audio.Input("audio"),
IO.String.Input("filename_prefix", default="audio/ComfyUI"), IO.String.Input("filename_prefix", default="audio/ComfyUI"),
@@ -184,6 +196,8 @@ class SaveAudioMP3(IO.ComfyNode):
search_aliases=["export mp3"], search_aliases=["export mp3"],
display_name="Save Audio (MP3)", display_name="Save Audio (MP3)",
category="audio", category="audio",
description="Saves audio to disk in MP3 format with configurable quality and filename prefix.",
short_description="Saves audio to disk in MP3 format.",
inputs=[ inputs=[
IO.Audio.Input("audio"), IO.Audio.Input("audio"),
IO.String.Input("filename_prefix", default="audio/ComfyUI"), IO.String.Input("filename_prefix", default="audio/ComfyUI"),
@@ -212,6 +226,8 @@ class SaveAudioOpus(IO.ComfyNode):
search_aliases=["export opus"], search_aliases=["export opus"],
display_name="Save Audio (Opus)", display_name="Save Audio (Opus)",
category="audio", category="audio",
description="Saves audio to disk in Opus format with configurable quality and filename prefix.",
short_description="Saves audio to disk in Opus format.",
inputs=[ inputs=[
IO.Audio.Input("audio"), IO.Audio.Input("audio"),
IO.String.Input("filename_prefix", default="audio/ComfyUI"), IO.String.Input("filename_prefix", default="audio/ComfyUI"),
@@ -240,6 +256,8 @@ class PreviewAudio(IO.ComfyNode):
search_aliases=["play audio"], search_aliases=["play audio"],
display_name="Preview Audio", display_name="Preview Audio",
category="audio", category="audio",
description="Plays back audio in the UI for previewing.",
short_description=None,
inputs=[ inputs=[
IO.Audio.Input("audio"), IO.Audio.Input("audio"),
], ],
@@ -300,6 +318,8 @@ class LoadAudio(IO.ComfyNode):
search_aliases=["import audio", "open audio", "audio file"], search_aliases=["import audio", "open audio", "audio file"],
display_name="Load Audio", display_name="Load Audio",
category="audio", category="audio",
description="Loads an audio or video file from disk and outputs the audio as a single Audio output.",
short_description="Loads an audio file from disk.",
inputs=[ inputs=[
IO.Combo.Input("audio", upload=IO.UploadType.audio, options=sorted(files)), IO.Combo.Input("audio", upload=IO.UploadType.audio, options=sorted(files)),
], ],
@@ -338,6 +358,7 @@ class RecordAudio(IO.ComfyNode):
search_aliases=["microphone input", "audio capture", "voice input"], search_aliases=["microphone input", "audio capture", "voice input"],
display_name="Record Audio", display_name="Record Audio",
category="audio", category="audio",
description="Records audio from a microphone input and outputs the captured audio.",
inputs=[ inputs=[
IO.Custom("AUDIO_RECORD").Input("audio"), IO.Custom("AUDIO_RECORD").Input("audio"),
], ],
@@ -363,6 +384,7 @@ class TrimAudioDuration(IO.ComfyNode):
search_aliases=["cut audio", "audio clip", "shorten audio"], search_aliases=["cut audio", "audio clip", "shorten audio"],
display_name="Trim Audio Duration", display_name="Trim Audio Duration",
description="Trim audio tensor into chosen time range.", description="Trim audio tensor into chosen time range.",
short_description=None,
category="audio", category="audio",
inputs=[ inputs=[
IO.Audio.Input("audio"), IO.Audio.Input("audio"),
@@ -416,6 +438,7 @@ class SplitAudioChannels(IO.ComfyNode):
search_aliases=["stereo to mono"], search_aliases=["stereo to mono"],
display_name="Split Audio Channels", display_name="Split Audio Channels",
description="Separates the audio into left and right channels.", description="Separates the audio into left and right channels.",
short_description=None,
category="audio", category="audio",
inputs=[ inputs=[
IO.Audio.Input("audio"), IO.Audio.Input("audio"),
@@ -448,6 +471,7 @@ class JoinAudioChannels(IO.ComfyNode):
node_id="JoinAudioChannels", node_id="JoinAudioChannels",
display_name="Join Audio Channels", display_name="Join Audio Channels",
description="Joins left and right mono audio channels into a stereo audio.", description="Joins left and right mono audio channels into a stereo audio.",
short_description=None,
category="audio", category="audio",
inputs=[ inputs=[
IO.Audio.Input("audio_left"), IO.Audio.Input("audio_left"),
@@ -517,6 +541,7 @@ class AudioConcat(IO.ComfyNode):
search_aliases=["join audio", "combine audio", "append audio"], search_aliases=["join audio", "combine audio", "append audio"],
display_name="Audio Concat", display_name="Audio Concat",
description="Concatenates the audio1 to audio2 in the specified direction.", description="Concatenates the audio1 to audio2 in the specified direction.",
short_description=None,
category="audio", category="audio",
inputs=[ inputs=[
IO.Audio.Input("audio1"), IO.Audio.Input("audio1"),
@@ -565,6 +590,7 @@ class AudioMerge(IO.ComfyNode):
search_aliases=["mix audio", "overlay audio", "layer audio"], search_aliases=["mix audio", "overlay audio", "layer audio"],
display_name="Audio Merge", display_name="Audio Merge",
description="Combine two audio tracks by overlaying their waveforms.", description="Combine two audio tracks by overlaying their waveforms.",
short_description=None,
category="audio", category="audio",
inputs=[ inputs=[
IO.Audio.Input("audio1"), IO.Audio.Input("audio1"),
@@ -626,6 +652,8 @@ class AudioAdjustVolume(IO.ComfyNode):
search_aliases=["audio gain", "loudness", "audio level"], search_aliases=["audio gain", "loudness", "audio level"],
display_name="Audio Adjust Volume", display_name="Audio Adjust Volume",
category="audio", category="audio",
description="Adjusts audio volume by a specified number of decibels.",
short_description=None,
inputs=[ inputs=[
IO.Audio.Input("audio"), IO.Audio.Input("audio"),
IO.Int.Input( IO.Int.Input(
@@ -662,6 +690,8 @@ class EmptyAudio(IO.ComfyNode):
search_aliases=["blank audio"], search_aliases=["blank audio"],
display_name="Empty Audio", display_name="Empty Audio",
category="audio", category="audio",
description="Creates a silent audio clip with configurable duration, sample rate, and channel count.",
short_description="Creates a silent audio clip.",
inputs=[ inputs=[
IO.Float.Input( IO.Float.Input(
"duration", "duration",

View File

@@ -11,6 +11,8 @@ class AudioEncoderLoader(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="AudioEncoderLoader", node_id="AudioEncoderLoader",
category="loaders", category="loaders",
description="Loads an audio encoder model from a checkpoint file for encoding audio into embeddings.",
short_description="Loads an audio encoder model from a checkpoint.",
inputs=[ inputs=[
io.Combo.Input( io.Combo.Input(
"audio_encoder_name", "audio_encoder_name",
@@ -36,6 +38,8 @@ class AudioEncoderEncode(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="AudioEncoderEncode", node_id="AudioEncoderEncode",
category="conditioning", category="conditioning",
description="Encodes audio into embeddings using a loaded audio encoder model.",
short_description=None,
inputs=[ inputs=[
io.AudioEncoder.Input("audio_encoder"), io.AudioEncoder.Input("audio_encoder"),
io.Audio.Input("audio"), io.Audio.Input("audio"),

View File

@@ -154,6 +154,8 @@ class WanCameraEmbedding(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="WanCameraEmbedding", node_id="WanCameraEmbedding",
category="camera", category="camera",
description="Generates Plucker camera embeddings from a selected camera motion trajectory for Wan video generation.",
short_description="Generates camera embeddings for Wan video generation.",
inputs=[ inputs=[
io.Combo.Input( io.Combo.Input(
"camera_pose", "camera_pose",

View File

@@ -12,6 +12,8 @@ class Canny(io.ComfyNode):
node_id="Canny", node_id="Canny",
search_aliases=["edge detection", "outline", "contour detection", "line art"], search_aliases=["edge detection", "outline", "contour detection", "line art"],
category="image/preprocessors", category="image/preprocessors",
description="Detects edges in an image using the Canny edge detection algorithm with configurable low and high thresholds.",
short_description="Canny edge detection on images.",
inputs=[ inputs=[
io.Image.Input("image"), io.Image.Input("image"),
io.Float.Input("low_threshold", default=0.4, min=0.01, max=0.99, step=0.01), io.Float.Input("low_threshold", default=0.4, min=0.01, max=0.99, step=0.01),

View File

@@ -27,6 +27,8 @@ class CFGZeroStar(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="CFGZeroStar", node_id="CFGZeroStar",
category="advanced/guidance", category="advanced/guidance",
description="Applies CFG-Zero* post-CFG correction that computes an optimal scaling factor between conditional and unconditional predictions to reduce CFG artifacts.",
short_description="CFG-Zero* guidance correction to reduce artifacts.",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
], ],
@@ -54,6 +56,8 @@ class CFGNorm(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="CFGNorm", node_id="CFGNorm",
category="advanced/guidance", category="advanced/guidance",
description="Constrains the CFG-guided prediction norm to not exceed the conditional prediction norm, helping to prevent oversaturation at high CFG scales.",
short_description="Constrain CFG output norm to conditional prediction norm.",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
io.Float.Input("strength", default=1.0, min=0.0, max=100.0, step=0.01), io.Float.Input("strength", default=1.0, min=0.0, max=100.0, step=0.01),

View File

@@ -14,6 +14,8 @@ class EmptyChromaRadianceLatentImage(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="EmptyChromaRadianceLatentImage", node_id="EmptyChromaRadianceLatentImage",
category="latent/chroma_radiance", category="latent/chroma_radiance",
description="Creates an empty Chroma Radiance latent image tensor with the specified width, height, and batch size.",
short_description="Creates an empty Chroma Radiance latent image.",
inputs=[ inputs=[
io.Int.Input(id="width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16), io.Int.Input(id="width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
io.Int.Input(id="height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16), io.Int.Input(id="height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
@@ -35,6 +37,7 @@ class ChromaRadianceOptions(io.ComfyNode):
node_id="ChromaRadianceOptions", node_id="ChromaRadianceOptions",
category="model_patches/chroma_radiance", category="model_patches/chroma_radiance",
description="Allows setting advanced options for the Chroma Radiance model.", description="Allows setting advanced options for the Chroma Radiance model.",
short_description=None,
inputs=[ inputs=[
io.Model.Input(id="model"), io.Model.Input(id="model"),
io.Boolean.Input( io.Boolean.Input(

View File

@@ -10,6 +10,8 @@ class CLIPTextEncodeSDXLRefiner(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="CLIPTextEncodeSDXLRefiner", node_id="CLIPTextEncodeSDXLRefiner",
category="advanced/conditioning", category="advanced/conditioning",
description="Encodes text for SDXL refiner models with aesthetic score and resolution conditioning parameters.",
short_description="Encodes text for SDXL refiner models.",
inputs=[ inputs=[
io.Float.Input("ascore", default=6.0, min=0.0, max=1000.0, step=0.01), io.Float.Input("ascore", default=6.0, min=0.0, max=1000.0, step=0.01),
io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION), io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION),
@@ -31,6 +33,8 @@ class CLIPTextEncodeSDXL(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="CLIPTextEncodeSDXL", node_id="CLIPTextEncodeSDXL",
category="advanced/conditioning", category="advanced/conditioning",
description="Encodes separate G and L text prompts for SDXL models with resolution and crop conditioning parameters.",
short_description="Encodes dual text prompts for SDXL models.",
inputs=[ inputs=[
io.Clip.Input("clip"), io.Clip.Input("clip"),
io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION), io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION),

View File

@@ -10,6 +10,7 @@ class ColorToRGBInt(io.ComfyNode):
display_name="Color to RGB Int", display_name="Color to RGB Int",
category="utils", category="utils",
description="Convert a color to a RGB integer value.", description="Convert a color to a RGB integer value.",
short_description=None,
inputs=[ inputs=[
io.Color.Input("color"), io.Color.Input("color"),
], ],

View File

@@ -112,6 +112,8 @@ class PorterDuffImageComposite(io.ComfyNode):
search_aliases=["alpha composite", "blend modes", "layer blend", "transparency blend"], search_aliases=["alpha composite", "blend modes", "layer blend", "transparency blend"],
display_name="Porter-Duff Image Composite", display_name="Porter-Duff Image Composite",
category="mask/compositing", category="mask/compositing",
description="Composites two images with alpha masks using Porter-Duff blend modes.",
short_description="",
inputs=[ inputs=[
io.Image.Input("source"), io.Image.Input("source"),
io.Mask.Input("source_alpha"), io.Mask.Input("source_alpha"),
@@ -169,6 +171,8 @@ class SplitImageWithAlpha(io.ComfyNode):
search_aliases=["extract alpha", "separate transparency", "remove alpha"], search_aliases=["extract alpha", "separate transparency", "remove alpha"],
display_name="Split Image with Alpha", display_name="Split Image with Alpha",
category="mask/compositing", category="mask/compositing",
description="Separates an RGBA image into its RGB color channels and an alpha transparency mask.",
short_description="Split RGBA image into RGB and alpha mask.",
inputs=[ inputs=[
io.Image.Input("image"), io.Image.Input("image"),
], ],
@@ -193,6 +197,8 @@ class JoinImageWithAlpha(io.ComfyNode):
search_aliases=["add transparency", "apply alpha", "composite alpha", "RGBA"], search_aliases=["add transparency", "apply alpha", "composite alpha", "RGBA"],
display_name="Join Image with Alpha", display_name="Join Image with Alpha",
category="mask/compositing", category="mask/compositing",
description="Combines an RGB image with an alpha mask to produce an RGBA image with transparency.",
short_description="Combine RGB image and alpha into RGBA.",
inputs=[ inputs=[
io.Image.Input("image"), io.Image.Input("image"),
io.Mask.Input("alpha"), io.Mask.Input("alpha"),

View File

@@ -9,6 +9,8 @@ class CLIPTextEncodeControlnet(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="CLIPTextEncodeControlnet", node_id="CLIPTextEncodeControlnet",
category="_for_testing/conditioning", category="_for_testing/conditioning",
description="Encodes text with CLIP and attaches the result as cross-attention controlnet conditioning to existing conditioning data.",
short_description="CLIP text encode for controlnet cross-attention conditioning.",
inputs=[ inputs=[
io.Clip.Input("clip"), io.Clip.Input("clip"),
io.Conditioning.Input("conditioning"), io.Conditioning.Input("conditioning"),
@@ -36,6 +38,8 @@ class T5TokenizerOptions(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="T5TokenizerOptions", node_id="T5TokenizerOptions",
category="_for_testing/conditioning", category="_for_testing/conditioning",
description="Configures minimum padding and length options for T5-family tokenizers used in CLIP text encoding.",
short_description="Set T5 tokenizer padding and length options.",
inputs=[ inputs=[
io.Clip.Input("clip"), io.Clip.Input("clip"),
io.Int.Input("min_padding", default=0, min=0, max=10000, step=1), io.Int.Input("min_padding", default=0, min=0, max=10000, step=1),

View File

@@ -12,6 +12,7 @@ class ContextWindowsManualNode(io.ComfyNode):
display_name="Context Windows (Manual)", display_name="Context Windows (Manual)",
category="context", category="context",
description="Manually set context windows.", description="Manually set context windows.",
short_description=None,
inputs=[ inputs=[
io.Model.Input("model", tooltip="The model to apply context windows to during sampling."), io.Model.Input("model", tooltip="The model to apply context windows to during sampling."),
io.Int.Input("context_length", min=1, default=16, tooltip="The length of the context window."), io.Int.Input("context_length", min=1, default=16, tooltip="The length of the context window."),
@@ -65,6 +66,7 @@ class WanContextWindowsManualNode(ContextWindowsManualNode):
schema.node_id = "WanContextWindowsManual" schema.node_id = "WanContextWindowsManual"
schema.display_name = "WAN Context Windows (Manual)" schema.display_name = "WAN Context Windows (Manual)"
schema.description = "Manually set context windows for WAN-like models (dim=2)." schema.description = "Manually set context windows for WAN-like models (dim=2)."
schema.short_description = None
schema.inputs = [ schema.inputs = [
io.Model.Input("model", tooltip="The model to apply context windows to during sampling."), io.Model.Input("model", tooltip="The model to apply context windows to during sampling."),
io.Int.Input("context_length", min=1, max=nodes.MAX_RESOLUTION, step=4, default=81, tooltip="The length of the context window."), io.Int.Input("context_length", min=1, max=nodes.MAX_RESOLUTION, step=4, default=81, tooltip="The length of the context window."),

View File

@@ -10,6 +10,8 @@ class SetUnionControlNetType(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="SetUnionControlNetType", node_id="SetUnionControlNetType",
category="conditioning/controlnet", category="conditioning/controlnet",
description="Sets the control type for a Union ControlNet, selecting which conditioning mode to use.",
short_description="Select control mode for Union ControlNet.",
inputs=[ inputs=[
io.ControlNet.Input("control_net"), io.ControlNet.Input("control_net"),
io.Combo.Input("type", options=["auto"] + list(UNION_CONTROLNET_TYPES.keys())), io.Combo.Input("type", options=["auto"] + list(UNION_CONTROLNET_TYPES.keys())),
@@ -40,6 +42,8 @@ class ControlNetInpaintingAliMamaApply(io.ComfyNode):
node_id="ControlNetInpaintingAliMamaApply", node_id="ControlNetInpaintingAliMamaApply",
search_aliases=["masked controlnet"], search_aliases=["masked controlnet"],
category="conditioning/controlnet", category="conditioning/controlnet",
description="Applies an AliMama inpainting ControlNet to positive and negative conditioning using an image and mask with VAE encoding.",
short_description="Applies AliMama inpainting ControlNet with mask.",
inputs=[ inputs=[
io.Conditioning.Input("positive"), io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"), io.Conditioning.Input("negative"),

View File

@@ -14,6 +14,7 @@ class EmptyCosmosLatentVideo(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="EmptyCosmosLatentVideo", node_id="EmptyCosmosLatentVideo",
category="latent/video", category="latent/video",
description="Creates an empty latent tensor sized for Cosmos video generation.",
inputs=[ inputs=[
io.Int.Input("width", default=1280, min=16, max=nodes.MAX_RESOLUTION, step=16), io.Int.Input("width", default=1280, min=16, max=nodes.MAX_RESOLUTION, step=16),
io.Int.Input("height", default=704, min=16, max=nodes.MAX_RESOLUTION, step=16), io.Int.Input("height", default=704, min=16, max=nodes.MAX_RESOLUTION, step=16),
@@ -46,6 +47,8 @@ class CosmosImageToVideoLatent(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="CosmosImageToVideoLatent", node_id="CosmosImageToVideoLatent",
category="conditioning/inpaint", category="conditioning/inpaint",
description="Creates an inpainting video latent for Cosmos by encoding optional start and end images with a noise mask.",
short_description="Cosmos inpainting video latent from start/end images.",
inputs=[ inputs=[
io.Vae.Input("vae"), io.Vae.Input("vae"),
io.Int.Input("width", default=1280, min=16, max=nodes.MAX_RESOLUTION, step=16), io.Int.Input("width", default=1280, min=16, max=nodes.MAX_RESOLUTION, step=16),
@@ -89,6 +92,8 @@ class CosmosPredict2ImageToVideoLatent(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="CosmosPredict2ImageToVideoLatent", node_id="CosmosPredict2ImageToVideoLatent",
category="conditioning/inpaint", category="conditioning/inpaint",
description="Creates an inpainting video latent for Cosmos Predict2 by encoding optional start and end images with Wan latent format processing.",
short_description="Cosmos Predict2 inpainting video latent from images.",
inputs=[ inputs=[
io.Vae.Input("vae"), io.Vae.Input("vae"),
io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16), io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16),

View File

@@ -18,6 +18,8 @@ class BasicScheduler(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="BasicScheduler", node_id="BasicScheduler",
category="sampling/custom_sampling/schedulers", category="sampling/custom_sampling/schedulers",
description="Generates a sigma schedule from a model using a selected scheduler algorithm, step count, and denoise strength.",
short_description="Generate sigma schedule from model and scheduler.",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
io.Combo.Input("scheduler", options=comfy.samplers.SCHEDULER_NAMES), io.Combo.Input("scheduler", options=comfy.samplers.SCHEDULER_NAMES),
@@ -48,6 +50,8 @@ class KarrasScheduler(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="KarrasScheduler", node_id="KarrasScheduler",
category="sampling/custom_sampling/schedulers", category="sampling/custom_sampling/schedulers",
description="Generates a sigma schedule using the Karras noise schedule with configurable sigma range and rho parameter.",
short_description="Generate sigmas using Karras noise schedule.",
inputs=[ inputs=[
io.Int.Input("steps", default=20, min=1, max=10000), io.Int.Input("steps", default=20, min=1, max=10000),
io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False), io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False),
@@ -70,6 +74,8 @@ class ExponentialScheduler(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="ExponentialScheduler", node_id="ExponentialScheduler",
category="sampling/custom_sampling/schedulers", category="sampling/custom_sampling/schedulers",
description="Generates a sigma schedule using an exponential noise schedule with configurable sigma range.",
short_description="Generate sigmas using exponential noise schedule.",
inputs=[ inputs=[
io.Int.Input("steps", default=20, min=1, max=10000), io.Int.Input("steps", default=20, min=1, max=10000),
io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False), io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False),
@@ -91,6 +97,8 @@ class PolyexponentialScheduler(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="PolyexponentialScheduler", node_id="PolyexponentialScheduler",
category="sampling/custom_sampling/schedulers", category="sampling/custom_sampling/schedulers",
description="Generates a sigma schedule using a polyexponential noise schedule with configurable sigma range and rho parameter.",
short_description="Generate sigmas using polyexponential noise schedule.",
inputs=[ inputs=[
io.Int.Input("steps", default=20, min=1, max=10000), io.Int.Input("steps", default=20, min=1, max=10000),
io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False), io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False),
@@ -113,6 +121,8 @@ class LaplaceScheduler(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="LaplaceScheduler", node_id="LaplaceScheduler",
category="sampling/custom_sampling/schedulers", category="sampling/custom_sampling/schedulers",
description="Generates a sigma schedule using a Laplace distribution-based noise schedule with configurable mu and beta parameters.",
short_description="Generate sigmas using Laplace distribution schedule.",
inputs=[ inputs=[
io.Int.Input("steps", default=20, min=1, max=10000), io.Int.Input("steps", default=20, min=1, max=10000),
io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False), io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False),
@@ -137,6 +147,8 @@ class SDTurboScheduler(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="SDTurboScheduler", node_id="SDTurboScheduler",
category="sampling/custom_sampling/schedulers", category="sampling/custom_sampling/schedulers",
description="Generates a sigma schedule optimized for SD Turbo models with very few steps and adjustable denoise strength.",
short_description="Generate sigma schedule for SD Turbo models.",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
io.Int.Input("steps", default=1, min=1, max=10), io.Int.Input("steps", default=1, min=1, max=10),
@@ -161,6 +173,8 @@ class BetaSamplingScheduler(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="BetaSamplingScheduler", node_id="BetaSamplingScheduler",
category="sampling/custom_sampling/schedulers", category="sampling/custom_sampling/schedulers",
description="Generates a sigma schedule using a beta distribution with configurable alpha and beta shape parameters.",
short_description="Generate sigmas using beta distribution schedule.",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
io.Int.Input("steps", default=20, min=1, max=10000), io.Int.Input("steps", default=20, min=1, max=10000),
@@ -183,6 +197,8 @@ class VPScheduler(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="VPScheduler", node_id="VPScheduler",
category="sampling/custom_sampling/schedulers", category="sampling/custom_sampling/schedulers",
description="Generates a sigma schedule using the Variance Preserving (VP) SDE formulation with configurable beta and epsilon parameters.",
short_description="Generate sigmas using VP SDE schedule.",
inputs=[ inputs=[
io.Int.Input("steps", default=20, min=1, max=10000), io.Int.Input("steps", default=20, min=1, max=10000),
io.Float.Input("beta_d", default=19.9, min=0.0, max=5000.0, step=0.01, round=False), #TODO: fix default values io.Float.Input("beta_d", default=19.9, min=0.0, max=5000.0, step=0.01, round=False), #TODO: fix default values
@@ -205,6 +221,8 @@ class SplitSigmas(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="SplitSigmas", node_id="SplitSigmas",
category="sampling/custom_sampling/sigmas", category="sampling/custom_sampling/sigmas",
description="Splits a sigma sequence into high and low portions at a specified step index for multi-pass sampling.",
short_description="Split sigmas into high and low at a step.",
inputs=[ inputs=[
io.Sigmas.Input("sigmas"), io.Sigmas.Input("sigmas"),
io.Int.Input("step", default=0, min=0, max=10000), io.Int.Input("step", default=0, min=0, max=10000),
@@ -229,6 +247,8 @@ class SplitSigmasDenoise(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="SplitSigmasDenoise", node_id="SplitSigmasDenoise",
category="sampling/custom_sampling/sigmas", category="sampling/custom_sampling/sigmas",
description="Splits a sigma sequence into high and low portions based on a denoise ratio for multi-pass sampling workflows.",
short_description="Split sigmas by denoise ratio.",
inputs=[ inputs=[
io.Sigmas.Input("sigmas"), io.Sigmas.Input("sigmas"),
io.Float.Input("denoise", default=1.0, min=0.0, max=1.0, step=0.01), io.Float.Input("denoise", default=1.0, min=0.0, max=1.0, step=0.01),
@@ -255,6 +275,8 @@ class FlipSigmas(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="FlipSigmas", node_id="FlipSigmas",
category="sampling/custom_sampling/sigmas", category="sampling/custom_sampling/sigmas",
description="Reverses the order of a sigma sequence, useful for converting between ascending and descending noise schedules.",
short_description="Reverse the order of a sigma sequence.",
inputs=[io.Sigmas.Input("sigmas")], inputs=[io.Sigmas.Input("sigmas")],
outputs=[io.Sigmas.Output()] outputs=[io.Sigmas.Output()]
) )
@@ -277,6 +299,8 @@ class SetFirstSigma(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="SetFirstSigma", node_id="SetFirstSigma",
category="sampling/custom_sampling/sigmas", category="sampling/custom_sampling/sigmas",
description="Overrides the first sigma value in a sequence with a custom value, allowing manual control of the initial noise level.",
short_description="Override the first sigma value in a sequence.",
inputs=[ inputs=[
io.Sigmas.Input("sigmas"), io.Sigmas.Input("sigmas"),
io.Float.Input("sigma", default=136.0, min=0.0, max=20000.0, step=0.001, round=False), io.Float.Input("sigma", default=136.0, min=0.0, max=20000.0, step=0.001, round=False),
@@ -299,6 +323,8 @@ class ExtendIntermediateSigmas(io.ComfyNode):
node_id="ExtendIntermediateSigmas", node_id="ExtendIntermediateSigmas",
search_aliases=["interpolate sigmas"], search_aliases=["interpolate sigmas"],
category="sampling/custom_sampling/sigmas", category="sampling/custom_sampling/sigmas",
description="Interpolates additional intermediate sigma values between existing steps using selectable spacing within a specified sigma range.",
short_description="Interpolate additional sigma steps between existing values.",
inputs=[ inputs=[
io.Sigmas.Input("sigmas"), io.Sigmas.Input("sigmas"),
io.Int.Input("steps", default=2, min=1, max=100), io.Int.Input("steps", default=2, min=1, max=100),
@@ -352,6 +378,8 @@ class SamplingPercentToSigma(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="SamplingPercentToSigma", node_id="SamplingPercentToSigma",
category="sampling/custom_sampling/sigmas", category="sampling/custom_sampling/sigmas",
description="Converts a sampling percentage (0.0 to 1.0) to the corresponding sigma value using a model's noise schedule.",
short_description="Convert sampling percentage to sigma value.",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
io.Float.Input("sampling_percent", default=0.0, min=0.0, max=1.0, step=0.0001), io.Float.Input("sampling_percent", default=0.0, min=0.0, max=1.0, step=0.0001),
@@ -380,6 +408,8 @@ class KSamplerSelect(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="KSamplerSelect", node_id="KSamplerSelect",
category="sampling/custom_sampling/samplers", category="sampling/custom_sampling/samplers",
description="Selects a sampler algorithm by name from the list of available samplers and outputs the sampler object.",
short_description="Select a sampler algorithm by name.",
inputs=[io.Combo.Input("sampler_name", options=comfy.samplers.SAMPLER_NAMES)], inputs=[io.Combo.Input("sampler_name", options=comfy.samplers.SAMPLER_NAMES)],
outputs=[io.Sampler.Output()] outputs=[io.Sampler.Output()]
) )
@@ -397,6 +427,8 @@ class SamplerDPMPP_3M_SDE(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="SamplerDPMPP_3M_SDE", node_id="SamplerDPMPP_3M_SDE",
category="sampling/custom_sampling/samplers", category="sampling/custom_sampling/samplers",
description="Creates a DPM++ 3M SDE sampler with configurable eta, noise scale, and GPU or CPU noise generation.",
short_description="Create a DPM++ 3M SDE sampler.",
inputs=[ inputs=[
io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False), io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False), io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
@@ -422,6 +454,8 @@ class SamplerDPMPP_2M_SDE(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="SamplerDPMPP_2M_SDE", node_id="SamplerDPMPP_2M_SDE",
category="sampling/custom_sampling/samplers", category="sampling/custom_sampling/samplers",
description="Creates a DPM++ 2M SDE sampler with configurable solver type, eta, noise scale, and noise device.",
short_description="Create a DPM++ 2M SDE sampler.",
inputs=[ inputs=[
io.Combo.Input("solver_type", options=['midpoint', 'heun']), io.Combo.Input("solver_type", options=['midpoint', 'heun']),
io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False), io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
@@ -449,6 +483,8 @@ class SamplerDPMPP_SDE(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="SamplerDPMPP_SDE", node_id="SamplerDPMPP_SDE",
category="sampling/custom_sampling/samplers", category="sampling/custom_sampling/samplers",
description="Creates a DPM++ SDE sampler with configurable eta, noise scale, r parameter, and noise device.",
short_description="Create a DPM++ SDE sampler.",
inputs=[ inputs=[
io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False), io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False), io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
@@ -475,6 +511,8 @@ class SamplerDPMPP_2S_Ancestral(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="SamplerDPMPP_2S_Ancestral", node_id="SamplerDPMPP_2S_Ancestral",
category="sampling/custom_sampling/samplers", category="sampling/custom_sampling/samplers",
description="Creates a DPM++ 2S Ancestral sampler with configurable eta and noise scale parameters.",
short_description="Create a DPM++ 2S Ancestral sampler.",
inputs=[ inputs=[
io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False), io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False), io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
@@ -495,6 +533,8 @@ class SamplerEulerAncestral(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="SamplerEulerAncestral", node_id="SamplerEulerAncestral",
category="sampling/custom_sampling/samplers", category="sampling/custom_sampling/samplers",
description="Creates an Euler Ancestral sampler with configurable eta and noise scale for stochastic sampling.",
short_description="Create an Euler Ancestral stochastic sampler.",
inputs=[ inputs=[
io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False), io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False), io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
@@ -516,6 +556,8 @@ class SamplerEulerAncestralCFGPP(io.ComfyNode):
node_id="SamplerEulerAncestralCFGPP", node_id="SamplerEulerAncestralCFGPP",
display_name="SamplerEulerAncestralCFG++", display_name="SamplerEulerAncestralCFG++",
category="sampling/custom_sampling/samplers", category="sampling/custom_sampling/samplers",
description="Creates an Euler Ancestral CFG++ sampler that applies classifier-free guidance with improved stability.",
short_description="Create an Euler Ancestral CFG++ sampler.",
inputs=[ inputs=[
io.Float.Input("eta", default=1.0, min=0.0, max=1.0, step=0.01, round=False), io.Float.Input("eta", default=1.0, min=0.0, max=1.0, step=0.01, round=False),
io.Float.Input("s_noise", default=1.0, min=0.0, max=10.0, step=0.01, round=False), io.Float.Input("s_noise", default=1.0, min=0.0, max=10.0, step=0.01, round=False),
@@ -538,6 +580,8 @@ class SamplerLMS(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="SamplerLMS", node_id="SamplerLMS",
category="sampling/custom_sampling/samplers", category="sampling/custom_sampling/samplers",
description="Creates a Linear Multi-Step (LMS) sampler with a configurable order parameter.",
short_description="Create a Linear Multi-Step (LMS) sampler.",
inputs=[io.Int.Input("order", default=4, min=1, max=100)], inputs=[io.Int.Input("order", default=4, min=1, max=100)],
outputs=[io.Sampler.Output()] outputs=[io.Sampler.Output()]
) )
@@ -555,6 +599,8 @@ class SamplerDPMAdaptative(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="SamplerDPMAdaptative", node_id="SamplerDPMAdaptative",
category="sampling/custom_sampling/samplers", category="sampling/custom_sampling/samplers",
description="Creates a DPM Adaptive sampler with configurable order, tolerances, PID coefficients, and stochastic noise parameters for adaptive step-size sampling.",
short_description="Create a DPM Adaptive step-size sampler.",
inputs=[ inputs=[
io.Int.Input("order", default=3, min=2, max=3), io.Int.Input("order", default=3, min=2, max=3),
io.Float.Input("rtol", default=0.05, min=0.0, max=100.0, step=0.01, round=False), io.Float.Input("rtol", default=0.05, min=0.0, max=100.0, step=0.01, round=False),
@@ -586,6 +632,8 @@ class SamplerER_SDE(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="SamplerER_SDE", node_id="SamplerER_SDE",
category="sampling/custom_sampling/samplers", category="sampling/custom_sampling/samplers",
description="Creates an ER-SDE sampler supporting ER-SDE, reverse-time SDE, and ODE solver types with configurable stochastic strength and staging.",
short_description="Create an ER-SDE, reverse-time SDE, or ODE sampler.",
inputs=[ inputs=[
io.Combo.Input("solver_type", options=["ER-SDE", "Reverse-time SDE", "ODE"]), io.Combo.Input("solver_type", options=["ER-SDE", "Reverse-time SDE", "ODE"]),
io.Int.Input("max_stage", default=3, min=1, max=3), io.Int.Input("max_stage", default=3, min=1, max=3),
@@ -624,6 +672,8 @@ class SamplerSASolver(io.ComfyNode):
node_id="SamplerSASolver", node_id="SamplerSASolver",
search_aliases=["sde"], search_aliases=["sde"],
category="sampling/custom_sampling/samplers", category="sampling/custom_sampling/samplers",
description="Creates an SA-Solver sampler with configurable predictor/corrector orders, SDE region, and PECE mode for high-order diffusion sampling.",
short_description="Create an SA-Solver high-order diffusion sampler.",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
io.Float.Input("eta", default=1.0, min=0.0, max=10.0, step=0.01, round=False), io.Float.Input("eta", default=1.0, min=0.0, max=10.0, step=0.01, round=False),
@@ -684,7 +734,8 @@ class SamplerSEEDS2(io.ComfyNode):
"- solver_type=phi_2, r=1.0, eta=0.0\n\n" "- solver_type=phi_2, r=1.0, eta=0.0\n\n"
"exp_heun_2_x0_sde\n" "exp_heun_2_x0_sde\n"
"- solver_type=phi_2, r=1.0, eta=1.0, s_noise=1.0" "- solver_type=phi_2, r=1.0, eta=1.0, s_noise=1.0"
) ),
short_description="SEEDS2 sampler with configurable solver and SDE settings.",
) )
@classmethod @classmethod
@@ -728,6 +779,8 @@ class SamplerCustom(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="SamplerCustom", node_id="SamplerCustom",
category="sampling/custom_sampling", category="sampling/custom_sampling",
description="Runs a complete custom sampling pass by combining a model, sampler, sigmas, and conditioning with optional noise injection.",
short_description="Run custom sampling with manual sampler and sigmas.",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
io.Boolean.Input("add_noise", default=True), io.Boolean.Input("add_noise", default=True),
@@ -794,6 +847,8 @@ class BasicGuider(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="BasicGuider", node_id="BasicGuider",
category="sampling/custom_sampling/guiders", category="sampling/custom_sampling/guiders",
description="Creates a basic guider that applies a single conditioning input to guide the diffusion model without classifier-free guidance.",
short_description="Create a single-conditioning guider without CFG.",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
io.Conditioning.Input("conditioning"), io.Conditioning.Input("conditioning"),
@@ -815,6 +870,8 @@ class CFGGuider(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="CFGGuider", node_id="CFGGuider",
category="sampling/custom_sampling/guiders", category="sampling/custom_sampling/guiders",
description="Creates a classifier-free guidance guider that combines positive and negative conditioning with an adjustable CFG scale.",
short_description="Create a CFG guider with positive/negative conditioning.",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
io.Conditioning.Input("positive"), io.Conditioning.Input("positive"),
@@ -869,6 +926,8 @@ class DualCFGGuider(io.ComfyNode):
node_id="DualCFGGuider", node_id="DualCFGGuider",
search_aliases=["dual prompt guidance"], search_aliases=["dual prompt guidance"],
category="sampling/custom_sampling/guiders", category="sampling/custom_sampling/guiders",
description="Creates a dual classifier-free guidance guider that blends two conditioning inputs against a negative with independent CFG scales and regular or nested styles.",
short_description="Create a dual CFG guider with two conditionings.",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
io.Conditioning.Input("cond1"), io.Conditioning.Input("cond1"),
@@ -897,6 +956,8 @@ class DisableNoise(io.ComfyNode):
node_id="DisableNoise", node_id="DisableNoise",
search_aliases=["zero noise"], search_aliases=["zero noise"],
category="sampling/custom_sampling/noise", category="sampling/custom_sampling/noise",
description="Produces a zero-noise source that disables noise injection, useful for deterministic sampling or img2img without added noise.",
short_description="Produce zero noise to disable noise injection.",
inputs=[], inputs=[],
outputs=[io.Noise.Output()] outputs=[io.Noise.Output()]
) )
@@ -914,6 +975,8 @@ class RandomNoise(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="RandomNoise", node_id="RandomNoise",
category="sampling/custom_sampling/noise", category="sampling/custom_sampling/noise",
description="Produces a random noise source from a seed value for use in custom sampling workflows.",
short_description="Produce seeded random noise for sampling.",
inputs=[io.Int.Input("noise_seed", default=0, min=0, max=0xffffffffffffffff, control_after_generate=True)], inputs=[io.Int.Input("noise_seed", default=0, min=0, max=0xffffffffffffffff, control_after_generate=True)],
outputs=[io.Noise.Output()] outputs=[io.Noise.Output()]
) )
@@ -931,6 +994,8 @@ class SamplerCustomAdvanced(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="SamplerCustomAdvanced", node_id="SamplerCustomAdvanced",
category="sampling/custom_sampling", category="sampling/custom_sampling",
description="Runs an advanced custom sampling pass using separate noise, guider, sampler, and sigmas inputs for maximum control over the diffusion process.",
short_description="Run advanced custom sampling with separate components.",
inputs=[ inputs=[
io.Noise.Input("noise"), io.Noise.Input("noise"),
io.Guider.Input("guider"), io.Guider.Input("guider"),
@@ -985,6 +1050,8 @@ class AddNoise(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="AddNoise", node_id="AddNoise",
category="_for_testing/custom_sampling/noise", category="_for_testing/custom_sampling/noise",
description="Adds scaled noise to a latent image using the model's noise schedule and sigma values for manual noise injection.",
short_description="Add scaled noise to a latent image.",
is_experimental=True, is_experimental=True,
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
@@ -1035,6 +1102,8 @@ class ManualSigmas(io.ComfyNode):
node_id="ManualSigmas", node_id="ManualSigmas",
search_aliases=["custom noise schedule", "define sigmas"], search_aliases=["custom noise schedule", "define sigmas"],
category="_for_testing/custom_sampling", category="_for_testing/custom_sampling",
description="Defines a custom sigma sequence by manually entering comma-separated numeric values as a text string.",
short_description="Define custom sigmas from comma-separated values.",
is_experimental=True, is_experimental=True,
inputs=[ inputs=[
io.String.Input("sigmas", default="1, 0.5", multiline=False) io.String.Input("sigmas", default="1, 0.5", multiline=False)

View File

@@ -49,6 +49,8 @@ class LoadImageDataSetFromFolderNode(io.ComfyNode):
node_id="LoadImageDataSetFromFolder", node_id="LoadImageDataSetFromFolder",
display_name="Load Image Dataset from Folder", display_name="Load Image Dataset from Folder",
category="dataset", category="dataset",
description="Loads all images from a selected input subfolder and outputs them as a list of image tensors.",
short_description="Loads images from a folder as a list.",
is_experimental=True, is_experimental=True,
inputs=[ inputs=[
io.Combo.Input( io.Combo.Input(
@@ -86,6 +88,8 @@ class LoadImageTextDataSetFromFolderNode(io.ComfyNode):
node_id="LoadImageTextDataSetFromFolder", node_id="LoadImageTextDataSetFromFolder",
display_name="Load Image and Text Dataset from Folder", display_name="Load Image and Text Dataset from Folder",
category="dataset", category="dataset",
description="Loads paired images and text captions from a folder, matching each image with its corresponding text file.",
short_description="Loads paired images and text captions from folder.",
is_experimental=True, is_experimental=True,
inputs=[ inputs=[
io.Combo.Input( io.Combo.Input(
@@ -208,6 +212,8 @@ class SaveImageDataSetToFolderNode(io.ComfyNode):
node_id="SaveImageDataSetToFolder", node_id="SaveImageDataSetToFolder",
display_name="Save Image Dataset to Folder", display_name="Save Image Dataset to Folder",
category="dataset", category="dataset",
description="Saves a list of images to a named folder in the output directory with configurable filename prefix.",
short_description="Saves image list to an output folder.",
is_experimental=True, is_experimental=True,
is_output_node=True, is_output_node=True,
is_input_list=True, # Receive images as list is_input_list=True, # Receive images as list
@@ -247,6 +253,8 @@ class SaveImageTextDataSetToFolderNode(io.ComfyNode):
node_id="SaveImageTextDataSetToFolder", node_id="SaveImageTextDataSetToFolder",
display_name="Save Image and Text Dataset to Folder", display_name="Save Image and Text Dataset to Folder",
category="dataset", category="dataset",
description="Saves paired images and text captions to a named folder in the output directory with configurable filename prefix.",
short_description="Saves paired images and text to output folder.",
is_experimental=True, is_experimental=True,
is_output_node=True, is_output_node=True,
is_input_list=True, # Receive both images and texts as lists is_input_list=True, # Receive both images and texts as lists
@@ -401,6 +409,8 @@ class ImageProcessingNode(io.ComfyNode):
return io.Schema( return io.Schema(
node_id=cls.node_id, node_id=cls.node_id,
display_name=cls.display_name or cls.node_id, display_name=cls.display_name or cls.node_id,
description=getattr(cls, 'description', ''),
short_description=getattr(cls, 'short_description', ''),
category="dataset/image", category="dataset/image",
is_experimental=True, is_experimental=True,
is_input_list=is_group, # True for group, False for individual is_input_list=is_group, # True for group, False for individual
@@ -550,6 +560,8 @@ class TextProcessingNode(io.ComfyNode):
return io.Schema( return io.Schema(
node_id=cls.node_id, node_id=cls.node_id,
display_name=cls.display_name or cls.node_id, display_name=cls.display_name or cls.node_id,
description=getattr(cls, 'description', ''),
short_description=getattr(cls, 'short_description', ''),
category="dataset/text", category="dataset/text",
is_experimental=True, is_experimental=True,
is_input_list=is_group, # True for group, False for individual is_input_list=is_group, # True for group, False for individual
@@ -627,6 +639,7 @@ class ResizeImagesByShorterEdgeNode(ImageProcessingNode):
node_id = "ResizeImagesByShorterEdge" node_id = "ResizeImagesByShorterEdge"
display_name = "Resize Images by Shorter Edge" display_name = "Resize Images by Shorter Edge"
description = "Resize images so that the shorter edge matches the specified length while preserving aspect ratio." description = "Resize images so that the shorter edge matches the specified length while preserving aspect ratio."
short_description = "Resizes images by shorter edge preserving aspect ratio."
extra_inputs = [ extra_inputs = [
io.Int.Input( io.Int.Input(
"shorter_edge", "shorter_edge",
@@ -655,6 +668,7 @@ class ResizeImagesByLongerEdgeNode(ImageProcessingNode):
node_id = "ResizeImagesByLongerEdge" node_id = "ResizeImagesByLongerEdge"
display_name = "Resize Images by Longer Edge" display_name = "Resize Images by Longer Edge"
description = "Resize images so that the longer edge matches the specified length while preserving aspect ratio." description = "Resize images so that the longer edge matches the specified length while preserving aspect ratio."
short_description = "Resizes images by longer edge preserving aspect ratio."
extra_inputs = [ extra_inputs = [
io.Int.Input( io.Int.Input(
"longer_edge", "longer_edge",
@@ -686,6 +700,7 @@ class CenterCropImagesNode(ImageProcessingNode):
node_id = "CenterCropImages" node_id = "CenterCropImages"
display_name = "Center Crop Images" display_name = "Center Crop Images"
description = "Center crop all images to the specified dimensions." description = "Center crop all images to the specified dimensions."
short_description = None
extra_inputs = [ extra_inputs = [
io.Int.Input("width", default=512, min=1, max=8192, tooltip="Crop width."), io.Int.Input("width", default=512, min=1, max=8192, tooltip="Crop width."),
io.Int.Input("height", default=512, min=1, max=8192, tooltip="Crop height."), io.Int.Input("height", default=512, min=1, max=8192, tooltip="Crop height."),
@@ -708,6 +723,7 @@ class RandomCropImagesNode(ImageProcessingNode):
description = ( description = (
"Randomly crop all images to the specified dimensions (for data augmentation)." "Randomly crop all images to the specified dimensions (for data augmentation)."
) )
short_description = None
extra_inputs = [ extra_inputs = [
io.Int.Input("width", default=512, min=1, max=8192, tooltip="Crop width."), io.Int.Input("width", default=512, min=1, max=8192, tooltip="Crop width."),
io.Int.Input("height", default=512, min=1, max=8192, tooltip="Crop height."), io.Int.Input("height", default=512, min=1, max=8192, tooltip="Crop height."),
@@ -734,6 +750,7 @@ class NormalizeImagesNode(ImageProcessingNode):
node_id = "NormalizeImages" node_id = "NormalizeImages"
display_name = "Normalize Images" display_name = "Normalize Images"
description = "Normalize images using mean and standard deviation." description = "Normalize images using mean and standard deviation."
short_description = None
extra_inputs = [ extra_inputs = [
io.Float.Input( io.Float.Input(
"mean", "mean",
@@ -760,6 +777,7 @@ class AdjustBrightnessNode(ImageProcessingNode):
node_id = "AdjustBrightness" node_id = "AdjustBrightness"
display_name = "Adjust Brightness" display_name = "Adjust Brightness"
description = "Adjust brightness of all images." description = "Adjust brightness of all images."
short_description = None
extra_inputs = [ extra_inputs = [
io.Float.Input( io.Float.Input(
"factor", "factor",
@@ -779,6 +797,7 @@ class AdjustContrastNode(ImageProcessingNode):
node_id = "AdjustContrast" node_id = "AdjustContrast"
display_name = "Adjust Contrast" display_name = "Adjust Contrast"
description = "Adjust contrast of all images." description = "Adjust contrast of all images."
short_description = None
extra_inputs = [ extra_inputs = [
io.Float.Input( io.Float.Input(
"factor", "factor",
@@ -798,6 +817,7 @@ class ShuffleDatasetNode(ImageProcessingNode):
node_id = "ShuffleDataset" node_id = "ShuffleDataset"
display_name = "Shuffle Image Dataset" display_name = "Shuffle Image Dataset"
description = "Randomly shuffle the order of images in the dataset." description = "Randomly shuffle the order of images in the dataset."
short_description = None
is_group_process = True # Requires full list to shuffle is_group_process = True # Requires full list to shuffle
extra_inputs = [ extra_inputs = [
io.Int.Input( io.Int.Input(
@@ -821,6 +841,8 @@ class ShuffleImageTextDatasetNode(io.ComfyNode):
node_id="ShuffleImageTextDataset", node_id="ShuffleImageTextDataset",
display_name="Shuffle Image-Text Dataset", display_name="Shuffle Image-Text Dataset",
category="dataset/image", category="dataset/image",
description="Randomly shuffles paired image and text lists together using a seed, preserving their correspondence.",
short_description="Shuffles paired image-text lists together.",
is_experimental=True, is_experimental=True,
is_input_list=True, is_input_list=True,
inputs=[ inputs=[
@@ -863,6 +885,7 @@ class TextToLowercaseNode(TextProcessingNode):
node_id = "TextToLowercase" node_id = "TextToLowercase"
display_name = "Text to Lowercase" display_name = "Text to Lowercase"
description = "Convert all texts to lowercase." description = "Convert all texts to lowercase."
short_description = None
@classmethod @classmethod
def _process(cls, text): def _process(cls, text):
@@ -873,6 +896,7 @@ class TextToUppercaseNode(TextProcessingNode):
node_id = "TextToUppercase" node_id = "TextToUppercase"
display_name = "Text to Uppercase" display_name = "Text to Uppercase"
description = "Convert all texts to uppercase." description = "Convert all texts to uppercase."
short_description = None
@classmethod @classmethod
def _process(cls, text): def _process(cls, text):
@@ -883,6 +907,7 @@ class TruncateTextNode(TextProcessingNode):
node_id = "TruncateText" node_id = "TruncateText"
display_name = "Truncate Text" display_name = "Truncate Text"
description = "Truncate all texts to a maximum length." description = "Truncate all texts to a maximum length."
short_description = None
extra_inputs = [ extra_inputs = [
io.Int.Input( io.Int.Input(
"max_length", default=77, min=1, max=10000, tooltip="Maximum text length." "max_length", default=77, min=1, max=10000, tooltip="Maximum text length."
@@ -898,6 +923,7 @@ class AddTextPrefixNode(TextProcessingNode):
node_id = "AddTextPrefix" node_id = "AddTextPrefix"
display_name = "Add Text Prefix" display_name = "Add Text Prefix"
description = "Add a prefix to all texts." description = "Add a prefix to all texts."
short_description = None
extra_inputs = [ extra_inputs = [
io.String.Input("prefix", default="", tooltip="Prefix to add."), io.String.Input("prefix", default="", tooltip="Prefix to add."),
] ]
@@ -911,6 +937,7 @@ class AddTextSuffixNode(TextProcessingNode):
node_id = "AddTextSuffix" node_id = "AddTextSuffix"
display_name = "Add Text Suffix" display_name = "Add Text Suffix"
description = "Add a suffix to all texts." description = "Add a suffix to all texts."
short_description = None
extra_inputs = [ extra_inputs = [
io.String.Input("suffix", default="", tooltip="Suffix to add."), io.String.Input("suffix", default="", tooltip="Suffix to add."),
] ]
@@ -924,6 +951,7 @@ class ReplaceTextNode(TextProcessingNode):
node_id = "ReplaceText" node_id = "ReplaceText"
display_name = "Replace Text" display_name = "Replace Text"
description = "Replace text in all texts." description = "Replace text in all texts."
short_description = None
extra_inputs = [ extra_inputs = [
io.String.Input("find", default="", tooltip="Text to find."), io.String.Input("find", default="", tooltip="Text to find."),
io.String.Input("replace", default="", tooltip="Text to replace with."), io.String.Input("replace", default="", tooltip="Text to replace with."),
@@ -938,6 +966,7 @@ class StripWhitespaceNode(TextProcessingNode):
node_id = "StripWhitespace" node_id = "StripWhitespace"
display_name = "Strip Whitespace" display_name = "Strip Whitespace"
description = "Strip leading and trailing whitespace from all texts." description = "Strip leading and trailing whitespace from all texts."
short_description = None
@classmethod @classmethod
def _process(cls, text): def _process(cls, text):
@@ -953,6 +982,7 @@ class ImageDeduplicationNode(ImageProcessingNode):
node_id = "ImageDeduplication" node_id = "ImageDeduplication"
display_name = "Image Deduplication" display_name = "Image Deduplication"
description = "Remove duplicate or very similar images from the dataset." description = "Remove duplicate or very similar images from the dataset."
short_description = None
is_group_process = True # Requires full list to compare images is_group_process = True # Requires full list to compare images
extra_inputs = [ extra_inputs = [
io.Float.Input( io.Float.Input(
@@ -1023,6 +1053,7 @@ class ImageGridNode(ImageProcessingNode):
node_id = "ImageGrid" node_id = "ImageGrid"
display_name = "Image Grid" display_name = "Image Grid"
description = "Arrange multiple images into a grid layout." description = "Arrange multiple images into a grid layout."
short_description = None
is_group_process = True # Requires full list to create grid is_group_process = True # Requires full list to create grid
is_output_list = False # Outputs single grid image is_output_list = False # Outputs single grid image
extra_inputs = [ extra_inputs = [
@@ -1097,6 +1128,7 @@ class MergeImageListsNode(ImageProcessingNode):
node_id = "MergeImageLists" node_id = "MergeImageLists"
display_name = "Merge Image Lists" display_name = "Merge Image Lists"
description = "Concatenate multiple image lists into one." description = "Concatenate multiple image lists into one."
short_description = None
is_group_process = True # Receives images as list is_group_process = True # Receives images as list
@classmethod @classmethod
@@ -1114,6 +1146,7 @@ class MergeTextListsNode(TextProcessingNode):
node_id = "MergeTextLists" node_id = "MergeTextLists"
display_name = "Merge Text Lists" display_name = "Merge Text Lists"
description = "Concatenate multiple text lists into one." description = "Concatenate multiple text lists into one."
short_description = None
is_group_process = True # Receives texts as list is_group_process = True # Receives texts as list
@classmethod @classmethod
@@ -1137,6 +1170,8 @@ class ResolutionBucket(io.ComfyNode):
node_id="ResolutionBucket", node_id="ResolutionBucket",
display_name="Resolution Bucket", display_name="Resolution Bucket",
category="dataset", category="dataset",
description="Groups latents and conditioning by resolution into batched buckets for efficient training with mixed aspect ratios.",
short_description="Groups latents by resolution into training buckets.",
is_experimental=True, is_experimental=True,
is_input_list=True, is_input_list=True,
inputs=[ inputs=[
@@ -1230,6 +1265,8 @@ class MakeTrainingDataset(io.ComfyNode):
search_aliases=["encode dataset"], search_aliases=["encode dataset"],
display_name="Make Training Dataset", display_name="Make Training Dataset",
category="dataset", category="dataset",
description="Encodes images with a VAE and text captions with CLIP to create paired latent and conditioning training data.",
short_description="Encodes images and text into training data.",
is_experimental=True, is_experimental=True,
is_input_list=True, # images and texts as lists is_input_list=True, # images and texts as lists
inputs=[ inputs=[
@@ -1316,6 +1353,8 @@ class SaveTrainingDataset(io.ComfyNode):
search_aliases=["export training data"], search_aliases=["export training data"],
display_name="Save Training Dataset", display_name="Save Training Dataset",
category="dataset", category="dataset",
description="Saves encoded latent and conditioning training data to disk in sharded files with configurable shard size.",
short_description="Saves encoded training data to sharded files.",
is_experimental=True, is_experimental=True,
is_output_node=True, is_output_node=True,
is_input_list=True, # Receive lists is_input_list=True, # Receive lists
@@ -1417,6 +1456,8 @@ class LoadTrainingDataset(io.ComfyNode):
search_aliases=["import dataset", "training data"], search_aliases=["import dataset", "training data"],
display_name="Load Training Dataset", display_name="Load Training Dataset",
category="dataset", category="dataset",
description="Loads a previously saved training dataset of latents and conditioning from sharded files on disk.",
short_description="Loads saved training dataset from disk.",
is_experimental=True, is_experimental=True,
inputs=[ inputs=[
io.String.Input( io.String.Input(

View File

@@ -14,6 +14,8 @@ class DifferentialDiffusion(io.ComfyNode):
search_aliases=["inpaint gradient", "variable denoise strength"], search_aliases=["inpaint gradient", "variable denoise strength"],
display_name="Differential Diffusion", display_name="Differential Diffusion",
category="_for_testing", category="_for_testing",
description="Enables per-pixel variable denoise strength using a mask, where mask intensity controls how much each region is denoised during sampling.",
short_description="Per-pixel variable denoise strength via mask.",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
io.Float.Input( io.Float.Input(

View File

@@ -363,6 +363,7 @@ class EasyCacheNode(io.ComfyNode):
node_id="EasyCache", node_id="EasyCache",
display_name="EasyCache", display_name="EasyCache",
description="Native EasyCache implementation.", description="Native EasyCache implementation.",
short_description=None,
category="advanced/debug/model", category="advanced/debug/model",
is_experimental=True, is_experimental=True,
inputs=[ inputs=[
@@ -496,6 +497,7 @@ class LazyCacheNode(io.ComfyNode):
node_id="LazyCache", node_id="LazyCache",
display_name="LazyCache", display_name="LazyCache",
description="A homebrew version of EasyCache - even 'easier' version of EasyCache to implement. Overall works worse than EasyCache, but better in some rare cases AND universal compatibility with everything in ComfyUI.", description="A homebrew version of EasyCache - even 'easier' version of EasyCache to implement. Overall works worse than EasyCache, but better in some rare cases AND universal compatibility with everything in ComfyUI.",
short_description="Simpler EasyCache alternative with universal ComfyUI compatibility.",
category="advanced/debug/model", category="advanced/debug/model",
is_experimental=True, is_experimental=True,
inputs=[ inputs=[

View File

@@ -10,6 +10,7 @@ class ReferenceLatent(io.ComfyNode):
node_id="ReferenceLatent", node_id="ReferenceLatent",
category="advanced/conditioning/edit_models", category="advanced/conditioning/edit_models",
description="This node sets the guiding latent for an edit model. If the model supports it you can chain multiple to set multiple reference images.", description="This node sets the guiding latent for an edit model. If the model supports it you can chain multiple to set multiple reference images.",
short_description="Sets guiding latent for edit models with chaining support.",
inputs=[ inputs=[
io.Conditioning.Input("conditioning"), io.Conditioning.Input("conditioning"),
io.Latent.Input("latent", optional=True), io.Latent.Input("latent", optional=True),

View File

@@ -19,6 +19,8 @@ class EpsilonScaling(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="Epsilon Scaling", node_id="Epsilon Scaling",
category="model_patches/unet", category="model_patches/unet",
description="Applies epsilon scaling to mitigate exposure bias in diffusion models by scaling the predicted noise after CFG, improving sample quality.",
short_description="Scale predicted noise to reduce exposure bias.",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
io.Float.Input( io.Float.Input(
@@ -121,6 +123,7 @@ class TemporalScoreRescaling(io.ComfyNode):
"TSR - Temporal Score Rescaling (2510.01184)\n\n" "TSR - Temporal Score Rescaling (2510.01184)\n\n"
"Rescaling the model's score or noise to steer the sampling diversity.\n" "Rescaling the model's score or noise to steer the sampling diversity.\n"
), ),
short_description="Rescales temporal scores to control sampling diversity.",
) )
@classmethod @classmethod

View File

@@ -13,6 +13,8 @@ class CLIPTextEncodeFlux(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="CLIPTextEncodeFlux", node_id="CLIPTextEncodeFlux",
category="advanced/conditioning/flux", category="advanced/conditioning/flux",
description="Encodes separate CLIP-L and T5-XXL text prompts with a guidance value into Flux conditioning.",
short_description="Encodes CLIP-L and T5-XXL prompts for Flux.",
inputs=[ inputs=[
io.Clip.Input("clip"), io.Clip.Input("clip"),
io.String.Input("clip_l", multiline=True, dynamic_prompts=True), io.String.Input("clip_l", multiline=True, dynamic_prompts=True),
@@ -40,6 +42,8 @@ class EmptyFlux2LatentImage(io.ComfyNode):
node_id="EmptyFlux2LatentImage", node_id="EmptyFlux2LatentImage",
display_name="Empty Flux 2 Latent", display_name="Empty Flux 2 Latent",
category="latent", category="latent",
description="Creates an empty Flux 2 latent image tensor with the specified width, height, and batch size.",
short_description="Creates an empty Flux 2 latent image tensor.",
inputs=[ inputs=[
io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16), io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
io.Int.Input("height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16), io.Int.Input("height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
@@ -61,6 +65,8 @@ class FluxGuidance(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="FluxGuidance", node_id="FluxGuidance",
category="advanced/conditioning/flux", category="advanced/conditioning/flux",
description="Sets the guidance strength value on Flux conditioning to control how closely generation follows the prompt.",
short_description="Sets guidance strength on Flux conditioning.",
inputs=[ inputs=[
io.Conditioning.Input("conditioning"), io.Conditioning.Input("conditioning"),
io.Float.Input("guidance", default=3.5, min=0.0, max=100.0, step=0.1), io.Float.Input("guidance", default=3.5, min=0.0, max=100.0, step=0.1),
@@ -85,6 +91,7 @@ class FluxDisableGuidance(io.ComfyNode):
node_id="FluxDisableGuidance", node_id="FluxDisableGuidance",
category="advanced/conditioning/flux", category="advanced/conditioning/flux",
description="This node completely disables the guidance embed on Flux and Flux like models", description="This node completely disables the guidance embed on Flux and Flux like models",
short_description="Disables guidance embed on Flux and Flux-like models.",
inputs=[ inputs=[
io.Conditioning.Input("conditioning"), io.Conditioning.Input("conditioning"),
], ],
@@ -129,6 +136,7 @@ class FluxKontextImageScale(io.ComfyNode):
node_id="FluxKontextImageScale", node_id="FluxKontextImageScale",
category="advanced/conditioning/flux", category="advanced/conditioning/flux",
description="This node resizes the image to one that is more optimal for flux kontext.", description="This node resizes the image to one that is more optimal for flux kontext.",
short_description="Resizes images to optimal dimensions for Flux Kontext.",
inputs=[ inputs=[
io.Image.Input("image"), io.Image.Input("image"),
], ],
@@ -156,6 +164,8 @@ class FluxKontextMultiReferenceLatentMethod(io.ComfyNode):
node_id="FluxKontextMultiReferenceLatentMethod", node_id="FluxKontextMultiReferenceLatentMethod",
display_name="Edit Model Reference Method", display_name="Edit Model Reference Method",
category="advanced/conditioning/flux", category="advanced/conditioning/flux",
description="Selects the method used for handling multiple reference latents in Flux Kontext edit models.",
short_description="Selects reference latent method for Flux Kontext.",
inputs=[ inputs=[
io.Conditioning.Input("conditioning"), io.Conditioning.Input("conditioning"),
io.Combo.Input( io.Combo.Input(
@@ -214,6 +224,8 @@ class Flux2Scheduler(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="Flux2Scheduler", node_id="Flux2Scheduler",
category="sampling/custom_sampling/schedulers", category="sampling/custom_sampling/schedulers",
description="Generates a sigma schedule for Flux 2 sampling based on step count and image resolution.",
short_description="Generates a sigma schedule for Flux 2 sampling.",
inputs=[ inputs=[
io.Int.Input("steps", default=20, min=1, max=4096), io.Int.Input("steps", default=20, min=1, max=4096),
io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=1), io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=1),

View File

@@ -30,6 +30,8 @@ class FreeU(IO.ComfyNode):
return IO.Schema( return IO.Schema(
node_id="FreeU", node_id="FreeU",
category="model_patches/unet", category="model_patches/unet",
description="Applies FreeU v1 to a UNet model, boosting backbone features and filtering skip connections using Fourier transforms for improved quality.",
short_description="Applies FreeU v1 backbone boost and skip filtering.",
inputs=[ inputs=[
IO.Model.Input("model"), IO.Model.Input("model"),
IO.Float.Input("b1", default=1.1, min=0.0, max=10.0, step=0.01), IO.Float.Input("b1", default=1.1, min=0.0, max=10.0, step=0.01),
@@ -77,6 +79,8 @@ class FreeU_V2(IO.ComfyNode):
return IO.Schema( return IO.Schema(
node_id="FreeU_V2", node_id="FreeU_V2",
category="model_patches/unet", category="model_patches/unet",
description="Applies FreeU v2 to a UNet model with adaptive backbone scaling based on hidden state magnitude and Fourier skip filtering.",
short_description="Applies FreeU v2 with adaptive scaling.",
inputs=[ inputs=[
IO.Model.Input("model"), IO.Model.Input("model"),
IO.Float.Input("b1", default=1.3, min=0.0, max=10.0, step=0.01), IO.Float.Input("b1", default=1.3, min=0.0, max=10.0, step=0.01),

View File

@@ -62,6 +62,7 @@ class FreSca(io.ComfyNode):
display_name="FreSca", display_name="FreSca",
category="_for_testing", category="_for_testing",
description="Applies frequency-dependent scaling to the guidance", description="Applies frequency-dependent scaling to the guidance",
short_description=None,
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
io.Float.Input("scale_low", default=1.0, min=0, max=10, step=0.01, io.Float.Input("scale_low", default=1.0, min=0, max=10, step=0.01,

View File

@@ -341,6 +341,8 @@ class GITSScheduler(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="GITSScheduler", node_id="GITSScheduler",
category="sampling/custom_sampling/schedulers", category="sampling/custom_sampling/schedulers",
description="Generates a noise schedule using the GITS method with precomputed optimal sigma levels and configurable coefficient.",
short_description="Generates a GITS noise schedule with optimal sigma levels.",
inputs=[ inputs=[
io.Float.Input("coeff", default=1.20, min=0.80, max=1.50, step=0.05), io.Float.Input("coeff", default=1.20, min=0.80, max=1.50, step=0.05),
io.Int.Input("steps", default=10, min=2, max=1000), io.Int.Input("steps", default=10, min=2, max=1000),

View File

@@ -13,6 +13,7 @@ class QuadrupleCLIPLoader(io.ComfyNode):
node_id="QuadrupleCLIPLoader", node_id="QuadrupleCLIPLoader",
category="advanced/loaders", category="advanced/loaders",
description="[Recipes]\n\nhidream: long clip-l, long clip-g, t5xxl, llama_8b_3.1_instruct", description="[Recipes]\n\nhidream: long clip-l, long clip-g, t5xxl, llama_8b_3.1_instruct",
short_description=None,
inputs=[ inputs=[
io.Combo.Input("clip_name1", options=folder_paths.get_filename_list("text_encoders")), io.Combo.Input("clip_name1", options=folder_paths.get_filename_list("text_encoders")),
io.Combo.Input("clip_name2", options=folder_paths.get_filename_list("text_encoders")), io.Combo.Input("clip_name2", options=folder_paths.get_filename_list("text_encoders")),
@@ -40,6 +41,8 @@ class CLIPTextEncodeHiDream(io.ComfyNode):
node_id="CLIPTextEncodeHiDream", node_id="CLIPTextEncodeHiDream",
search_aliases=["hidream prompt"], search_aliases=["hidream prompt"],
category="advanced/conditioning", category="advanced/conditioning",
description="Encodes separate CLIP-L, CLIP-G, T5-XXL, and Llama text prompts into HiDream conditioning.",
short_description="Encodes multi-encoder text prompts for HiDream.",
inputs=[ inputs=[
io.Clip.Input("clip"), io.Clip.Input("clip"),
io.String.Input("clip_l", multiline=True, dynamic_prompts=True), io.String.Input("clip_l", multiline=True, dynamic_prompts=True),

View File

@@ -38,6 +38,8 @@ class PairConditioningSetProperties:
RETURN_TYPES = ("CONDITIONING", "CONDITIONING") RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
RETURN_NAMES = ("positive", "negative") RETURN_NAMES = ("positive", "negative")
CATEGORY = "advanced/hooks/cond pair" CATEGORY = "advanced/hooks/cond pair"
DESCRIPTION = "Set properties like strength, mask, hooks, and timesteps on a positive/negative conditioning pair."
SHORT_DESCRIPTION = "Set properties on a positive/negative conditioning pair."
FUNCTION = "set_properties" FUNCTION = "set_properties"
def set_properties(self, positive_NEW, negative_NEW, def set_properties(self, positive_NEW, negative_NEW,
@@ -73,6 +75,8 @@ class PairConditioningSetPropertiesAndCombine:
RETURN_TYPES = ("CONDITIONING", "CONDITIONING") RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
RETURN_NAMES = ("positive", "negative") RETURN_NAMES = ("positive", "negative")
CATEGORY = "advanced/hooks/cond pair" CATEGORY = "advanced/hooks/cond pair"
DESCRIPTION = "Set properties on new conditioning pair and combine with existing positive/negative conditioning."
SHORT_DESCRIPTION = "Set properties on new cond pair, combine with existing."
FUNCTION = "set_properties" FUNCTION = "set_properties"
def set_properties(self, positive, negative, positive_NEW, negative_NEW, def set_properties(self, positive, negative, positive_NEW, negative_NEW,
@@ -104,6 +108,8 @@ class ConditioningSetProperties:
EXPERIMENTAL = True EXPERIMENTAL = True
RETURN_TYPES = ("CONDITIONING",) RETURN_TYPES = ("CONDITIONING",)
CATEGORY = "advanced/hooks/cond single" CATEGORY = "advanced/hooks/cond single"
DESCRIPTION = "Set properties like strength, mask, hooks, and timesteps on a single conditioning input."
SHORT_DESCRIPTION = "Set properties on a single conditioning input."
FUNCTION = "set_properties" FUNCTION = "set_properties"
def set_properties(self, cond_NEW, def set_properties(self, cond_NEW,
@@ -136,6 +142,8 @@ class ConditioningSetPropertiesAndCombine:
EXPERIMENTAL = True EXPERIMENTAL = True
RETURN_TYPES = ("CONDITIONING",) RETURN_TYPES = ("CONDITIONING",)
CATEGORY = "advanced/hooks/cond single" CATEGORY = "advanced/hooks/cond single"
DESCRIPTION = "Set properties on new conditioning and combine it with an existing conditioning input."
SHORT_DESCRIPTION = "Set properties on new conditioning, combine with existing."
FUNCTION = "set_properties" FUNCTION = "set_properties"
def set_properties(self, cond, cond_NEW, def set_properties(self, cond, cond_NEW,
@@ -164,6 +172,8 @@ class PairConditioningCombine:
RETURN_TYPES = ("CONDITIONING", "CONDITIONING") RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
RETURN_NAMES = ("positive", "negative") RETURN_NAMES = ("positive", "negative")
CATEGORY = "advanced/hooks/cond pair" CATEGORY = "advanced/hooks/cond pair"
DESCRIPTION = "Combine two positive/negative conditioning pairs into a single pair."
SHORT_DESCRIPTION = None
FUNCTION = "combine" FUNCTION = "combine"
def combine(self, positive_A, negative_A, positive_B, negative_B): def combine(self, positive_A, negative_A, positive_B, negative_B):
@@ -191,6 +201,8 @@ class PairConditioningSetDefaultAndCombine:
RETURN_TYPES = ("CONDITIONING", "CONDITIONING") RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
RETURN_NAMES = ("positive", "negative") RETURN_NAMES = ("positive", "negative")
CATEGORY = "advanced/hooks/cond pair" CATEGORY = "advanced/hooks/cond pair"
DESCRIPTION = "Set default conditioning pair and combine it with existing positive/negative conditioning and optional hooks."
SHORT_DESCRIPTION = "Set default cond pair and combine with existing."
FUNCTION = "set_default_and_combine" FUNCTION = "set_default_and_combine"
def set_default_and_combine(self, positive, negative, positive_DEFAULT, negative_DEFAULT, def set_default_and_combine(self, positive, negative, positive_DEFAULT, negative_DEFAULT,
@@ -217,6 +229,8 @@ class ConditioningSetDefaultAndCombine:
EXPERIMENTAL = True EXPERIMENTAL = True
RETURN_TYPES = ("CONDITIONING",) RETURN_TYPES = ("CONDITIONING",)
CATEGORY = "advanced/hooks/cond single" CATEGORY = "advanced/hooks/cond single"
DESCRIPTION = "Set default conditioning and combine it with existing conditioning input and optional hooks."
SHORT_DESCRIPTION = "Set default conditioning and combine with existing."
FUNCTION = "set_default_and_combine" FUNCTION = "set_default_and_combine"
def set_default_and_combine(self, cond, cond_DEFAULT, def set_default_and_combine(self, cond, cond_DEFAULT,
@@ -244,6 +258,8 @@ class SetClipHooks:
EXPERIMENTAL = True EXPERIMENTAL = True
RETURN_TYPES = ("CLIP",) RETURN_TYPES = ("CLIP",)
CATEGORY = "advanced/hooks/clip" CATEGORY = "advanced/hooks/clip"
DESCRIPTION = "Apply hooks to a CLIP model, optionally propagating them to conditioning outputs and enabling CLIP scheduling."
SHORT_DESCRIPTION = "Apply hooks to a CLIP model with scheduling options."
FUNCTION = "apply_hooks" FUNCTION = "apply_hooks"
def apply_hooks(self, clip: CLIP, schedule_clip: bool, apply_to_conds: bool, hooks: comfy.hooks.HookGroup=None): def apply_hooks(self, clip: CLIP, schedule_clip: bool, apply_to_conds: bool, hooks: comfy.hooks.HookGroup=None):
@@ -275,6 +291,8 @@ class ConditioningTimestepsRange:
RETURN_TYPES = ("TIMESTEPS_RANGE", "TIMESTEPS_RANGE", "TIMESTEPS_RANGE") RETURN_TYPES = ("TIMESTEPS_RANGE", "TIMESTEPS_RANGE", "TIMESTEPS_RANGE")
RETURN_NAMES = ("TIMESTEPS_RANGE", "BEFORE_RANGE", "AFTER_RANGE") RETURN_NAMES = ("TIMESTEPS_RANGE", "BEFORE_RANGE", "AFTER_RANGE")
CATEGORY = "advanced/hooks" CATEGORY = "advanced/hooks"
DESCRIPTION = "Define a timestep percentage range and output the range plus its complement before and after segments."
SHORT_DESCRIPTION = "Define a timestep range with before/after complements."
FUNCTION = "create_range" FUNCTION = "create_range"
def create_range(self, start_percent: float, end_percent: float): def create_range(self, start_percent: float, end_percent: float):
@@ -308,6 +326,8 @@ class CreateHookLora:
EXPERIMENTAL = True EXPERIMENTAL = True
RETURN_TYPES = ("HOOKS",) RETURN_TYPES = ("HOOKS",)
CATEGORY = "advanced/hooks/create" CATEGORY = "advanced/hooks/create"
DESCRIPTION = "Create a LoRA hook with separate model and CLIP strength that can be scheduled on conditioning."
SHORT_DESCRIPTION = "Create a LoRA hook with model and CLIP strength."
FUNCTION = "create_hook" FUNCTION = "create_hook"
def create_hook(self, lora_name: str, strength_model: float, strength_clip: float, prev_hooks: comfy.hooks.HookGroup=None): def create_hook(self, lora_name: str, strength_model: float, strength_clip: float, prev_hooks: comfy.hooks.HookGroup=None):
@@ -353,6 +373,8 @@ class CreateHookLoraModelOnly(CreateHookLora):
EXPERIMENTAL = True EXPERIMENTAL = True
RETURN_TYPES = ("HOOKS",) RETURN_TYPES = ("HOOKS",)
CATEGORY = "advanced/hooks/create" CATEGORY = "advanced/hooks/create"
DESCRIPTION = "Create a LoRA hook that only affects the model (not CLIP) for scheduling on conditioning."
SHORT_DESCRIPTION = "Create a model-only LoRA hook."
FUNCTION = "create_hook_model_only" FUNCTION = "create_hook_model_only"
def create_hook_model_only(self, lora_name: str, strength_model: float, prev_hooks: comfy.hooks.HookGroup=None): def create_hook_model_only(self, lora_name: str, strength_model: float, prev_hooks: comfy.hooks.HookGroup=None):
@@ -383,6 +405,8 @@ class CreateHookModelAsLora:
EXPERIMENTAL = True EXPERIMENTAL = True
RETURN_TYPES = ("HOOKS",) RETURN_TYPES = ("HOOKS",)
CATEGORY = "advanced/hooks/create" CATEGORY = "advanced/hooks/create"
DESCRIPTION = "Create a hook from a full checkpoint treated as a LoRA, with separate model and CLIP strength controls."
SHORT_DESCRIPTION = "Create a hook from a checkpoint treated as LoRA."
FUNCTION = "create_hook" FUNCTION = "create_hook"
def create_hook(self, ckpt_name: str, strength_model: float, strength_clip: float, def create_hook(self, ckpt_name: str, strength_model: float, strength_clip: float,
@@ -431,6 +455,8 @@ class CreateHookModelAsLoraModelOnly(CreateHookModelAsLora):
EXPERIMENTAL = True EXPERIMENTAL = True
RETURN_TYPES = ("HOOKS",) RETURN_TYPES = ("HOOKS",)
CATEGORY = "advanced/hooks/create" CATEGORY = "advanced/hooks/create"
DESCRIPTION = "Create a model-only hook from a full checkpoint treated as a LoRA, without affecting CLIP."
SHORT_DESCRIPTION = "Create a model-only hook from a checkpoint as LoRA."
FUNCTION = "create_hook_model_only" FUNCTION = "create_hook_model_only"
def create_hook_model_only(self, ckpt_name: str, strength_model: float, def create_hook_model_only(self, ckpt_name: str, strength_model: float,
@@ -460,6 +486,8 @@ class SetHookKeyframes:
EXPERIMENTAL = True EXPERIMENTAL = True
RETURN_TYPES = ("HOOKS",) RETURN_TYPES = ("HOOKS",)
CATEGORY = "advanced/hooks/scheduling" CATEGORY = "advanced/hooks/scheduling"
DESCRIPTION = "Assign keyframe schedules to hooks for controlling their strength over time during sampling."
SHORT_DESCRIPTION = "Assign keyframe schedules to hooks over time."
FUNCTION = "set_hook_keyframes" FUNCTION = "set_hook_keyframes"
def set_hook_keyframes(self, hooks: comfy.hooks.HookGroup, hook_kf: comfy.hooks.HookKeyframeGroup=None): def set_hook_keyframes(self, hooks: comfy.hooks.HookGroup, hook_kf: comfy.hooks.HookKeyframeGroup=None):
@@ -488,6 +516,8 @@ class CreateHookKeyframe:
RETURN_TYPES = ("HOOK_KEYFRAMES",) RETURN_TYPES = ("HOOK_KEYFRAMES",)
RETURN_NAMES = ("HOOK_KF",) RETURN_NAMES = ("HOOK_KF",)
CATEGORY = "advanced/hooks/scheduling" CATEGORY = "advanced/hooks/scheduling"
DESCRIPTION = "Create a single hook keyframe with a strength multiplier at a specific timestep percentage."
SHORT_DESCRIPTION = "Create a hook keyframe at a specific timestep."
FUNCTION = "create_hook_keyframe" FUNCTION = "create_hook_keyframe"
def create_hook_keyframe(self, strength_mult: float, start_percent: float, prev_hook_kf: comfy.hooks.HookKeyframeGroup=None): def create_hook_keyframe(self, strength_mult: float, start_percent: float, prev_hook_kf: comfy.hooks.HookKeyframeGroup=None):
@@ -523,6 +553,8 @@ class CreateHookKeyframesInterpolated:
RETURN_TYPES = ("HOOK_KEYFRAMES",) RETURN_TYPES = ("HOOK_KEYFRAMES",)
RETURN_NAMES = ("HOOK_KF",) RETURN_NAMES = ("HOOK_KF",)
CATEGORY = "advanced/hooks/scheduling" CATEGORY = "advanced/hooks/scheduling"
DESCRIPTION = "Generate multiple interpolated hook keyframes between start and end strength values over a timestep range."
SHORT_DESCRIPTION = "Generate interpolated hook keyframes over a timestep range."
FUNCTION = "create_hook_keyframes" FUNCTION = "create_hook_keyframes"
def create_hook_keyframes(self, strength_start: float, strength_end: float, interpolation: str, def create_hook_keyframes(self, strength_start: float, strength_end: float, interpolation: str,
@@ -568,6 +600,8 @@ class CreateHookKeyframesFromFloats:
RETURN_TYPES = ("HOOK_KEYFRAMES",) RETURN_TYPES = ("HOOK_KEYFRAMES",)
RETURN_NAMES = ("HOOK_KF",) RETURN_NAMES = ("HOOK_KF",)
CATEGORY = "advanced/hooks/scheduling" CATEGORY = "advanced/hooks/scheduling"
DESCRIPTION = "Create hook keyframes from a list of float values distributed evenly across a timestep percentage range."
SHORT_DESCRIPTION = "Create hook keyframes from a list of float values."
FUNCTION = "create_hook_keyframes" FUNCTION = "create_hook_keyframes"
def create_hook_keyframes(self, floats_strength: Union[float, list[float]], def create_hook_keyframes(self, floats_strength: Union[float, list[float]],
@@ -639,6 +673,8 @@ class CombineHooks:
EXPERIMENTAL = True EXPERIMENTAL = True
RETURN_TYPES = ("HOOKS",) RETURN_TYPES = ("HOOKS",)
CATEGORY = "advanced/hooks/combine" CATEGORY = "advanced/hooks/combine"
DESCRIPTION = "Combine two hook groups into one."
SHORT_DESCRIPTION = None
FUNCTION = "combine_hooks" FUNCTION = "combine_hooks"
def combine_hooks(self, def combine_hooks(self,
@@ -666,6 +702,8 @@ class CombineHooksFour:
EXPERIMENTAL = True EXPERIMENTAL = True
RETURN_TYPES = ("HOOKS",) RETURN_TYPES = ("HOOKS",)
CATEGORY = "advanced/hooks/combine" CATEGORY = "advanced/hooks/combine"
DESCRIPTION = "Combine up to four hook groups into one."
SHORT_DESCRIPTION = None
FUNCTION = "combine_hooks" FUNCTION = "combine_hooks"
def combine_hooks(self, def combine_hooks(self,
@@ -699,6 +737,8 @@ class CombineHooksEight:
EXPERIMENTAL = True EXPERIMENTAL = True
RETURN_TYPES = ("HOOKS",) RETURN_TYPES = ("HOOKS",)
CATEGORY = "advanced/hooks/combine" CATEGORY = "advanced/hooks/combine"
DESCRIPTION = "Combine up to eight hook groups into one."
SHORT_DESCRIPTION = None
FUNCTION = "combine_hooks" FUNCTION = "combine_hooks"
def combine_hooks(self, def combine_hooks(self,

View File

@@ -15,6 +15,8 @@ class CLIPTextEncodeHunyuanDiT(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="CLIPTextEncodeHunyuanDiT", node_id="CLIPTextEncodeHunyuanDiT",
category="advanced/conditioning", category="advanced/conditioning",
description="Encodes text using both BERT and mT5-XL tokenizers for Hunyuan DiT conditioning.",
short_description="Dual-tokenizer text encoding for Hunyuan DiT.",
inputs=[ inputs=[
io.Clip.Input("clip"), io.Clip.Input("clip"),
io.String.Input("bert", multiline=True, dynamic_prompts=True), io.String.Input("bert", multiline=True, dynamic_prompts=True),
@@ -42,6 +44,8 @@ class EmptyHunyuanLatentVideo(io.ComfyNode):
node_id="EmptyHunyuanLatentVideo", node_id="EmptyHunyuanLatentVideo",
display_name="Empty HunyuanVideo 1.0 Latent", display_name="Empty HunyuanVideo 1.0 Latent",
category="latent/video", category="latent/video",
description="Creates an empty latent tensor sized for HunyuanVideo 1.0 video generation.",
short_description="Empty latent for HunyuanVideo 1.0 generation.",
inputs=[ inputs=[
io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16), io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16),
io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16), io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
@@ -67,6 +71,8 @@ class EmptyHunyuanVideo15Latent(EmptyHunyuanLatentVideo):
schema = super().define_schema() schema = super().define_schema()
schema.node_id = "EmptyHunyuanVideo15Latent" schema.node_id = "EmptyHunyuanVideo15Latent"
schema.display_name = "Empty HunyuanVideo 1.5 Latent" schema.display_name = "Empty HunyuanVideo 1.5 Latent"
schema.description = "Creates an empty latent tensor sized for HunyuanVideo 1.5 video generation with 16x spatial downscale."
schema.short_description = "Empty latent for HunyuanVideo 1.5 generation."
return schema return schema
@classmethod @classmethod
@@ -82,6 +88,8 @@ class HunyuanVideo15ImageToVideo(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="HunyuanVideo15ImageToVideo", node_id="HunyuanVideo15ImageToVideo",
category="conditioning/video_models", category="conditioning/video_models",
description="Prepares conditioning and latent for HunyuanVideo 1.5 image-to-video generation with start image and CLIP vision support.",
short_description="HunyuanVideo 1.5 image-to-video conditioning setup.",
inputs=[ inputs=[
io.Conditioning.Input("positive"), io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"), io.Conditioning.Input("negative"),
@@ -131,6 +139,9 @@ class HunyuanVideo15SuperResolution(io.ComfyNode):
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="HunyuanVideo15SuperResolution", node_id="HunyuanVideo15SuperResolution",
category="conditioning/video_models",
description="Sets up conditioning for HunyuanVideo 1.5 super-resolution upscaling of a latent with noise augmentation and optional image guidance.",
short_description="HunyuanVideo 1.5 super-resolution latent conditioning.",
inputs=[ inputs=[
io.Conditioning.Input("positive"), io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"), io.Conditioning.Input("negative"),
@@ -177,6 +188,8 @@ class LatentUpscaleModelLoader(io.ComfyNode):
node_id="LatentUpscaleModelLoader", node_id="LatentUpscaleModelLoader",
display_name="Load Latent Upscale Model", display_name="Load Latent Upscale Model",
category="loaders", category="loaders",
description="Loads a latent upscale model from disk, supporting HunyuanVideo 720p, 1080p, and other latent upsampler architectures.",
short_description="Load a latent upscale model from file.",
inputs=[ inputs=[
io.Combo.Input("model_name", options=folder_paths.get_filename_list("latent_upscale_models")), io.Combo.Input("model_name", options=folder_paths.get_filename_list("latent_upscale_models")),
], ],
@@ -226,6 +239,8 @@ class HunyuanVideo15LatentUpscaleWithModel(io.ComfyNode):
node_id="HunyuanVideo15LatentUpscaleWithModel", node_id="HunyuanVideo15LatentUpscaleWithModel",
display_name="Hunyuan Video 15 Latent Upscale With Model", display_name="Hunyuan Video 15 Latent Upscale With Model",
category="latent", category="latent",
description="Upscales a video latent to a target resolution using a loaded latent upscale model and configurable upscale method.",
short_description="Upscale video latent using a latent upscale model.",
inputs=[ inputs=[
io.LatentUpscaleModel.Input("model"), io.LatentUpscaleModel.Input("model"),
io.Latent.Input("samples"), io.Latent.Input("samples"),
@@ -275,6 +290,8 @@ class TextEncodeHunyuanVideo_ImageToVideo(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="TextEncodeHunyuanVideo_ImageToVideo", node_id="TextEncodeHunyuanVideo_ImageToVideo",
category="advanced/conditioning", category="advanced/conditioning",
description="Encodes text with CLIP vision image embeddings for HunyuanVideo image-to-video conditioning using an interleaved template.",
short_description="Text and image encoding for HunyuanVideo image-to-video.",
inputs=[ inputs=[
io.Clip.Input("clip"), io.Clip.Input("clip"),
io.ClipVisionOutput.Input("clip_vision_output"), io.ClipVisionOutput.Input("clip_vision_output"),
@@ -306,6 +323,8 @@ class HunyuanImageToVideo(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="HunyuanImageToVideo", node_id="HunyuanImageToVideo",
category="conditioning/video_models", category="conditioning/video_models",
description="Prepares conditioning and latent for Hunyuan image-to-video generation with selectable guidance type.",
short_description="Hunyuan image-to-video conditioning with guidance options.",
inputs=[ inputs=[
io.Conditioning.Input("positive"), io.Conditioning.Input("positive"),
io.Vae.Input("vae"), io.Vae.Input("vae"),
@@ -357,6 +376,8 @@ class EmptyHunyuanImageLatent(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="EmptyHunyuanImageLatent", node_id="EmptyHunyuanImageLatent",
category="latent", category="latent",
description="Creates an empty latent tensor sized for Hunyuan image generation.",
short_description="Empty latent for Hunyuan image generation.",
inputs=[ inputs=[
io.Int.Input("width", default=2048, min=64, max=nodes.MAX_RESOLUTION, step=32), io.Int.Input("width", default=2048, min=64, max=nodes.MAX_RESOLUTION, step=32),
io.Int.Input("height", default=2048, min=64, max=nodes.MAX_RESOLUTION, step=32), io.Int.Input("height", default=2048, min=64, max=nodes.MAX_RESOLUTION, step=32),
@@ -380,6 +401,9 @@ class HunyuanRefinerLatent(io.ComfyNode):
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="HunyuanRefinerLatent", node_id="HunyuanRefinerLatent",
category="conditioning/video_models",
description="Prepares conditioning for a Hunyuan refiner pass by concatenating the input latent with noise augmentation settings.",
short_description="Hunyuan refiner conditioning with noise augmentation.",
inputs=[ inputs=[
io.Conditioning.Input("positive"), io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"), io.Conditioning.Input("negative"),

View File

@@ -18,6 +18,8 @@ class EmptyLatentHunyuan3Dv2(IO.ComfyNode):
return IO.Schema( return IO.Schema(
node_id="EmptyLatentHunyuan3Dv2", node_id="EmptyLatentHunyuan3Dv2",
category="latent/3d", category="latent/3d",
description="Creates an empty latent tensor for Hunyuan 3D v2 generation with configurable resolution and batch size.",
short_description="Empty latent for Hunyuan 3D v2 generation.",
inputs=[ inputs=[
IO.Int.Input("resolution", default=3072, min=1, max=8192), IO.Int.Input("resolution", default=3072, min=1, max=8192),
IO.Int.Input("batch_size", default=1, min=1, max=4096, tooltip="The number of latent images in the batch."), IO.Int.Input("batch_size", default=1, min=1, max=4096, tooltip="The number of latent images in the batch."),
@@ -41,6 +43,8 @@ class Hunyuan3Dv2Conditioning(IO.ComfyNode):
return IO.Schema( return IO.Schema(
node_id="Hunyuan3Dv2Conditioning", node_id="Hunyuan3Dv2Conditioning",
category="conditioning/video_models", category="conditioning/video_models",
description="Creates positive and negative conditioning for Hunyuan 3D v2 from a CLIP vision output embedding.",
short_description="Conditioning from CLIP vision for Hunyuan 3D v2.",
inputs=[ inputs=[
IO.ClipVisionOutput.Input("clip_vision_output"), IO.ClipVisionOutput.Input("clip_vision_output"),
], ],
@@ -66,6 +70,8 @@ class Hunyuan3Dv2ConditioningMultiView(IO.ComfyNode):
return IO.Schema( return IO.Schema(
node_id="Hunyuan3Dv2ConditioningMultiView", node_id="Hunyuan3Dv2ConditioningMultiView",
category="conditioning/video_models", category="conditioning/video_models",
description="Creates multi-view conditioning for Hunyuan 3D v2 from up to four directional CLIP vision outputs with positional encoding.",
short_description="Multi-view conditioning for Hunyuan 3D v2.",
inputs=[ inputs=[
IO.ClipVisionOutput.Input("front", optional=True), IO.ClipVisionOutput.Input("front", optional=True),
IO.ClipVisionOutput.Input("left", optional=True), IO.ClipVisionOutput.Input("left", optional=True),
@@ -103,6 +109,8 @@ class VAEDecodeHunyuan3D(IO.ComfyNode):
return IO.Schema( return IO.Schema(
node_id="VAEDecodeHunyuan3D", node_id="VAEDecodeHunyuan3D",
category="latent/3d", category="latent/3d",
description="Decodes a Hunyuan 3D latent into a voxel grid using a VAE with configurable chunk size and octree resolution.",
short_description="Decodes Hunyuan 3D latent into voxels.",
inputs=[ inputs=[
IO.Latent.Input("samples"), IO.Latent.Input("samples"),
IO.Vae.Input("vae"), IO.Vae.Input("vae"),
@@ -425,6 +433,8 @@ class VoxelToMeshBasic(IO.ComfyNode):
return IO.Schema( return IO.Schema(
node_id="VoxelToMeshBasic", node_id="VoxelToMeshBasic",
category="3d", category="3d",
description="Converts a voxel grid to a 3D mesh using basic cube-based surface extraction with adjustable threshold.",
short_description="Converts voxels to mesh using basic extraction.",
inputs=[ inputs=[
IO.Voxel.Input("voxel"), IO.Voxel.Input("voxel"),
IO.Float.Input("threshold", default=0.6, min=-1.0, max=1.0, step=0.01), IO.Float.Input("threshold", default=0.6, min=-1.0, max=1.0, step=0.01),
@@ -454,6 +464,8 @@ class VoxelToMesh(IO.ComfyNode):
return IO.Schema( return IO.Schema(
node_id="VoxelToMesh", node_id="VoxelToMesh",
category="3d", category="3d",
description="Converts a voxel grid to a 3D mesh using selectable surface net or basic algorithm with adjustable threshold.",
short_description="Converts voxels to mesh with algorithm selection.",
inputs=[ inputs=[
IO.Voxel.Input("voxel"), IO.Voxel.Input("voxel"),
IO.Combo.Input("algorithm", options=["surface net", "basic"]), IO.Combo.Input("algorithm", options=["surface net", "basic"]),
@@ -621,6 +633,8 @@ class SaveGLB(IO.ComfyNode):
display_name="Save 3D Model", display_name="Save 3D Model",
search_aliases=["export 3d model", "save mesh"], search_aliases=["export 3d model", "save mesh"],
category="3d", category="3d",
description="Saves a 3D mesh or model file to disk in GLB format with optional workflow metadata embedding.",
short_description="Saves 3D mesh or model to GLB file.",
is_output_node=True, is_output_node=True,
inputs=[ inputs=[
IO.MultiType.Input( IO.MultiType.Input(

View File

@@ -103,6 +103,8 @@ class HypernetworkLoader(IO.ComfyNode):
return IO.Schema( return IO.Schema(
node_id="HypernetworkLoader", node_id="HypernetworkLoader",
category="loaders", category="loaders",
description="Loads a hypernetwork and patches it onto a diffusion model's attention layers with adjustable strength.",
short_description="Loads and applies a hypernetwork to a model.",
inputs=[ inputs=[
IO.Model.Input("model"), IO.Model.Input("model"),
IO.Combo.Input("hypernetwork_name", options=folder_paths.get_filename_list("hypernetworks")), IO.Combo.Input("hypernetwork_name", options=folder_paths.get_filename_list("hypernetworks")),

View File

@@ -28,6 +28,8 @@ class HyperTile(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="HyperTile", node_id="HyperTile",
category="model_patches/unet", category="model_patches/unet",
description="Patches the model to split self-attention into smaller tiles during inference, reducing memory usage and speeding up generation at higher resolutions.",
short_description="Tile self-attention for faster high-res generation.",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
io.Int.Input("tile_size", default=256, min=1, max=2048), io.Int.Input("tile_size", default=256, min=1, max=2048),

View File

@@ -13,6 +13,7 @@ class ImageCompare(IO.ComfyNode):
node_id="ImageCompare", node_id="ImageCompare",
display_name="Image Compare", display_name="Image Compare",
description="Compares two images side by side with a slider.", description="Compares two images side by side with a slider.",
short_description=None,
category="image", category="image",
is_experimental=True, is_experimental=True,
is_output_node=True, is_output_node=True,

View File

@@ -25,6 +25,8 @@ class ImageCrop(IO.ComfyNode):
search_aliases=["trim"], search_aliases=["trim"],
display_name="Image Crop", display_name="Image Crop",
category="image/transform", category="image/transform",
description="Crops a rectangular region from an image at the specified position and dimensions.",
short_description="Crops a region from an image.",
inputs=[ inputs=[
IO.Image.Input("image"), IO.Image.Input("image"),
IO.Int.Input("width", default=512, min=1, max=nodes.MAX_RESOLUTION, step=1), IO.Int.Input("width", default=512, min=1, max=nodes.MAX_RESOLUTION, step=1),
@@ -54,6 +56,8 @@ class RepeatImageBatch(IO.ComfyNode):
node_id="RepeatImageBatch", node_id="RepeatImageBatch",
search_aliases=["duplicate image", "clone image"], search_aliases=["duplicate image", "clone image"],
category="image/batch", category="image/batch",
description="Repeats an image a specified number of times to create a batch of identical images.",
short_description="Repeats an image to create a batch.",
inputs=[ inputs=[
IO.Image.Input("image"), IO.Image.Input("image"),
IO.Int.Input("amount", default=1, min=1, max=4096), IO.Int.Input("amount", default=1, min=1, max=4096),
@@ -76,6 +80,8 @@ class ImageFromBatch(IO.ComfyNode):
node_id="ImageFromBatch", node_id="ImageFromBatch",
search_aliases=["select image", "pick from batch", "extract image"], search_aliases=["select image", "pick from batch", "extract image"],
category="image/batch", category="image/batch",
description="Selects a contiguous range of images from a batch starting at a given index.",
short_description="Selects images from a batch by index.",
inputs=[ inputs=[
IO.Image.Input("image"), IO.Image.Input("image"),
IO.Int.Input("batch_index", default=0, min=0, max=4095), IO.Int.Input("batch_index", default=0, min=0, max=4095),
@@ -102,6 +108,8 @@ class ImageAddNoise(IO.ComfyNode):
node_id="ImageAddNoise", node_id="ImageAddNoise",
search_aliases=["film grain"], search_aliases=["film grain"],
category="image", category="image",
description="Adds random noise to an image with adjustable strength, useful for film grain effects.",
short_description="Adds random noise to an image.",
inputs=[ inputs=[
IO.Image.Input("image"), IO.Image.Input("image"),
IO.Int.Input( IO.Int.Input(
@@ -134,6 +142,8 @@ class SaveAnimatedWEBP(IO.ComfyNode):
return IO.Schema( return IO.Schema(
node_id="SaveAnimatedWEBP", node_id="SaveAnimatedWEBP",
category="image/animation", category="image/animation",
description="Saves a sequence of images as an animated WEBP file with configurable FPS, quality, and compression.",
short_description="Saves images as an animated WEBP file.",
inputs=[ inputs=[
IO.Image.Input("images"), IO.Image.Input("images"),
IO.String.Input("filename_prefix", default="ComfyUI"), IO.String.Input("filename_prefix", default="ComfyUI"),
@@ -171,6 +181,8 @@ class SaveAnimatedPNG(IO.ComfyNode):
return IO.Schema( return IO.Schema(
node_id="SaveAnimatedPNG", node_id="SaveAnimatedPNG",
category="image/animation", category="image/animation",
description="Saves a sequence of images as an animated PNG (APNG) file with configurable FPS and compression level.",
short_description="Saves images as an animated PNG file.",
inputs=[ inputs=[
IO.Image.Input("images"), IO.Image.Input("images"),
IO.String.Input("filename_prefix", default="ComfyUI"), IO.String.Input("filename_prefix", default="ComfyUI"),
@@ -207,6 +219,7 @@ class ImageStitch(IO.ComfyNode):
description="Stitches image2 to image1 in the specified direction.\n" description="Stitches image2 to image1 in the specified direction.\n"
"If image2 is not provided, returns image1 unchanged.\n" "If image2 is not provided, returns image1 unchanged.\n"
"Optional spacing can be added between images.", "Optional spacing can be added between images.",
short_description="Joins two images together in a specified direction.",
category="image/transform", category="image/transform",
inputs=[ inputs=[
IO.Image.Input("image1"), IO.Image.Input("image1"),
@@ -379,6 +392,8 @@ class ResizeAndPadImage(IO.ComfyNode):
node_id="ResizeAndPadImage", node_id="ResizeAndPadImage",
search_aliases=["fit to size"], search_aliases=["fit to size"],
category="image/transform", category="image/transform",
description="Resizes an image to fit within target dimensions while preserving aspect ratio, then pads with a solid color to fill the target size.",
short_description="Resizes an image to fit and pads the remainder.",
inputs=[ inputs=[
IO.Image.Input("image"), IO.Image.Input("image"),
IO.Int.Input("target_width", default=512, min=1, max=nodes.MAX_RESOLUTION, step=1), IO.Int.Input("target_width", default=512, min=1, max=nodes.MAX_RESOLUTION, step=1),
@@ -430,6 +445,7 @@ class SaveSVGNode(IO.ComfyNode):
node_id="SaveSVGNode", node_id="SaveSVGNode",
search_aliases=["export vector", "save vector graphics"], search_aliases=["export vector", "save vector graphics"],
description="Save SVG files on disk.", description="Save SVG files on disk.",
short_description=None,
category="image/save", category="image/save",
inputs=[ inputs=[
IO.SVG.Input("svg"), IO.SVG.Input("svg"),
@@ -502,7 +518,7 @@ class GetImageSize(IO.ComfyNode):
node_id="GetImageSize", node_id="GetImageSize",
search_aliases=["dimensions", "resolution", "image info"], search_aliases=["dimensions", "resolution", "image info"],
display_name="Get Image Size", display_name="Get Image Size",
description="Returns width and height of the image, and passes it through unchanged.", description="Returns the width, height, and batch size of an image.",
category="image", category="image",
inputs=[ inputs=[
IO.Image.Input("image"), IO.Image.Input("image"),
@@ -537,6 +553,8 @@ class ImageRotate(IO.ComfyNode):
node_id="ImageRotate", node_id="ImageRotate",
search_aliases=["turn", "flip orientation"], search_aliases=["turn", "flip orientation"],
category="image/transform", category="image/transform",
description="Rotates an image by 90, 180, or 270 degrees.",
short_description=None,
inputs=[ inputs=[
IO.Image.Input("image"), IO.Image.Input("image"),
IO.Combo.Input("rotation", options=["none", "90 degrees", "180 degrees", "270 degrees"]), IO.Combo.Input("rotation", options=["none", "90 degrees", "180 degrees", "270 degrees"]),
@@ -567,6 +585,8 @@ class ImageFlip(IO.ComfyNode):
node_id="ImageFlip", node_id="ImageFlip",
search_aliases=["mirror", "reflect"], search_aliases=["mirror", "reflect"],
category="image/transform", category="image/transform",
description="Flips an image horizontally or vertically.",
short_description=None,
inputs=[ inputs=[
IO.Image.Input("image"), IO.Image.Input("image"),
IO.Combo.Input("flip_method", options=["x-axis: vertically", "y-axis: horizontally"]), IO.Combo.Input("flip_method", options=["x-axis: vertically", "y-axis: horizontally"]),
@@ -593,6 +613,8 @@ class ImageScaleToMaxDimension(IO.ComfyNode):
return IO.Schema( return IO.Schema(
node_id="ImageScaleToMaxDimension", node_id="ImageScaleToMaxDimension",
category="image/upscaling", category="image/upscaling",
description="Scales an image so its largest dimension matches the specified size while preserving aspect ratio.",
short_description="Scales image to a target max dimension size.",
inputs=[ inputs=[
IO.Image.Input("image"), IO.Image.Input("image"),
IO.Combo.Input( IO.Combo.Input(

View File

@@ -10,6 +10,8 @@ class InstructPixToPixConditioning(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="InstructPixToPixConditioning", node_id="InstructPixToPixConditioning",
category="conditioning/instructpix2pix", category="conditioning/instructpix2pix",
description="Prepares conditioning for InstructPix2Pix image editing by encoding the input image through a VAE and attaching it as concat latent to both positive and negative conditioning.",
short_description="Prepare conditioning for InstructPix2Pix editing.",
inputs=[ inputs=[
io.Conditioning.Input("positive"), io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"), io.Conditioning.Input("negative"),

View File

@@ -14,6 +14,8 @@ class Kandinsky5ImageToVideo(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="Kandinsky5ImageToVideo", node_id="Kandinsky5ImageToVideo",
category="conditioning/video_models", category="conditioning/video_models",
description="Sets up Kandinsky 5 image-to-video generation by creating an empty video latent and optionally encoding a start image for conditioning.",
short_description="Sets up Kandinsky 5 image-to-video conditioning.",
inputs=[ inputs=[
io.Conditioning.Input("positive"), io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"), io.Conditioning.Input("negative"),
@@ -73,6 +75,7 @@ class NormalizeVideoLatentStart(io.ComfyNode):
node_id="NormalizeVideoLatentStart", node_id="NormalizeVideoLatentStart",
category="conditioning/video_models", category="conditioning/video_models",
description="Normalizes the initial frames of a video latent to match the mean and standard deviation of subsequent reference frames. Helps reduce differences between the starting frames and the rest of the video.", description="Normalizes the initial frames of a video latent to match the mean and standard deviation of subsequent reference frames. Helps reduce differences between the starting frames and the rest of the video.",
short_description="Normalizes initial video latent frames to match reference frames.",
inputs=[ inputs=[
io.Latent.Input("latent"), io.Latent.Input("latent"),
io.Int.Input("start_frame_count", default=4, min=1, max=nodes.MAX_RESOLUTION, step=1, tooltip="Number of latent frames to normalize, counted from the start"), io.Int.Input("start_frame_count", default=4, min=1, max=nodes.MAX_RESOLUTION, step=1, tooltip="Number of latent frames to normalize, counted from the start"),
@@ -106,6 +109,8 @@ class CLIPTextEncodeKandinsky5(io.ComfyNode):
node_id="CLIPTextEncodeKandinsky5", node_id="CLIPTextEncodeKandinsky5",
search_aliases=["kandinsky prompt"], search_aliases=["kandinsky prompt"],
category="advanced/conditioning/kandinsky5", category="advanced/conditioning/kandinsky5",
description="Encodes separate CLIP-L and Qwen 2.5 7B text prompts into Kandinsky 5 conditioning.",
short_description="Encodes CLIP-L and Qwen prompts for Kandinsky 5.",
inputs=[ inputs=[
io.Clip.Input("clip"), io.Clip.Input("clip"),
io.String.Input("clip_l", multiline=True, dynamic_prompts=True), io.String.Input("clip_l", multiline=True, dynamic_prompts=True),

View File

@@ -23,6 +23,8 @@ class LatentAdd(io.ComfyNode):
node_id="LatentAdd", node_id="LatentAdd",
search_aliases=["combine latents", "sum latents"], search_aliases=["combine latents", "sum latents"],
category="latent/advanced", category="latent/advanced",
description="Adds two latent tensors element-wise, automatically resizing the second to match the first.",
short_description="Add two latent tensors element-wise.",
inputs=[ inputs=[
io.Latent.Input("samples1"), io.Latent.Input("samples1"),
io.Latent.Input("samples2"), io.Latent.Input("samples2"),
@@ -50,6 +52,8 @@ class LatentSubtract(io.ComfyNode):
node_id="LatentSubtract", node_id="LatentSubtract",
search_aliases=["difference latent", "remove features"], search_aliases=["difference latent", "remove features"],
category="latent/advanced", category="latent/advanced",
description="Subtracts one latent tensor from another element-wise, automatically resizing the second to match the first.",
short_description="Subtract one latent tensor from another.",
inputs=[ inputs=[
io.Latent.Input("samples1"), io.Latent.Input("samples1"),
io.Latent.Input("samples2"), io.Latent.Input("samples2"),
@@ -77,6 +81,8 @@ class LatentMultiply(io.ComfyNode):
node_id="LatentMultiply", node_id="LatentMultiply",
search_aliases=["scale latent", "amplify latent", "latent gain"], search_aliases=["scale latent", "amplify latent", "latent gain"],
category="latent/advanced", category="latent/advanced",
description="Multiplies a latent tensor by a scalar value to scale its magnitude up or down.",
short_description="Scale a latent tensor by a multiplier.",
inputs=[ inputs=[
io.Latent.Input("samples"), io.Latent.Input("samples"),
io.Float.Input("multiplier", default=1.0, min=-10.0, max=10.0, step=0.01), io.Float.Input("multiplier", default=1.0, min=-10.0, max=10.0, step=0.01),
@@ -101,6 +107,8 @@ class LatentInterpolate(io.ComfyNode):
node_id="LatentInterpolate", node_id="LatentInterpolate",
search_aliases=["blend latent", "mix latent", "lerp latent", "transition"], search_aliases=["blend latent", "mix latent", "lerp latent", "transition"],
category="latent/advanced", category="latent/advanced",
description="Interpolates between two latent tensors using a ratio, preserving magnitude for smoother blending than linear interpolation.",
short_description="Interpolate between two latent tensors.",
inputs=[ inputs=[
io.Latent.Input("samples1"), io.Latent.Input("samples1"),
io.Latent.Input("samples2"), io.Latent.Input("samples2"),
@@ -140,6 +148,8 @@ class LatentConcat(io.ComfyNode):
node_id="LatentConcat", node_id="LatentConcat",
search_aliases=["join latents", "stitch latents"], search_aliases=["join latents", "stitch latents"],
category="latent/advanced", category="latent/advanced",
description="Concatenates two latent tensors along a chosen spatial or temporal dimension (x, y, or t) with optional reversal.",
short_description="Concatenate two latents along a chosen dimension.",
inputs=[ inputs=[
io.Latent.Input("samples1"), io.Latent.Input("samples1"),
io.Latent.Input("samples2"), io.Latent.Input("samples2"),
@@ -180,6 +190,8 @@ class LatentCut(io.ComfyNode):
node_id="LatentCut", node_id="LatentCut",
search_aliases=["crop latent", "slice latent", "extract region"], search_aliases=["crop latent", "slice latent", "extract region"],
category="latent/advanced", category="latent/advanced",
description="Extracts a contiguous slice from a latent tensor along a chosen spatial or temporal dimension at a specified index and size.",
short_description="Extract a slice from a latent along a dimension.",
inputs=[ inputs=[
io.Latent.Input("samples"), io.Latent.Input("samples"),
io.Combo.Input("dim", options=["x", "y", "t"]), io.Combo.Input("dim", options=["x", "y", "t"]),
@@ -221,6 +233,8 @@ class LatentCutToBatch(io.ComfyNode):
node_id="LatentCutToBatch", node_id="LatentCutToBatch",
search_aliases=["slice to batch", "split latent", "tile latent"], search_aliases=["slice to batch", "split latent", "tile latent"],
category="latent/advanced", category="latent/advanced",
description="Slices a latent tensor along a chosen dimension into equal-sized chunks and reshapes them into the batch dimension.",
short_description="Slice latent along a dimension into batch chunks.",
inputs=[ inputs=[
io.Latent.Input("samples"), io.Latent.Input("samples"),
io.Combo.Input("dim", options=["t", "x", "y"]), io.Combo.Input("dim", options=["t", "x", "y"]),
@@ -263,6 +277,8 @@ class LatentBatch(io.ComfyNode):
node_id="LatentBatch", node_id="LatentBatch",
search_aliases=["combine latents", "merge latents", "join latents"], search_aliases=["combine latents", "merge latents", "join latents"],
category="latent/batch", category="latent/batch",
description="Concatenates two latent tensors along the batch dimension, preserving batch index metadata.",
short_description="Concatenate two latents along the batch dimension.",
is_deprecated=True, is_deprecated=True,
inputs=[ inputs=[
io.Latent.Input("samples1"), io.Latent.Input("samples1"),
@@ -291,6 +307,8 @@ class LatentBatchSeedBehavior(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="LatentBatchSeedBehavior", node_id="LatentBatchSeedBehavior",
category="latent/advanced", category="latent/advanced",
description="Controls whether each item in a latent batch receives a random or fixed noise seed during sampling.",
short_description="Set random or fixed seed behavior for batches.",
inputs=[ inputs=[
io.Latent.Input("samples"), io.Latent.Input("samples"),
io.Combo.Input("seed_behavior", options=["random", "fixed"], default="fixed"), io.Combo.Input("seed_behavior", options=["random", "fixed"], default="fixed"),
@@ -320,6 +338,8 @@ class LatentApplyOperation(io.ComfyNode):
node_id="LatentApplyOperation", node_id="LatentApplyOperation",
search_aliases=["transform latent"], search_aliases=["transform latent"],
category="latent/advanced/operations", category="latent/advanced/operations",
description="Applies a latent operation (such as tonemap or sharpen) directly to a latent tensor.",
short_description="Apply a latent operation to a latent tensor.",
is_experimental=True, is_experimental=True,
inputs=[ inputs=[
io.Latent.Input("samples"), io.Latent.Input("samples"),
@@ -344,6 +364,8 @@ class LatentApplyOperationCFG(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="LatentApplyOperationCFG", node_id="LatentApplyOperationCFG",
category="latent/advanced/operations", category="latent/advanced/operations",
description="Applies a latent operation during the CFG pre-processing stage of sampling, modifying the model's prediction before guidance is applied.",
short_description="Apply a latent operation during CFG pre-processing.",
is_experimental=True, is_experimental=True,
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
@@ -376,6 +398,8 @@ class LatentOperationTonemapReinhard(io.ComfyNode):
node_id="LatentOperationTonemapReinhard", node_id="LatentOperationTonemapReinhard",
search_aliases=["hdr latent"], search_aliases=["hdr latent"],
category="latent/advanced/operations", category="latent/advanced/operations",
description="Creates a Reinhard tonemapping operation that compresses high-magnitude latent values to reduce blown-out artifacts.",
short_description="Create a Reinhard tonemapping latent operation.",
is_experimental=True, is_experimental=True,
inputs=[ inputs=[
io.Float.Input("multiplier", default=1.0, min=0.0, max=100.0, step=0.01), io.Float.Input("multiplier", default=1.0, min=0.0, max=100.0, step=0.01),
@@ -411,6 +435,8 @@ class LatentOperationSharpen(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="LatentOperationSharpen", node_id="LatentOperationSharpen",
category="latent/advanced/operations", category="latent/advanced/operations",
description="Creates a sharpening operation that enhances detail in latent space using a Gaussian-based unsharp mask with configurable radius, sigma, and strength.",
short_description="Create a Gaussian-based latent sharpening operation.",
is_experimental=True, is_experimental=True,
inputs=[ inputs=[
io.Int.Input("sharpen_radius", default=9, min=1, max=31, step=1), io.Int.Input("sharpen_radius", default=9, min=1, max=31, step=1),
@@ -448,6 +474,8 @@ class ReplaceVideoLatentFrames(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="ReplaceVideoLatentFrames", node_id="ReplaceVideoLatentFrames",
category="latent/batch", category="latent/batch",
description="Replaces a range of frames in a destination video latent with frames from a source latent at a specified index.",
short_description="Replace video latent frames at a given index.",
inputs=[ inputs=[
io.Latent.Input("destination", tooltip="The destination latent where frames will be replaced."), io.Latent.Input("destination", tooltip="The destination latent where frames will be replaced."),
io.Latent.Input("source", optional=True, tooltip="The source latent providing frames to insert into the destination latent. If not provided, the destination latent is returned unchanged."), io.Latent.Input("source", optional=True, tooltip="The source latent providing frames to insert into the destination latent. If not provided, the destination latent is returned unchanged."),

View File

@@ -31,6 +31,8 @@ class Load3D(IO.ComfyNode):
node_id="Load3D", node_id="Load3D",
display_name="Load 3D & Animation", display_name="Load 3D & Animation",
category="3d", category="3d",
description="Loads a 3D model file and renders it to produce an image, mask, normal map, camera info, recording video, and 3D file output.",
short_description="Loads and renders a 3D model file.",
is_experimental=True, is_experimental=True,
inputs=[ inputs=[
IO.Combo.Input("model_file", options=sorted(files), upload=IO.UploadType.model), IO.Combo.Input("model_file", options=sorted(files), upload=IO.UploadType.model),
@@ -81,6 +83,8 @@ class Preview3D(IO.ComfyNode):
search_aliases=["view mesh", "3d viewer"], search_aliases=["view mesh", "3d viewer"],
display_name="Preview 3D & Animation", display_name="Preview 3D & Animation",
category="3d", category="3d",
description="Previews a 3D model or file in the UI with optional camera info and background image overlay.",
short_description="Previews a 3D model in the UI.",
is_experimental=True, is_experimental=True,
is_output_node=True, is_output_node=True,
inputs=[ inputs=[

View File

@@ -16,6 +16,8 @@ class SwitchNode(io.ComfyNode):
node_id="ComfySwitchNode", node_id="ComfySwitchNode",
display_name="Switch", display_name="Switch",
category="logic", category="logic",
description="Routes one of two inputs to the output based on a boolean switch value, evaluating only the selected branch lazily.",
short_description="Route one of two inputs based on a boolean.",
is_experimental=True, is_experimental=True,
inputs=[ inputs=[
io.Boolean.Input("switch"), io.Boolean.Input("switch"),
@@ -47,6 +49,8 @@ class SoftSwitchNode(io.ComfyNode):
node_id="ComfySoftSwitchNode", node_id="ComfySoftSwitchNode",
display_name="Soft Switch", display_name="Soft Switch",
category="logic", category="logic",
description="Routes one of two optional inputs to the output based on a boolean, falling back to whichever input is connected if only one is provided.",
short_description="Switch with optional fallback to connected input.",
is_experimental=True, is_experimental=True,
inputs=[ inputs=[
io.Boolean.Input("switch"), io.Boolean.Input("switch"),
@@ -102,6 +106,8 @@ class CustomComboNode(io.ComfyNode):
node_id="CustomCombo", node_id="CustomCombo",
display_name="Custom Combo", display_name="Custom Combo",
category="utils", category="utils",
description="Provides a user-defined dropdown combo box where options are written by the user, outputting the selected string and its index.",
short_description="User-defined dropdown outputting string and index.",
is_experimental=True, is_experimental=True,
inputs=[io.Combo.Input("choice", options=[])], inputs=[io.Combo.Input("choice", options=[])],
outputs=[ outputs=[
@@ -137,6 +143,8 @@ class DCTestNode(io.ComfyNode):
node_id="DCTestNode", node_id="DCTestNode",
display_name="DCTest", display_name="DCTest",
category="logic", category="logic",
description="Test node demonstrating DynamicCombo inputs with nested sub-options that conditionally show different input types.",
short_description="Test node for DynamicCombo nested inputs.",
is_output_node=True, is_output_node=True,
inputs=[io.DynamicCombo.Input("combo", options=[ inputs=[io.DynamicCombo.Input("combo", options=[
io.DynamicCombo.Option("option1", [io.String.Input("string")]), io.DynamicCombo.Option("option1", [io.String.Input("string")]),
@@ -175,6 +183,8 @@ class AutogrowNamesTestNode(io.ComfyNode):
node_id="AutogrowNamesTestNode", node_id="AutogrowNamesTestNode",
display_name="AutogrowNamesTest", display_name="AutogrowNamesTest",
category="logic", category="logic",
description="Test node demonstrating Autogrow inputs with named template slots that dynamically add float inputs.",
short_description="Test node for Autogrow named template inputs.",
inputs=[ inputs=[
_io.Autogrow.Input("autogrow", template=template) _io.Autogrow.Input("autogrow", template=template)
], ],
@@ -195,6 +205,8 @@ class AutogrowPrefixTestNode(io.ComfyNode):
node_id="AutogrowPrefixTestNode", node_id="AutogrowPrefixTestNode",
display_name="AutogrowPrefixTest", display_name="AutogrowPrefixTest",
category="logic", category="logic",
description="Test node demonstrating Autogrow inputs with prefix-based template slots that dynamically add numbered float inputs.",
short_description="Test node for Autogrow prefix template inputs.",
inputs=[ inputs=[
_io.Autogrow.Input("autogrow", template=template) _io.Autogrow.Input("autogrow", template=template)
], ],
@@ -214,6 +226,8 @@ class ComboOutputTestNode(io.ComfyNode):
node_id="ComboOptionTestNode", node_id="ComboOptionTestNode",
display_name="ComboOptionTest", display_name="ComboOptionTest",
category="logic", category="logic",
description="Test node demonstrating combo output types by passing two selected combo values through as outputs.",
short_description="Test node for combo output passthrough.",
inputs=[io.Combo.Input("combo", options=["option1", "option2", "option3"]), inputs=[io.Combo.Input("combo", options=["option1", "option2", "option3"]),
io.Combo.Input("combo2", options=["option4", "option5", "option6"])], io.Combo.Input("combo2", options=["option4", "option5", "option6"])],
outputs=[io.Combo.Output(), io.Combo.Output()], outputs=[io.Combo.Output(), io.Combo.Output()],
@@ -231,6 +245,8 @@ class ConvertStringToComboNode(io.ComfyNode):
search_aliases=["string to dropdown", "text to combo"], search_aliases=["string to dropdown", "text to combo"],
display_name="Convert String to Combo", display_name="Convert String to Combo",
category="logic", category="logic",
description="Converts a string value into a combo type output so it can be used as a dropdown selection in downstream nodes.",
short_description="Convert a string to a combo type output.",
inputs=[io.String.Input("string")], inputs=[io.String.Input("string")],
outputs=[io.Combo.Output()], outputs=[io.Combo.Output()],
) )
@@ -247,6 +263,8 @@ class InvertBooleanNode(io.ComfyNode):
search_aliases=["not", "toggle", "negate", "flip boolean"], search_aliases=["not", "toggle", "negate", "flip boolean"],
display_name="Invert Boolean", display_name="Invert Boolean",
category="logic", category="logic",
description="Inverts a boolean value, outputting true when input is false and vice versa.",
short_description="Invert a boolean value.",
inputs=[io.Boolean.Input("boolean")], inputs=[io.Boolean.Input("boolean")],
outputs=[io.Boolean.Output()], outputs=[io.Boolean.Output()],
) )

View File

@@ -32,6 +32,7 @@ class LoraLoaderBypass:
CATEGORY = "loaders" CATEGORY = "loaders"
DESCRIPTION = "Apply LoRA in bypass mode. Unlike regular LoRA, this doesn't modify model weights - instead it injects the LoRA computation during forward pass. Useful for training scenarios." DESCRIPTION = "Apply LoRA in bypass mode. Unlike regular LoRA, this doesn't modify model weights - instead it injects the LoRA computation during forward pass. Useful for training scenarios."
SHORT_DESCRIPTION = "Applies LoRA via forward pass injection, not weight modification."
EXPERIMENTAL = True EXPERIMENTAL = True
def load_lora(self, model, clip, lora_name, strength_model, strength_clip): def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
@@ -62,6 +63,8 @@ class LoraLoaderBypassModelOnly(LoraLoaderBypass):
"strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}), "strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}),
}} }}
RETURN_TYPES = ("MODEL",) RETURN_TYPES = ("MODEL",)
DESCRIPTION = "Apply LoRA in bypass mode to only the diffusion model without modifying base weights or affecting CLIP."
SHORT_DESCRIPTION = "Apply bypass LoRA to model only, no CLIP."
FUNCTION = "load_lora_model_only" FUNCTION = "load_lora_model_only"
def load_lora_model_only(self, model, lora_name, strength_model): def load_lora_model_only(self, model, lora_name, strength_model):

View File

@@ -92,6 +92,8 @@ class LoraSave(io.ComfyNode):
search_aliases=["export lora"], search_aliases=["export lora"],
display_name="Extract and Save Lora", display_name="Extract and Save Lora",
category="_for_testing", category="_for_testing",
description="Extracts LoRA weights from a model or text encoder diff using SVD decomposition and saves them as a safetensors file, supporting standard and full diff modes.",
short_description="Extract and save LoRA from model diff.",
inputs=[ inputs=[
io.String.Input("filename_prefix", default="loras/ComfyUI_extracted_lora"), io.String.Input("filename_prefix", default="loras/ComfyUI_extracted_lora"),
io.Int.Input("rank", default=8, min=1, max=4096, step=1), io.Int.Input("rank", default=8, min=1, max=4096, step=1),

View File

@@ -11,6 +11,8 @@ class LotusConditioning(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="LotusConditioning", node_id="LotusConditioning",
category="conditioning/lotus", category="conditioning/lotus",
description="Provides precomputed null conditioning embeddings for the Lotus depth/normal estimation model, avoiding the need for a separate text encoder.",
short_description="Precomputed null conditioning for Lotus model.",
inputs=[], inputs=[],
outputs=[io.Conditioning.Output(display_name="conditioning")], outputs=[io.Conditioning.Output(display_name="conditioning")],
) )

View File

@@ -18,6 +18,8 @@ class EmptyLTXVLatentVideo(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="EmptyLTXVLatentVideo", node_id="EmptyLTXVLatentVideo",
category="latent/video/ltxv", category="latent/video/ltxv",
description="Creates an empty LTXV video latent tensor with the specified dimensions and batch size.",
short_description="Creates an empty LTXV video latent tensor.",
inputs=[ inputs=[
io.Int.Input("width", default=768, min=64, max=nodes.MAX_RESOLUTION, step=32), io.Int.Input("width", default=768, min=64, max=nodes.MAX_RESOLUTION, step=32),
io.Int.Input("height", default=512, min=64, max=nodes.MAX_RESOLUTION, step=32), io.Int.Input("height", default=512, min=64, max=nodes.MAX_RESOLUTION, step=32),
@@ -42,6 +44,8 @@ class LTXVImgToVideo(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="LTXVImgToVideo", node_id="LTXVImgToVideo",
category="conditioning/video_models", category="conditioning/video_models",
description="Encodes an image through a VAE and sets up conditioning for LTXV image-to-video generation with adjustable strength.",
short_description="Sets up LTXV image-to-video conditioning.",
inputs=[ inputs=[
io.Conditioning.Input("positive"), io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"), io.Conditioning.Input("negative"),
@@ -87,6 +91,8 @@ class LTXVImgToVideoInplace(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="LTXVImgToVideoInplace", node_id="LTXVImgToVideoInplace",
category="conditioning/video_models", category="conditioning/video_models",
description="Encodes an image through a VAE and injects it into an existing latent for in-place LTXV image-to-video conditioning.",
short_description="In-place LTXV image-to-video latent conditioning.",
inputs=[ inputs=[
io.Vae.Input("vae"), io.Vae.Input("vae"),
io.Image.Input("image"), io.Image.Input("image"),
@@ -171,6 +177,8 @@ class LTXVAddGuide(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="LTXVAddGuide", node_id="LTXVAddGuide",
category="conditioning/video_models", category="conditioning/video_models",
description="Adds a guiding image or video to LTXV conditioning at a specified frame index to control video generation.",
short_description="Adds a guiding image or video to LTXV conditioning.",
inputs=[ inputs=[
io.Conditioning.Input("positive"), io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"), io.Conditioning.Input("negative"),
@@ -335,6 +343,8 @@ class LTXVCropGuides(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="LTXVCropGuides", node_id="LTXVCropGuides",
category="conditioning/video_models", category="conditioning/video_models",
description="Removes appended keyframe guide latents from an LTXV latent and resets keyframe indices in the conditioning.",
short_description="Removes keyframe guide latents from LTXV conditioning.",
inputs=[ inputs=[
io.Conditioning.Input("positive"), io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"), io.Conditioning.Input("negative"),
@@ -373,6 +383,8 @@ class LTXVConditioning(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="LTXVConditioning", node_id="LTXVConditioning",
category="conditioning/video_models", category="conditioning/video_models",
description="Sets the frame rate on LTXV positive and negative conditioning for video generation.",
short_description="Sets frame rate on LTXV conditioning.",
inputs=[ inputs=[
io.Conditioning.Input("positive"), io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"), io.Conditioning.Input("negative"),
@@ -397,6 +409,8 @@ class ModelSamplingLTXV(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="ModelSamplingLTXV", node_id="ModelSamplingLTXV",
category="advanced/model", category="advanced/model",
description="Configures LTXV model sampling by computing a shift parameter from max_shift, base_shift, and latent token count.",
short_description="Configures LTXV model sampling shift parameters.",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
io.Float.Input("max_shift", default=2.05, min=0.0, max=100.0, step=0.01), io.Float.Input("max_shift", default=2.05, min=0.0, max=100.0, step=0.01),
@@ -442,6 +456,8 @@ class LTXVScheduler(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="LTXVScheduler", node_id="LTXVScheduler",
category="sampling/custom_sampling/schedulers", category="sampling/custom_sampling/schedulers",
description="Generates a sigma schedule for LTXV sampling with configurable shift parameters, stretch, and terminal value.",
short_description="Generates a sigma schedule for LTXV sampling.",
inputs=[ inputs=[
io.Int.Input("steps", default=20, min=1, max=10000), io.Int.Input("steps", default=20, min=1, max=10000),
io.Float.Input("max_shift", default=2.05, min=0.0, max=100.0, step=0.01), io.Float.Input("max_shift", default=2.05, min=0.0, max=100.0, step=0.01),
@@ -546,6 +562,8 @@ class LTXVPreprocess(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="LTXVPreprocess", node_id="LTXVPreprocess",
category="image", category="image",
description="Applies H.264 video compression preprocessing to images to improve LTXV generation quality.",
short_description="Applies video compression preprocessing for LTXV.",
inputs=[ inputs=[
io.Image.Input("image"), io.Image.Input("image"),
io.Int.Input( io.Int.Input(
@@ -574,6 +592,8 @@ class LTXVConcatAVLatent(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="LTXVConcatAVLatent", node_id="LTXVConcatAVLatent",
category="latent/video/ltxv", category="latent/video/ltxv",
description="Concatenates separate video and audio latents into a combined audio-video latent for LTXV processing.",
short_description="Concatenates video and audio latents for LTXV.",
inputs=[ inputs=[
io.Latent.Input("video_latent"), io.Latent.Input("video_latent"),
io.Latent.Input("audio_latent"), io.Latent.Input("audio_latent"),
@@ -609,7 +629,8 @@ class LTXVSeparateAVLatent(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="LTXVSeparateAVLatent", node_id="LTXVSeparateAVLatent",
category="latent/video/ltxv", category="latent/video/ltxv",
description="LTXV Separate AV Latent", description="Separates a combined audio-video latent into individual video and audio latents.",
short_description=None,
inputs=[ inputs=[
io.Latent.Input("av_latent"), io.Latent.Input("av_latent"),
], ],

View File

@@ -14,6 +14,8 @@ class LTXVAudioVAELoader(io.ComfyNode):
node_id="LTXVAudioVAELoader", node_id="LTXVAudioVAELoader",
display_name="LTXV Audio VAE Loader", display_name="LTXV Audio VAE Loader",
category="audio", category="audio",
description="Loads an LTXV Audio VAE model from a checkpoint file for audio encoding and decoding.",
short_description="Loads an LTXV Audio VAE model checkpoint.",
inputs=[ inputs=[
io.Combo.Input( io.Combo.Input(
"ckpt_name", "ckpt_name",
@@ -38,6 +40,7 @@ class LTXVAudioVAEEncode(io.ComfyNode):
node_id="LTXVAudioVAEEncode", node_id="LTXVAudioVAEEncode",
display_name="LTXV Audio VAE Encode", display_name="LTXV Audio VAE Encode",
category="audio", category="audio",
description="Encodes audio into latent representations using the LTXV Audio VAE model.",
inputs=[ inputs=[
io.Audio.Input("audio", tooltip="The audio to be encoded."), io.Audio.Input("audio", tooltip="The audio to be encoded."),
io.Vae.Input( io.Vae.Input(
@@ -68,6 +71,8 @@ class LTXVAudioVAEDecode(io.ComfyNode):
node_id="LTXVAudioVAEDecode", node_id="LTXVAudioVAEDecode",
display_name="LTXV Audio VAE Decode", display_name="LTXV Audio VAE Decode",
category="audio", category="audio",
description="Decodes latent representations back into audio using the LTXV Audio VAE model.",
short_description="Decodes latents back to audio via LTXV Audio VAE.",
inputs=[ inputs=[
io.Latent.Input("samples", tooltip="The latent to be decoded."), io.Latent.Input("samples", tooltip="The latent to be decoded."),
io.Vae.Input( io.Vae.Input(
@@ -101,6 +106,8 @@ class LTXVEmptyLatentAudio(io.ComfyNode):
node_id="LTXVEmptyLatentAudio", node_id="LTXVEmptyLatentAudio",
display_name="LTXV Empty Latent Audio", display_name="LTXV Empty Latent Audio",
category="latent/audio", category="latent/audio",
description="Creates an empty LTXV audio latent tensor sized according to the frame count, frame rate, and Audio VAE configuration.",
short_description="Creates an empty LTXV audio latent tensor.",
inputs=[ inputs=[
io.Int.Input( io.Int.Input(
"frames_number", "frames_number",
@@ -177,6 +184,7 @@ class LTXAVTextEncoderLoader(io.ComfyNode):
display_name="LTXV Audio Text Encoder Loader", display_name="LTXV Audio Text Encoder Loader",
category="advanced/loaders", category="advanced/loaders",
description="[Recipes]\n\nltxav: gemma 3 12B", description="[Recipes]\n\nltxav: gemma 3 12B",
short_description=None,
inputs=[ inputs=[
io.Combo.Input( io.Combo.Input(
"text_encoder", "text_encoder",

View File

@@ -19,6 +19,8 @@ class LTXVLatentUpsampler:
RETURN_TYPES = ("LATENT",) RETURN_TYPES = ("LATENT",)
FUNCTION = "upsample_latent" FUNCTION = "upsample_latent"
CATEGORY = "latent/video" CATEGORY = "latent/video"
DESCRIPTION = "Upsample an LTXV video latent by a factor of 2 using a dedicated latent upscale model."
SHORT_DESCRIPTION = "Upsample an LTXV video latent by 2x."
EXPERIMENTAL = True EXPERIMENTAL = True
def upsample_latent( def upsample_latent(

View File

@@ -10,6 +10,8 @@ class RenormCFG(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="RenormCFG", node_id="RenormCFG",
category="advanced/model", category="advanced/model",
description="Applies renormalized classifier-free guidance with configurable truncation threshold and renormalization strength to control CFG output magnitude.",
short_description="Applies renormalized classifier-free guidance with truncation.",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
io.Float.Input("cfg_trunc", default=100, min=0.0, max=100.0, step=0.01), io.Float.Input("cfg_trunc", default=100, min=0.0, max=100.0, step=0.01),
@@ -84,6 +86,7 @@ class CLIPTextEncodeLumina2(io.ComfyNode):
category="conditioning", category="conditioning",
description="Encodes a system prompt and a user prompt using a CLIP model into an embedding " description="Encodes a system prompt and a user prompt using a CLIP model into an embedding "
"that can be used to guide the diffusion model towards generating specific images.", "that can be used to guide the diffusion model towards generating specific images.",
short_description="Encodes system and user prompts via CLIP for Lumina2.",
inputs=[ inputs=[
io.Combo.Input( io.Combo.Input(
"system_prompt", "system_prompt",

View File

@@ -13,6 +13,7 @@ class Mahiro(io.ComfyNode):
display_name="Mahiro CFG", display_name="Mahiro CFG",
category="_for_testing", category="_for_testing",
description="Modify the guidance to scale more on the 'direction' of the positive prompt rather than the difference between the negative prompt.", description="Modify the guidance to scale more on the 'direction' of the positive prompt rather than the difference between the negative prompt.",
short_description="Scales guidance toward positive prompt direction over negative difference.",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
], ],

View File

@@ -52,6 +52,8 @@ class LatentCompositeMasked(IO.ComfyNode):
node_id="LatentCompositeMasked", node_id="LatentCompositeMasked",
search_aliases=["overlay latent", "layer latent", "paste latent", "inpaint latent"], search_aliases=["overlay latent", "layer latent", "paste latent", "inpaint latent"],
category="latent", category="latent",
description="Composites a source latent onto a destination latent at a specified position with optional mask and resize support.",
short_description="Composites one latent onto another with masking.",
inputs=[ inputs=[
IO.Latent.Input("destination"), IO.Latent.Input("destination"),
IO.Latent.Input("source"), IO.Latent.Input("source"),
@@ -81,6 +83,8 @@ class ImageCompositeMasked(IO.ComfyNode):
node_id="ImageCompositeMasked", node_id="ImageCompositeMasked",
search_aliases=["paste image", "overlay", "layer"], search_aliases=["paste image", "overlay", "layer"],
category="image", category="image",
description="Composites a source image onto a destination image at a specified position with optional mask and resize support.",
short_description="Composites one image onto another with masking.",
inputs=[ inputs=[
IO.Image.Input("destination"), IO.Image.Input("destination"),
IO.Image.Input("source"), IO.Image.Input("source"),
@@ -110,6 +114,8 @@ class MaskToImage(IO.ComfyNode):
search_aliases=["convert mask"], search_aliases=["convert mask"],
display_name="Convert Mask to Image", display_name="Convert Mask to Image",
category="mask", category="mask",
description="Converts a single-channel mask into a three-channel grayscale image.",
short_description=None,
inputs=[ inputs=[
IO.Mask.Input("mask"), IO.Mask.Input("mask"),
], ],
@@ -132,6 +138,7 @@ class ImageToMask(IO.ComfyNode):
search_aliases=["extract channel", "channel to mask"], search_aliases=["extract channel", "channel to mask"],
display_name="Convert Image to Mask", display_name="Convert Image to Mask",
category="mask", category="mask",
description="Extracts a selected color channel from an image as a mask.",
inputs=[ inputs=[
IO.Image.Input("image"), IO.Image.Input("image"),
IO.Combo.Input("channel", options=["red", "green", "blue", "alpha"]), IO.Combo.Input("channel", options=["red", "green", "blue", "alpha"]),
@@ -155,6 +162,8 @@ class ImageColorToMask(IO.ComfyNode):
node_id="ImageColorToMask", node_id="ImageColorToMask",
search_aliases=["color keying", "chroma key"], search_aliases=["color keying", "chroma key"],
category="mask", category="mask",
description="Creates a mask from an image where pixels matching a specified RGB color value become white.",
short_description="Creates a mask from pixels matching a color.",
inputs=[ inputs=[
IO.Image.Input("image"), IO.Image.Input("image"),
IO.Int.Input("color", default=0, min=0, max=0xFFFFFF, step=1, display_mode=IO.NumberDisplay.number), IO.Int.Input("color", default=0, min=0, max=0xFFFFFF, step=1, display_mode=IO.NumberDisplay.number),
@@ -178,6 +187,8 @@ class SolidMask(IO.ComfyNode):
return IO.Schema( return IO.Schema(
node_id="SolidMask", node_id="SolidMask",
category="mask", category="mask",
description="Creates a uniform solid mask filled with a single value at the specified dimensions.",
short_description="Creates a solid mask with a uniform value.",
inputs=[ inputs=[
IO.Float.Input("value", default=1.0, min=0.0, max=1.0, step=0.01), IO.Float.Input("value", default=1.0, min=0.0, max=1.0, step=0.01),
IO.Int.Input("width", default=512, min=1, max=nodes.MAX_RESOLUTION, step=1), IO.Int.Input("width", default=512, min=1, max=nodes.MAX_RESOLUTION, step=1),
@@ -201,6 +212,8 @@ class InvertMask(IO.ComfyNode):
node_id="InvertMask", node_id="InvertMask",
search_aliases=["reverse mask", "flip mask"], search_aliases=["reverse mask", "flip mask"],
category="mask", category="mask",
description="Inverts a mask so white becomes black and vice versa.",
short_description=None,
inputs=[ inputs=[
IO.Mask.Input("mask"), IO.Mask.Input("mask"),
], ],
@@ -222,6 +235,8 @@ class CropMask(IO.ComfyNode):
node_id="CropMask", node_id="CropMask",
search_aliases=["cut mask", "extract mask region", "mask slice"], search_aliases=["cut mask", "extract mask region", "mask slice"],
category="mask", category="mask",
description="Crops a rectangular region from a mask at the specified position and dimensions.",
short_description="Crops a rectangular region from a mask.",
inputs=[ inputs=[
IO.Mask.Input("mask"), IO.Mask.Input("mask"),
IO.Int.Input("x", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1), IO.Int.Input("x", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1),
@@ -248,6 +263,8 @@ class MaskComposite(IO.ComfyNode):
node_id="MaskComposite", node_id="MaskComposite",
search_aliases=["combine masks", "blend masks", "layer masks"], search_aliases=["combine masks", "blend masks", "layer masks"],
category="mask", category="mask",
description="Composites a source mask onto a destination mask at a specified position using selectable blend operations.",
short_description="Composites masks with selectable blend operations.",
inputs=[ inputs=[
IO.Mask.Input("destination"), IO.Mask.Input("destination"),
IO.Mask.Input("source"), IO.Mask.Input("source"),
@@ -297,6 +314,8 @@ class FeatherMask(IO.ComfyNode):
node_id="FeatherMask", node_id="FeatherMask",
search_aliases=["soft edge mask", "blur mask edges", "gradient mask edge"], search_aliases=["soft edge mask", "blur mask edges", "gradient mask edge"],
category="mask", category="mask",
description="Applies a soft gradient feather to the edges of a mask with independent control for each side.",
short_description="Feathers mask edges with per-side control.",
inputs=[ inputs=[
IO.Mask.Input("mask"), IO.Mask.Input("mask"),
IO.Int.Input("left", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1), IO.Int.Input("left", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1),
@@ -345,6 +364,8 @@ class GrowMask(IO.ComfyNode):
search_aliases=["expand mask", "shrink mask"], search_aliases=["expand mask", "shrink mask"],
display_name="Grow Mask", display_name="Grow Mask",
category="mask", category="mask",
description="Expands or shrinks a mask by a specified number of pixels using morphological dilation or erosion with optional tapered corners.",
short_description="Expands or shrinks a mask by pixel amount.",
inputs=[ inputs=[
IO.Mask.Input("mask"), IO.Mask.Input("mask"),
IO.Int.Input("expand", default=0, min=-nodes.MAX_RESOLUTION, max=nodes.MAX_RESOLUTION, step=1), IO.Int.Input("expand", default=0, min=-nodes.MAX_RESOLUTION, max=nodes.MAX_RESOLUTION, step=1),
@@ -382,6 +403,8 @@ class ThresholdMask(IO.ComfyNode):
node_id="ThresholdMask", node_id="ThresholdMask",
search_aliases=["binary mask"], search_aliases=["binary mask"],
category="mask", category="mask",
description="Converts a mask to binary by setting pixels above a threshold to white and below to black.",
short_description="Converts a mask to binary using a threshold.",
inputs=[ inputs=[
IO.Mask.Input("mask"), IO.Mask.Input("mask"),
IO.Float.Input("value", default=0.5, min=0.0, max=1.0, step=0.01), IO.Float.Input("value", default=0.5, min=0.0, max=1.0, step=0.01),
@@ -408,7 +431,8 @@ class MaskPreview(IO.ComfyNode):
search_aliases=["show mask", "view mask", "inspect mask", "debug mask"], search_aliases=["show mask", "view mask", "inspect mask", "debug mask"],
display_name="Preview Mask", display_name="Preview Mask",
category="mask", category="mask",
description="Saves the input images to your ComfyUI output directory.", description="Previews a mask in the UI by rendering it as a grayscale image.",
short_description="Previews a mask as a grayscale image.",
inputs=[ inputs=[
IO.Mask.Input("mask"), IO.Mask.Input("mask"),
], ],

View File

@@ -11,6 +11,8 @@ class EmptyMochiLatentVideo(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="EmptyMochiLatentVideo", node_id="EmptyMochiLatentVideo",
category="latent/video", category="latent/video",
description="Creates an empty latent tensor sized for Mochi video generation with configurable width, height, frame length, and batch size.",
short_description="Create empty latent for Mochi video generation.",
inputs=[ inputs=[
io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16), io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16),
io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16), io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),

View File

@@ -60,6 +60,8 @@ class ModelSamplingDiscrete:
FUNCTION = "patch" FUNCTION = "patch"
CATEGORY = "advanced/model" CATEGORY = "advanced/model"
DESCRIPTION = "Override the model's sampling method to use a discrete noise schedule with a selectable prediction type."
SHORT_DESCRIPTION = "Override model sampling to a discrete noise schedule."
def patch(self, model, sampling, zsnr): def patch(self, model, sampling, zsnr):
m = model.clone() m = model.clone()
@@ -96,6 +98,8 @@ class ModelSamplingStableCascade:
FUNCTION = "patch" FUNCTION = "patch"
CATEGORY = "advanced/model" CATEGORY = "advanced/model"
DESCRIPTION = "Override the model's sampling to use Stable Cascade noise scheduling with an adjustable shift parameter."
SHORT_DESCRIPTION = "Override sampling to Stable Cascade noise scheduling."
def patch(self, model, shift): def patch(self, model, shift):
m = model.clone() m = model.clone()
@@ -122,6 +126,8 @@ class ModelSamplingSD3:
FUNCTION = "patch" FUNCTION = "patch"
CATEGORY = "advanced/model" CATEGORY = "advanced/model"
DESCRIPTION = "Override the model's sampling to use the SD3 discrete flow noise schedule with an adjustable shift parameter."
SHORT_DESCRIPTION = "Override sampling to SD3 discrete flow schedule."
def patch(self, model, shift, multiplier=1000): def patch(self, model, shift, multiplier=1000):
m = model.clone() m = model.clone()
@@ -144,6 +150,8 @@ class ModelSamplingAuraFlow(ModelSamplingSD3):
"shift": ("FLOAT", {"default": 1.73, "min": 0.0, "max": 100.0, "step":0.01}), "shift": ("FLOAT", {"default": 1.73, "min": 0.0, "max": 100.0, "step":0.01}),
}} }}
DESCRIPTION = "Override the model's sampling to use the AuraFlow discrete flow noise schedule with an adjustable shift."
SHORT_DESCRIPTION = "Override sampling to AuraFlow discrete flow schedule."
FUNCTION = "patch_aura" FUNCTION = "patch_aura"
def patch_aura(self, model, shift): def patch_aura(self, model, shift):
@@ -163,6 +171,8 @@ class ModelSamplingFlux:
FUNCTION = "patch" FUNCTION = "patch"
CATEGORY = "advanced/model" CATEGORY = "advanced/model"
DESCRIPTION = "Override the model's sampling to use the Flux flow schedule with resolution-dependent shift computed from base and max shift values."
SHORT_DESCRIPTION = "Override sampling to Flux flow schedule with resolution shift."
def patch(self, model, max_shift, base_shift, width, height): def patch(self, model, max_shift, base_shift, width, height):
m = model.clone() m = model.clone()
@@ -198,6 +208,8 @@ class ModelSamplingContinuousEDM:
FUNCTION = "patch" FUNCTION = "patch"
CATEGORY = "advanced/model" CATEGORY = "advanced/model"
DESCRIPTION = "Override the model's sampling to use a continuous EDM noise schedule with configurable sigma range and prediction type."
SHORT_DESCRIPTION = "Override sampling to continuous EDM noise schedule."
def patch(self, model, sampling, sigma_max, sigma_min): def patch(self, model, sampling, sigma_max, sigma_min):
m = model.clone() m = model.clone()
@@ -243,6 +255,8 @@ class ModelSamplingContinuousV:
FUNCTION = "patch" FUNCTION = "patch"
CATEGORY = "advanced/model" CATEGORY = "advanced/model"
DESCRIPTION = "Override the model's sampling to use a continuous V-prediction noise schedule with configurable sigma range."
SHORT_DESCRIPTION = "Override sampling to continuous V-prediction schedule."
def patch(self, model, sampling, sigma_max, sigma_min): def patch(self, model, sampling, sigma_max, sigma_min):
m = model.clone() m = model.clone()
@@ -269,6 +283,8 @@ class RescaleCFG:
FUNCTION = "patch" FUNCTION = "patch"
CATEGORY = "advanced/model" CATEGORY = "advanced/model"
DESCRIPTION = "Apply Rescale CFG to the model, which normalizes the CFG output to match the standard deviation of the positive conditioning prediction."
SHORT_DESCRIPTION = "Normalize CFG output to match positive conditioning std."
def patch(self, model, multiplier): def patch(self, model, multiplier):
def rescale_cfg(args): def rescale_cfg(args):
@@ -310,6 +326,7 @@ class ModelComputeDtype:
FUNCTION = "patch" FUNCTION = "patch"
CATEGORY = "advanced/debug/model" CATEGORY = "advanced/debug/model"
DESCRIPTION = "Override the compute dtype used by the model during inference."
def patch(self, model, dtype): def patch(self, model, dtype):
m = model.clone() m = model.clone()

View File

@@ -11,6 +11,8 @@ class PatchModelAddDownscale(io.ComfyNode):
node_id="PatchModelAddDownscale", node_id="PatchModelAddDownscale",
display_name="PatchModelAddDownscale (Kohya Deep Shrink)", display_name="PatchModelAddDownscale (Kohya Deep Shrink)",
category="model_patches/unet", category="model_patches/unet",
description="Patches the UNet to downscale internal feature maps at a specified block during a configurable sigma range, then upscale on output, implementing the Kohya Deep Shrink technique for faster generation.",
short_description="Kohya Deep Shrink: downscale UNet internals for speed.",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
io.Int.Input("block_number", default=3, min=1, max=32, step=1), io.Int.Input("block_number", default=3, min=1, max=32, step=1),

View File

@@ -22,6 +22,8 @@ class ModelMergeSimple:
FUNCTION = "merge" FUNCTION = "merge"
CATEGORY = "advanced/model_merging" CATEGORY = "advanced/model_merging"
DESCRIPTION = "Merge two diffusion models using a simple ratio to blend all weights uniformly."
SHORT_DESCRIPTION = "Merge two models with a uniform blend ratio."
def merge(self, model1, model2, ratio): def merge(self, model1, model2, ratio):
m = model1.clone() m = model1.clone()
@@ -41,6 +43,8 @@ class ModelSubtract:
FUNCTION = "merge" FUNCTION = "merge"
CATEGORY = "advanced/model_merging" CATEGORY = "advanced/model_merging"
DESCRIPTION = "Subtract one diffusion model's weights from another with an adjustable multiplier for extracting differences."
SHORT_DESCRIPTION = "Subtract model weights with adjustable multiplier."
def merge(self, model1, model2, multiplier): def merge(self, model1, model2, multiplier):
m = model1.clone() m = model1.clone()
@@ -59,6 +63,8 @@ class ModelAdd:
FUNCTION = "merge" FUNCTION = "merge"
CATEGORY = "advanced/model_merging" CATEGORY = "advanced/model_merging"
DESCRIPTION = "Add the weights of one diffusion model on top of another."
SHORT_DESCRIPTION = None
def merge(self, model1, model2): def merge(self, model1, model2):
m = model1.clone() m = model1.clone()
@@ -79,6 +85,8 @@ class CLIPMergeSimple:
FUNCTION = "merge" FUNCTION = "merge"
CATEGORY = "advanced/model_merging" CATEGORY = "advanced/model_merging"
DESCRIPTION = "Merge two CLIP text encoder models using a simple ratio to blend all weights uniformly."
SHORT_DESCRIPTION = "Merge two CLIP models with a uniform blend ratio."
def merge(self, clip1, clip2, ratio): def merge(self, clip1, clip2, ratio):
m = clip1.clone() m = clip1.clone()
@@ -102,6 +110,8 @@ class CLIPSubtract:
FUNCTION = "merge" FUNCTION = "merge"
CATEGORY = "advanced/model_merging" CATEGORY = "advanced/model_merging"
DESCRIPTION = "Subtract one CLIP model's weights from another with an adjustable multiplier for extracting differences."
SHORT_DESCRIPTION = "Subtract CLIP weights with adjustable multiplier."
def merge(self, clip1, clip2, multiplier): def merge(self, clip1, clip2, multiplier):
m = clip1.clone() m = clip1.clone()
@@ -124,6 +134,8 @@ class CLIPAdd:
FUNCTION = "merge" FUNCTION = "merge"
CATEGORY = "advanced/model_merging" CATEGORY = "advanced/model_merging"
DESCRIPTION = "Add the weights of one CLIP model on top of another."
SHORT_DESCRIPTION = None
def merge(self, clip1, clip2): def merge(self, clip1, clip2):
m = clip1.clone() m = clip1.clone()
@@ -148,6 +160,8 @@ class ModelMergeBlocks:
FUNCTION = "merge" FUNCTION = "merge"
CATEGORY = "advanced/model_merging" CATEGORY = "advanced/model_merging"
DESCRIPTION = "Merge two diffusion models with separate blend ratios for input, middle, and output blocks."
SHORT_DESCRIPTION = "Merge two models with per-block blend ratios."
def merge(self, model1, model2, **kwargs): def merge(self, model1, model2, **kwargs):
m = model1.clone() m = model1.clone()
@@ -228,6 +242,8 @@ def save_checkpoint(model, clip=None, vae=None, clip_vision=None, filename_prefi
class CheckpointSave: class CheckpointSave:
SEARCH_ALIASES = ["save model", "export checkpoint", "merge save"] SEARCH_ALIASES = ["save model", "export checkpoint", "merge save"]
DESCRIPTION = "Saves a model, CLIP, and VAE as a combined checkpoint file in safetensors format with optional workflow metadata."
SHORT_DESCRIPTION = "Saves model, CLIP, and VAE as a checkpoint."
def __init__(self): def __init__(self):
self.output_dir = folder_paths.get_output_directory() self.output_dir = folder_paths.get_output_directory()
@@ -262,6 +278,8 @@ class CLIPSave:
OUTPUT_NODE = True OUTPUT_NODE = True
CATEGORY = "advanced/model_merging" CATEGORY = "advanced/model_merging"
DESCRIPTION = "Save a CLIP text encoder model to safetensors files, splitting by model component."
SHORT_DESCRIPTION = "Save a CLIP model to safetensors files."
def save(self, clip, filename_prefix, prompt=None, extra_pnginfo=None): def save(self, clip, filename_prefix, prompt=None, extra_pnginfo=None):
prompt_info = "" prompt_info = ""
@@ -319,6 +337,8 @@ class VAESave:
OUTPUT_NODE = True OUTPUT_NODE = True
CATEGORY = "advanced/model_merging" CATEGORY = "advanced/model_merging"
DESCRIPTION = "Save a VAE model to a safetensors file."
SHORT_DESCRIPTION = None
def save(self, vae, filename_prefix, prompt=None, extra_pnginfo=None): def save(self, vae, filename_prefix, prompt=None, extra_pnginfo=None):
full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir) full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)
@@ -354,6 +374,8 @@ class ModelSave:
OUTPUT_NODE = True OUTPUT_NODE = True
CATEGORY = "advanced/model_merging" CATEGORY = "advanced/model_merging"
DESCRIPTION = "Save a diffusion model to a safetensors file."
SHORT_DESCRIPTION = None
def save(self, model, filename_prefix, prompt=None, extra_pnginfo=None): def save(self, model, filename_prefix, prompt=None, extra_pnginfo=None):
save_checkpoint(model, filename_prefix=filename_prefix, output_dir=self.output_dir, prompt=prompt, extra_pnginfo=extra_pnginfo) save_checkpoint(model, filename_prefix=filename_prefix, output_dir=self.output_dir, prompt=prompt, extra_pnginfo=extra_pnginfo)

View File

@@ -2,6 +2,8 @@ import comfy_extras.nodes_model_merging
class ModelMergeSD1(comfy_extras.nodes_model_merging.ModelMergeBlocks): class ModelMergeSD1(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific" CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two SD1 models with per-block weight control over input, middle, and output blocks."
SHORT_DESCRIPTION = "Merge two SD1 models with per-block control."
@classmethod @classmethod
def INPUT_TYPES(s): def INPUT_TYPES(s):
arg_dict = { "model1": ("MODEL",), arg_dict = { "model1": ("MODEL",),
@@ -26,8 +28,15 @@ class ModelMergeSD1(comfy_extras.nodes_model_merging.ModelMergeBlocks):
return {"required": arg_dict} return {"required": arg_dict}
class ModelMergeSD2(ModelMergeSD1):
DESCRIPTION = "Merge two SD2 models with per-block weight control over input, middle, and output blocks."
SHORT_DESCRIPTION = "Merge two SD2 models with per-block control."
class ModelMergeSDXL(comfy_extras.nodes_model_merging.ModelMergeBlocks): class ModelMergeSDXL(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific" CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two SDXL models with per-block weight control over input, middle, and output blocks."
SHORT_DESCRIPTION = "Merge two SDXL models with per-block control."
@classmethod @classmethod
def INPUT_TYPES(s): def INPUT_TYPES(s):
@@ -54,6 +63,8 @@ class ModelMergeSDXL(comfy_extras.nodes_model_merging.ModelMergeBlocks):
class ModelMergeSD3_2B(comfy_extras.nodes_model_merging.ModelMergeBlocks): class ModelMergeSD3_2B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific" CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two SD3 2B models with per-block weight control over 24 joint blocks and embedders."
SHORT_DESCRIPTION = "Merge two SD3 2B models with per-block control."
@classmethod @classmethod
def INPUT_TYPES(s): def INPUT_TYPES(s):
@@ -78,6 +89,8 @@ class ModelMergeSD3_2B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
class ModelMergeAuraflow(comfy_extras.nodes_model_merging.ModelMergeBlocks): class ModelMergeAuraflow(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific" CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two AuraFlow models with per-block weight control over double and single layers."
SHORT_DESCRIPTION = "Merge two AuraFlow models with per-block control."
@classmethod @classmethod
def INPUT_TYPES(s): def INPUT_TYPES(s):
@@ -105,6 +118,8 @@ class ModelMergeAuraflow(comfy_extras.nodes_model_merging.ModelMergeBlocks):
class ModelMergeFlux1(comfy_extras.nodes_model_merging.ModelMergeBlocks): class ModelMergeFlux1(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific" CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Flux1 models with per-block weight control over 19 double blocks and 38 single blocks."
SHORT_DESCRIPTION = "Merge two Flux1 models with per-block control."
@classmethod @classmethod
def INPUT_TYPES(s): def INPUT_TYPES(s):
@@ -131,6 +146,8 @@ class ModelMergeFlux1(comfy_extras.nodes_model_merging.ModelMergeBlocks):
class ModelMergeSD35_Large(comfy_extras.nodes_model_merging.ModelMergeBlocks): class ModelMergeSD35_Large(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific" CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two SD3.5 Large models with per-block weight control over 38 joint blocks and embedders."
SHORT_DESCRIPTION = "Merge two SD3.5 Large models with per-block control."
@classmethod @classmethod
def INPUT_TYPES(s): def INPUT_TYPES(s):
@@ -154,6 +171,8 @@ class ModelMergeSD35_Large(comfy_extras.nodes_model_merging.ModelMergeBlocks):
class ModelMergeMochiPreview(comfy_extras.nodes_model_merging.ModelMergeBlocks): class ModelMergeMochiPreview(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific" CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Mochi Preview models with per-block weight control over 48 blocks and embedders."
SHORT_DESCRIPTION = "Merge two Mochi Preview models with per-block control."
@classmethod @classmethod
def INPUT_TYPES(s): def INPUT_TYPES(s):
@@ -176,6 +195,8 @@ class ModelMergeMochiPreview(comfy_extras.nodes_model_merging.ModelMergeBlocks):
class ModelMergeLTXV(comfy_extras.nodes_model_merging.ModelMergeBlocks): class ModelMergeLTXV(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific" CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two LTXV models with per-block weight control over 28 transformer blocks."
SHORT_DESCRIPTION = "Merge two LTXV models with per-block control."
@classmethod @classmethod
def INPUT_TYPES(s): def INPUT_TYPES(s):
@@ -198,6 +219,8 @@ class ModelMergeLTXV(comfy_extras.nodes_model_merging.ModelMergeBlocks):
class ModelMergeCosmos7B(comfy_extras.nodes_model_merging.ModelMergeBlocks): class ModelMergeCosmos7B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific" CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Cosmos 7B models with per-block weight control over 28 blocks and embedders."
SHORT_DESCRIPTION = "Merge two Cosmos 7B models with per-block control."
@classmethod @classmethod
def INPUT_TYPES(s): def INPUT_TYPES(s):
@@ -222,6 +245,8 @@ class ModelMergeCosmos7B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
class ModelMergeCosmos14B(comfy_extras.nodes_model_merging.ModelMergeBlocks): class ModelMergeCosmos14B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific" CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Cosmos 14B models with per-block weight control over 36 blocks and embedders."
SHORT_DESCRIPTION = "Merge two Cosmos 14B models with per-block control."
@classmethod @classmethod
def INPUT_TYPES(s): def INPUT_TYPES(s):
@@ -247,6 +272,7 @@ class ModelMergeCosmos14B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
class ModelMergeWAN2_1(comfy_extras.nodes_model_merging.ModelMergeBlocks): class ModelMergeWAN2_1(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific" CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "1.3B model has 30 blocks, 14B model has 40 blocks. Image to video model has the extra img_emb." DESCRIPTION = "1.3B model has 30 blocks, 14B model has 40 blocks. Image to video model has the extra img_emb."
SHORT_DESCRIPTION = "WAN 2.1 model merging with block-level control."
@classmethod @classmethod
def INPUT_TYPES(s): def INPUT_TYPES(s):
@@ -270,6 +296,8 @@ class ModelMergeWAN2_1(comfy_extras.nodes_model_merging.ModelMergeBlocks):
class ModelMergeCosmosPredict2_2B(comfy_extras.nodes_model_merging.ModelMergeBlocks): class ModelMergeCosmosPredict2_2B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific" CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Cosmos Predict2 2B models with per-block weight control over 28 blocks and embedders."
SHORT_DESCRIPTION = "Merge two Cosmos Predict2 2B models with per-block control."
@classmethod @classmethod
def INPUT_TYPES(s): def INPUT_TYPES(s):
@@ -293,6 +321,8 @@ class ModelMergeCosmosPredict2_2B(comfy_extras.nodes_model_merging.ModelMergeBlo
class ModelMergeCosmosPredict2_14B(comfy_extras.nodes_model_merging.ModelMergeBlocks): class ModelMergeCosmosPredict2_14B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific" CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Cosmos Predict2 14B models with per-block weight control over 36 blocks and embedders."
SHORT_DESCRIPTION = "Merge two Cosmos Predict2 14B models with per-block control."
@classmethod @classmethod
def INPUT_TYPES(s): def INPUT_TYPES(s):
@@ -316,6 +346,8 @@ class ModelMergeCosmosPredict2_14B(comfy_extras.nodes_model_merging.ModelMergeBl
class ModelMergeQwenImage(comfy_extras.nodes_model_merging.ModelMergeBlocks): class ModelMergeQwenImage(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific" CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Qwen Image models with per-block weight control over 60 transformer blocks."
SHORT_DESCRIPTION = "Merge two Qwen Image models with per-block control."
@classmethod @classmethod
def INPUT_TYPES(s): def INPUT_TYPES(s):
@@ -339,7 +371,7 @@ class ModelMergeQwenImage(comfy_extras.nodes_model_merging.ModelMergeBlocks):
NODE_CLASS_MAPPINGS = { NODE_CLASS_MAPPINGS = {
"ModelMergeSD1": ModelMergeSD1, "ModelMergeSD1": ModelMergeSD1,
"ModelMergeSD2": ModelMergeSD1, #SD1 and SD2 have the same blocks "ModelMergeSD2": ModelMergeSD2, #SD1 and SD2 have the same blocks
"ModelMergeSDXL": ModelMergeSDXL, "ModelMergeSDXL": ModelMergeSDXL,
"ModelMergeSD3_2B": ModelMergeSD3_2B, "ModelMergeSD3_2B": ModelMergeSD3_2B,
"ModelMergeAuraflow": ModelMergeAuraflow, "ModelMergeAuraflow": ModelMergeAuraflow,

View File

@@ -230,6 +230,8 @@ class ModelPatchLoader:
EXPERIMENTAL = True EXPERIMENTAL = True
CATEGORY = "advanced/loaders" CATEGORY = "advanced/loaders"
DESCRIPTION = "Load a model patch file such as a controlnet or style reference patch for use with compatible model nodes."
SHORT_DESCRIPTION = "Load a model patch file for controlnet or style."
def load_model_patch(self, name): def load_model_patch(self, name):
model_patch_path = folder_paths.get_full_path_or_raise("model_patches", name) model_patch_path = folder_paths.get_full_path_or_raise("model_patches", name)
@@ -456,6 +458,8 @@ class QwenImageDiffsynthControlnet:
EXPERIMENTAL = True EXPERIMENTAL = True
CATEGORY = "advanced/loaders/qwen" CATEGORY = "advanced/loaders/qwen"
DESCRIPTION = "Apply a DiffSynth-style controlnet patch to a Qwen Image model using a VAE-encoded control image."
SHORT_DESCRIPTION = "Apply DiffSynth controlnet to a Qwen Image model."
def diffsynth_controlnet(self, model, model_patch, vae, image=None, strength=1.0, inpaint_image=None, mask=None): def diffsynth_controlnet(self, model, model_patch, vae, image=None, strength=1.0, inpaint_image=None, mask=None):
model_patched = model.clone() model_patched = model.clone()
@@ -489,6 +493,8 @@ class ZImageFunControlnet(QwenImageDiffsynthControlnet):
"optional": {"image": ("IMAGE",), "inpaint_image": ("IMAGE",), "mask": ("MASK",)}} "optional": {"image": ("IMAGE",), "inpaint_image": ("IMAGE",), "mask": ("MASK",)}}
CATEGORY = "advanced/loaders/zimage" CATEGORY = "advanced/loaders/zimage"
DESCRIPTION = "Apply a Z-Image Fun controlnet patch to a model with optional control image, inpaint image, and mask inputs."
SHORT_DESCRIPTION = "Apply Z-Image Fun controlnet with optional inpainting."
class UsoStyleProjectorPatch: class UsoStyleProjectorPatch:
def __init__(self, model_patch, encoded_image): def __init__(self, model_patch, encoded_image):
@@ -525,6 +531,8 @@ class USOStyleReference:
EXPERIMENTAL = True EXPERIMENTAL = True
CATEGORY = "advanced/model_patches/flux" CATEGORY = "advanced/model_patches/flux"
DESCRIPTION = "Apply a USO style reference patch to a Flux model using multi-layer SigLIP features from CLIP vision output."
SHORT_DESCRIPTION = "Apply USO style reference to a Flux model."
def apply_patch(self, model, model_patch, clip_vision_output): def apply_patch(self, model, model_patch, clip_vision_output):
encoded_image = torch.stack((clip_vision_output.all_hidden_states[:, -20], clip_vision_output.all_hidden_states[:, -11], clip_vision_output.penultimate_hidden_states)) encoded_image = torch.stack((clip_vision_output.all_hidden_states[:, -20], clip_vision_output.all_hidden_states[:, -11], clip_vision_output.penultimate_hidden_states))

View File

@@ -15,6 +15,8 @@ class Morphology(io.ComfyNode):
search_aliases=["erode", "dilate"], search_aliases=["erode", "dilate"],
display_name="ImageMorphology", display_name="ImageMorphology",
category="image/postprocessing", category="image/postprocessing",
description="Applies morphological operations to an image using a configurable kernel size.",
short_description="",
inputs=[ inputs=[
io.Image.Input("image"), io.Image.Input("image"),
io.Combo.Input( io.Combo.Input(
@@ -60,6 +62,8 @@ class ImageRGBToYUV(io.ComfyNode):
node_id="ImageRGBToYUV", node_id="ImageRGBToYUV",
search_aliases=["color space conversion"], search_aliases=["color space conversion"],
category="image/batch", category="image/batch",
description="Converts an RGB image to YUV (YCbCr) color space, outputting separate Y, U, and V channel images.",
short_description="Convert RGB image to YUV color space.",
inputs=[ inputs=[
io.Image.Input("image"), io.Image.Input("image"),
], ],
@@ -82,6 +86,8 @@ class ImageYUVToRGB(io.ComfyNode):
node_id="ImageYUVToRGB", node_id="ImageYUVToRGB",
search_aliases=["color space conversion"], search_aliases=["color space conversion"],
category="image/batch", category="image/batch",
description="Converts separate Y, U, and V (YCbCr) channel images back into a single RGB image.",
short_description="Convert YUV channels back to RGB image.",
inputs=[ inputs=[
io.Image.Input("Y"), io.Image.Input("Y"),
io.Image.Input("U"), io.Image.Input("U"),

View File

@@ -14,6 +14,7 @@ class wanBlockSwap(io.ComfyNode):
node_id="wanBlockSwap", node_id="wanBlockSwap",
category="", category="",
description="NOP", description="NOP",
short_description=None,
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
], ],

View File

@@ -32,6 +32,8 @@ class OptimalStepsScheduler(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="OptimalStepsScheduler", node_id="OptimalStepsScheduler",
category="sampling/custom_sampling/schedulers", category="sampling/custom_sampling/schedulers",
description="Generates an optimized noise schedule with precomputed optimal sigma levels using log-linear interpolation.",
short_description="Optimal noise schedule with precomputed sigma levels.",
inputs=[ inputs=[
io.Combo.Input("model_type", options=["FLUX", "Wan", "Chroma"]), io.Combo.Input("model_type", options=["FLUX", "Wan", "Chroma"]),
io.Int.Input("steps", default=20, min=3, max=1000), io.Int.Input("steps", default=20, min=3, max=1000),

View File

@@ -16,6 +16,8 @@ class PerturbedAttentionGuidance(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="PerturbedAttentionGuidance", node_id="PerturbedAttentionGuidance",
category="model_patches/unet", category="model_patches/unet",
description="Applies Perturbed Attention Guidance (PAG) by replacing self-attention with identity in the middle block to compute a guidance signal that enhances structural coherence.",
short_description="Perturbed Attention Guidance for structural coherence.",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
io.Float.Input("scale", default=3.0, min=0.0, max=100.0, step=0.01, round=0.01), io.Float.Input("scale", default=3.0, min=0.0, max=100.0, step=0.01, round=0.01),

View File

@@ -26,6 +26,8 @@ class PerpNeg(io.ComfyNode):
node_id="PerpNeg", node_id="PerpNeg",
display_name="Perp-Neg (DEPRECATED by PerpNegGuider)", display_name="Perp-Neg (DEPRECATED by PerpNegGuider)",
category="_for_testing", category="_for_testing",
description="Applies perpendicular negative guidance by projecting out the component of negative conditioning parallel to positive conditioning. Deprecated in favor of PerpNegGuider.",
short_description="Perpendicular negative guidance (deprecated).",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
io.Conditioning.Input("empty_conditioning"), io.Conditioning.Input("empty_conditioning"),
@@ -128,6 +130,8 @@ class PerpNegGuider(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="PerpNegGuider", node_id="PerpNegGuider",
category="_for_testing", category="_for_testing",
description="Creates a guider that applies perpendicular negative guidance, computing positive, negative, and empty conditioning in a single batch for efficient sampling.",
short_description="Guider with perpendicular negative guidance.",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
io.Conditioning.Input("positive"), io.Conditioning.Input("positive"),

View File

@@ -124,6 +124,8 @@ class PhotoMakerLoader(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="PhotoMakerLoader", node_id="PhotoMakerLoader",
category="_for_testing/photomaker", category="_for_testing/photomaker",
description="Loads a PhotoMaker model from a safetensors file for identity-preserving image generation.",
short_description="Load a PhotoMaker model from file.",
inputs=[ inputs=[
io.Combo.Input("photomaker_model_name", options=folder_paths.get_filename_list("photomaker")), io.Combo.Input("photomaker_model_name", options=folder_paths.get_filename_list("photomaker")),
], ],
@@ -150,6 +152,8 @@ class PhotoMakerEncode(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="PhotoMakerEncode", node_id="PhotoMakerEncode",
category="_for_testing/photomaker", category="_for_testing/photomaker",
description="Encodes a reference image and text prompt using PhotoMaker to produce identity-preserving conditioning for image generation.",
short_description="Encode image and text with PhotoMaker.",
inputs=[ inputs=[
io.Photomaker.Input("photomaker"), io.Photomaker.Input("photomaker"),
io.Image.Input("image"), io.Image.Input("image"),

View File

@@ -10,6 +10,7 @@ class CLIPTextEncodePixArtAlpha(io.ComfyNode):
search_aliases=["pixart prompt"], search_aliases=["pixart prompt"],
category="advanced/conditioning", category="advanced/conditioning",
description="Encodes text and sets the resolution conditioning for PixArt Alpha. Does not apply to PixArt Sigma.", description="Encodes text and sets the resolution conditioning for PixArt Alpha. Does not apply to PixArt Sigma.",
short_description="Encodes text with resolution conditioning for PixArt Alpha.",
inputs=[ inputs=[
io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION), io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION),
io.Int.Input("height", default=1024, min=0, max=nodes.MAX_RESOLUTION), io.Int.Input("height", default=1024, min=0, max=nodes.MAX_RESOLUTION),

View File

@@ -20,6 +20,8 @@ class Blend(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="ImageBlend", node_id="ImageBlend",
category="image/postprocessing", category="image/postprocessing",
description="Blends two images together using a selectable blend mode and adjustable blend factor.",
short_description="Blends two images using a selected blend mode.",
inputs=[ inputs=[
io.Image.Input("image1"), io.Image.Input("image1"),
io.Image.Input("image2"), io.Image.Input("image2"),
@@ -77,6 +79,8 @@ class Blur(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="ImageBlur", node_id="ImageBlur",
category="image/postprocessing", category="image/postprocessing",
description="Applies a Gaussian blur to an image with configurable radius and sigma.",
short_description="Applies Gaussian blur to an image.",
inputs=[ inputs=[
io.Image.Input("image"), io.Image.Input("image"),
io.Int.Input("blur_radius", default=1, min=1, max=31, step=1), io.Int.Input("blur_radius", default=1, min=1, max=31, step=1),
@@ -112,6 +116,8 @@ class Quantize(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="ImageQuantize", node_id="ImageQuantize",
category="image/postprocessing", category="image/postprocessing",
description="Reduces the number of colors in an image with optional dithering.",
short_description="",
inputs=[ inputs=[
io.Image.Input("image"), io.Image.Input("image"),
io.Int.Input("colors", default=256, min=1, max=256, step=1), io.Int.Input("colors", default=256, min=1, max=256, step=1),
@@ -177,6 +183,8 @@ class Sharpen(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="ImageSharpen", node_id="ImageSharpen",
category="image/postprocessing", category="image/postprocessing",
description="Sharpens an image using an unsharp mask with configurable radius, sigma, and strength.",
short_description="Sharpens an image using unsharp mask.",
inputs=[ inputs=[
io.Image.Input("image"), io.Image.Input("image"),
io.Int.Input("sharpen_radius", default=1, min=1, max=31, step=1), io.Int.Input("sharpen_radius", default=1, min=1, max=31, step=1),
@@ -221,6 +229,8 @@ class ImageScaleToTotalPixels(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="ImageScaleToTotalPixels", node_id="ImageScaleToTotalPixels",
category="image/upscaling", category="image/upscaling",
description="Scales an image to a target total megapixel count while preserving aspect ratio, with configurable resolution stepping.",
short_description="Scales an image to a target megapixel count.",
inputs=[ inputs=[
io.Image.Input("image"), io.Image.Input("image"),
io.Combo.Input("upscale_method", options=cls.upscale_methods), io.Combo.Input("upscale_method", options=cls.upscale_methods),
@@ -430,6 +440,7 @@ class ResizeImageMaskNode(io.ComfyNode):
node_id="ResizeImageMaskNode", node_id="ResizeImageMaskNode",
display_name="Resize Image/Mask", display_name="Resize Image/Mask",
description="Resize an image or mask using various scaling methods.", description="Resize an image or mask using various scaling methods.",
short_description=None,
category="transform", category="transform",
search_aliases=["resize", "resize image", "resize mask", "scale", "scale image", "scale mask", "image resize", "change size", "dimensions", "shrink", "enlarge"], search_aliases=["resize", "resize image", "resize mask", "scale", "scale image", "scale mask", "image resize", "change size", "dimensions", "shrink", "enlarge"],
inputs=[ inputs=[
@@ -565,6 +576,8 @@ class BatchImagesNode(io.ComfyNode):
node_id="BatchImagesNode", node_id="BatchImagesNode",
display_name="Batch Images", display_name="Batch Images",
category="image", category="image",
description="Combines multiple images into a single batch, resizing them to match the first image's dimensions.",
short_description="Combines multiple images into a single batch.",
search_aliases=["batch", "image batch", "batch images", "combine images", "merge images", "stack images"], search_aliases=["batch", "image batch", "batch images", "combine images", "merge images", "stack images"],
inputs=[ inputs=[
io.Autogrow.Input("images", template=autogrow_template) io.Autogrow.Input("images", template=autogrow_template)
@@ -587,6 +600,8 @@ class BatchMasksNode(io.ComfyNode):
search_aliases=["combine masks", "stack masks", "merge masks"], search_aliases=["combine masks", "stack masks", "merge masks"],
display_name="Batch Masks", display_name="Batch Masks",
category="mask", category="mask",
description="Combines multiple masks into a single batch, resizing them to match the first mask's dimensions.",
short_description="Combines multiple masks into a single batch.",
inputs=[ inputs=[
io.Autogrow.Input("masks", template=autogrow_template) io.Autogrow.Input("masks", template=autogrow_template)
], ],
@@ -608,6 +623,8 @@ class BatchLatentsNode(io.ComfyNode):
search_aliases=["combine latents", "stack latents", "merge latents"], search_aliases=["combine latents", "stack latents", "merge latents"],
display_name="Batch Latents", display_name="Batch Latents",
category="latent", category="latent",
description="Combines multiple latent tensors into a single batch, reshaping them to match the first latent's dimensions.",
short_description="Combines multiple latents into a single batch.",
inputs=[ inputs=[
io.Autogrow.Input("latents", template=autogrow_template) io.Autogrow.Input("latents", template=autogrow_template)
], ],
@@ -632,6 +649,8 @@ class BatchImagesMasksLatentsNode(io.ComfyNode):
search_aliases=["combine batch", "merge batch", "stack inputs"], search_aliases=["combine batch", "merge batch", "stack inputs"],
display_name="Batch Images/Masks/Latents", display_name="Batch Images/Masks/Latents",
category="util", category="util",
description="Combines multiple images, masks, or latents into a single batch, automatically detecting the input type.",
short_description="Batches images, masks, or latents together.",
inputs=[ inputs=[
io.Autogrow.Input("inputs", template=autogrow_template) io.Autogrow.Input("inputs", template=autogrow_template)
], ],

View File

@@ -16,6 +16,8 @@ class PreviewAny():
OUTPUT_NODE = True OUTPUT_NODE = True
CATEGORY = "utils" CATEGORY = "utils"
DESCRIPTION = "Preview any input value as text, converting it to a JSON or string representation for display."
SHORT_DESCRIPTION = "Preview any input value as text."
SEARCH_ALIASES = ["show output", "inspect", "debug", "print value", "show text"] SEARCH_ALIASES = ["show output", "inspect", "debug", "print value", "show text"]
def main(self, source=None): def main(self, source=None):

View File

@@ -11,6 +11,8 @@ class String(io.ComfyNode):
node_id="PrimitiveString", node_id="PrimitiveString",
display_name="String", display_name="String",
category="utils/primitive", category="utils/primitive",
description="A primitive node that passes through a string value.",
short_description=None,
inputs=[ inputs=[
io.String.Input("value"), io.String.Input("value"),
], ],
@@ -29,6 +31,8 @@ class StringMultiline(io.ComfyNode):
node_id="PrimitiveStringMultiline", node_id="PrimitiveStringMultiline",
display_name="String (Multiline)", display_name="String (Multiline)",
category="utils/primitive", category="utils/primitive",
description="A primitive node that passes through a multiline string value.",
short_description=None,
inputs=[ inputs=[
io.String.Input("value", multiline=True), io.String.Input("value", multiline=True),
], ],
@@ -47,6 +51,8 @@ class Int(io.ComfyNode):
node_id="PrimitiveInt", node_id="PrimitiveInt",
display_name="Int", display_name="Int",
category="utils/primitive", category="utils/primitive",
description="A primitive node that passes through an integer value.",
short_description=None,
inputs=[ inputs=[
io.Int.Input("value", min=-sys.maxsize, max=sys.maxsize, control_after_generate=True), io.Int.Input("value", min=-sys.maxsize, max=sys.maxsize, control_after_generate=True),
], ],
@@ -65,6 +71,8 @@ class Float(io.ComfyNode):
node_id="PrimitiveFloat", node_id="PrimitiveFloat",
display_name="Float", display_name="Float",
category="utils/primitive", category="utils/primitive",
description="A primitive node that passes through a float value.",
short_description=None,
inputs=[ inputs=[
io.Float.Input("value", min=-sys.maxsize, max=sys.maxsize, step=0.1), io.Float.Input("value", min=-sys.maxsize, max=sys.maxsize, step=0.1),
], ],
@@ -83,6 +91,8 @@ class Boolean(io.ComfyNode):
node_id="PrimitiveBoolean", node_id="PrimitiveBoolean",
display_name="Boolean", display_name="Boolean",
category="utils/primitive", category="utils/primitive",
description="A primitive node that passes through a boolean value.",
short_description=None,
inputs=[ inputs=[
io.Boolean.Input("value"), io.Boolean.Input("value"),
], ],

View File

@@ -13,6 +13,8 @@ class TextEncodeQwenImageEdit(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="TextEncodeQwenImageEdit", node_id="TextEncodeQwenImageEdit",
category="advanced/conditioning", category="advanced/conditioning",
description="Encodes a text prompt with an optional reference image for Qwen-based image editing, producing conditioning with latent reference.",
short_description="Text and image encoding for Qwen image editing.",
inputs=[ inputs=[
io.Clip.Input("clip"), io.Clip.Input("clip"),
io.String.Input("prompt", multiline=True, dynamic_prompts=True), io.String.Input("prompt", multiline=True, dynamic_prompts=True),
@@ -56,6 +58,8 @@ class TextEncodeQwenImageEditPlus(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="TextEncodeQwenImageEditPlus", node_id="TextEncodeQwenImageEditPlus",
category="advanced/conditioning", category="advanced/conditioning",
description="Encodes a text prompt with up to three reference images for Qwen-based multi-image editing, producing conditioning with latent references.",
short_description="Multi-image text encoding for Qwen image editing.",
inputs=[ inputs=[
io.Clip.Input("clip"), io.Clip.Input("clip"),
io.String.Input("prompt", multiline=True, dynamic_prompts=True), io.String.Input("prompt", multiline=True, dynamic_prompts=True),
@@ -113,6 +117,8 @@ class EmptyQwenImageLayeredLatentImage(io.ComfyNode):
node_id="EmptyQwenImageLayeredLatentImage", node_id="EmptyQwenImageLayeredLatentImage",
display_name="Empty Qwen Image Layered Latent", display_name="Empty Qwen Image Layered Latent",
category="latent/qwen", category="latent/qwen",
description="Creates an empty multi-layer latent tensor for Qwen image generation with a configurable number of layers.",
short_description="Empty multi-layer latent for Qwen image generation.",
inputs=[ inputs=[
io.Int.Input("width", default=640, min=16, max=nodes.MAX_RESOLUTION, step=16), io.Int.Input("width", default=640, min=16, max=nodes.MAX_RESOLUTION, step=16),
io.Int.Input("height", default=640, min=16, max=nodes.MAX_RESOLUTION, step=16), io.Int.Input("height", default=640, min=16, max=nodes.MAX_RESOLUTION, step=16),

View File

@@ -11,6 +11,8 @@ class LatentRebatch(io.ComfyNode):
node_id="RebatchLatents", node_id="RebatchLatents",
display_name="Rebatch Latents", display_name="Rebatch Latents",
category="latent/batch", category="latent/batch",
description="Splits and recombines latent batches into a new batch size, handling noise masks and batch indices across differently sized inputs.",
short_description="Rebatch latents to a specified batch size.",
is_input_list=True, is_input_list=True,
inputs=[ inputs=[
io.Latent.Input("latents"), io.Latent.Input("latents"),
@@ -114,6 +116,7 @@ class ImageRebatch(io.ComfyNode):
node_id="RebatchImages", node_id="RebatchImages",
display_name="Rebatch Images", display_name="Rebatch Images",
category="image/batch", category="image/batch",
description="Splits and recombines image batches into a new specified batch size.",
is_input_list=True, is_input_list=True,
inputs=[ inputs=[
io.Image.Input("images"), io.Image.Input("images"),

View File

@@ -9,6 +9,7 @@ class ScaleROPE(io.ComfyNode):
node_id="ScaleROPE", node_id="ScaleROPE",
category="advanced/model_patches", category="advanced/model_patches",
description="Scale and shift the ROPE of the model.", description="Scale and shift the ROPE of the model.",
short_description=None,
is_experimental=True, is_experimental=True,
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),

View File

@@ -114,6 +114,8 @@ class SelfAttentionGuidance(io.ComfyNode):
node_id="SelfAttentionGuidance", node_id="SelfAttentionGuidance",
display_name="Self-Attention Guidance", display_name="Self-Attention Guidance",
category="_for_testing", category="_for_testing",
description="Applies Self-Attention Guidance (SAG) which uses attention maps to create adversarially blurred images and computes a guidance signal that enhances fine details.",
short_description="Self-Attention Guidance for enhanced detail.",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
io.Float.Input("scale", default=0.5, min=-2.0, max=5.0, step=0.01), io.Float.Input("scale", default=0.5, min=-2.0, max=5.0, step=0.01),

View File

@@ -15,6 +15,7 @@ class TripleCLIPLoader(io.ComfyNode):
node_id="TripleCLIPLoader", node_id="TripleCLIPLoader",
category="advanced/loaders", category="advanced/loaders",
description="[Recipes]\n\nsd3: clip-l, clip-g, t5", description="[Recipes]\n\nsd3: clip-l, clip-g, t5",
short_description=None,
inputs=[ inputs=[
io.Combo.Input("clip_name1", options=folder_paths.get_filename_list("text_encoders")), io.Combo.Input("clip_name1", options=folder_paths.get_filename_list("text_encoders")),
io.Combo.Input("clip_name2", options=folder_paths.get_filename_list("text_encoders")), io.Combo.Input("clip_name2", options=folder_paths.get_filename_list("text_encoders")),
@@ -42,6 +43,8 @@ class EmptySD3LatentImage(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="EmptySD3LatentImage", node_id="EmptySD3LatentImage",
category="latent/sd3", category="latent/sd3",
description="Creates an empty SD3 latent image tensor with the specified width, height, and batch size.",
short_description="Creates an empty SD3 latent image tensor.",
inputs=[ inputs=[
io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16), io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
io.Int.Input("height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16), io.Int.Input("height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
@@ -67,6 +70,8 @@ class CLIPTextEncodeSD3(io.ComfyNode):
node_id="CLIPTextEncodeSD3", node_id="CLIPTextEncodeSD3",
search_aliases=["sd3 prompt"], search_aliases=["sd3 prompt"],
category="advanced/conditioning", category="advanced/conditioning",
description="Encodes separate CLIP-L, CLIP-G, and T5-XXL text prompts into SD3 conditioning with optional empty padding.",
short_description="Encodes multi-encoder text prompts for SD3.",
inputs=[ inputs=[
io.Clip.Input("clip"), io.Clip.Input("clip"),
io.String.Input("clip_l", multiline=True, dynamic_prompts=True), io.String.Input("clip_l", multiline=True, dynamic_prompts=True),
@@ -114,6 +119,8 @@ class ControlNetApplySD3(io.ComfyNode):
node_id="ControlNetApplySD3", node_id="ControlNetApplySD3",
display_name="Apply Controlnet with VAE", display_name="Apply Controlnet with VAE",
category="conditioning/controlnet", category="conditioning/controlnet",
description="Applies a ControlNet to SD3 conditioning using a VAE-encoded control image with adjustable strength and start/end percentages.",
short_description="Applies ControlNet with VAE to SD3 conditioning.",
inputs=[ inputs=[
io.Conditioning.Input("positive"), io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"), io.Conditioning.Input("negative"),
@@ -177,6 +184,7 @@ class SkipLayerGuidanceSD3(io.ComfyNode):
node_id="SkipLayerGuidanceSD3", node_id="SkipLayerGuidanceSD3",
category="advanced/guidance", category="advanced/guidance",
description="Generic version of SkipLayerGuidance node that can be used on every DiT model.", description="Generic version of SkipLayerGuidance node that can be used on every DiT model.",
short_description="Skip layer guidance applicable to any DiT model.",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
io.String.Input("layers", default="7, 8, 9", multiline=False), io.String.Input("layers", default="7, 8, 9", multiline=False),

Some files were not shown because too many files have changed in this diff Show More