Mirror of https://github.com/comfyanonymous/ComfyUI.git
Compare commits: v0.14.1...pysssss/no (1 commit, ecec1310b2)

@@ -1300,6 +1300,7 @@ class NodeInfoV1:
    name: str=None
    display_name: str=None
    description: str=None
    short_description: str=None
    python_module: Any=None
    category: str=None
    output_node: bool=None

@@ -1390,6 +1391,8 @@ class Schema:
    hidden: list[Hidden] = field(default_factory=list)
    description: str=""
    """Node description, shown as a tooltip when hovering over the node."""
    short_description: str=""
    """Short node description, shown in the node list/search."""
    search_aliases: list[str] = field(default_factory=list)
    """Alternative names for search. Useful for synonyms, abbreviations, or old names after renaming."""
    is_input_list: bool = False

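For context, a minimal sketch of how a node can populate the new Schema fields. The node id, category, inputs, outputs, and the `comfy_api.latest` import path below are illustrative assumptions, not part of this commit:

from comfy_api.latest import io as IO  # assumed V3 API import path


class ExampleUpscaleNode(IO.ComfyNode):  # hypothetical node, for illustration only
    @classmethod
    def define_schema(cls) -> IO.Schema:
        return IO.Schema(
            node_id="ExampleUpscaleNode",
            display_name="Example Upscale",
            category="example/image",
            # Long text, shown as a tooltip when hovering over the node.
            description="Upscales an image by a fixed factor using a simple filter.",
            # New in this commit: short text shown in the node list/search.
            short_description="Upscale an image.",
            # New in this commit: extra names the search box should match.
            search_aliases=["resize", "enlarge"],
            inputs=[IO.Image.Input("image")],
            outputs=[IO.Image.Output()],
        )

    @classmethod
    def execute(cls, image) -> IO.NodeOutput:
        return IO.NodeOutput(image)  # placeholder pass-through
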
@@ -1528,6 +1531,7 @@ class Schema:
            display_name=self.display_name,
            category=self.category,
            description=self.description,
            short_description=self.short_description,
            output_node=self.is_output_node,
            deprecated=self.is_deprecated,
            experimental=self.is_experimental,

@@ -1771,6 +1775,14 @@ class _ComfyNodeBaseInternal(_ComfyNodeInternal):
            cls.GET_SCHEMA()
        return cls._DESCRIPTION

    _SHORT_DESCRIPTION = None
    @final
    @classproperty
    def SHORT_DESCRIPTION(cls):  # noqa
        if cls._SHORT_DESCRIPTION is None:
            cls.GET_SCHEMA()
        return cls._SHORT_DESCRIPTION

    _CATEGORY = None
    @final
    @classproperty
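The new property mirrors the existing DESCRIPTION one: the class-level cache starts as None and is filled the first time GET_SCHEMA() runs, so legacy V1-style code that reads NodeCls.SHORT_DESCRIPTION still gets the value declared in define_schema(). A standalone sketch of that lazy-resolution pattern (ComfyUI's real classproperty helper and GET_SCHEMA() are more involved; the value below is illustrative):

class classproperty:
    """Minimal descriptor that exposes a per-class computed attribute."""
    def __init__(self, fget):
        self.fget = fget

    def __get__(self, obj, owner):
        return self.fget(owner)


class NodeBase:
    _SHORT_DESCRIPTION = None

    @classmethod
    def GET_SCHEMA(cls):
        # In ComfyUI this builds and validates the schema, then copies
        # schema.short_description into the class-level cache (see the
        # GET_SCHEMA hunk below); hard-coded here for illustration.
        cls._SHORT_DESCRIPTION = "Upscale an image."

    @classproperty
    def SHORT_DESCRIPTION(cls):
        if cls._SHORT_DESCRIPTION is None:
            cls.GET_SCHEMA()  # resolve lazily on first access
        return cls._SHORT_DESCRIPTION


print(NodeBase.SHORT_DESCRIPTION)  # -> Upscale an image.
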
@@ -1899,6 +1911,8 @@ class _ComfyNodeBaseInternal(_ComfyNodeInternal):
        schema.validate()
        if cls._DESCRIPTION is None:
            cls._DESCRIPTION = schema.description
        if cls._SHORT_DESCRIPTION is None:
            cls._SHORT_DESCRIPTION = schema.short_description
        if cls._CATEGORY is None:
            cls._CATEGORY = schema.category
        if cls._EXPERIMENTAL is None:

@@ -44,6 +44,7 @@ class FluxProUltraImageNode(IO.ComfyNode):
            display_name="Flux 1.1 [pro] Ultra Image",
            category="api node/image/BFL",
            description="Generates images using Flux Pro 1.1 Ultra via api based on prompt and resolution.",
            short_description="Generate images with Flux Pro 1.1 Ultra API.",
            inputs=[
                IO.String.Input(
                    "prompt",

@@ -154,13 +155,17 @@ class FluxProUltraImageNode(IO.ComfyNode):

class FluxKontextProImageNode(IO.ComfyNode):

    DESCRIPTION = "Edits images using Flux.1 Kontext [pro] via api based on prompt and aspect ratio."
    SHORT_DESCRIPTION = "Edit images with Flux.1 Kontext [pro] API."

    @classmethod
    def define_schema(cls) -> IO.Schema:
        return IO.Schema(
            node_id=cls.NODE_ID,
            display_name=cls.DISPLAY_NAME,
            category="api node/image/BFL",
            description="Edits images using Flux.1 Kontext [pro] via api based on prompt and aspect ratio.",
            description=cls.DESCRIPTION,
            short_description=cls.SHORT_DESCRIPTION,
            inputs=[
                IO.String.Input(
                    "prompt",
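Switching FluxKontextProImageNode to class-level DESCRIPTION and SHORT_DESCRIPTION constants (replacing the inline description string above) lets FluxKontextMaxImageNode in the next hunk override only those strings while inheriting define_schema() unchanged.
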
@@ -268,6 +273,7 @@ class FluxKontextProImageNode(IO.ComfyNode):

class FluxKontextMaxImageNode(FluxKontextProImageNode):

    DESCRIPTION = "Edits images using Flux.1 Kontext [max] via api based on prompt and aspect ratio."
    SHORT_DESCRIPTION = "Edit images with Flux.1 Kontext [max] API."
    BFL_PATH = "/proxy/bfl/flux-kontext-max/generate"
    NODE_ID = "FluxKontextMaxImageNode"
    DISPLAY_NAME = "Flux.1 Kontext [max] Image"

@@ -282,6 +288,7 @@ class FluxProExpandNode(IO.ComfyNode):
            display_name="Flux.1 Expand Image",
            category="api node/image/BFL",
            description="Outpaints image based on prompt.",
            short_description=None,
            inputs=[
                IO.Image.Input("image"),
                IO.String.Input(

@@ -418,6 +425,7 @@ class FluxProFillNode(IO.ComfyNode):
            display_name="Flux.1 Fill Image",
            category="api node/image/BFL",
            description="Inpaints image based on mask and prompt.",
            short_description=None,
            inputs=[
                IO.Image.Input("image"),
                IO.Mask.Input("mask"),

@@ -543,6 +551,7 @@ class Flux2ProImageNode(IO.ComfyNode):
            display_name=cls.DISPLAY_NAME,
            category="api node/image/BFL",
            description="Generates images synchronously based on prompt and resolution.",
            short_description=None,
            inputs=[
                IO.String.Input(
                    "prompt",

@@ -33,6 +33,7 @@ class BriaImageEditNode(IO.ComfyNode):
            display_name="Bria FIBO Image Edit",
            category="api node/image/Bria",
            description="Edit images using Bria latest model",
            short_description=None,
            inputs=[
                IO.Combo.Input("model", options=["FIBO"]),
                IO.Image.Input("image"),

@@ -60,6 +60,7 @@ class ByteDanceImageNode(IO.ComfyNode):
            display_name="ByteDance Image",
            category="api node/image/ByteDance",
            description="Generate images using ByteDance models via api based on prompt",
            short_description=None,
            inputs=[
                IO.Combo.Input("model", options=["seedream-3-0-t2i-250415"]),
                IO.String.Input(

@@ -182,6 +183,7 @@ class ByteDanceSeedreamNode(IO.ComfyNode):
            display_name="ByteDance Seedream 4.5",
            category="api node/image/ByteDance",
            description="Unified text-to-image generation and precise single-sentence editing at up to 4K resolution.",
            short_description="Text-to-image generation and editing up to 4K.",
            inputs=[
                IO.Combo.Input(
                    "model",

@@ -380,6 +382,7 @@ class ByteDanceTextToVideoNode(IO.ComfyNode):
            display_name="ByteDance Text to Video",
            category="api node/video/ByteDance",
            description="Generate video using ByteDance models via api based on prompt",
            short_description=None,
            inputs=[
                IO.Combo.Input(
                    "model",

@@ -505,6 +508,7 @@ class ByteDanceImageToVideoNode(IO.ComfyNode):
            display_name="ByteDance Image to Video",
            category="api node/video/ByteDance",
            description="Generate video using ByteDance models via api based on image and prompt",
            short_description="Generate video from image and prompt via ByteDance API.",
            inputs=[
                IO.Combo.Input(
                    "model",

@@ -639,6 +643,7 @@ class ByteDanceFirstLastFrameNode(IO.ComfyNode):
            display_name="ByteDance First-Last-Frame to Video",
            category="api node/video/ByteDance",
            description="Generate video using prompt and first and last frames.",
            short_description=None,
            inputs=[
                IO.Combo.Input(
                    "model",

@@ -784,6 +789,7 @@ class ByteDanceImageReferenceNode(IO.ComfyNode):
            display_name="ByteDance Reference Images to Video",
            category="api node/video/ByteDance",
            description="Generate video using prompt and reference images.",
            short_description=None,
            inputs=[
                IO.Combo.Input(
                    "model",

@@ -254,6 +254,7 @@ class GeminiNode(IO.ComfyNode):
            description="Generate text responses with Google's Gemini AI model. "
            "You can provide multiple types of inputs (text, images, audio, video) "
            "as context for generating more relevant and meaningful responses.",
            short_description="Generate text responses with Google's Gemini AI.",
            inputs=[
                IO.String.Input(
                    "prompt",

@@ -480,6 +481,7 @@ class GeminiInputFiles(IO.ComfyNode):
            "The files will be read by the Gemini model when generating a response. "
            "The contents of the text file count toward the token limit. "
            "🛈 TIP: Can be chained together with other Gemini Input File nodes.",
            short_description="Load and prepare input files for Gemini LLM nodes.",
            inputs=[
                IO.Combo.Input(
                    "file",

@@ -534,6 +536,7 @@ class GeminiImage(IO.ComfyNode):
            display_name="Nano Banana (Google Gemini Image)",
            category="api node/image/Gemini",
            description="Edit images synchronously via Google API.",
            short_description=None,
            inputs=[
                IO.String.Input(
                    "prompt",

@@ -665,6 +668,7 @@ class GeminiImage2(IO.ComfyNode):
            display_name="Nano Banana Pro (Google Gemini Image)",
            category="api node/image/Gemini",
            description="Generate or edit images synchronously via Google Vertex API.",
            short_description=None,
            inputs=[
                IO.String.Input(
                    "prompt",

@@ -36,6 +36,7 @@ class GrokImageNode(IO.ComfyNode):
            display_name="Grok Image",
            category="api node/image/Grok",
            description="Generate images using Grok based on a text prompt",
            short_description=None,
            inputs=[
                IO.Combo.Input("model", options=["grok-imagine-image-beta"]),
                IO.String.Input(

@@ -137,6 +138,7 @@ class GrokImageEditNode(IO.ComfyNode):
            display_name="Grok Image Edit",
            category="api node/image/Grok",
            description="Modify an existing image based on a text prompt",
            short_description=None,
            inputs=[
                IO.Combo.Input("model", options=["grok-imagine-image-beta"]),
                IO.Image.Input("image"),

@@ -226,6 +228,7 @@ class GrokVideoNode(IO.ComfyNode):
            display_name="Grok Video",
            category="api node/video/Grok",
            description="Generate video from a prompt or an image",
            short_description=None,
            inputs=[
                IO.Combo.Input("model", options=["grok-imagine-video-beta"]),
                IO.String.Input(

@@ -334,6 +337,7 @@ class GrokVideoEditNode(IO.ComfyNode):
            display_name="Grok Video Edit",
            category="api node/video/Grok",
            description="Edit an existing video based on a text prompt.",
            short_description=None,
            inputs=[
                IO.Combo.Input("model", options=["grok-imagine-video-beta"]),
                IO.String.Input(

@@ -74,6 +74,7 @@ class HitPawGeneralImageEnhance(IO.ComfyNode):
|
||||
category="api node/image/HitPaw",
|
||||
description="Upscale low-resolution images to super-resolution, eliminate artifacts and noise. "
|
||||
f"Maximum output: {MAX_MP_GENERATIVE} megapixels.",
|
||||
short_description="Upscale images to super-resolution, removing artifacts and noise.",
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["generative_portrait", "generative"]),
|
||||
IO.Image.Input("image"),
|
||||
@@ -205,6 +206,7 @@ class HitPawVideoEnhance(IO.ComfyNode):
|
||||
category="api node/video/HitPaw",
|
||||
description="Upscale low-resolution videos to high resolution, eliminate artifacts and noise. "
|
||||
"Prices shown are per second of video.",
|
||||
short_description="Upscale videos to high resolution, removing artifacts and noise.",
|
||||
inputs=[
|
||||
IO.DynamicCombo.Input("model", options=model_options),
|
||||
IO.Video.Input("video"),
|
||||
|
||||
@@ -54,6 +54,8 @@ class TencentTextToModelNode(IO.ComfyNode):
|
||||
node_id="TencentTextToModelNode",
|
||||
display_name="Hunyuan3D: Text to Model",
|
||||
category="api node/3d/Tencent",
|
||||
description="Generate 3D models from text prompts using Hunyuan3D Pro with configurable face count and geometry options.",
|
||||
short_description="Generate 3D models from text using Hunyuan3D Pro.",
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"model",
|
||||
@@ -168,6 +170,8 @@ class TencentImageToModelNode(IO.ComfyNode):
|
||||
node_id="TencentImageToModelNode",
|
||||
display_name="Hunyuan3D: Image(s) to Model",
|
||||
category="api node/3d/Tencent",
|
||||
description="Generate 3D models from images using Hunyuan3D Pro with optional multi-view inputs and configurable geometry.",
|
||||
short_description="Generate 3D models from images using Hunyuan3D Pro.",
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"model",
|
||||
|
||||
@@ -236,6 +236,7 @@ class IdeogramV1(IO.ComfyNode):
|
||||
display_name="Ideogram V1",
|
||||
category="api node/image/Ideogram",
|
||||
description="Generates images using the Ideogram V1 model.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -361,6 +362,7 @@ class IdeogramV2(IO.ComfyNode):
|
||||
display_name="Ideogram V2",
|
||||
category="api node/image/Ideogram",
|
||||
description="Generates images using the Ideogram V2 model.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -526,6 +528,7 @@ class IdeogramV3(IO.ComfyNode):
|
||||
category="api node/image/Ideogram",
|
||||
description="Generates images using the Ideogram V3 model. "
|
||||
"Supports both regular image generation from text prompts and image editing with mask.",
|
||||
short_description="Generate and edit images with Ideogram V3.",
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
|
||||
@@ -642,6 +642,7 @@ class KlingCameraControls(IO.ComfyNode):
|
||||
display_name="Kling Camera Controls",
|
||||
category="api node/video/Kling",
|
||||
description="Allows specifying configuration options for Kling Camera Controls and motion control effects.",
|
||||
short_description="Configure Kling camera controls and motion effects.",
|
||||
inputs=[
|
||||
IO.Combo.Input("camera_control_type", options=KlingCameraControlType),
|
||||
IO.Float.Input(
|
||||
@@ -762,6 +763,7 @@ class KlingTextToVideoNode(IO.ComfyNode):
|
||||
display_name="Kling Text to Video",
|
||||
category="api node/video/Kling",
|
||||
description="Kling Text to Video Node",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"),
|
||||
IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"),
|
||||
@@ -849,6 +851,7 @@ class OmniProTextToVideoNode(IO.ComfyNode):
|
||||
display_name="Kling 3.0 Omni Text to Video",
|
||||
category="api node/video/Kling",
|
||||
description="Use text prompts to generate videos with the latest Kling model.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]),
|
||||
IO.String.Input(
|
||||
@@ -989,6 +992,7 @@ class OmniProFirstLastFrameNode(IO.ComfyNode):
|
||||
display_name="Kling 3.0 Omni First-Last-Frame to Video",
|
||||
category="api node/video/Kling",
|
||||
description="Use a start frame, an optional end frame, or reference images with the latest Kling model.",
|
||||
short_description="Generate video from start/end frames or reference images.",
|
||||
inputs=[
|
||||
IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]),
|
||||
IO.String.Input(
|
||||
@@ -1187,6 +1191,7 @@ class OmniProImageToVideoNode(IO.ComfyNode):
|
||||
display_name="Kling 3.0 Omni Image to Video",
|
||||
category="api node/video/Kling",
|
||||
description="Use up to 7 reference images to generate a video with the latest Kling model.",
|
||||
short_description="Generate video from up to 7 reference images.",
|
||||
inputs=[
|
||||
IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]),
|
||||
IO.String.Input(
|
||||
@@ -1347,6 +1352,7 @@ class OmniProVideoToVideoNode(IO.ComfyNode):
|
||||
display_name="Kling 3.0 Omni Video to Video",
|
||||
category="api node/video/Kling",
|
||||
description="Use a video and up to 4 reference images to generate a video with the latest Kling model.",
|
||||
short_description="Generate video from a video and reference images.",
|
||||
inputs=[
|
||||
IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]),
|
||||
IO.String.Input(
|
||||
@@ -1458,6 +1464,7 @@ class OmniProEditVideoNode(IO.ComfyNode):
|
||||
display_name="Kling 3.0 Omni Edit Video",
|
||||
category="api node/video/Kling",
|
||||
description="Edit an existing video with the latest model from Kling.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]),
|
||||
IO.String.Input(
|
||||
@@ -1565,6 +1572,7 @@ class OmniProImageNode(IO.ComfyNode):
|
||||
display_name="Kling 3.0 Omni Image",
|
||||
category="api node/image/Kling",
|
||||
description="Create or edit images with the latest model from Kling.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-image-o1"]),
|
||||
IO.String.Input(
|
||||
@@ -1693,6 +1701,7 @@ class KlingCameraControlT2VNode(IO.ComfyNode):
|
||||
display_name="Kling Text to Video (Camera Control)",
|
||||
category="api node/video/Kling",
|
||||
description="Transform text into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original text.",
|
||||
short_description="Generate videos from text with camera movement controls.",
|
||||
inputs=[
|
||||
IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"),
|
||||
IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"),
|
||||
@@ -1754,6 +1763,8 @@ class KlingImage2VideoNode(IO.ComfyNode):
|
||||
node_id="KlingImage2VideoNode",
|
||||
display_name="Kling Image(First Frame) to Video",
|
||||
category="api node/video/Kling",
|
||||
description="Generate a video from a first-frame image with configurable model, mode, aspect ratio, and duration settings.",
|
||||
short_description="Generate video from a first-frame reference image.",
|
||||
inputs=[
|
||||
IO.Image.Input("start_frame", tooltip="The reference image used to generate the video."),
|
||||
IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"),
|
||||
@@ -1854,6 +1865,7 @@ class KlingCameraControlI2VNode(IO.ComfyNode):
|
||||
display_name="Kling Image to Video (Camera Control)",
|
||||
category="api node/video/Kling",
|
||||
description="Transform still images into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original image.",
|
||||
short_description="Generate videos from images with camera movement controls.",
|
||||
inputs=[
|
||||
IO.Image.Input(
|
||||
"start_frame",
|
||||
@@ -1925,6 +1937,7 @@ class KlingStartEndFrameNode(IO.ComfyNode):
|
||||
display_name="Kling Start-End Frame to Video",
|
||||
category="api node/video/Kling",
|
||||
description="Generate a video sequence that transitions between your provided start and end images. The node creates all frames in between, producing a smooth transformation from the first frame to the last.",
|
||||
short_description="Generate video transitioning between start and end frame images.",
|
||||
inputs=[
|
||||
IO.Image.Input(
|
||||
"start_frame",
|
||||
@@ -2019,6 +2032,7 @@ class KlingVideoExtendNode(IO.ComfyNode):
|
||||
display_name="Kling Video Extend",
|
||||
category="api node/video/Kling",
|
||||
description="Kling Video Extend Node. Extend videos made by other Kling nodes. The video_id is created by using other Kling Nodes.",
|
||||
short_description="Extend videos generated by other Kling nodes.",
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -2100,6 +2114,7 @@ class KlingDualCharacterVideoEffectNode(IO.ComfyNode):
|
||||
display_name="Kling Dual Character Video Effects",
|
||||
category="api node/video/Kling",
|
||||
description="Achieve different special effects when generating a video based on the effect_scene. First image will be positioned on left side, second on right side of the composite.",
|
||||
short_description="Apply dual-character video effects from two images.",
|
||||
inputs=[
|
||||
IO.Image.Input("image_left", tooltip="Left side image"),
|
||||
IO.Image.Input("image_right", tooltip="Right side image"),
|
||||
@@ -2190,6 +2205,7 @@ class KlingSingleImageVideoEffectNode(IO.ComfyNode):
|
||||
display_name="Kling Video Effects",
|
||||
category="api node/video/Kling",
|
||||
description="Achieve different special effects when generating a video based on the effect_scene.",
|
||||
short_description="Apply special video effects to a single image.",
|
||||
inputs=[
|
||||
IO.Image.Input(
|
||||
"image",
|
||||
@@ -2263,6 +2279,7 @@ class KlingLipSyncAudioToVideoNode(IO.ComfyNode):
|
||||
display_name="Kling Lip Sync Video with Audio",
|
||||
category="api node/video/Kling",
|
||||
description="Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file. When using, ensure that the audio contains clearly distinguishable vocals and that the video contains a distinct face. The audio file should not be larger than 5MB. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.",
|
||||
short_description="Sync video mouth movements to audio content.",
|
||||
inputs=[
|
||||
IO.Video.Input("video"),
|
||||
IO.Audio.Input("audio"),
|
||||
@@ -2314,6 +2331,7 @@ class KlingLipSyncTextToVideoNode(IO.ComfyNode):
|
||||
display_name="Kling Lip Sync Video with Text",
|
||||
category="api node/video/Kling",
|
||||
description="Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.",
|
||||
short_description="Sync video mouth movements to a text prompt.",
|
||||
inputs=[
|
||||
IO.Video.Input("video"),
|
||||
IO.String.Input(
|
||||
@@ -2381,6 +2399,7 @@ class KlingVirtualTryOnNode(IO.ComfyNode):
|
||||
display_name="Kling Virtual Try On",
|
||||
category="api node/image/Kling",
|
||||
description="Kling Virtual Try On Node. Input a human image and a cloth image to try on the cloth on the human. You can merge multiple clothing item pictures into one image with a white background.",
|
||||
short_description="Virtually try clothing onto a human image.",
|
||||
inputs=[
|
||||
IO.Image.Input("human_image"),
|
||||
IO.Image.Input("cloth_image"),
|
||||
@@ -2448,6 +2467,7 @@ class KlingImageGenerationNode(IO.ComfyNode):
|
||||
display_name="Kling 3.0 Image",
|
||||
category="api node/image/Kling",
|
||||
description="Kling Image Generation Node. Generate an image from a text prompt with an optional reference image.",
|
||||
short_description="Generate images from text with optional reference image.",
|
||||
inputs=[
|
||||
IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"),
|
||||
IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"),
|
||||
@@ -2581,6 +2601,8 @@ class TextToVideoWithAudio(IO.ComfyNode):
|
||||
node_id="KlingTextToVideoWithAudio",
|
||||
display_name="Kling 2.6 Text to Video with Audio",
|
||||
category="api node/video/Kling",
|
||||
description="Generate a video with synchronized audio from a text prompt using the Kling v2-6 model.",
|
||||
short_description="Generate video with audio from text using Kling v2-6.",
|
||||
inputs=[
|
||||
IO.Combo.Input("model_name", options=["kling-v2-6"]),
|
||||
IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt."),
|
||||
@@ -2649,6 +2671,8 @@ class ImageToVideoWithAudio(IO.ComfyNode):
|
||||
node_id="KlingImageToVideoWithAudio",
|
||||
display_name="Kling 2.6 Image(First Frame) to Video with Audio",
|
||||
category="api node/video/Kling",
|
||||
description="Generate a video with synchronized audio from a first-frame image and text prompt using the Kling v2-6 model.",
|
||||
short_description="Generate video with audio from an image using Kling v2-6.",
|
||||
inputs=[
|
||||
IO.Combo.Input("model_name", options=["kling-v2-6"]),
|
||||
IO.Image.Input("start_frame"),
|
||||
@@ -2719,6 +2743,8 @@ class MotionControl(IO.ComfyNode):
|
||||
node_id="KlingMotionControl",
|
||||
display_name="Kling Motion Control",
|
||||
category="api node/video/Kling",
|
||||
description="Drive character movement and expression in video using a reference image and motion reference video.",
|
||||
short_description="Control video character motion using reference image and video.",
|
||||
inputs=[
|
||||
IO.String.Input("prompt", multiline=True),
|
||||
IO.Image.Input("reference_image"),
|
||||
@@ -2815,6 +2841,7 @@ class KlingVideoNode(IO.ComfyNode):
|
||||
category="api node/video/Kling",
|
||||
description="Generate videos with Kling V3. "
|
||||
"Supports text-to-video and image-to-video with optional storyboard multi-prompt and audio generation.",
|
||||
short_description="Generate videos with Kling V3 from text or images.",
|
||||
inputs=[
|
||||
IO.DynamicCombo.Input(
|
||||
"multi_shot",
|
||||
|
||||
@@ -52,6 +52,7 @@ class TextToVideoNode(IO.ComfyNode):
|
||||
display_name="LTXV Text To Video",
|
||||
category="api node/video/LTXV",
|
||||
description="Professional-quality videos with customizable duration and resolution.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=list(MODELS_MAP.keys())),
|
||||
IO.String.Input(
|
||||
@@ -128,6 +129,7 @@ class ImageToVideoNode(IO.ComfyNode):
|
||||
display_name="LTXV Image To Video",
|
||||
category="api node/video/LTXV",
|
||||
description="Professional-quality videos with customizable duration and resolution based on start image.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image", tooltip="First frame to be used for the video."),
|
||||
IO.Combo.Input("model", options=list(MODELS_MAP.keys())),
|
||||
|
||||
@@ -46,6 +46,7 @@ class LumaReferenceNode(IO.ComfyNode):
|
||||
display_name="Luma Reference",
|
||||
category="api node/image/Luma",
|
||||
description="Holds an image and weight for use with Luma Generate Image node.",
|
||||
short_description="Image and weight input for Luma generation.",
|
||||
inputs=[
|
||||
IO.Image.Input(
|
||||
"image",
|
||||
@@ -85,6 +86,7 @@ class LumaConceptsNode(IO.ComfyNode):
|
||||
display_name="Luma Concepts",
|
||||
category="api node/video/Luma",
|
||||
description="Camera Concepts for use with Luma Text to Video and Luma Image to Video nodes.",
|
||||
short_description="Camera concepts for Luma video generation nodes.",
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"concept1",
|
||||
@@ -134,6 +136,7 @@ class LumaImageGenerationNode(IO.ComfyNode):
|
||||
display_name="Luma Text to Image",
|
||||
category="api node/image/Luma",
|
||||
description="Generates images synchronously based on prompt and aspect ratio.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -278,6 +281,7 @@ class LumaImageModifyNode(IO.ComfyNode):
|
||||
display_name="Luma Image to Image",
|
||||
category="api node/image/Luma",
|
||||
description="Modifies images synchronously based on prompt and aspect ratio.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input(
|
||||
"image",
|
||||
@@ -371,6 +375,7 @@ class LumaTextToVideoGenerationNode(IO.ComfyNode):
|
||||
display_name="Luma Text to Video",
|
||||
category="api node/video/Luma",
|
||||
description="Generates videos synchronously based on prompt and output_size.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -472,6 +477,7 @@ class LumaImageToVideoGenerationNode(IO.ComfyNode):
|
||||
display_name="Luma Image to Video",
|
||||
category="api node/video/Luma",
|
||||
description="Generates videos synchronously based on prompt, input images, and output_size.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
|
||||
@@ -242,6 +242,7 @@ class MagnificImageUpscalerPreciseV2Node(IO.ComfyNode):
|
||||
category="api node/image/Magnific",
|
||||
description="High-fidelity upscaling with fine control over sharpness, grain, and detail. "
|
||||
"Maximum output: 10060×10060 pixels.",
|
||||
short_description="High-fidelity upscaling with sharpness, grain, and detail control.",
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.Combo.Input("scale_factor", options=["2x", "4x", "8x", "16x"]),
|
||||
@@ -401,6 +402,7 @@ class MagnificImageStyleTransferNode(IO.ComfyNode):
|
||||
display_name="Magnific Image Style Transfer",
|
||||
category="api node/image/Magnific",
|
||||
description="Transfer the style from a reference image to your input image.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image", tooltip="The image to apply style transfer to."),
|
||||
IO.Image.Input("reference_image", tooltip="The reference image to extract style from."),
|
||||
@@ -549,6 +551,7 @@ class MagnificImageRelightNode(IO.ComfyNode):
|
||||
display_name="Magnific Image Relight",
|
||||
category="api node/image/Magnific",
|
||||
description="Relight an image with lighting adjustments and optional reference-based light transfer.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image", tooltip="The image to relight."),
|
||||
IO.String.Input(
|
||||
@@ -787,6 +790,7 @@ class MagnificImageSkinEnhancerNode(IO.ComfyNode):
|
||||
display_name="Magnific Image Skin Enhancer",
|
||||
category="api node/image/Magnific",
|
||||
description="Skin enhancement for portraits with multiple processing modes.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image", tooltip="The portrait image to enhance."),
|
||||
IO.Int.Input(
|
||||
|
||||
@@ -34,6 +34,8 @@ class MeshyTextToModelNode(IO.ComfyNode):
|
||||
node_id="MeshyTextToModelNode",
|
||||
display_name="Meshy: Text to Model",
|
||||
category="api node/3d/Meshy",
|
||||
description="Generate a 3D model from a text prompt using the Meshy API.",
|
||||
short_description="Generate a 3D model from a text prompt.",
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["latest"]),
|
||||
IO.String.Input("prompt", multiline=True, default=""),
|
||||
@@ -146,6 +148,7 @@ class MeshyRefineNode(IO.ComfyNode):
|
||||
display_name="Meshy: Refine Draft Model",
|
||||
category="api node/3d/Meshy",
|
||||
description="Refine a previously created draft model.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["latest"]),
|
||||
IO.Custom("MESHY_TASK_ID").Input("meshy_task_id"),
|
||||
@@ -239,6 +242,8 @@ class MeshyImageToModelNode(IO.ComfyNode):
|
||||
node_id="MeshyImageToModelNode",
|
||||
display_name="Meshy: Image to Model",
|
||||
category="api node/3d/Meshy",
|
||||
description="Generate a 3D model from a single image using the Meshy API.",
|
||||
short_description="Generate a 3D model from an image.",
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["latest"]),
|
||||
IO.Image.Input("image"),
|
||||
@@ -403,6 +408,7 @@ class MeshyMultiImageToModelNode(IO.ComfyNode):
|
||||
node_id="MeshyMultiImageToModelNode",
|
||||
display_name="Meshy: Multi-Image to Model",
|
||||
category="api node/3d/Meshy",
|
||||
description="Generate a 3D model from multiple images using the Meshy API.",
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["latest"]),
|
||||
IO.Autogrow.Input(
|
||||
@@ -575,6 +581,7 @@ class MeshyRigModelNode(IO.ComfyNode):
|
||||
description="Provides a rigged character in standard formats. "
|
||||
"Auto-rigging is currently not suitable for untextured meshes, non-humanoid assets, "
|
||||
"or humanoid assets with unclear limb and body structure.",
|
||||
short_description="Rig a character model for animation.",
|
||||
inputs=[
|
||||
IO.Custom("MESHY_TASK_ID").Input("meshy_task_id"),
|
||||
IO.Float.Input(
|
||||
@@ -654,6 +661,7 @@ class MeshyAnimateModelNode(IO.ComfyNode):
|
||||
display_name="Meshy: Animate Model",
|
||||
category="api node/3d/Meshy",
|
||||
description="Apply a specific animation action to a previously rigged character.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Custom("MESHY_RIGGED_TASK_ID").Input("rig_task_id"),
|
||||
IO.Int.Input(
|
||||
@@ -719,6 +727,7 @@ class MeshyTextureNode(IO.ComfyNode):
|
||||
node_id="MeshyTextureNode",
|
||||
display_name="Meshy: Texture Model",
|
||||
category="api node/3d/Meshy",
|
||||
description="Apply textures to an existing 3D model using the Meshy API.",
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["latest"]),
|
||||
IO.Custom("MESHY_TASK_ID").Input("meshy_task_id"),
|
||||
|
||||
@@ -103,6 +103,7 @@ class MinimaxTextToVideoNode(IO.ComfyNode):
|
||||
display_name="MiniMax Text to Video",
|
||||
category="api node/video/MiniMax",
|
||||
description="Generates videos synchronously based on a prompt, and optional parameters.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt_text",
|
||||
@@ -165,6 +166,7 @@ class MinimaxImageToVideoNode(IO.ComfyNode):
|
||||
display_name="MiniMax Image to Video",
|
||||
category="api node/video/MiniMax",
|
||||
description="Generates videos synchronously based on an image and prompt, and optional parameters.",
|
||||
short_description="Generate videos from an image, prompt, and optional parameters.",
|
||||
inputs=[
|
||||
IO.Image.Input(
|
||||
"image",
|
||||
@@ -232,6 +234,7 @@ class MinimaxSubjectToVideoNode(IO.ComfyNode):
|
||||
display_name="MiniMax Subject to Video",
|
||||
category="api node/video/MiniMax",
|
||||
description="Generates videos synchronously based on an image and prompt, and optional parameters.",
|
||||
short_description="Subject-driven video generation from image and prompt.",
|
||||
inputs=[
|
||||
IO.Image.Input(
|
||||
"subject",
|
||||
@@ -296,6 +299,7 @@ class MinimaxHailuoVideoNode(IO.ComfyNode):
|
||||
display_name="MiniMax Hailuo Video",
|
||||
category="api node/video/MiniMax",
|
||||
description="Generates videos from prompt, with optional start frame using the new MiniMax Hailuo-02 model.",
|
||||
short_description="Generate videos with optional start frame using Hailuo-02.",
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt_text",
|
||||
|
||||
@@ -166,6 +166,7 @@ class MoonvalleyImg2VideoNode(IO.ComfyNode):
|
||||
display_name="Moonvalley Marey Image to Video",
|
||||
category="api node/video/Moonvalley Marey",
|
||||
description="Moonvalley Marey Image to Video Node",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input(
|
||||
"image",
|
||||
@@ -290,7 +291,8 @@ class MoonvalleyVideo2VideoNode(IO.ComfyNode):
|
||||
node_id="MoonvalleyVideo2VideoNode",
|
||||
display_name="Moonvalley Marey Video to Video",
|
||||
category="api node/video/Moonvalley Marey",
|
||||
description="",
|
||||
description="Transform an input video into a new video using a text prompt and motion or pose control.",
|
||||
short_description="Transform video using text prompt with motion or pose control.",
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -415,7 +417,8 @@ class MoonvalleyTxt2VideoNode(IO.ComfyNode):
|
||||
node_id="MoonvalleyTxt2VideoNode",
|
||||
display_name="Moonvalley Marey Text to Video",
|
||||
category="api node/video/Moonvalley Marey",
|
||||
description="",
|
||||
description="Generate a video from a text prompt using the Moonvalley Marey model.",
|
||||
short_description="Generate video from a text prompt using Moonvalley Marey.",
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
|
||||
@@ -98,6 +98,7 @@ class OpenAIDalle2(IO.ComfyNode):
|
||||
display_name="OpenAI DALL·E 2",
|
||||
category="api node/image/OpenAI",
|
||||
description="Generates images synchronously via OpenAI's DALL·E 2 endpoint.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -248,6 +249,7 @@ class OpenAIDalle3(IO.ComfyNode):
|
||||
display_name="OpenAI DALL·E 3",
|
||||
category="api node/image/OpenAI",
|
||||
description="Generates images synchronously via OpenAI's DALL·E 3 endpoint.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -366,6 +368,7 @@ class OpenAIGPTImage1(IO.ComfyNode):
|
||||
display_name="OpenAI GPT Image 1.5",
|
||||
category="api node/image/OpenAI",
|
||||
description="Generates images synchronously via OpenAI's GPT Image endpoint.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -576,6 +579,7 @@ class OpenAIChatNode(IO.ComfyNode):
|
||||
display_name="OpenAI ChatGPT",
|
||||
category="api node/text/OpenAI",
|
||||
description="Generate text responses from an OpenAI model.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -803,6 +807,7 @@ class OpenAIInputFiles(IO.ComfyNode):
|
||||
display_name="OpenAI ChatGPT Input Files",
|
||||
category="api node/text/OpenAI",
|
||||
description="Loads and prepares input files (text, pdf, etc.) to include as inputs for the OpenAI Chat Node. The files will be read by the OpenAI model when generating a response. 🛈 TIP: Can be chained together with other OpenAI Input File nodes.",
|
||||
short_description="Load and prepare input files for OpenAI Chat.",
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"file",
|
||||
@@ -850,6 +855,7 @@ class OpenAIChatConfig(IO.ComfyNode):
|
||||
display_name="OpenAI ChatGPT Advanced Options",
|
||||
category="api node/text/OpenAI",
|
||||
description="Allows specifying advanced configuration options for the OpenAI Chat Nodes.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"truncation",
|
||||
|
||||
@@ -54,6 +54,8 @@ class PixverseTemplateNode(IO.ComfyNode):
|
||||
node_id="PixverseTemplateNode",
|
||||
display_name="PixVerse Template",
|
||||
category="api node/video/PixVerse",
|
||||
description="Select a style template for PixVerse video generation.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("template", options=list(pixverse_templates.keys())),
|
||||
],
|
||||
@@ -76,6 +78,7 @@ class PixverseTextToVideoNode(IO.ComfyNode):
|
||||
display_name="PixVerse Text to Video",
|
||||
category="api node/video/PixVerse",
|
||||
description="Generates videos based on prompt and output_size.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -194,6 +197,7 @@ class PixverseImageToVideoNode(IO.ComfyNode):
|
||||
display_name="PixVerse Image to Video",
|
||||
category="api node/video/PixVerse",
|
||||
description="Generates videos based on prompt and output_size.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.String.Input(
|
||||
@@ -312,6 +316,7 @@ class PixverseTransitionVideoNode(IO.ComfyNode):
|
||||
display_name="PixVerse Transition Video",
|
||||
category="api node/video/PixVerse",
|
||||
description="Generates videos based on prompt and output_size.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("first_frame"),
|
||||
IO.Image.Input("last_frame"),
|
||||
|
||||
@@ -180,6 +180,7 @@ class RecraftColorRGBNode(IO.ComfyNode):
|
||||
display_name="Recraft Color RGB",
|
||||
category="api node/image/Recraft",
|
||||
description="Create Recraft Color by choosing specific RGB values.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Int.Input("r", default=0, min=0, max=255, tooltip="Red value of color."),
|
||||
IO.Int.Input("g", default=0, min=0, max=255, tooltip="Green value of color."),
|
||||
@@ -206,6 +207,7 @@ class RecraftControlsNode(IO.ComfyNode):
|
||||
display_name="Recraft Controls",
|
||||
category="api node/image/Recraft",
|
||||
description="Create Recraft Controls for customizing Recraft generation.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Custom(RecraftIO.COLOR).Input("colors", optional=True),
|
||||
IO.Custom(RecraftIO.COLOR).Input("background_color", optional=True),
|
||||
@@ -230,6 +232,7 @@ class RecraftStyleV3RealisticImageNode(IO.ComfyNode):
|
||||
display_name="Recraft Style - Realistic Image",
|
||||
category="api node/image/Recraft",
|
||||
description="Select realistic_image style and optional substyle.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE)),
|
||||
],
|
||||
@@ -254,7 +257,8 @@ class RecraftStyleV3DigitalIllustrationNode(RecraftStyleV3RealisticImageNode):
|
||||
node_id="RecraftStyleV3DigitalIllustration",
|
||||
display_name="Recraft Style - Digital Illustration",
|
||||
category="api node/image/Recraft",
|
||||
description="Select realistic_image style and optional substyle.",
|
||||
description="Select digital_illustration style and optional substyle.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE)),
|
||||
],
|
||||
@@ -271,9 +275,10 @@ class RecraftStyleV3VectorIllustrationNode(RecraftStyleV3RealisticImageNode):
|
||||
def define_schema(cls):
|
||||
return IO.Schema(
|
||||
node_id="RecraftStyleV3VectorIllustrationNode",
|
||||
display_name="Recraft Style - Realistic Image",
|
||||
display_name="Recraft Style - Vector Illustration",
|
||||
category="api node/image/Recraft",
|
||||
description="Select realistic_image style and optional substyle.",
|
||||
description="Select vector_illustration style and optional substyle.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE)),
|
||||
],
|
||||
@@ -292,7 +297,8 @@ class RecraftStyleV3LogoRasterNode(RecraftStyleV3RealisticImageNode):
|
||||
node_id="RecraftStyleV3LogoRaster",
|
||||
display_name="Recraft Style - Logo Raster",
|
||||
category="api node/image/Recraft",
|
||||
description="Select realistic_image style and optional substyle.",
|
||||
description="Select logo_raster style and optional substyle.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE, include_none=False)),
|
||||
],
|
||||
@@ -310,6 +316,7 @@ class RecraftStyleInfiniteStyleLibrary(IO.ComfyNode):
|
||||
display_name="Recraft Style - Infinite Style Library",
|
||||
category="api node/image/Recraft",
|
||||
description="Select style based on preexisting UUID from Recraft's Infinite Style Library.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input("style_id", default="", tooltip="UUID of style from Infinite Style Library."),
|
||||
],
|
||||
@@ -335,6 +342,7 @@ class RecraftCreateStyleNode(IO.ComfyNode):
|
||||
description="Create a custom style from reference images. "
|
||||
"Upload 1-5 images to use as style references. "
|
||||
"Total size of all images is limited to 5 MB.",
|
||||
short_description="Create a custom style from 1-5 reference images.",
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"style",
|
||||
@@ -402,6 +410,7 @@ class RecraftTextToImageNode(IO.ComfyNode):
|
||||
display_name="Recraft Text to Image",
|
||||
category="api node/image/Recraft",
|
||||
description="Generates images synchronously based on prompt and resolution.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input("prompt", multiline=True, default="", tooltip="Prompt for the image generation."),
|
||||
IO.Combo.Input(
|
||||
@@ -514,6 +523,7 @@ class RecraftImageToImageNode(IO.ComfyNode):
|
||||
display_name="Recraft Image to Image",
|
||||
category="api node/image/Recraft",
|
||||
description="Modify image based on prompt and strength.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.String.Input("prompt", multiline=True, default="", tooltip="Prompt for the image generation."),
|
||||
@@ -632,6 +642,7 @@ class RecraftImageInpaintingNode(IO.ComfyNode):
|
||||
display_name="Recraft Image Inpainting",
|
||||
category="api node/image/Recraft",
|
||||
description="Modify image based on prompt and mask.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.Mask.Input("mask"),
|
||||
@@ -734,6 +745,7 @@ class RecraftTextToVectorNode(IO.ComfyNode):
|
||||
display_name="Recraft Text to Vector",
|
||||
category="api node/image/Recraft",
|
||||
description="Generates SVG synchronously based on prompt and resolution.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input("prompt", default="", tooltip="Prompt for the image generation.", multiline=True),
|
||||
IO.Combo.Input("substyle", options=get_v3_substyles(RecraftStyleV3.vector_illustration)),
|
||||
@@ -834,6 +846,7 @@ class RecraftVectorizeImageNode(IO.ComfyNode):
|
||||
display_name="Recraft Vectorize Image",
|
||||
category="api node/image/Recraft",
|
||||
description="Generates SVG synchronously from an input image.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
],
|
||||
@@ -877,6 +890,7 @@ class RecraftReplaceBackgroundNode(IO.ComfyNode):
|
||||
display_name="Recraft Replace Background",
|
||||
category="api node/image/Recraft",
|
||||
description="Replace background on image, based on provided prompt.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.String.Input("prompt", tooltip="Prompt for the image generation.", default="", multiline=True),
|
||||
@@ -964,6 +978,7 @@ class RecraftRemoveBackgroundNode(IO.ComfyNode):
|
||||
display_name="Recraft Remove Background",
|
||||
category="api node/image/Recraft",
|
||||
description="Remove background from image, and return processed image and mask.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
],
|
||||
@@ -1012,8 +1027,9 @@ class RecraftCrispUpscaleNode(IO.ComfyNode):
|
||||
display_name="Recraft Crisp Upscale Image",
|
||||
category="api node/image/Recraft",
|
||||
description="Upscale image synchronously.\n"
|
||||
"Enhances a given raster image using ‘crisp upscale’ tool, "
|
||||
"Enhances a given raster image using 'crisp upscale' tool, "
|
||||
"increasing image resolution, making the image sharper and cleaner.",
|
||||
short_description="Crisp upscale to sharpen and increase image resolution.",
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
],
|
||||
@@ -1058,8 +1074,9 @@ class RecraftCreativeUpscaleNode(RecraftCrispUpscaleNode):
|
||||
display_name="Recraft Creative Upscale Image",
|
||||
category="api node/image/Recraft",
|
||||
description="Upscale image synchronously.\n"
|
||||
"Enhances a given raster image using ‘creative upscale’ tool, "
|
||||
"Enhances a given raster image using 'creative upscale' tool, "
|
||||
"boosting resolution with a focus on refining small details and faces.",
|
||||
short_description="Creative upscale focusing on small details and faces.",
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
],
|
||||
|
||||
@@ -238,6 +238,7 @@ class Rodin3D_Regular(IO.ComfyNode):
|
||||
display_name="Rodin 3D Generate - Regular Generate",
|
||||
category="api node/3d/Rodin",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("Images"),
|
||||
*COMMON_PARAMETERS,
|
||||
@@ -297,6 +298,7 @@ class Rodin3D_Detail(IO.ComfyNode):
|
||||
display_name="Rodin 3D Generate - Detail Generate",
|
||||
category="api node/3d/Rodin",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("Images"),
|
||||
*COMMON_PARAMETERS,
|
||||
@@ -356,6 +358,7 @@ class Rodin3D_Smooth(IO.ComfyNode):
|
||||
display_name="Rodin 3D Generate - Smooth Generate",
|
||||
category="api node/3d/Rodin",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("Images"),
|
||||
*COMMON_PARAMETERS,
|
||||
@@ -414,6 +417,7 @@ class Rodin3D_Sketch(IO.ComfyNode):
|
||||
display_name="Rodin 3D Generate - Sketch Generate",
|
||||
category="api node/3d/Rodin",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("Images"),
|
||||
IO.Int.Input(
|
||||
@@ -476,6 +480,7 @@ class Rodin3D_Gen2(IO.ComfyNode):
|
||||
display_name="Rodin 3D Generate - Gen-2 Generate",
|
||||
category="api node/3d/Rodin",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("Images"),
|
||||
IO.Int.Input(
|
||||
|
||||
@@ -145,6 +145,7 @@ class RunwayImageToVideoNodeGen3a(IO.ComfyNode):
|
||||
"Before diving in, review these best practices to ensure that "
|
||||
"your input selections will set your generation up for success: "
|
||||
"https://help.runwayml.com/hc/en-us/articles/33927968552339-Creating-with-Act-One-on-Gen-3-Alpha-and-Turbo.",
|
||||
short_description="Generate video from a starting frame using Gen3a Turbo.",
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -239,6 +240,7 @@ class RunwayImageToVideoNodeGen4(IO.ComfyNode):
|
||||
"Before diving in, review these best practices to ensure that "
|
||||
"your input selections will set your generation up for success: "
|
||||
"https://help.runwayml.com/hc/en-us/articles/37327109429011-Creating-with-Gen-4-Video.",
|
||||
short_description="Generate video from a starting frame using Gen4 Turbo.",
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -337,6 +339,7 @@ class RunwayFirstLastFrameNode(IO.ComfyNode):
|
||||
"Before diving in, review these best practices to ensure that your input selections "
|
||||
"will set your generation up for success: "
|
||||
"https://help.runwayml.com/hc/en-us/articles/34170748696595-Creating-with-Keyframes-on-Gen-3.",
|
||||
short_description="Generate video from first and last keyframes with a prompt.",
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -443,6 +446,7 @@ class RunwayTextToImageNode(IO.ComfyNode):
|
||||
category="api node/image/Runway",
|
||||
description="Generate an image from a text prompt using Runway's Gen 4 model. "
|
||||
"You can also include reference image to guide the generation.",
|
||||
short_description="Generate an image from text using Runway Gen 4.",
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
|
||||
@@ -36,6 +36,7 @@ class OpenAIVideoSora2(IO.ComfyNode):
|
||||
display_name="OpenAI Sora - Video",
|
||||
category="api node/video/Sora",
|
||||
description="OpenAI video and audio generation.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"model",
|
||||
|
||||
@@ -64,6 +64,7 @@ class StabilityStableImageUltraNode(IO.ComfyNode):
|
||||
display_name="Stability AI Stable Image Ultra",
|
||||
category="api node/image/Stability AI",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -197,6 +198,7 @@ class StabilityStableImageSD_3_5Node(IO.ComfyNode):
|
||||
display_name="Stability AI Stable Diffusion 3.5 Image",
|
||||
category="api node/image/Stability AI",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -352,6 +354,7 @@ class StabilityUpscaleConservativeNode(IO.ComfyNode):
|
||||
display_name="Stability AI Upscale Conservative",
|
||||
category="api node/image/Stability AI",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.String.Input(
|
||||
@@ -454,6 +457,7 @@ class StabilityUpscaleCreativeNode(IO.ComfyNode):
|
||||
display_name="Stability AI Upscale Creative",
|
||||
category="api node/image/Stability AI",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.String.Input(
|
||||
@@ -573,6 +577,7 @@ class StabilityUpscaleFastNode(IO.ComfyNode):
|
||||
display_name="Stability AI Upscale Fast",
|
||||
category="api node/image/Stability AI",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description="Quickly upscale an image to 4x its original size.",
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
],
|
||||
@@ -625,6 +630,7 @@ class StabilityTextToAudio(IO.ComfyNode):
|
||||
display_name="Stability AI Text To Audio",
|
||||
category="api node/audio/Stability AI",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"model",
|
||||
@@ -701,6 +707,7 @@ class StabilityAudioToAudio(IO.ComfyNode):
|
||||
display_name="Stability AI Audio To Audio",
|
||||
category="api node/audio/Stability AI",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"model",
|
||||
@@ -794,6 +801,7 @@ class StabilityAudioInpaint(IO.ComfyNode):
|
||||
display_name="Stability AI Audio Inpaint",
|
||||
category="api node/audio/Stability AI",
|
||||
description=cleandoc(cls.__doc__ or ""),
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"model",
|
||||
|
||||
@@ -49,6 +49,7 @@ class TopazImageEnhance(IO.ComfyNode):
|
||||
display_name="Topaz Image Enhance",
|
||||
category="api node/image/Topaz",
|
||||
description="Industry-standard upscaling and image enhancement.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model", options=["Reimagine"]),
|
||||
IO.Image.Input("image"),
|
||||
@@ -223,6 +224,7 @@ class TopazVideoEnhance(IO.ComfyNode):
|
||||
display_name="Topaz Video Enhance",
|
||||
category="api node/video/Topaz",
|
||||
description="Breathe new life into video with powerful upscaling and recovery technology.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Video.Input("video"),
|
||||
IO.Boolean.Input("upscaler_enabled", default=True),
|
||||
|
||||
@@ -80,6 +80,7 @@ class TripoTextToModelNode(IO.ComfyNode):
|
||||
node_id="TripoTextToModelNode",
|
||||
display_name="Tripo: Text to Model",
|
||||
category="api node/3d/Tripo",
|
||||
description="Generate a 3D model from a text prompt using Tripo's API.",
|
||||
inputs=[
|
||||
IO.String.Input("prompt", multiline=True),
|
||||
IO.String.Input("negative_prompt", multiline=True, optional=True),
|
||||
@@ -199,6 +200,7 @@ class TripoImageToModelNode(IO.ComfyNode):
|
||||
node_id="TripoImageToModelNode",
|
||||
display_name="Tripo: Image to Model",
|
||||
category="api node/3d/Tripo",
|
||||
description="Generate a 3D model from a single image using Tripo's API.",
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.Combo.Input(
|
||||
@@ -331,6 +333,7 @@ class TripoMultiviewToModelNode(IO.ComfyNode):
|
||||
node_id="TripoMultiviewToModelNode",
|
||||
display_name="Tripo: Multiview to Model",
|
||||
category="api node/3d/Tripo",
|
||||
description="Generate a 3D model from multiple view images using Tripo's API.",
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.Image.Input("image_left", optional=True),
|
||||
@@ -470,6 +473,7 @@ class TripoTextureNode(IO.ComfyNode):
|
||||
node_id="TripoTextureNode",
|
||||
display_name="Tripo: Texture model",
|
||||
category="api node/3d/Tripo",
|
||||
description="Apply textures to an existing 3D model using Tripo's API.",
|
||||
inputs=[
|
||||
IO.Custom("MODEL_TASK_ID").Input("model_task_id"),
|
||||
IO.Boolean.Input("texture", default=True, optional=True),
|
||||
@@ -538,6 +542,7 @@ class TripoRefineNode(IO.ComfyNode):
|
||||
display_name="Tripo: Refine Draft model",
|
||||
category="api node/3d/Tripo",
|
||||
description="Refine a draft model created by v1.4 Tripo models only.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Custom("MODEL_TASK_ID").Input("model_task_id", tooltip="Must be a v1.4 Tripo model"),
|
||||
],
|
||||
@@ -577,6 +582,8 @@ class TripoRigNode(IO.ComfyNode):
|
||||
node_id="TripoRigNode",
|
||||
display_name="Tripo: Rig model",
|
||||
category="api node/3d/Tripo",
|
||||
description="Add a skeleton rig to an existing 3D model using Tripo's API.",
|
||||
short_description="Add a skeleton rig to a 3D model.",
|
||||
inputs=[IO.Custom("MODEL_TASK_ID").Input("original_model_task_id")],
|
||||
outputs=[
|
||||
IO.String.Output(display_name="model_file"), # for backward compatibility only
|
||||
@@ -614,6 +621,8 @@ class TripoRetargetNode(IO.ComfyNode):
node_id="TripoRetargetNode",
display_name="Tripo: Retarget rigged model",
category="api node/3d/Tripo",
description="Apply a preset animation to a rigged 3D model using Tripo's API.",
short_description="Apply a preset animation to a rigged model.",
inputs=[
IO.Custom("RIG_TASK_ID").Input("original_model_task_id"),
IO.Combo.Input(
@@ -679,6 +688,8 @@ class TripoConversionNode(IO.ComfyNode):
node_id="TripoConversionNode",
display_name="Tripo: Convert model",
category="api node/3d/Tripo",
description="Convert a 3D model to different formats with optional post-processing using Tripo's API.",
short_description="Convert a 3D model to different formats.",
inputs=[
IO.Custom("MODEL_TASK_ID,RIG_TASK_ID,RETARGET_TASK_ID").Input("original_model_task_id"),
IO.Combo.Input("format", options=["GLTF", "USDZ", "FBX", "OBJ", "STL", "3MF"]),

@@ -46,6 +46,7 @@ class VeoVideoGenerationNode(IO.ComfyNode):
display_name="Google Veo 2 Video Generation",
category="api node/video/Veo",
description="Generates videos from text prompts using Google's Veo 2 API",
short_description=None,
inputs=[
IO.String.Input(
"prompt",
@@ -264,6 +265,7 @@ class Veo3VideoGenerationNode(VeoVideoGenerationNode):
display_name="Google Veo 3 Video Generation",
category="api node/video/Veo",
description="Generates videos from text prompts using Google's Veo 3 API",
short_description=None,
inputs=[
IO.String.Input(
"prompt",
@@ -377,6 +379,7 @@ class Veo3FirstLastFrameNode(IO.ComfyNode):
display_name="Google Veo 3 First-Last-Frame to Video",
category="api node/video/Veo",
description="Generate video using prompt and first and last frames.",
short_description=None,
inputs=[
IO.String.Input(
"prompt",

@@ -72,6 +72,7 @@ class ViduTextToVideoNode(IO.ComfyNode):
display_name="Vidu Text To Video Generation",
category="api node/video/Vidu",
description="Generate video from a text prompt",
short_description=None,
inputs=[
IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"),
IO.String.Input(
@@ -168,6 +169,7 @@ class ViduImageToVideoNode(IO.ComfyNode):
display_name="Vidu Image To Video Generation",
category="api node/video/Vidu",
description="Generate video from image and optional prompt",
short_description=None,
inputs=[
IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"),
IO.Image.Input(
@@ -270,6 +272,7 @@ class ViduReferenceVideoNode(IO.ComfyNode):
display_name="Vidu Reference To Video Generation",
category="api node/video/Vidu",
description="Generate video from multiple images and a prompt",
short_description=None,
inputs=[
IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"),
IO.Image.Input(
@@ -383,6 +386,7 @@ class ViduStartEndToVideoNode(IO.ComfyNode):
display_name="Vidu Start End To Video Generation",
category="api node/video/Vidu",
description="Generate a video from start and end frames and a prompt",
short_description=None,
inputs=[
IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"),
IO.Image.Input(
@@ -485,6 +489,7 @@ class Vidu2TextToVideoNode(IO.ComfyNode):
display_name="Vidu2 Text-to-Video Generation",
category="api node/video/Vidu",
description="Generate video from a text prompt",
short_description=None,
inputs=[
IO.Combo.Input("model", options=["viduq2"]),
IO.String.Input(
@@ -576,6 +581,7 @@ class Vidu2ImageToVideoNode(IO.ComfyNode):
display_name="Vidu2 Image-to-Video Generation",
category="api node/video/Vidu",
description="Generate a video from an image and an optional prompt.",
short_description=None,
inputs=[
IO.Combo.Input("model", options=["viduq2-pro-fast", "viduq2-pro", "viduq2-turbo"]),
IO.Image.Input(
@@ -704,6 +710,7 @@ class Vidu2ReferenceVideoNode(IO.ComfyNode):
display_name="Vidu2 Reference-to-Video Generation",
category="api node/video/Vidu",
description="Generate a video from multiple reference images and a prompt.",
short_description=None,
inputs=[
IO.Combo.Input("model", options=["viduq2"]),
IO.Autogrow.Input(
@@ -837,6 +844,7 @@ class Vidu2StartEndToVideoNode(IO.ComfyNode):
display_name="Vidu2 Start/End Frame-to-Video Generation",
category="api node/video/Vidu",
description="Generate a video from a start frame, an end frame, and a prompt.",
short_description="Generate video from start frame, end frame, and prompt.",
inputs=[
IO.Combo.Input("model", options=["viduq2-pro-fast", "viduq2-pro", "viduq2-turbo"]),
IO.Image.Input("first_frame"),
@@ -956,6 +964,7 @@ class ViduExtendVideoNode(IO.ComfyNode):
display_name="Vidu Video Extension",
category="api node/video/Vidu",
description="Extend an existing video by generating additional frames.",
short_description=None,
inputs=[
IO.DynamicCombo.Input(
"model",
@@ -1126,6 +1135,7 @@ class ViduMultiFrameVideoNode(IO.ComfyNode):
display_name="Vidu Multi-Frame Video Generation",
category="api node/video/Vidu",
description="Generate a video with multiple keyframe transitions.",
short_description=None,
inputs=[
IO.Combo.Input("model", options=["viduq2-pro", "viduq2-turbo"]),
IO.Image.Input(
@@ -1272,6 +1282,7 @@ class Vidu3TextToVideoNode(IO.ComfyNode):
display_name="Vidu Q3 Text-to-Video Generation",
category="api node/video/Vidu",
description="Generate video from a text prompt.",
short_description=None,
inputs=[
IO.DynamicCombo.Input(
"model",
@@ -1380,6 +1391,7 @@ class Vidu3ImageToVideoNode(IO.ComfyNode):
display_name="Vidu Q3 Image-to-Video Generation",
category="api node/video/Vidu",
description="Generate a video from an image and an optional prompt.",
short_description=None,
inputs=[
IO.DynamicCombo.Input(
"model",

@@ -175,6 +175,7 @@ class WanTextToImageApi(IO.ComfyNode):
display_name="Wan Text to Image",
category="api node/image/Wan",
description="Generates an image based on a text prompt.",
short_description=None,
inputs=[
IO.Combo.Input(
"model",
@@ -298,6 +299,7 @@ class WanImageToImageApi(IO.ComfyNode):
category="api node/image/Wan",
description="Generates an image from one or two input images and a text prompt. "
"The output image is currently fixed at 1.6 MP, and its aspect ratio matches the input image(s).",
short_description="Generate an image from input images and a text prompt.",
inputs=[
IO.Combo.Input(
"model",
@@ -603,6 +606,7 @@ class WanImageToVideoApi(IO.ComfyNode):
display_name="Wan Image to Video",
category="api node/video/Wan",
description="Generates a video from the first frame and a text prompt.",
short_description=None,
inputs=[
IO.Combo.Input(
"model",
@@ -779,6 +783,7 @@ class WanReferenceVideoApi(IO.ComfyNode):
category="api node/video/Wan",
description="Use the character and voice from input videos, combined with a prompt, "
"to generate a new video that maintains character consistency.",
short_description="Generate character-consistent video from reference videos and prompt.",
inputs=[
IO.Combo.Input("model", options=["wan2.6-r2v"]),
IO.String.Input(

@@ -30,6 +30,7 @@ class WavespeedFlashVSRNode(IO.ComfyNode):
category="api node/video/WaveSpeed",
description="Fast, high-quality video upscaler that "
"boosts resolution and restores clarity for low-resolution or blurry footage.",
short_description="Fast video upscaler that boosts resolution and restores clarity.",
inputs=[
IO.Video.Input("video"),
IO.Combo.Input("target_resolution", options=["720p", "1080p", "2K", "4K"]),
@@ -101,6 +102,7 @@ class WavespeedImageUpscaleNode(IO.ComfyNode):
display_name="WaveSpeed Image Upscale",
category="api node/image/WaveSpeed",
description="Boost image resolution and quality, upscaling photos to 4K or 8K for sharp, detailed results.",
short_description="Upscale images to 4K or 8K with enhanced quality.",
inputs=[
IO.Combo.Input("model", options=["SeedVR2", "Ultimate"]),
IO.Image.Input("image"),

@@ -12,6 +12,8 @@ class TextEncodeAceStepAudio(io.ComfyNode):
return io.Schema(
node_id="TextEncodeAceStepAudio",
category="conditioning",
description="Encodes tags and lyrics into conditioning for ACE-Step 1.0 audio generation with adjustable lyrics strength.",
short_description="Encodes tags and lyrics for ACE-Step 1.0 audio.",
inputs=[
io.Clip.Input("clip"),
io.String.Input("tags", multiline=True, dynamic_prompts=True),
@@ -34,6 +36,8 @@ class TextEncodeAceStepAudio15(io.ComfyNode):
return io.Schema(
node_id="TextEncodeAceStepAudio1.5",
category="conditioning",
description="Encodes tags, lyrics, and music parameters like BPM, key, and language into conditioning for ACE-Step 1.5 audio generation.",
short_description="Encodes text and music parameters for ACE-Step 1.5.",
inputs=[
io.Clip.Input("clip"),
io.String.Input("tags", multiline=True, dynamic_prompts=True),
@@ -68,6 +72,8 @@ class EmptyAceStepLatentAudio(io.ComfyNode):
node_id="EmptyAceStepLatentAudio",
display_name="Empty Ace Step 1.0 Latent Audio",
category="latent/audio",
description="Creates an empty latent audio tensor for ACE-Step 1.0 with a specified duration and batch size.",
short_description="Creates an empty ACE-Step 1.0 audio latent.",
inputs=[
io.Float.Input("seconds", default=120.0, min=1.0, max=1000.0, step=0.1),
io.Int.Input(
@@ -91,6 +97,8 @@ class EmptyAceStep15LatentAudio(io.ComfyNode):
node_id="EmptyAceStep1.5LatentAudio",
display_name="Empty Ace Step 1.5 Latent Audio",
category="latent/audio",
description="Creates an empty latent audio tensor for ACE-Step 1.5 with a specified duration and batch size.",
short_description="Creates an empty ACE-Step 1.5 audio latent.",
inputs=[
io.Float.Input("seconds", default=120.0, min=1.0, max=1000.0, step=0.01),
io.Int.Input(
@@ -115,6 +123,7 @@ class ReferenceAudio(io.ComfyNode):
category="advanced/conditioning/audio",
is_experimental=True,
description="This node sets the reference audio for ace step 1.5",
short_description=None,
inputs=[
io.Conditioning.Input("conditioning"),
io.Latent.Input("latent", optional=True),

@@ -46,6 +46,8 @@ class SamplerLCMUpscale(io.ComfyNode):
return io.Schema(
node_id="SamplerLCMUpscale",
category="sampling/custom_sampling/samplers",
description="Sampler that progressively upscales the latent during LCM sampling steps, combining denoising with gradual resolution increase.",
short_description="LCM sampler with progressive latent upscaling.",
inputs=[
io.Float.Input("scale_ratio", default=1.0, min=0.1, max=20.0, step=0.01),
io.Int.Input("scale_steps", default=-1, min=-1, max=1000, step=1),
@@ -93,6 +95,8 @@ class SamplerEulerCFGpp(io.ComfyNode):
node_id="SamplerEulerCFGpp",
display_name="SamplerEulerCFG++",
category="_for_testing", # "sampling/custom_sampling/samplers"
description="Euler sampler variant using the CFG++ formulation, which modifies the denoising direction using unconditional predictions for improved guidance.",
short_description="Euler sampler using CFG++ guidance formulation.",
inputs=[
io.Combo.Input("version", options=["regular", "alternative"]),
],

@@ -30,6 +30,8 @@ class AlignYourStepsScheduler(io.ComfyNode):
node_id="AlignYourStepsScheduler",
search_aliases=["AYS scheduler"],
category="sampling/custom_sampling/schedulers",
description="Generates an optimized noise schedule using the Align Your Steps method with log-linear interpolation.",
short_description="Optimized noise schedule using Align Your Steps.",
inputs=[
io.Combo.Input("model_type", options=["SD1", "SDXL", "SVD"]),
io.Int.Input("steps", default=10, min=1, max=10000),

@@ -17,6 +17,8 @@ class APG(io.ComfyNode):
node_id="APG",
display_name="Adaptive Projected Guidance",
category="sampling/custom_sampling",
description="Applies Adaptive Projected Guidance to a model, decomposing CFG guidance into parallel and orthogonal components with optional momentum and norm thresholding for improved sampling quality.",
short_description="Decomposes CFG guidance with projection and normalization.",
inputs=[
io.Model.Input("model"),
io.Float.Input(

@@ -26,6 +26,8 @@ class UNetSelfAttentionMultiply(io.ComfyNode):
return io.Schema(
node_id="UNetSelfAttentionMultiply",
category="_for_testing/attention_experiments",
description="Scales the query, key, value, and output weights of UNet self-attention layers by specified multipliers to experiment with attention behavior.",
short_description="Scale UNet self-attention Q/K/V/Out weights.",
inputs=[
io.Model.Input("model"),
io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01),
@@ -49,6 +51,8 @@ class UNetCrossAttentionMultiply(io.ComfyNode):
return io.Schema(
node_id="UNetCrossAttentionMultiply",
category="_for_testing/attention_experiments",
description="Scales the query, key, value, and output weights of UNet cross-attention layers by specified multipliers to experiment with text-to-image attention.",
short_description="Scale UNet cross-attention Q/K/V/Out weights.",
inputs=[
io.Model.Input("model"),
io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01),
@@ -73,6 +77,8 @@ class CLIPAttentionMultiply(io.ComfyNode):
node_id="CLIPAttentionMultiply",
search_aliases=["clip attention scale", "text encoder attention"],
category="_for_testing/attention_experiments",
description="Scales the query, key, value, and output projection weights of CLIP text encoder self-attention layers by specified multipliers.",
short_description="Scale CLIP text encoder attention weights.",
inputs=[
io.Clip.Input("clip"),
io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01),
@@ -107,6 +113,8 @@ class UNetTemporalAttentionMultiply(io.ComfyNode):
return io.Schema(
node_id="UNetTemporalAttentionMultiply",
category="_for_testing/attention_experiments",
description="Scales the output weights of UNet temporal and structural attention layers independently, allowing fine-grained control over video model attention behavior.",
short_description="Scale UNet temporal and structural attention weights.",
inputs=[
io.Model.Input("model"),
io.Float.Input("self_structural", default=1.0, min=0.0, max=10.0, step=0.01),

@@ -19,6 +19,8 @@ class EmptyLatentAudio(IO.ComfyNode):
node_id="EmptyLatentAudio",
display_name="Empty Latent Audio",
category="latent/audio",
description="Creates an empty latent audio tensor with a specified duration and batch size for Stable Audio generation.",
short_description="Creates an empty latent audio tensor.",
inputs=[
IO.Float.Input("seconds", default=47.6, min=1.0, max=1000.0, step=0.1),
IO.Int.Input(
@@ -43,6 +45,8 @@ class ConditioningStableAudio(IO.ComfyNode):
return IO.Schema(
node_id="ConditioningStableAudio",
category="conditioning",
description="Sets the start time and total duration on Stable Audio positive and negative conditioning.",
short_description="Sets timing parameters on Stable Audio conditioning.",
inputs=[
IO.Conditioning.Input("positive"),
IO.Conditioning.Input("negative"),
@@ -72,6 +76,8 @@ class VAEEncodeAudio(IO.ComfyNode):
search_aliases=["audio to latent"],
display_name="VAE Encode Audio",
category="latent/audio",
description="Encodes an audio waveform into a latent representation using a VAE, resampling if needed.",
short_description="Encodes audio into latent via VAE.",
inputs=[
IO.Audio.Input("audio"),
IO.Vae.Input("vae"),
@@ -115,6 +121,8 @@ class VAEDecodeAudio(IO.ComfyNode):
search_aliases=["latent to audio"],
display_name="VAE Decode Audio",
category="latent/audio",
description="Decodes a latent representation back into an audio waveform using a VAE.",
short_description="Decodes latent into audio via VAE.",
inputs=[
IO.Latent.Input("samples"),
IO.Vae.Input("vae"),
@@ -137,6 +145,8 @@ class VAEDecodeAudioTiled(IO.ComfyNode):
search_aliases=["latent to audio"],
display_name="VAE Decode Audio (Tiled)",
category="latent/audio",
description="Decodes a latent representation into audio using tiled VAE decoding to reduce memory usage.",
short_description="Tiled VAE decoding of latent into audio.",
inputs=[
IO.Latent.Input("samples"),
IO.Vae.Input("vae"),
@@ -159,6 +169,8 @@ class SaveAudio(IO.ComfyNode):
search_aliases=["export flac"],
display_name="Save Audio (FLAC)",
category="audio",
description="Saves audio to disk in FLAC format with a configurable filename prefix.",
short_description="Saves audio to disk in FLAC format.",
inputs=[
IO.Audio.Input("audio"),
IO.String.Input("filename_prefix", default="audio/ComfyUI"),
@@ -184,6 +196,8 @@ class SaveAudioMP3(IO.ComfyNode):
search_aliases=["export mp3"],
display_name="Save Audio (MP3)",
category="audio",
description="Saves audio to disk in MP3 format with configurable quality and filename prefix.",
short_description="Saves audio to disk in MP3 format.",
inputs=[
IO.Audio.Input("audio"),
IO.String.Input("filename_prefix", default="audio/ComfyUI"),
@@ -212,6 +226,8 @@ class SaveAudioOpus(IO.ComfyNode):
search_aliases=["export opus"],
display_name="Save Audio (Opus)",
category="audio",
description="Saves audio to disk in Opus format with configurable quality and filename prefix.",
short_description="Saves audio to disk in Opus format.",
inputs=[
IO.Audio.Input("audio"),
IO.String.Input("filename_prefix", default="audio/ComfyUI"),
@@ -240,6 +256,8 @@ class PreviewAudio(IO.ComfyNode):
search_aliases=["play audio"],
display_name="Preview Audio",
category="audio",
description="Plays back audio in the UI for previewing.",
short_description=None,
inputs=[
IO.Audio.Input("audio"),
],
@@ -300,6 +318,8 @@ class LoadAudio(IO.ComfyNode):
search_aliases=["import audio", "open audio", "audio file"],
display_name="Load Audio",
category="audio",
description="Loads an audio or video file from disk and outputs the audio as a single Audio output.",
short_description="Loads an audio file from disk.",
inputs=[
IO.Combo.Input("audio", upload=IO.UploadType.audio, options=sorted(files)),
],
@@ -338,6 +358,7 @@ class RecordAudio(IO.ComfyNode):
search_aliases=["microphone input", "audio capture", "voice input"],
display_name="Record Audio",
category="audio",
description="Records audio from a microphone input and outputs the captured audio.",
inputs=[
IO.Custom("AUDIO_RECORD").Input("audio"),
],
@@ -363,6 +384,7 @@ class TrimAudioDuration(IO.ComfyNode):
search_aliases=["cut audio", "audio clip", "shorten audio"],
display_name="Trim Audio Duration",
description="Trim audio tensor into chosen time range.",
short_description=None,
category="audio",
inputs=[
IO.Audio.Input("audio"),
@@ -416,6 +438,7 @@ class SplitAudioChannels(IO.ComfyNode):
search_aliases=["stereo to mono"],
display_name="Split Audio Channels",
description="Separates the audio into left and right channels.",
short_description=None,
category="audio",
inputs=[
IO.Audio.Input("audio"),
@@ -448,6 +471,7 @@ class JoinAudioChannels(IO.ComfyNode):
node_id="JoinAudioChannels",
display_name="Join Audio Channels",
description="Joins left and right mono audio channels into a stereo audio.",
short_description=None,
category="audio",
inputs=[
IO.Audio.Input("audio_left"),
@@ -517,6 +541,7 @@ class AudioConcat(IO.ComfyNode):
search_aliases=["join audio", "combine audio", "append audio"],
display_name="Audio Concat",
description="Concatenates the audio1 to audio2 in the specified direction.",
short_description=None,
category="audio",
inputs=[
IO.Audio.Input("audio1"),
@@ -565,6 +590,7 @@ class AudioMerge(IO.ComfyNode):
search_aliases=["mix audio", "overlay audio", "layer audio"],
display_name="Audio Merge",
description="Combine two audio tracks by overlaying their waveforms.",
short_description=None,
category="audio",
inputs=[
IO.Audio.Input("audio1"),
@@ -626,6 +652,8 @@ class AudioAdjustVolume(IO.ComfyNode):
search_aliases=["audio gain", "loudness", "audio level"],
display_name="Audio Adjust Volume",
category="audio",
description="Adjusts audio volume by a specified number of decibels.",
short_description=None,
inputs=[
IO.Audio.Input("audio"),
IO.Int.Input(
@@ -662,6 +690,8 @@ class EmptyAudio(IO.ComfyNode):
search_aliases=["blank audio"],
display_name="Empty Audio",
category="audio",
description="Creates a silent audio clip with configurable duration, sample rate, and channel count.",
short_description="Creates a silent audio clip.",
inputs=[
IO.Float.Input(
"duration",

@@ -11,6 +11,8 @@ class AudioEncoderLoader(io.ComfyNode):
return io.Schema(
node_id="AudioEncoderLoader",
category="loaders",
description="Loads an audio encoder model from a checkpoint file for encoding audio into embeddings.",
short_description="Loads an audio encoder model from a checkpoint.",
inputs=[
io.Combo.Input(
"audio_encoder_name",
@@ -36,6 +38,8 @@ class AudioEncoderEncode(io.ComfyNode):
return io.Schema(
node_id="AudioEncoderEncode",
category="conditioning",
description="Encodes audio into embeddings using a loaded audio encoder model.",
short_description=None,
inputs=[
io.AudioEncoder.Input("audio_encoder"),
io.Audio.Input("audio"),

@@ -154,6 +154,8 @@ class WanCameraEmbedding(io.ComfyNode):
return io.Schema(
node_id="WanCameraEmbedding",
category="camera",
description="Generates Plucker camera embeddings from a selected camera motion trajectory for Wan video generation.",
short_description="Generates camera embeddings for Wan video generation.",
inputs=[
io.Combo.Input(
"camera_pose",

@@ -12,6 +12,8 @@ class Canny(io.ComfyNode):
node_id="Canny",
search_aliases=["edge detection", "outline", "contour detection", "line art"],
category="image/preprocessors",
description="Detects edges in an image using the Canny edge detection algorithm with configurable low and high thresholds.",
short_description="Canny edge detection on images.",
inputs=[
io.Image.Input("image"),
io.Float.Input("low_threshold", default=0.4, min=0.01, max=0.99, step=0.01),

@@ -27,6 +27,8 @@ class CFGZeroStar(io.ComfyNode):
return io.Schema(
node_id="CFGZeroStar",
category="advanced/guidance",
description="Applies CFG-Zero* post-CFG correction that computes an optimal scaling factor between conditional and unconditional predictions to reduce CFG artifacts.",
short_description="CFG-Zero* guidance correction to reduce artifacts.",
inputs=[
io.Model.Input("model"),
],
@@ -54,6 +56,8 @@ class CFGNorm(io.ComfyNode):
return io.Schema(
node_id="CFGNorm",
category="advanced/guidance",
description="Constrains the CFG-guided prediction norm to not exceed the conditional prediction norm, helping to prevent oversaturation at high CFG scales.",
short_description="Constrain CFG output norm to conditional prediction norm.",
inputs=[
io.Model.Input("model"),
io.Float.Input("strength", default=1.0, min=0.0, max=100.0, step=0.01),

@@ -14,6 +14,8 @@ class EmptyChromaRadianceLatentImage(io.ComfyNode):
return io.Schema(
node_id="EmptyChromaRadianceLatentImage",
category="latent/chroma_radiance",
description="Creates an empty Chroma Radiance latent image tensor with the specified width, height, and batch size.",
short_description="Creates an empty Chroma Radiance latent image.",
inputs=[
io.Int.Input(id="width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
io.Int.Input(id="height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
@@ -35,6 +37,7 @@ class ChromaRadianceOptions(io.ComfyNode):
node_id="ChromaRadianceOptions",
category="model_patches/chroma_radiance",
description="Allows setting advanced options for the Chroma Radiance model.",
short_description=None,
inputs=[
io.Model.Input(id="model"),
io.Boolean.Input(

@@ -10,6 +10,8 @@ class CLIPTextEncodeSDXLRefiner(io.ComfyNode):
return io.Schema(
node_id="CLIPTextEncodeSDXLRefiner",
category="advanced/conditioning",
description="Encodes text for SDXL refiner models with aesthetic score and resolution conditioning parameters.",
short_description="Encodes text for SDXL refiner models.",
inputs=[
io.Float.Input("ascore", default=6.0, min=0.0, max=1000.0, step=0.01),
io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION),
@@ -31,6 +33,8 @@ class CLIPTextEncodeSDXL(io.ComfyNode):
return io.Schema(
node_id="CLIPTextEncodeSDXL",
category="advanced/conditioning",
description="Encodes separate G and L text prompts for SDXL models with resolution and crop conditioning parameters.",
short_description="Encodes dual text prompts for SDXL models.",
inputs=[
io.Clip.Input("clip"),
io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION),

@@ -10,6 +10,7 @@ class ColorToRGBInt(io.ComfyNode):
display_name="Color to RGB Int",
category="utils",
description="Convert a color to a RGB integer value.",
short_description=None,
inputs=[
io.Color.Input("color"),
],

@@ -112,6 +112,8 @@ class PorterDuffImageComposite(io.ComfyNode):
search_aliases=["alpha composite", "blend modes", "layer blend", "transparency blend"],
display_name="Porter-Duff Image Composite",
category="mask/compositing",
description="Composites two images with alpha masks using Porter-Duff blend modes.",
short_description="",
inputs=[
io.Image.Input("source"),
io.Mask.Input("source_alpha"),
@@ -169,6 +171,8 @@ class SplitImageWithAlpha(io.ComfyNode):
search_aliases=["extract alpha", "separate transparency", "remove alpha"],
display_name="Split Image with Alpha",
category="mask/compositing",
description="Separates an RGBA image into its RGB color channels and an alpha transparency mask.",
short_description="Split RGBA image into RGB and alpha mask.",
inputs=[
io.Image.Input("image"),
],
@@ -193,6 +197,8 @@ class JoinImageWithAlpha(io.ComfyNode):
search_aliases=["add transparency", "apply alpha", "composite alpha", "RGBA"],
display_name="Join Image with Alpha",
category="mask/compositing",
description="Combines an RGB image with an alpha mask to produce an RGBA image with transparency.",
short_description="Combine RGB image and alpha into RGBA.",
inputs=[
io.Image.Input("image"),
io.Mask.Input("alpha"),

@@ -9,6 +9,8 @@ class CLIPTextEncodeControlnet(io.ComfyNode):
return io.Schema(
node_id="CLIPTextEncodeControlnet",
category="_for_testing/conditioning",
description="Encodes text with CLIP and attaches the result as cross-attention controlnet conditioning to existing conditioning data.",
short_description="CLIP text encode for controlnet cross-attention conditioning.",
inputs=[
io.Clip.Input("clip"),
io.Conditioning.Input("conditioning"),
@@ -36,6 +38,8 @@ class T5TokenizerOptions(io.ComfyNode):
return io.Schema(
node_id="T5TokenizerOptions",
category="_for_testing/conditioning",
description="Configures minimum padding and length options for T5-family tokenizers used in CLIP text encoding.",
short_description="Set T5 tokenizer padding and length options.",
inputs=[
io.Clip.Input("clip"),
io.Int.Input("min_padding", default=0, min=0, max=10000, step=1),

@@ -12,6 +12,7 @@ class ContextWindowsManualNode(io.ComfyNode):
display_name="Context Windows (Manual)",
category="context",
description="Manually set context windows.",
short_description=None,
inputs=[
io.Model.Input("model", tooltip="The model to apply context windows to during sampling."),
io.Int.Input("context_length", min=1, default=16, tooltip="The length of the context window."),
@@ -65,6 +66,7 @@ class WanContextWindowsManualNode(ContextWindowsManualNode):
schema.node_id = "WanContextWindowsManual"
schema.display_name = "WAN Context Windows (Manual)"
schema.description = "Manually set context windows for WAN-like models (dim=2)."
schema.short_description = None
schema.inputs = [
io.Model.Input("model", tooltip="The model to apply context windows to during sampling."),
io.Int.Input("context_length", min=1, max=nodes.MAX_RESOLUTION, step=4, default=81, tooltip="The length of the context window."),

@@ -10,6 +10,8 @@ class SetUnionControlNetType(io.ComfyNode):
return io.Schema(
node_id="SetUnionControlNetType",
category="conditioning/controlnet",
description="Sets the control type for a Union ControlNet, selecting which conditioning mode to use.",
short_description="Select control mode for Union ControlNet.",
inputs=[
io.ControlNet.Input("control_net"),
io.Combo.Input("type", options=["auto"] + list(UNION_CONTROLNET_TYPES.keys())),
@@ -40,6 +42,8 @@ class ControlNetInpaintingAliMamaApply(io.ComfyNode):
node_id="ControlNetInpaintingAliMamaApply",
search_aliases=["masked controlnet"],
category="conditioning/controlnet",
description="Applies an AliMama inpainting ControlNet to positive and negative conditioning using an image and mask with VAE encoding.",
short_description="Applies AliMama inpainting ControlNet with mask.",
inputs=[
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),

@@ -14,6 +14,7 @@ class EmptyCosmosLatentVideo(io.ComfyNode):
return io.Schema(
node_id="EmptyCosmosLatentVideo",
category="latent/video",
description="Creates an empty latent tensor sized for Cosmos video generation.",
inputs=[
io.Int.Input("width", default=1280, min=16, max=nodes.MAX_RESOLUTION, step=16),
io.Int.Input("height", default=704, min=16, max=nodes.MAX_RESOLUTION, step=16),
@@ -46,6 +47,8 @@ class CosmosImageToVideoLatent(io.ComfyNode):
return io.Schema(
node_id="CosmosImageToVideoLatent",
category="conditioning/inpaint",
description="Creates an inpainting video latent for Cosmos by encoding optional start and end images with a noise mask.",
short_description="Cosmos inpainting video latent from start/end images.",
inputs=[
io.Vae.Input("vae"),
io.Int.Input("width", default=1280, min=16, max=nodes.MAX_RESOLUTION, step=16),
@@ -89,6 +92,8 @@ class CosmosPredict2ImageToVideoLatent(io.ComfyNode):
return io.Schema(
node_id="CosmosPredict2ImageToVideoLatent",
category="conditioning/inpaint",
description="Creates an inpainting video latent for Cosmos Predict2 by encoding optional start and end images with Wan latent format processing.",
short_description="Cosmos Predict2 inpainting video latent from images.",
inputs=[
io.Vae.Input("vae"),
io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16),

@@ -18,6 +18,8 @@ class BasicScheduler(io.ComfyNode):
return io.Schema(
node_id="BasicScheduler",
category="sampling/custom_sampling/schedulers",
description="Generates a sigma schedule from a model using a selected scheduler algorithm, step count, and denoise strength.",
short_description="Generate sigma schedule from model and scheduler.",
inputs=[
io.Model.Input("model"),
io.Combo.Input("scheduler", options=comfy.samplers.SCHEDULER_NAMES),
@@ -48,6 +50,8 @@ class KarrasScheduler(io.ComfyNode):
return io.Schema(
node_id="KarrasScheduler",
category="sampling/custom_sampling/schedulers",
description="Generates a sigma schedule using the Karras noise schedule with configurable sigma range and rho parameter.",
short_description="Generate sigmas using Karras noise schedule.",
inputs=[
io.Int.Input("steps", default=20, min=1, max=10000),
io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False),
@@ -70,6 +74,8 @@ class ExponentialScheduler(io.ComfyNode):
return io.Schema(
node_id="ExponentialScheduler",
category="sampling/custom_sampling/schedulers",
description="Generates a sigma schedule using an exponential noise schedule with configurable sigma range.",
short_description="Generate sigmas using exponential noise schedule.",
inputs=[
io.Int.Input("steps", default=20, min=1, max=10000),
io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False),
@@ -91,6 +97,8 @@ class PolyexponentialScheduler(io.ComfyNode):
return io.Schema(
node_id="PolyexponentialScheduler",
category="sampling/custom_sampling/schedulers",
description="Generates a sigma schedule using a polyexponential noise schedule with configurable sigma range and rho parameter.",
short_description="Generate sigmas using polyexponential noise schedule.",
inputs=[
io.Int.Input("steps", default=20, min=1, max=10000),
io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False),
@@ -113,6 +121,8 @@ class LaplaceScheduler(io.ComfyNode):
return io.Schema(
node_id="LaplaceScheduler",
category="sampling/custom_sampling/schedulers",
description="Generates a sigma schedule using a Laplace distribution-based noise schedule with configurable mu and beta parameters.",
short_description="Generate sigmas using Laplace distribution schedule.",
inputs=[
io.Int.Input("steps", default=20, min=1, max=10000),
io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False),
@@ -137,6 +147,8 @@ class SDTurboScheduler(io.ComfyNode):
return io.Schema(
node_id="SDTurboScheduler",
category="sampling/custom_sampling/schedulers",
description="Generates a sigma schedule optimized for SD Turbo models with very few steps and adjustable denoise strength.",
short_description="Generate sigma schedule for SD Turbo models.",
inputs=[
io.Model.Input("model"),
io.Int.Input("steps", default=1, min=1, max=10),
@@ -161,6 +173,8 @@ class BetaSamplingScheduler(io.ComfyNode):
return io.Schema(
node_id="BetaSamplingScheduler",
category="sampling/custom_sampling/schedulers",
description="Generates a sigma schedule using a beta distribution with configurable alpha and beta shape parameters.",
short_description="Generate sigmas using beta distribution schedule.",
inputs=[
io.Model.Input("model"),
io.Int.Input("steps", default=20, min=1, max=10000),
@@ -183,6 +197,8 @@ class VPScheduler(io.ComfyNode):
return io.Schema(
node_id="VPScheduler",
category="sampling/custom_sampling/schedulers",
description="Generates a sigma schedule using the Variance Preserving (VP) SDE formulation with configurable beta and epsilon parameters.",
short_description="Generate sigmas using VP SDE schedule.",
inputs=[
io.Int.Input("steps", default=20, min=1, max=10000),
io.Float.Input("beta_d", default=19.9, min=0.0, max=5000.0, step=0.01, round=False), #TODO: fix default values
@@ -205,6 +221,8 @@ class SplitSigmas(io.ComfyNode):
return io.Schema(
node_id="SplitSigmas",
category="sampling/custom_sampling/sigmas",
description="Splits a sigma sequence into high and low portions at a specified step index for multi-pass sampling.",
short_description="Split sigmas into high and low at a step.",
inputs=[
io.Sigmas.Input("sigmas"),
io.Int.Input("step", default=0, min=0, max=10000),
@@ -229,6 +247,8 @@ class SplitSigmasDenoise(io.ComfyNode):
return io.Schema(
node_id="SplitSigmasDenoise",
category="sampling/custom_sampling/sigmas",
description="Splits a sigma sequence into high and low portions based on a denoise ratio for multi-pass sampling workflows.",
short_description="Split sigmas by denoise ratio.",
inputs=[
io.Sigmas.Input("sigmas"),
io.Float.Input("denoise", default=1.0, min=0.0, max=1.0, step=0.01),
@@ -255,6 +275,8 @@ class FlipSigmas(io.ComfyNode):
return io.Schema(
node_id="FlipSigmas",
category="sampling/custom_sampling/sigmas",
description="Reverses the order of a sigma sequence, useful for converting between ascending and descending noise schedules.",
short_description="Reverse the order of a sigma sequence.",
inputs=[io.Sigmas.Input("sigmas")],
outputs=[io.Sigmas.Output()]
)
@@ -277,6 +299,8 @@ class SetFirstSigma(io.ComfyNode):
return io.Schema(
node_id="SetFirstSigma",
category="sampling/custom_sampling/sigmas",
description="Overrides the first sigma value in a sequence with a custom value, allowing manual control of the initial noise level.",
short_description="Override the first sigma value in a sequence.",
inputs=[
io.Sigmas.Input("sigmas"),
io.Float.Input("sigma", default=136.0, min=0.0, max=20000.0, step=0.001, round=False),
@@ -299,6 +323,8 @@ class ExtendIntermediateSigmas(io.ComfyNode):
node_id="ExtendIntermediateSigmas",
search_aliases=["interpolate sigmas"],
category="sampling/custom_sampling/sigmas",
description="Interpolates additional intermediate sigma values between existing steps using selectable spacing within a specified sigma range.",
short_description="Interpolate additional sigma steps between existing values.",
inputs=[
io.Sigmas.Input("sigmas"),
io.Int.Input("steps", default=2, min=1, max=100),
@@ -352,6 +378,8 @@ class SamplingPercentToSigma(io.ComfyNode):
return io.Schema(
node_id="SamplingPercentToSigma",
category="sampling/custom_sampling/sigmas",
description="Converts a sampling percentage (0.0 to 1.0) to the corresponding sigma value using a model's noise schedule.",
short_description="Convert sampling percentage to sigma value.",
inputs=[
io.Model.Input("model"),
io.Float.Input("sampling_percent", default=0.0, min=0.0, max=1.0, step=0.0001),
@@ -380,6 +408,8 @@ class KSamplerSelect(io.ComfyNode):
return io.Schema(
node_id="KSamplerSelect",
category="sampling/custom_sampling/samplers",
description="Selects a sampler algorithm by name from the list of available samplers and outputs the sampler object.",
short_description="Select a sampler algorithm by name.",
inputs=[io.Combo.Input("sampler_name", options=comfy.samplers.SAMPLER_NAMES)],
outputs=[io.Sampler.Output()]
)
@@ -397,6 +427,8 @@ class SamplerDPMPP_3M_SDE(io.ComfyNode):
return io.Schema(
node_id="SamplerDPMPP_3M_SDE",
category="sampling/custom_sampling/samplers",
description="Creates a DPM++ 3M SDE sampler with configurable eta, noise scale, and GPU or CPU noise generation.",
short_description="Create a DPM++ 3M SDE sampler.",
inputs=[
io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
@@ -422,6 +454,8 @@ class SamplerDPMPP_2M_SDE(io.ComfyNode):
return io.Schema(
node_id="SamplerDPMPP_2M_SDE",
category="sampling/custom_sampling/samplers",
description="Creates a DPM++ 2M SDE sampler with configurable solver type, eta, noise scale, and noise device.",
short_description="Create a DPM++ 2M SDE sampler.",
inputs=[
io.Combo.Input("solver_type", options=['midpoint', 'heun']),
io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
@@ -449,6 +483,8 @@ class SamplerDPMPP_SDE(io.ComfyNode):
return io.Schema(
node_id="SamplerDPMPP_SDE",
category="sampling/custom_sampling/samplers",
description="Creates a DPM++ SDE sampler with configurable eta, noise scale, r parameter, and noise device.",
short_description="Create a DPM++ SDE sampler.",
inputs=[
io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
@@ -475,6 +511,8 @@ class SamplerDPMPP_2S_Ancestral(io.ComfyNode):
return io.Schema(
node_id="SamplerDPMPP_2S_Ancestral",
category="sampling/custom_sampling/samplers",
description="Creates a DPM++ 2S Ancestral sampler with configurable eta and noise scale parameters.",
short_description="Create a DPM++ 2S Ancestral sampler.",
inputs=[
io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
@@ -495,6 +533,8 @@ class SamplerEulerAncestral(io.ComfyNode):
return io.Schema(
node_id="SamplerEulerAncestral",
category="sampling/custom_sampling/samplers",
description="Creates an Euler Ancestral sampler with configurable eta and noise scale for stochastic sampling.",
short_description="Create an Euler Ancestral stochastic sampler.",
inputs=[
io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
@@ -516,6 +556,8 @@ class SamplerEulerAncestralCFGPP(io.ComfyNode):
node_id="SamplerEulerAncestralCFGPP",
display_name="SamplerEulerAncestralCFG++",
category="sampling/custom_sampling/samplers",
description="Creates an Euler Ancestral CFG++ sampler that applies classifier-free guidance with improved stability.",
short_description="Create an Euler Ancestral CFG++ sampler.",
inputs=[
io.Float.Input("eta", default=1.0, min=0.0, max=1.0, step=0.01, round=False),
io.Float.Input("s_noise", default=1.0, min=0.0, max=10.0, step=0.01, round=False),
@@ -538,6 +580,8 @@ class SamplerLMS(io.ComfyNode):
return io.Schema(
node_id="SamplerLMS",
category="sampling/custom_sampling/samplers",
description="Creates a Linear Multi-Step (LMS) sampler with a configurable order parameter.",
short_description="Create a Linear Multi-Step (LMS) sampler.",
inputs=[io.Int.Input("order", default=4, min=1, max=100)],
outputs=[io.Sampler.Output()]
)
@@ -555,6 +599,8 @@ class SamplerDPMAdaptative(io.ComfyNode):
return io.Schema(
node_id="SamplerDPMAdaptative",
category="sampling/custom_sampling/samplers",
description="Creates a DPM Adaptive sampler with configurable order, tolerances, PID coefficients, and stochastic noise parameters for adaptive step-size sampling.",
short_description="Create a DPM Adaptive step-size sampler.",
inputs=[
io.Int.Input("order", default=3, min=2, max=3),
io.Float.Input("rtol", default=0.05, min=0.0, max=100.0, step=0.01, round=False),
@@ -586,6 +632,8 @@ class SamplerER_SDE(io.ComfyNode):
return io.Schema(
node_id="SamplerER_SDE",
category="sampling/custom_sampling/samplers",
description="Creates an ER-SDE sampler supporting ER-SDE, reverse-time SDE, and ODE solver types with configurable stochastic strength and staging.",
short_description="Create an ER-SDE, reverse-time SDE, or ODE sampler.",
inputs=[
io.Combo.Input("solver_type", options=["ER-SDE", "Reverse-time SDE", "ODE"]),
io.Int.Input("max_stage", default=3, min=1, max=3),
@@ -624,6 +672,8 @@ class SamplerSASolver(io.ComfyNode):
node_id="SamplerSASolver",
search_aliases=["sde"],
category="sampling/custom_sampling/samplers",
description="Creates an SA-Solver sampler with configurable predictor/corrector orders, SDE region, and PECE mode for high-order diffusion sampling.",
short_description="Create an SA-Solver high-order diffusion sampler.",
inputs=[
io.Model.Input("model"),
io.Float.Input("eta", default=1.0, min=0.0, max=10.0, step=0.01, round=False),
@@ -684,7 +734,8 @@ class SamplerSEEDS2(io.ComfyNode):
"- solver_type=phi_2, r=1.0, eta=0.0\n\n"
"exp_heun_2_x0_sde\n"
"- solver_type=phi_2, r=1.0, eta=1.0, s_noise=1.0"
)
),
short_description="SEEDS2 sampler with configurable solver and SDE settings.",
)

@classmethod
@@ -728,6 +779,8 @@ class SamplerCustom(io.ComfyNode):
return io.Schema(
node_id="SamplerCustom",
category="sampling/custom_sampling",
description="Runs a complete custom sampling pass by combining a model, sampler, sigmas, and conditioning with optional noise injection.",
short_description="Run custom sampling with manual sampler and sigmas.",
inputs=[
io.Model.Input("model"),
io.Boolean.Input("add_noise", default=True),
@@ -794,6 +847,8 @@ class BasicGuider(io.ComfyNode):
return io.Schema(
node_id="BasicGuider",
category="sampling/custom_sampling/guiders",
description="Creates a basic guider that applies a single conditioning input to guide the diffusion model without classifier-free guidance.",
short_description="Create a single-conditioning guider without CFG.",
inputs=[
io.Model.Input("model"),
io.Conditioning.Input("conditioning"),
@@ -815,6 +870,8 @@ class CFGGuider(io.ComfyNode):
return io.Schema(
node_id="CFGGuider",
category="sampling/custom_sampling/guiders",
description="Creates a classifier-free guidance guider that combines positive and negative conditioning with an adjustable CFG scale.",
short_description="Create a CFG guider with positive/negative conditioning.",
inputs=[
io.Model.Input("model"),
io.Conditioning.Input("positive"),
@@ -869,6 +926,8 @@ class DualCFGGuider(io.ComfyNode):
node_id="DualCFGGuider",
search_aliases=["dual prompt guidance"],
category="sampling/custom_sampling/guiders",
description="Creates a dual classifier-free guidance guider that blends two conditioning inputs against a negative with independent CFG scales and regular or nested styles.",
short_description="Create a dual CFG guider with two conditionings.",
inputs=[
io.Model.Input("model"),
io.Conditioning.Input("cond1"),
@@ -897,6 +956,8 @@ class DisableNoise(io.ComfyNode):
node_id="DisableNoise",
search_aliases=["zero noise"],
category="sampling/custom_sampling/noise",
description="Produces a zero-noise source that disables noise injection, useful for deterministic sampling or img2img without added noise.",
short_description="Produce zero noise to disable noise injection.",
inputs=[],
outputs=[io.Noise.Output()]
)
@@ -914,6 +975,8 @@ class RandomNoise(io.ComfyNode):
return io.Schema(
node_id="RandomNoise",
category="sampling/custom_sampling/noise",
description="Produces a random noise source from a seed value for use in custom sampling workflows.",
short_description="Produce seeded random noise for sampling.",
inputs=[io.Int.Input("noise_seed", default=0, min=0, max=0xffffffffffffffff, control_after_generate=True)],
outputs=[io.Noise.Output()]
)
@@ -931,6 +994,8 @@ class SamplerCustomAdvanced(io.ComfyNode):
return io.Schema(
node_id="SamplerCustomAdvanced",
category="sampling/custom_sampling",
description="Runs an advanced custom sampling pass using separate noise, guider, sampler, and sigmas inputs for maximum control over the diffusion process.",
short_description="Run advanced custom sampling with separate components.",
inputs=[
io.Noise.Input("noise"),
io.Guider.Input("guider"),
@@ -985,6 +1050,8 @@ class AddNoise(io.ComfyNode):
return io.Schema(
node_id="AddNoise",
category="_for_testing/custom_sampling/noise",
description="Adds scaled noise to a latent image using the model's noise schedule and sigma values for manual noise injection.",
short_description="Add scaled noise to a latent image.",
is_experimental=True,
inputs=[
io.Model.Input("model"),
@@ -1035,6 +1102,8 @@ class ManualSigmas(io.ComfyNode):
node_id="ManualSigmas",
search_aliases=["custom noise schedule", "define sigmas"],
category="_for_testing/custom_sampling",
description="Defines a custom sigma sequence by manually entering comma-separated numeric values as a text string.",
short_description="Define custom sigmas from comma-separated values.",
is_experimental=True,
inputs=[
io.String.Input("sigmas", default="1, 0.5", multiline=False)

@@ -49,6 +49,8 @@ class LoadImageDataSetFromFolderNode(io.ComfyNode):
node_id="LoadImageDataSetFromFolder",
display_name="Load Image Dataset from Folder",
category="dataset",
description="Loads all images from a selected input subfolder and outputs them as a list of image tensors.",
short_description="Loads images from a folder as a list.",
is_experimental=True,
inputs=[
io.Combo.Input(
@@ -86,6 +88,8 @@ class LoadImageTextDataSetFromFolderNode(io.ComfyNode):
node_id="LoadImageTextDataSetFromFolder",
display_name="Load Image and Text Dataset from Folder",
category="dataset",
description="Loads paired images and text captions from a folder, matching each image with its corresponding text file.",
short_description="Loads paired images and text captions from folder.",
is_experimental=True,
inputs=[
io.Combo.Input(
@@ -208,6 +212,8 @@ class SaveImageDataSetToFolderNode(io.ComfyNode):
node_id="SaveImageDataSetToFolder",
display_name="Save Image Dataset to Folder",
category="dataset",
description="Saves a list of images to a named folder in the output directory with configurable filename prefix.",
short_description="Saves image list to an output folder.",
is_experimental=True,
is_output_node=True,
is_input_list=True, # Receive images as list
@@ -247,6 +253,8 @@ class SaveImageTextDataSetToFolderNode(io.ComfyNode):
node_id="SaveImageTextDataSetToFolder",
display_name="Save Image and Text Dataset to Folder",
category="dataset",
description="Saves paired images and text captions to a named folder in the output directory with configurable filename prefix.",
short_description="Saves paired images and text to output folder.",
is_experimental=True,
is_output_node=True,
is_input_list=True, # Receive both images and texts as lists
@@ -401,6 +409,8 @@ class ImageProcessingNode(io.ComfyNode):
return io.Schema(
node_id=cls.node_id,
display_name=cls.display_name or cls.node_id,
description=getattr(cls, 'description', ''),
short_description=getattr(cls, 'short_description', ''),
category="dataset/image",
is_experimental=True,
is_input_list=is_group, # True for group, False for individual
@@ -550,6 +560,8 @@ class TextProcessingNode(io.ComfyNode):
return io.Schema(
node_id=cls.node_id,
display_name=cls.display_name or cls.node_id,
description=getattr(cls, 'description', ''),
short_description=getattr(cls, 'short_description', ''),
category="dataset/text",
is_experimental=True,
is_input_list=is_group, # True for group, False for individual
@@ -627,6 +639,7 @@ class ResizeImagesByShorterEdgeNode(ImageProcessingNode):
node_id = "ResizeImagesByShorterEdge"
display_name = "Resize Images by Shorter Edge"
description = "Resize images so that the shorter edge matches the specified length while preserving aspect ratio."
short_description = "Resizes images by shorter edge preserving aspect ratio."
extra_inputs = [
io.Int.Input(
"shorter_edge",
@@ -655,6 +668,7 @@ class ResizeImagesByLongerEdgeNode(ImageProcessingNode):
node_id = "ResizeImagesByLongerEdge"
display_name = "Resize Images by Longer Edge"
description = "Resize images so that the longer edge matches the specified length while preserving aspect ratio."
short_description = "Resizes images by longer edge preserving aspect ratio."
extra_inputs = [
io.Int.Input(
"longer_edge",
@@ -686,6 +700,7 @@ class CenterCropImagesNode(ImageProcessingNode):
node_id = "CenterCropImages"
display_name = "Center Crop Images"
description = "Center crop all images to the specified dimensions."
short_description = None
extra_inputs = [
io.Int.Input("width", default=512, min=1, max=8192, tooltip="Crop width."),
io.Int.Input("height", default=512, min=1, max=8192, tooltip="Crop height."),
@@ -708,6 +723,7 @@ class RandomCropImagesNode(ImageProcessingNode):
description = (
"Randomly crop all images to the specified dimensions (for data augmentation)."
)
short_description = None
extra_inputs = [
io.Int.Input("width", default=512, min=1, max=8192, tooltip="Crop width."),
io.Int.Input("height", default=512, min=1, max=8192, tooltip="Crop height."),
@@ -734,6 +750,7 @@ class NormalizeImagesNode(ImageProcessingNode):
node_id = "NormalizeImages"
display_name = "Normalize Images"
description = "Normalize images using mean and standard deviation."
short_description = None
extra_inputs = [
io.Float.Input(
"mean",
@@ -760,6 +777,7 @@ class AdjustBrightnessNode(ImageProcessingNode):
node_id = "AdjustBrightness"
display_name = "Adjust Brightness"
description = "Adjust brightness of all images."
short_description = None
extra_inputs = [
io.Float.Input(
"factor",
@@ -779,6 +797,7 @@ class AdjustContrastNode(ImageProcessingNode):
|
||||
node_id = "AdjustContrast"
|
||||
display_name = "Adjust Contrast"
|
||||
description = "Adjust contrast of all images."
|
||||
short_description = None
|
||||
extra_inputs = [
|
||||
io.Float.Input(
|
||||
"factor",
|
||||
@@ -798,6 +817,7 @@ class ShuffleDatasetNode(ImageProcessingNode):
|
||||
node_id = "ShuffleDataset"
|
||||
display_name = "Shuffle Image Dataset"
|
||||
description = "Randomly shuffle the order of images in the dataset."
|
||||
short_description = None
|
||||
is_group_process = True # Requires full list to shuffle
|
||||
extra_inputs = [
|
||||
io.Int.Input(
|
||||
@@ -821,6 +841,8 @@ class ShuffleImageTextDatasetNode(io.ComfyNode):
|
||||
node_id="ShuffleImageTextDataset",
|
||||
display_name="Shuffle Image-Text Dataset",
|
||||
category="dataset/image",
|
||||
description="Randomly shuffles paired image and text lists together using a seed, preserving their correspondence.",
|
||||
short_description="Shuffles paired image-text lists together.",
|
||||
is_experimental=True,
|
||||
is_input_list=True,
|
||||
inputs=[
|
||||
@@ -863,6 +885,7 @@ class TextToLowercaseNode(TextProcessingNode):
|
||||
node_id = "TextToLowercase"
|
||||
display_name = "Text to Lowercase"
|
||||
description = "Convert all texts to lowercase."
|
||||
short_description = None
|
||||
|
||||
@classmethod
|
||||
def _process(cls, text):
|
||||
@@ -873,6 +896,7 @@ class TextToUppercaseNode(TextProcessingNode):
|
||||
node_id = "TextToUppercase"
|
||||
display_name = "Text to Uppercase"
|
||||
description = "Convert all texts to uppercase."
|
||||
short_description = None
|
||||
|
||||
@classmethod
|
||||
def _process(cls, text):
|
||||
@@ -883,6 +907,7 @@ class TruncateTextNode(TextProcessingNode):
|
||||
node_id = "TruncateText"
|
||||
display_name = "Truncate Text"
|
||||
description = "Truncate all texts to a maximum length."
|
||||
short_description = None
|
||||
extra_inputs = [
|
||||
io.Int.Input(
|
||||
"max_length", default=77, min=1, max=10000, tooltip="Maximum text length."
|
||||
@@ -898,6 +923,7 @@ class AddTextPrefixNode(TextProcessingNode):
|
||||
node_id = "AddTextPrefix"
|
||||
display_name = "Add Text Prefix"
|
||||
description = "Add a prefix to all texts."
|
||||
short_description = None
|
||||
extra_inputs = [
|
||||
io.String.Input("prefix", default="", tooltip="Prefix to add."),
|
||||
]
|
||||
@@ -911,6 +937,7 @@ class AddTextSuffixNode(TextProcessingNode):
|
||||
node_id = "AddTextSuffix"
|
||||
display_name = "Add Text Suffix"
|
||||
description = "Add a suffix to all texts."
|
||||
short_description = None
|
||||
extra_inputs = [
|
||||
io.String.Input("suffix", default="", tooltip="Suffix to add."),
|
||||
]
|
||||
@@ -924,6 +951,7 @@ class ReplaceTextNode(TextProcessingNode):
|
||||
node_id = "ReplaceText"
|
||||
display_name = "Replace Text"
|
||||
description = "Replace text in all texts."
|
||||
short_description = None
|
||||
extra_inputs = [
|
||||
io.String.Input("find", default="", tooltip="Text to find."),
|
||||
io.String.Input("replace", default="", tooltip="Text to replace with."),
|
||||
@@ -938,6 +966,7 @@ class StripWhitespaceNode(TextProcessingNode):
|
||||
node_id = "StripWhitespace"
|
||||
display_name = "Strip Whitespace"
|
||||
description = "Strip leading and trailing whitespace from all texts."
|
||||
short_description = None
|
||||
|
||||
@classmethod
|
||||
def _process(cls, text):
|
||||
@@ -953,6 +982,7 @@ class ImageDeduplicationNode(ImageProcessingNode):
|
||||
node_id = "ImageDeduplication"
|
||||
display_name = "Image Deduplication"
|
||||
description = "Remove duplicate or very similar images from the dataset."
|
||||
short_description = None
|
||||
is_group_process = True # Requires full list to compare images
|
||||
extra_inputs = [
|
||||
io.Float.Input(
|
||||
@@ -1023,6 +1053,7 @@ class ImageGridNode(ImageProcessingNode):
|
||||
node_id = "ImageGrid"
|
||||
display_name = "Image Grid"
|
||||
description = "Arrange multiple images into a grid layout."
|
||||
short_description = None
|
||||
is_group_process = True # Requires full list to create grid
|
||||
is_output_list = False # Outputs single grid image
|
||||
extra_inputs = [
|
||||
@@ -1097,6 +1128,7 @@ class MergeImageListsNode(ImageProcessingNode):
|
||||
node_id = "MergeImageLists"
|
||||
display_name = "Merge Image Lists"
|
||||
description = "Concatenate multiple image lists into one."
|
||||
short_description = None
|
||||
is_group_process = True # Receives images as list
|
||||
|
||||
@classmethod
|
||||
@@ -1114,6 +1146,7 @@ class MergeTextListsNode(TextProcessingNode):
|
||||
node_id = "MergeTextLists"
|
||||
display_name = "Merge Text Lists"
|
||||
description = "Concatenate multiple text lists into one."
|
||||
short_description = None
|
||||
is_group_process = True # Receives texts as list
|
||||
|
||||
@classmethod
|
||||
@@ -1137,6 +1170,8 @@ class ResolutionBucket(io.ComfyNode):
|
||||
node_id="ResolutionBucket",
|
||||
display_name="Resolution Bucket",
|
||||
category="dataset",
|
||||
description="Groups latents and conditioning by resolution into batched buckets for efficient training with mixed aspect ratios.",
|
||||
short_description="Groups latents by resolution into training buckets.",
|
||||
is_experimental=True,
|
||||
is_input_list=True,
|
||||
inputs=[
|
||||
@@ -1230,6 +1265,8 @@ class MakeTrainingDataset(io.ComfyNode):
|
||||
search_aliases=["encode dataset"],
|
||||
display_name="Make Training Dataset",
|
||||
category="dataset",
|
||||
description="Encodes images with a VAE and text captions with CLIP to create paired latent and conditioning training data.",
|
||||
short_description="Encodes images and text into training data.",
|
||||
is_experimental=True,
|
||||
is_input_list=True, # images and texts as lists
|
||||
inputs=[
|
||||
@@ -1316,6 +1353,8 @@ class SaveTrainingDataset(io.ComfyNode):
|
||||
search_aliases=["export training data"],
|
||||
display_name="Save Training Dataset",
|
||||
category="dataset",
|
||||
description="Saves encoded latent and conditioning training data to disk in sharded files with configurable shard size.",
|
||||
short_description="Saves encoded training data to sharded files.",
|
||||
is_experimental=True,
|
||||
is_output_node=True,
|
||||
is_input_list=True, # Receive lists
|
||||
@@ -1417,6 +1456,8 @@ class LoadTrainingDataset(io.ComfyNode):
search_aliases=["import dataset", "training data"],
display_name="Load Training Dataset",
category="dataset",
description="Loads a previously saved training dataset of latents and conditioning from sharded files on disk.",
short_description="Loads saved training dataset from disk.",
is_experimental=True,
inputs=[
io.String.Input(
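The dataset nodes above rely on plain class attributes: ImageProcessingNode and TextProcessingNode read `description` and `short_description` with getattr() when building their io.Schema, so subclasses only declare strings and a `_process` hook. A minimal sketch of that pattern follows; the class name, id, and behavior are hypothetical and only illustrate where `short_description` sits.

```python
# Illustrative sketch only, not part of this diff: a hypothetical text-processing
# node in the class-attribute style used by the dataset nodes above.
class CollapseWhitespaceNode(TextProcessingNode):
    node_id = "CollapseWhitespaceExample"          # hypothetical node id
    display_name = "Collapse Whitespace (Example)"
    description = "Collapse runs of whitespace in every caption into single spaces."
    short_description = "Collapses repeated whitespace in captions."

    @classmethod
    def _process(cls, text):
        # Per-item processing; the base class is assumed to apply this to each caption.
        return " ".join(text.split())
```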
@@ -14,6 +14,8 @@ class DifferentialDiffusion(io.ComfyNode):
search_aliases=["inpaint gradient", "variable denoise strength"],
display_name="Differential Diffusion",
category="_for_testing",
description="Enables per-pixel variable denoise strength using a mask, where mask intensity controls how much each region is denoised during sampling.",
short_description="Per-pixel variable denoise strength via mask.",
inputs=[
io.Model.Input("model"),
io.Float.Input(

@@ -363,6 +363,7 @@ class EasyCacheNode(io.ComfyNode):
|
||||
node_id="EasyCache",
|
||||
display_name="EasyCache",
|
||||
description="Native EasyCache implementation.",
|
||||
short_description=None,
|
||||
category="advanced/debug/model",
|
||||
is_experimental=True,
|
||||
inputs=[
|
||||
@@ -496,6 +497,7 @@ class LazyCacheNode(io.ComfyNode):
|
||||
node_id="LazyCache",
|
||||
display_name="LazyCache",
|
||||
description="A homebrew version of EasyCache - even 'easier' version of EasyCache to implement. Overall works worse than EasyCache, but better in some rare cases AND universal compatibility with everything in ComfyUI.",
|
||||
short_description="Simpler EasyCache alternative with universal ComfyUI compatibility.",
|
||||
category="advanced/debug/model",
|
||||
is_experimental=True,
|
||||
inputs=[
|
||||
|
||||
@@ -10,6 +10,7 @@ class ReferenceLatent(io.ComfyNode):
|
||||
node_id="ReferenceLatent",
|
||||
category="advanced/conditioning/edit_models",
|
||||
description="This node sets the guiding latent for an edit model. If the model supports it you can chain multiple to set multiple reference images.",
|
||||
short_description="Sets guiding latent for edit models with chaining support.",
|
||||
inputs=[
|
||||
io.Conditioning.Input("conditioning"),
|
||||
io.Latent.Input("latent", optional=True),
|
||||
|
||||
@@ -19,6 +19,8 @@ class EpsilonScaling(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="Epsilon Scaling",
|
||||
category="model_patches/unet",
|
||||
description="Applies epsilon scaling to mitigate exposure bias in diffusion models by scaling the predicted noise after CFG, improving sample quality.",
|
||||
short_description="Scale predicted noise to reduce exposure bias.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Float.Input(
|
||||
@@ -121,6 +123,7 @@ class TemporalScoreRescaling(io.ComfyNode):
|
||||
"TSR - Temporal Score Rescaling (2510.01184)\n\n"
|
||||
"Rescaling the model's score or noise to steer the sampling diversity.\n"
|
||||
),
|
||||
short_description="Rescales temporal scores to control sampling diversity.",
|
||||
)
|
||||
|
||||
@classmethod
|
||||
|
||||
@@ -13,6 +13,8 @@ class CLIPTextEncodeFlux(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="CLIPTextEncodeFlux",
|
||||
category="advanced/conditioning/flux",
|
||||
description="Encodes separate CLIP-L and T5-XXL text prompts with a guidance value into Flux conditioning.",
|
||||
short_description="Encodes CLIP-L and T5-XXL prompts for Flux.",
|
||||
inputs=[
|
||||
io.Clip.Input("clip"),
|
||||
io.String.Input("clip_l", multiline=True, dynamic_prompts=True),
|
||||
@@ -40,6 +42,8 @@ class EmptyFlux2LatentImage(io.ComfyNode):
|
||||
node_id="EmptyFlux2LatentImage",
|
||||
display_name="Empty Flux 2 Latent",
|
||||
category="latent",
|
||||
description="Creates an empty Flux 2 latent image tensor with the specified width, height, and batch size.",
|
||||
short_description="Creates an empty Flux 2 latent image tensor.",
|
||||
inputs=[
|
||||
io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
|
||||
io.Int.Input("height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
|
||||
@@ -61,6 +65,8 @@ class FluxGuidance(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="FluxGuidance",
|
||||
category="advanced/conditioning/flux",
|
||||
description="Sets the guidance strength value on Flux conditioning to control how closely generation follows the prompt.",
|
||||
short_description="Sets guidance strength on Flux conditioning.",
|
||||
inputs=[
|
||||
io.Conditioning.Input("conditioning"),
|
||||
io.Float.Input("guidance", default=3.5, min=0.0, max=100.0, step=0.1),
|
||||
@@ -85,6 +91,7 @@ class FluxDisableGuidance(io.ComfyNode):
|
||||
node_id="FluxDisableGuidance",
|
||||
category="advanced/conditioning/flux",
|
||||
description="This node completely disables the guidance embed on Flux and Flux like models",
|
||||
short_description="Disables guidance embed on Flux and Flux-like models.",
|
||||
inputs=[
|
||||
io.Conditioning.Input("conditioning"),
|
||||
],
|
||||
@@ -129,6 +136,7 @@ class FluxKontextImageScale(io.ComfyNode):
|
||||
node_id="FluxKontextImageScale",
|
||||
category="advanced/conditioning/flux",
|
||||
description="This node resizes the image to one that is more optimal for flux kontext.",
|
||||
short_description="Resizes images to optimal dimensions for Flux Kontext.",
|
||||
inputs=[
|
||||
io.Image.Input("image"),
|
||||
],
|
||||
@@ -156,6 +164,8 @@ class FluxKontextMultiReferenceLatentMethod(io.ComfyNode):
|
||||
node_id="FluxKontextMultiReferenceLatentMethod",
|
||||
display_name="Edit Model Reference Method",
|
||||
category="advanced/conditioning/flux",
|
||||
description="Selects the method used for handling multiple reference latents in Flux Kontext edit models.",
|
||||
short_description="Selects reference latent method for Flux Kontext.",
|
||||
inputs=[
|
||||
io.Conditioning.Input("conditioning"),
|
||||
io.Combo.Input(
|
||||
@@ -214,6 +224,8 @@ class Flux2Scheduler(io.ComfyNode):
return io.Schema(
node_id="Flux2Scheduler",
category="sampling/custom_sampling/schedulers",
description="Generates a sigma schedule for Flux 2 sampling based on step count and image resolution.",
short_description="Generates a sigma schedule for Flux 2 sampling.",
inputs=[
io.Int.Input("steps", default=20, min=1, max=4096),
io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=1),
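The Flux nodes above pass `short_description` directly as an io.Schema keyword next to `description`, so the node list/search can show the short form while the tooltip keeps the long one. The sketch below shows that shape in isolation; the import path, node id, and behavior are assumptions for illustration, not part of this change.

```python
# Illustrative sketch only, not part of this diff.
from comfy_api.latest import io  # assumed import path for the v3 node API


class ExampleGreetingNode(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ExampleGreeting",          # hypothetical node id
            display_name="Example Greeting",
            category="utils",
            description="Builds a greeting string from a name input; included only to show where short_description fits.",
            short_description="Builds a greeting from a name.",
            inputs=[io.String.Input("name", default="world")],
            outputs=[io.String.Output()],
        )

    @classmethod
    def execute(cls, name):
        # io.NodeOutput wraps the return values in the v3 API (assumed here).
        return io.NodeOutput(f"hello, {name}")
```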
@@ -30,6 +30,8 @@ class FreeU(IO.ComfyNode):
|
||||
return IO.Schema(
|
||||
node_id="FreeU",
|
||||
category="model_patches/unet",
|
||||
description="Applies FreeU v1 to a UNet model, boosting backbone features and filtering skip connections using Fourier transforms for improved quality.",
|
||||
short_description="Applies FreeU v1 backbone boost and skip filtering.",
|
||||
inputs=[
|
||||
IO.Model.Input("model"),
|
||||
IO.Float.Input("b1", default=1.1, min=0.0, max=10.0, step=0.01),
|
||||
@@ -77,6 +79,8 @@ class FreeU_V2(IO.ComfyNode):
|
||||
return IO.Schema(
|
||||
node_id="FreeU_V2",
|
||||
category="model_patches/unet",
|
||||
description="Applies FreeU v2 to a UNet model with adaptive backbone scaling based on hidden state magnitude and Fourier skip filtering.",
|
||||
short_description="Applies FreeU v2 with adaptive scaling.",
|
||||
inputs=[
|
||||
IO.Model.Input("model"),
|
||||
IO.Float.Input("b1", default=1.3, min=0.0, max=10.0, step=0.01),
|
||||
|
||||
@@ -62,6 +62,7 @@ class FreSca(io.ComfyNode):
|
||||
display_name="FreSca",
|
||||
category="_for_testing",
|
||||
description="Applies frequency-dependent scaling to the guidance",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Float.Input("scale_low", default=1.0, min=0, max=10, step=0.01,
|
||||
|
||||
@@ -341,6 +341,8 @@ class GITSScheduler(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="GITSScheduler",
|
||||
category="sampling/custom_sampling/schedulers",
|
||||
description="Generates a noise schedule using the GITS method with precomputed optimal sigma levels and configurable coefficient.",
|
||||
short_description="Generates a GITS noise schedule with optimal sigma levels.",
|
||||
inputs=[
|
||||
io.Float.Input("coeff", default=1.20, min=0.80, max=1.50, step=0.05),
|
||||
io.Int.Input("steps", default=10, min=2, max=1000),
|
||||
|
||||
@@ -13,6 +13,7 @@ class QuadrupleCLIPLoader(io.ComfyNode):
|
||||
node_id="QuadrupleCLIPLoader",
|
||||
category="advanced/loaders",
|
||||
description="[Recipes]\n\nhidream: long clip-l, long clip-g, t5xxl, llama_8b_3.1_instruct",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
io.Combo.Input("clip_name1", options=folder_paths.get_filename_list("text_encoders")),
|
||||
io.Combo.Input("clip_name2", options=folder_paths.get_filename_list("text_encoders")),
|
||||
@@ -40,6 +41,8 @@ class CLIPTextEncodeHiDream(io.ComfyNode):
|
||||
node_id="CLIPTextEncodeHiDream",
|
||||
search_aliases=["hidream prompt"],
|
||||
category="advanced/conditioning",
|
||||
description="Encodes separate CLIP-L, CLIP-G, T5-XXL, and Llama text prompts into HiDream conditioning.",
|
||||
short_description="Encodes multi-encoder text prompts for HiDream.",
|
||||
inputs=[
|
||||
io.Clip.Input("clip"),
|
||||
io.String.Input("clip_l", multiline=True, dynamic_prompts=True),
|
||||
|
||||
@@ -38,6 +38,8 @@ class PairConditioningSetProperties:
|
||||
RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
|
||||
RETURN_NAMES = ("positive", "negative")
|
||||
CATEGORY = "advanced/hooks/cond pair"
|
||||
DESCRIPTION = "Set properties like strength, mask, hooks, and timesteps on a positive/negative conditioning pair."
|
||||
SHORT_DESCRIPTION = "Set properties on a positive/negative conditioning pair."
|
||||
FUNCTION = "set_properties"
|
||||
|
||||
def set_properties(self, positive_NEW, negative_NEW,
|
||||
@@ -73,6 +75,8 @@ class PairConditioningSetPropertiesAndCombine:
|
||||
RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
|
||||
RETURN_NAMES = ("positive", "negative")
|
||||
CATEGORY = "advanced/hooks/cond pair"
|
||||
DESCRIPTION = "Set properties on new conditioning pair and combine with existing positive/negative conditioning."
|
||||
SHORT_DESCRIPTION = "Set properties on new cond pair, combine with existing."
|
||||
FUNCTION = "set_properties"
|
||||
|
||||
def set_properties(self, positive, negative, positive_NEW, negative_NEW,
|
||||
@@ -104,6 +108,8 @@ class ConditioningSetProperties:
|
||||
EXPERIMENTAL = True
|
||||
RETURN_TYPES = ("CONDITIONING",)
|
||||
CATEGORY = "advanced/hooks/cond single"
|
||||
DESCRIPTION = "Set properties like strength, mask, hooks, and timesteps on a single conditioning input."
|
||||
SHORT_DESCRIPTION = "Set properties on a single conditioning input."
|
||||
FUNCTION = "set_properties"
|
||||
|
||||
def set_properties(self, cond_NEW,
|
||||
@@ -136,6 +142,8 @@ class ConditioningSetPropertiesAndCombine:
|
||||
EXPERIMENTAL = True
|
||||
RETURN_TYPES = ("CONDITIONING",)
|
||||
CATEGORY = "advanced/hooks/cond single"
|
||||
DESCRIPTION = "Set properties on new conditioning and combine it with an existing conditioning input."
|
||||
SHORT_DESCRIPTION = "Set properties on new conditioning, combine with existing."
|
||||
FUNCTION = "set_properties"
|
||||
|
||||
def set_properties(self, cond, cond_NEW,
|
||||
@@ -164,6 +172,8 @@ class PairConditioningCombine:
|
||||
RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
|
||||
RETURN_NAMES = ("positive", "negative")
|
||||
CATEGORY = "advanced/hooks/cond pair"
|
||||
DESCRIPTION = "Combine two positive/negative conditioning pairs into a single pair."
|
||||
SHORT_DESCRIPTION = None
|
||||
FUNCTION = "combine"
|
||||
|
||||
def combine(self, positive_A, negative_A, positive_B, negative_B):
|
||||
@@ -191,6 +201,8 @@ class PairConditioningSetDefaultAndCombine:
|
||||
RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
|
||||
RETURN_NAMES = ("positive", "negative")
|
||||
CATEGORY = "advanced/hooks/cond pair"
|
||||
DESCRIPTION = "Set default conditioning pair and combine it with existing positive/negative conditioning and optional hooks."
|
||||
SHORT_DESCRIPTION = "Set default cond pair and combine with existing."
|
||||
FUNCTION = "set_default_and_combine"
|
||||
|
||||
def set_default_and_combine(self, positive, negative, positive_DEFAULT, negative_DEFAULT,
|
||||
@@ -217,6 +229,8 @@ class ConditioningSetDefaultAndCombine:
|
||||
EXPERIMENTAL = True
|
||||
RETURN_TYPES = ("CONDITIONING",)
|
||||
CATEGORY = "advanced/hooks/cond single"
|
||||
DESCRIPTION = "Set default conditioning and combine it with existing conditioning input and optional hooks."
|
||||
SHORT_DESCRIPTION = "Set default conditioning and combine with existing."
|
||||
FUNCTION = "set_default_and_combine"
|
||||
|
||||
def set_default_and_combine(self, cond, cond_DEFAULT,
|
||||
@@ -244,6 +258,8 @@ class SetClipHooks:
|
||||
EXPERIMENTAL = True
|
||||
RETURN_TYPES = ("CLIP",)
|
||||
CATEGORY = "advanced/hooks/clip"
|
||||
DESCRIPTION = "Apply hooks to a CLIP model, optionally propagating them to conditioning outputs and enabling CLIP scheduling."
|
||||
SHORT_DESCRIPTION = "Apply hooks to a CLIP model with scheduling options."
|
||||
FUNCTION = "apply_hooks"
|
||||
|
||||
def apply_hooks(self, clip: CLIP, schedule_clip: bool, apply_to_conds: bool, hooks: comfy.hooks.HookGroup=None):
|
||||
@@ -275,6 +291,8 @@ class ConditioningTimestepsRange:
|
||||
RETURN_TYPES = ("TIMESTEPS_RANGE", "TIMESTEPS_RANGE", "TIMESTEPS_RANGE")
|
||||
RETURN_NAMES = ("TIMESTEPS_RANGE", "BEFORE_RANGE", "AFTER_RANGE")
|
||||
CATEGORY = "advanced/hooks"
|
||||
DESCRIPTION = "Define a timestep percentage range and output the range plus its complement before and after segments."
|
||||
SHORT_DESCRIPTION = "Define a timestep range with before/after complements."
|
||||
FUNCTION = "create_range"
|
||||
|
||||
def create_range(self, start_percent: float, end_percent: float):
|
||||
@@ -308,6 +326,8 @@ class CreateHookLora:
|
||||
EXPERIMENTAL = True
|
||||
RETURN_TYPES = ("HOOKS",)
|
||||
CATEGORY = "advanced/hooks/create"
|
||||
DESCRIPTION = "Create a LoRA hook with separate model and CLIP strength that can be scheduled on conditioning."
|
||||
SHORT_DESCRIPTION = "Create a LoRA hook with model and CLIP strength."
|
||||
FUNCTION = "create_hook"
|
||||
|
||||
def create_hook(self, lora_name: str, strength_model: float, strength_clip: float, prev_hooks: comfy.hooks.HookGroup=None):
|
||||
@@ -353,6 +373,8 @@ class CreateHookLoraModelOnly(CreateHookLora):
|
||||
EXPERIMENTAL = True
|
||||
RETURN_TYPES = ("HOOKS",)
|
||||
CATEGORY = "advanced/hooks/create"
|
||||
DESCRIPTION = "Create a LoRA hook that only affects the model (not CLIP) for scheduling on conditioning."
|
||||
SHORT_DESCRIPTION = "Create a model-only LoRA hook."
|
||||
FUNCTION = "create_hook_model_only"
|
||||
|
||||
def create_hook_model_only(self, lora_name: str, strength_model: float, prev_hooks: comfy.hooks.HookGroup=None):
|
||||
@@ -383,6 +405,8 @@ class CreateHookModelAsLora:
|
||||
EXPERIMENTAL = True
|
||||
RETURN_TYPES = ("HOOKS",)
|
||||
CATEGORY = "advanced/hooks/create"
|
||||
DESCRIPTION = "Create a hook from a full checkpoint treated as a LoRA, with separate model and CLIP strength controls."
|
||||
SHORT_DESCRIPTION = "Create a hook from a checkpoint treated as LoRA."
|
||||
FUNCTION = "create_hook"
|
||||
|
||||
def create_hook(self, ckpt_name: str, strength_model: float, strength_clip: float,
|
||||
@@ -431,6 +455,8 @@ class CreateHookModelAsLoraModelOnly(CreateHookModelAsLora):
|
||||
EXPERIMENTAL = True
|
||||
RETURN_TYPES = ("HOOKS",)
|
||||
CATEGORY = "advanced/hooks/create"
|
||||
DESCRIPTION = "Create a model-only hook from a full checkpoint treated as a LoRA, without affecting CLIP."
|
||||
SHORT_DESCRIPTION = "Create a model-only hook from a checkpoint as LoRA."
|
||||
FUNCTION = "create_hook_model_only"
|
||||
|
||||
def create_hook_model_only(self, ckpt_name: str, strength_model: float,
|
||||
@@ -460,6 +486,8 @@ class SetHookKeyframes:
|
||||
EXPERIMENTAL = True
|
||||
RETURN_TYPES = ("HOOKS",)
|
||||
CATEGORY = "advanced/hooks/scheduling"
|
||||
DESCRIPTION = "Assign keyframe schedules to hooks for controlling their strength over time during sampling."
|
||||
SHORT_DESCRIPTION = "Assign keyframe schedules to hooks over time."
|
||||
FUNCTION = "set_hook_keyframes"
|
||||
|
||||
def set_hook_keyframes(self, hooks: comfy.hooks.HookGroup, hook_kf: comfy.hooks.HookKeyframeGroup=None):
|
||||
@@ -488,6 +516,8 @@ class CreateHookKeyframe:
|
||||
RETURN_TYPES = ("HOOK_KEYFRAMES",)
|
||||
RETURN_NAMES = ("HOOK_KF",)
|
||||
CATEGORY = "advanced/hooks/scheduling"
|
||||
DESCRIPTION = "Create a single hook keyframe with a strength multiplier at a specific timestep percentage."
|
||||
SHORT_DESCRIPTION = "Create a hook keyframe at a specific timestep."
|
||||
FUNCTION = "create_hook_keyframe"
|
||||
|
||||
def create_hook_keyframe(self, strength_mult: float, start_percent: float, prev_hook_kf: comfy.hooks.HookKeyframeGroup=None):
|
||||
@@ -523,6 +553,8 @@ class CreateHookKeyframesInterpolated:
|
||||
RETURN_TYPES = ("HOOK_KEYFRAMES",)
|
||||
RETURN_NAMES = ("HOOK_KF",)
|
||||
CATEGORY = "advanced/hooks/scheduling"
|
||||
DESCRIPTION = "Generate multiple interpolated hook keyframes between start and end strength values over a timestep range."
|
||||
SHORT_DESCRIPTION = "Generate interpolated hook keyframes over a timestep range."
|
||||
FUNCTION = "create_hook_keyframes"
|
||||
|
||||
def create_hook_keyframes(self, strength_start: float, strength_end: float, interpolation: str,
|
||||
@@ -568,6 +600,8 @@ class CreateHookKeyframesFromFloats:
|
||||
RETURN_TYPES = ("HOOK_KEYFRAMES",)
|
||||
RETURN_NAMES = ("HOOK_KF",)
|
||||
CATEGORY = "advanced/hooks/scheduling"
|
||||
DESCRIPTION = "Create hook keyframes from a list of float values distributed evenly across a timestep percentage range."
|
||||
SHORT_DESCRIPTION = "Create hook keyframes from a list of float values."
|
||||
FUNCTION = "create_hook_keyframes"
|
||||
|
||||
def create_hook_keyframes(self, floats_strength: Union[float, list[float]],
|
||||
@@ -639,6 +673,8 @@ class CombineHooks:
|
||||
EXPERIMENTAL = True
|
||||
RETURN_TYPES = ("HOOKS",)
|
||||
CATEGORY = "advanced/hooks/combine"
|
||||
DESCRIPTION = "Combine two hook groups into one."
|
||||
SHORT_DESCRIPTION = None
|
||||
FUNCTION = "combine_hooks"
|
||||
|
||||
def combine_hooks(self,
|
||||
@@ -666,6 +702,8 @@ class CombineHooksFour:
|
||||
EXPERIMENTAL = True
|
||||
RETURN_TYPES = ("HOOKS",)
|
||||
CATEGORY = "advanced/hooks/combine"
|
||||
DESCRIPTION = "Combine up to four hook groups into one."
|
||||
SHORT_DESCRIPTION = None
|
||||
FUNCTION = "combine_hooks"
|
||||
|
||||
def combine_hooks(self,
|
||||
@@ -699,6 +737,8 @@ class CombineHooksEight:
EXPERIMENTAL = True
RETURN_TYPES = ("HOOKS",)
CATEGORY = "advanced/hooks/combine"
DESCRIPTION = "Combine up to eight hook groups into one."
SHORT_DESCRIPTION = None
FUNCTION = "combine_hooks"

def combine_hooks(self,
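The hook nodes above use the legacy class-constant style, where SHORT_DESCRIPTION sits beside DESCRIPTION, CATEGORY, and FUNCTION. A minimal sketch of that style follows; the node class and its behavior are made up purely for illustration.

```python
# Illustrative sketch only, not part of this diff: a hypothetical legacy-style node
# showing where SHORT_DESCRIPTION sits among the usual class constants.
class ConcatStringsExample:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "text_a": ("STRING", {"default": ""}),
            "text_b": ("STRING", {"default": ""}),
        }}

    RETURN_TYPES = ("STRING",)
    CATEGORY = "utils"
    DESCRIPTION = "Concatenate two strings into one; included only to show the SHORT_DESCRIPTION constant."
    SHORT_DESCRIPTION = "Concatenate two strings."
    FUNCTION = "concat"

    def concat(self, text_a, text_b):
        # Legacy nodes return a tuple matching RETURN_TYPES.
        return (text_a + text_b,)
```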
@@ -15,6 +15,8 @@ class CLIPTextEncodeHunyuanDiT(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="CLIPTextEncodeHunyuanDiT",
|
||||
category="advanced/conditioning",
|
||||
description="Encodes text using both BERT and mT5-XL tokenizers for Hunyuan DiT conditioning.",
|
||||
short_description="Dual-tokenizer text encoding for Hunyuan DiT.",
|
||||
inputs=[
|
||||
io.Clip.Input("clip"),
|
||||
io.String.Input("bert", multiline=True, dynamic_prompts=True),
|
||||
@@ -42,6 +44,8 @@ class EmptyHunyuanLatentVideo(io.ComfyNode):
|
||||
node_id="EmptyHunyuanLatentVideo",
|
||||
display_name="Empty HunyuanVideo 1.0 Latent",
|
||||
category="latent/video",
|
||||
description="Creates an empty latent tensor sized for HunyuanVideo 1.0 video generation.",
|
||||
short_description="Empty latent for HunyuanVideo 1.0 generation.",
|
||||
inputs=[
|
||||
io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16),
|
||||
io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
|
||||
@@ -67,6 +71,8 @@ class EmptyHunyuanVideo15Latent(EmptyHunyuanLatentVideo):
|
||||
schema = super().define_schema()
|
||||
schema.node_id = "EmptyHunyuanVideo15Latent"
|
||||
schema.display_name = "Empty HunyuanVideo 1.5 Latent"
|
||||
schema.description = "Creates an empty latent tensor sized for HunyuanVideo 1.5 video generation with 16x spatial downscale."
|
||||
schema.short_description = "Empty latent for HunyuanVideo 1.5 generation."
|
||||
return schema
|
||||
|
||||
@classmethod
|
||||
@@ -82,6 +88,8 @@ class HunyuanVideo15ImageToVideo(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="HunyuanVideo15ImageToVideo",
|
||||
category="conditioning/video_models",
|
||||
description="Prepares conditioning and latent for HunyuanVideo 1.5 image-to-video generation with start image and CLIP vision support.",
|
||||
short_description="HunyuanVideo 1.5 image-to-video conditioning setup.",
|
||||
inputs=[
|
||||
io.Conditioning.Input("positive"),
|
||||
io.Conditioning.Input("negative"),
|
||||
@@ -131,6 +139,9 @@ class HunyuanVideo15SuperResolution(io.ComfyNode):
|
||||
def define_schema(cls):
|
||||
return io.Schema(
|
||||
node_id="HunyuanVideo15SuperResolution",
|
||||
category="conditioning/video_models",
|
||||
description="Sets up conditioning for HunyuanVideo 1.5 super-resolution upscaling of a latent with noise augmentation and optional image guidance.",
|
||||
short_description="HunyuanVideo 1.5 super-resolution latent conditioning.",
|
||||
inputs=[
|
||||
io.Conditioning.Input("positive"),
|
||||
io.Conditioning.Input("negative"),
|
||||
@@ -177,6 +188,8 @@ class LatentUpscaleModelLoader(io.ComfyNode):
|
||||
node_id="LatentUpscaleModelLoader",
|
||||
display_name="Load Latent Upscale Model",
|
||||
category="loaders",
|
||||
description="Loads a latent upscale model from disk, supporting HunyuanVideo 720p, 1080p, and other latent upsampler architectures.",
|
||||
short_description="Load a latent upscale model from file.",
|
||||
inputs=[
|
||||
io.Combo.Input("model_name", options=folder_paths.get_filename_list("latent_upscale_models")),
|
||||
],
|
||||
@@ -226,6 +239,8 @@ class HunyuanVideo15LatentUpscaleWithModel(io.ComfyNode):
|
||||
node_id="HunyuanVideo15LatentUpscaleWithModel",
|
||||
display_name="Hunyuan Video 15 Latent Upscale With Model",
|
||||
category="latent",
|
||||
description="Upscales a video latent to a target resolution using a loaded latent upscale model and configurable upscale method.",
|
||||
short_description="Upscale video latent using a latent upscale model.",
|
||||
inputs=[
|
||||
io.LatentUpscaleModel.Input("model"),
|
||||
io.Latent.Input("samples"),
|
||||
@@ -275,6 +290,8 @@ class TextEncodeHunyuanVideo_ImageToVideo(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="TextEncodeHunyuanVideo_ImageToVideo",
|
||||
category="advanced/conditioning",
|
||||
description="Encodes text with CLIP vision image embeddings for HunyuanVideo image-to-video conditioning using an interleaved template.",
|
||||
short_description="Text and image encoding for HunyuanVideo image-to-video.",
|
||||
inputs=[
|
||||
io.Clip.Input("clip"),
|
||||
io.ClipVisionOutput.Input("clip_vision_output"),
|
||||
@@ -306,6 +323,8 @@ class HunyuanImageToVideo(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="HunyuanImageToVideo",
|
||||
category="conditioning/video_models",
|
||||
description="Prepares conditioning and latent for Hunyuan image-to-video generation with selectable guidance type.",
|
||||
short_description="Hunyuan image-to-video conditioning with guidance options.",
|
||||
inputs=[
|
||||
io.Conditioning.Input("positive"),
|
||||
io.Vae.Input("vae"),
|
||||
@@ -357,6 +376,8 @@ class EmptyHunyuanImageLatent(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="EmptyHunyuanImageLatent",
|
||||
category="latent",
|
||||
description="Creates an empty latent tensor sized for Hunyuan image generation.",
|
||||
short_description="Empty latent for Hunyuan image generation.",
|
||||
inputs=[
|
||||
io.Int.Input("width", default=2048, min=64, max=nodes.MAX_RESOLUTION, step=32),
|
||||
io.Int.Input("height", default=2048, min=64, max=nodes.MAX_RESOLUTION, step=32),
|
||||
@@ -380,6 +401,9 @@ class HunyuanRefinerLatent(io.ComfyNode):
def define_schema(cls):
return io.Schema(
node_id="HunyuanRefinerLatent",
category="conditioning/video_models",
description="Prepares conditioning for a Hunyuan refiner pass by concatenating the input latent with noise augmentation settings.",
short_description="Hunyuan refiner conditioning with noise augmentation.",
inputs=[
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),
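EmptyHunyuanVideo15Latent earlier in this file shows a third way to set the field: fetch the parent schema with super().define_schema() and overwrite its attributes before returning it. A hypothetical subclass using the same approach, with made-up names and strings:

```python
# Illustrative sketch only, not part of this diff: reusing a parent schema and
# overriding only the identity and description fields, as EmptyHunyuanVideo15Latent does.
class EmptyHunyuanLatentVideoExample(EmptyHunyuanLatentVideo):
    @classmethod
    def define_schema(cls):
        schema = super().define_schema()
        schema.node_id = "EmptyHunyuanLatentVideoExample"   # hypothetical id
        schema.display_name = "Empty Hunyuan Latent (Example)"
        schema.description = "Example variant that only changes the schema metadata."
        schema.short_description = "Example schema-override variant."
        return schema
```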
@@ -18,6 +18,8 @@ class EmptyLatentHunyuan3Dv2(IO.ComfyNode):
|
||||
return IO.Schema(
|
||||
node_id="EmptyLatentHunyuan3Dv2",
|
||||
category="latent/3d",
|
||||
description="Creates an empty latent tensor for Hunyuan 3D v2 generation with configurable resolution and batch size.",
|
||||
short_description="Empty latent for Hunyuan 3D v2 generation.",
|
||||
inputs=[
|
||||
IO.Int.Input("resolution", default=3072, min=1, max=8192),
|
||||
IO.Int.Input("batch_size", default=1, min=1, max=4096, tooltip="The number of latent images in the batch."),
|
||||
@@ -41,6 +43,8 @@ class Hunyuan3Dv2Conditioning(IO.ComfyNode):
|
||||
return IO.Schema(
|
||||
node_id="Hunyuan3Dv2Conditioning",
|
||||
category="conditioning/video_models",
|
||||
description="Creates positive and negative conditioning for Hunyuan 3D v2 from a CLIP vision output embedding.",
|
||||
short_description="Conditioning from CLIP vision for Hunyuan 3D v2.",
|
||||
inputs=[
|
||||
IO.ClipVisionOutput.Input("clip_vision_output"),
|
||||
],
|
||||
@@ -66,6 +70,8 @@ class Hunyuan3Dv2ConditioningMultiView(IO.ComfyNode):
|
||||
return IO.Schema(
|
||||
node_id="Hunyuan3Dv2ConditioningMultiView",
|
||||
category="conditioning/video_models",
|
||||
description="Creates multi-view conditioning for Hunyuan 3D v2 from up to four directional CLIP vision outputs with positional encoding.",
|
||||
short_description="Multi-view conditioning for Hunyuan 3D v2.",
|
||||
inputs=[
|
||||
IO.ClipVisionOutput.Input("front", optional=True),
|
||||
IO.ClipVisionOutput.Input("left", optional=True),
|
||||
@@ -103,6 +109,8 @@ class VAEDecodeHunyuan3D(IO.ComfyNode):
|
||||
return IO.Schema(
|
||||
node_id="VAEDecodeHunyuan3D",
|
||||
category="latent/3d",
|
||||
description="Decodes a Hunyuan 3D latent into a voxel grid using a VAE with configurable chunk size and octree resolution.",
|
||||
short_description="Decodes Hunyuan 3D latent into voxels.",
|
||||
inputs=[
|
||||
IO.Latent.Input("samples"),
|
||||
IO.Vae.Input("vae"),
|
||||
@@ -425,6 +433,8 @@ class VoxelToMeshBasic(IO.ComfyNode):
|
||||
return IO.Schema(
|
||||
node_id="VoxelToMeshBasic",
|
||||
category="3d",
|
||||
description="Converts a voxel grid to a 3D mesh using basic cube-based surface extraction with adjustable threshold.",
|
||||
short_description="Converts voxels to mesh using basic extraction.",
|
||||
inputs=[
|
||||
IO.Voxel.Input("voxel"),
|
||||
IO.Float.Input("threshold", default=0.6, min=-1.0, max=1.0, step=0.01),
|
||||
@@ -454,6 +464,8 @@ class VoxelToMesh(IO.ComfyNode):
|
||||
return IO.Schema(
|
||||
node_id="VoxelToMesh",
|
||||
category="3d",
|
||||
description="Converts a voxel grid to a 3D mesh using selectable surface net or basic algorithm with adjustable threshold.",
|
||||
short_description="Converts voxels to mesh with algorithm selection.",
|
||||
inputs=[
|
||||
IO.Voxel.Input("voxel"),
|
||||
IO.Combo.Input("algorithm", options=["surface net", "basic"]),
|
||||
@@ -621,6 +633,8 @@ class SaveGLB(IO.ComfyNode):
|
||||
display_name="Save 3D Model",
|
||||
search_aliases=["export 3d model", "save mesh"],
|
||||
category="3d",
|
||||
description="Saves a 3D mesh or model file to disk in GLB format with optional workflow metadata embedding.",
|
||||
short_description="Saves 3D mesh or model to GLB file.",
|
||||
is_output_node=True,
|
||||
inputs=[
|
||||
IO.MultiType.Input(
|
||||
|
||||
@@ -103,6 +103,8 @@ class HypernetworkLoader(IO.ComfyNode):
|
||||
return IO.Schema(
|
||||
node_id="HypernetworkLoader",
|
||||
category="loaders",
|
||||
description="Loads a hypernetwork and patches it onto a diffusion model's attention layers with adjustable strength.",
|
||||
short_description="Loads and applies a hypernetwork to a model.",
|
||||
inputs=[
|
||||
IO.Model.Input("model"),
|
||||
IO.Combo.Input("hypernetwork_name", options=folder_paths.get_filename_list("hypernetworks")),
|
||||
|
||||
@@ -28,6 +28,8 @@ class HyperTile(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="HyperTile",
|
||||
category="model_patches/unet",
|
||||
description="Patches the model to split self-attention into smaller tiles during inference, reducing memory usage and speeding up generation at higher resolutions.",
|
||||
short_description="Tile self-attention for faster high-res generation.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Int.Input("tile_size", default=256, min=1, max=2048),
|
||||
|
||||
@@ -13,6 +13,7 @@ class ImageCompare(IO.ComfyNode):
|
||||
node_id="ImageCompare",
|
||||
display_name="Image Compare",
|
||||
description="Compares two images side by side with a slider.",
|
||||
short_description=None,
|
||||
category="image",
|
||||
is_experimental=True,
|
||||
is_output_node=True,
|
||||
|
||||
@@ -25,6 +25,8 @@ class ImageCrop(IO.ComfyNode):
|
||||
search_aliases=["trim"],
|
||||
display_name="Image Crop",
|
||||
category="image/transform",
|
||||
description="Crops a rectangular region from an image at the specified position and dimensions.",
|
||||
short_description="Crops a region from an image.",
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.Int.Input("width", default=512, min=1, max=nodes.MAX_RESOLUTION, step=1),
|
||||
@@ -54,6 +56,8 @@ class RepeatImageBatch(IO.ComfyNode):
|
||||
node_id="RepeatImageBatch",
|
||||
search_aliases=["duplicate image", "clone image"],
|
||||
category="image/batch",
|
||||
description="Repeats an image a specified number of times to create a batch of identical images.",
|
||||
short_description="Repeats an image to create a batch.",
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.Int.Input("amount", default=1, min=1, max=4096),
|
||||
@@ -76,6 +80,8 @@ class ImageFromBatch(IO.ComfyNode):
|
||||
node_id="ImageFromBatch",
|
||||
search_aliases=["select image", "pick from batch", "extract image"],
|
||||
category="image/batch",
|
||||
description="Selects a contiguous range of images from a batch starting at a given index.",
|
||||
short_description="Selects images from a batch by index.",
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.Int.Input("batch_index", default=0, min=0, max=4095),
|
||||
@@ -102,6 +108,8 @@ class ImageAddNoise(IO.ComfyNode):
|
||||
node_id="ImageAddNoise",
|
||||
search_aliases=["film grain"],
|
||||
category="image",
|
||||
description="Adds random noise to an image with adjustable strength, useful for film grain effects.",
|
||||
short_description="Adds random noise to an image.",
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.Int.Input(
|
||||
@@ -134,6 +142,8 @@ class SaveAnimatedWEBP(IO.ComfyNode):
|
||||
return IO.Schema(
|
||||
node_id="SaveAnimatedWEBP",
|
||||
category="image/animation",
|
||||
description="Saves a sequence of images as an animated WEBP file with configurable FPS, quality, and compression.",
|
||||
short_description="Saves images as an animated WEBP file.",
|
||||
inputs=[
|
||||
IO.Image.Input("images"),
|
||||
IO.String.Input("filename_prefix", default="ComfyUI"),
|
||||
@@ -171,6 +181,8 @@ class SaveAnimatedPNG(IO.ComfyNode):
|
||||
return IO.Schema(
|
||||
node_id="SaveAnimatedPNG",
|
||||
category="image/animation",
|
||||
description="Saves a sequence of images as an animated PNG (APNG) file with configurable FPS and compression level.",
|
||||
short_description="Saves images as an animated PNG file.",
|
||||
inputs=[
|
||||
IO.Image.Input("images"),
|
||||
IO.String.Input("filename_prefix", default="ComfyUI"),
|
||||
@@ -207,6 +219,7 @@ class ImageStitch(IO.ComfyNode):
|
||||
description="Stitches image2 to image1 in the specified direction.\n"
|
||||
"If image2 is not provided, returns image1 unchanged.\n"
|
||||
"Optional spacing can be added between images.",
|
||||
short_description="Joins two images together in a specified direction.",
|
||||
category="image/transform",
|
||||
inputs=[
|
||||
IO.Image.Input("image1"),
|
||||
@@ -379,6 +392,8 @@ class ResizeAndPadImage(IO.ComfyNode):
|
||||
node_id="ResizeAndPadImage",
|
||||
search_aliases=["fit to size"],
|
||||
category="image/transform",
|
||||
description="Resizes an image to fit within target dimensions while preserving aspect ratio, then pads with a solid color to fill the target size.",
|
||||
short_description="Resizes an image to fit and pads the remainder.",
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.Int.Input("target_width", default=512, min=1, max=nodes.MAX_RESOLUTION, step=1),
|
||||
@@ -430,6 +445,7 @@ class SaveSVGNode(IO.ComfyNode):
|
||||
node_id="SaveSVGNode",
|
||||
search_aliases=["export vector", "save vector graphics"],
|
||||
description="Save SVG files on disk.",
|
||||
short_description=None,
|
||||
category="image/save",
|
||||
inputs=[
|
||||
IO.SVG.Input("svg"),
|
||||
@@ -502,7 +518,7 @@ class GetImageSize(IO.ComfyNode):
node_id="GetImageSize",
search_aliases=["dimensions", "resolution", "image info"],
display_name="Get Image Size",
description="Returns width and height of the image, and passes it through unchanged.",
description="Returns the width, height, and batch size of an image.",
category="image",
inputs=[
IO.Image.Input("image"),
@@ -537,6 +553,8 @@ class ImageRotate(IO.ComfyNode):
|
||||
node_id="ImageRotate",
|
||||
search_aliases=["turn", "flip orientation"],
|
||||
category="image/transform",
|
||||
description="Rotates an image by 90, 180, or 270 degrees.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.Combo.Input("rotation", options=["none", "90 degrees", "180 degrees", "270 degrees"]),
|
||||
@@ -567,6 +585,8 @@ class ImageFlip(IO.ComfyNode):
|
||||
node_id="ImageFlip",
|
||||
search_aliases=["mirror", "reflect"],
|
||||
category="image/transform",
|
||||
description="Flips an image horizontally or vertically.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.Combo.Input("flip_method", options=["x-axis: vertically", "y-axis: horizontally"]),
|
||||
@@ -593,6 +613,8 @@ class ImageScaleToMaxDimension(IO.ComfyNode):
|
||||
return IO.Schema(
|
||||
node_id="ImageScaleToMaxDimension",
|
||||
category="image/upscaling",
|
||||
description="Scales an image so its largest dimension matches the specified size while preserving aspect ratio.",
|
||||
short_description="Scales image to a target max dimension size.",
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.Combo.Input(
|
||||
|
||||
@@ -10,6 +10,8 @@ class InstructPixToPixConditioning(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="InstructPixToPixConditioning",
|
||||
category="conditioning/instructpix2pix",
|
||||
description="Prepares conditioning for InstructPix2Pix image editing by encoding the input image through a VAE and attaching it as concat latent to both positive and negative conditioning.",
|
||||
short_description="Prepare conditioning for InstructPix2Pix editing.",
|
||||
inputs=[
|
||||
io.Conditioning.Input("positive"),
|
||||
io.Conditioning.Input("negative"),
|
||||
|
||||
@@ -14,6 +14,8 @@ class Kandinsky5ImageToVideo(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="Kandinsky5ImageToVideo",
|
||||
category="conditioning/video_models",
|
||||
description="Sets up Kandinsky 5 image-to-video generation by creating an empty video latent and optionally encoding a start image for conditioning.",
|
||||
short_description="Sets up Kandinsky 5 image-to-video conditioning.",
|
||||
inputs=[
|
||||
io.Conditioning.Input("positive"),
|
||||
io.Conditioning.Input("negative"),
|
||||
@@ -73,6 +75,7 @@ class NormalizeVideoLatentStart(io.ComfyNode):
|
||||
node_id="NormalizeVideoLatentStart",
|
||||
category="conditioning/video_models",
|
||||
description="Normalizes the initial frames of a video latent to match the mean and standard deviation of subsequent reference frames. Helps reduce differences between the starting frames and the rest of the video.",
|
||||
short_description="Normalizes initial video latent frames to match reference frames.",
|
||||
inputs=[
|
||||
io.Latent.Input("latent"),
|
||||
io.Int.Input("start_frame_count", default=4, min=1, max=nodes.MAX_RESOLUTION, step=1, tooltip="Number of latent frames to normalize, counted from the start"),
|
||||
@@ -106,6 +109,8 @@ class CLIPTextEncodeKandinsky5(io.ComfyNode):
|
||||
node_id="CLIPTextEncodeKandinsky5",
|
||||
search_aliases=["kandinsky prompt"],
|
||||
category="advanced/conditioning/kandinsky5",
|
||||
description="Encodes separate CLIP-L and Qwen 2.5 7B text prompts into Kandinsky 5 conditioning.",
|
||||
short_description="Encodes CLIP-L and Qwen prompts for Kandinsky 5.",
|
||||
inputs=[
|
||||
io.Clip.Input("clip"),
|
||||
io.String.Input("clip_l", multiline=True, dynamic_prompts=True),
|
||||
|
||||
@@ -23,6 +23,8 @@ class LatentAdd(io.ComfyNode):
|
||||
node_id="LatentAdd",
|
||||
search_aliases=["combine latents", "sum latents"],
|
||||
category="latent/advanced",
|
||||
description="Adds two latent tensors element-wise, automatically resizing the second to match the first.",
|
||||
short_description="Add two latent tensors element-wise.",
|
||||
inputs=[
|
||||
io.Latent.Input("samples1"),
|
||||
io.Latent.Input("samples2"),
|
||||
@@ -50,6 +52,8 @@ class LatentSubtract(io.ComfyNode):
|
||||
node_id="LatentSubtract",
|
||||
search_aliases=["difference latent", "remove features"],
|
||||
category="latent/advanced",
|
||||
description="Subtracts one latent tensor from another element-wise, automatically resizing the second to match the first.",
|
||||
short_description="Subtract one latent tensor from another.",
|
||||
inputs=[
|
||||
io.Latent.Input("samples1"),
|
||||
io.Latent.Input("samples2"),
|
||||
@@ -77,6 +81,8 @@ class LatentMultiply(io.ComfyNode):
|
||||
node_id="LatentMultiply",
|
||||
search_aliases=["scale latent", "amplify latent", "latent gain"],
|
||||
category="latent/advanced",
|
||||
description="Multiplies a latent tensor by a scalar value to scale its magnitude up or down.",
|
||||
short_description="Scale a latent tensor by a multiplier.",
|
||||
inputs=[
|
||||
io.Latent.Input("samples"),
|
||||
io.Float.Input("multiplier", default=1.0, min=-10.0, max=10.0, step=0.01),
|
||||
@@ -101,6 +107,8 @@ class LatentInterpolate(io.ComfyNode):
|
||||
node_id="LatentInterpolate",
|
||||
search_aliases=["blend latent", "mix latent", "lerp latent", "transition"],
|
||||
category="latent/advanced",
|
||||
description="Interpolates between two latent tensors using a ratio, preserving magnitude for smoother blending than linear interpolation.",
|
||||
short_description="Interpolate between two latent tensors.",
|
||||
inputs=[
|
||||
io.Latent.Input("samples1"),
|
||||
io.Latent.Input("samples2"),
|
||||
@@ -140,6 +148,8 @@ class LatentConcat(io.ComfyNode):
|
||||
node_id="LatentConcat",
|
||||
search_aliases=["join latents", "stitch latents"],
|
||||
category="latent/advanced",
|
||||
description="Concatenates two latent tensors along a chosen spatial or temporal dimension (x, y, or t) with optional reversal.",
|
||||
short_description="Concatenate two latents along a chosen dimension.",
|
||||
inputs=[
|
||||
io.Latent.Input("samples1"),
|
||||
io.Latent.Input("samples2"),
|
||||
@@ -180,6 +190,8 @@ class LatentCut(io.ComfyNode):
|
||||
node_id="LatentCut",
|
||||
search_aliases=["crop latent", "slice latent", "extract region"],
|
||||
category="latent/advanced",
|
||||
description="Extracts a contiguous slice from a latent tensor along a chosen spatial or temporal dimension at a specified index and size.",
|
||||
short_description="Extract a slice from a latent along a dimension.",
|
||||
inputs=[
|
||||
io.Latent.Input("samples"),
|
||||
io.Combo.Input("dim", options=["x", "y", "t"]),
|
||||
@@ -221,6 +233,8 @@ class LatentCutToBatch(io.ComfyNode):
|
||||
node_id="LatentCutToBatch",
|
||||
search_aliases=["slice to batch", "split latent", "tile latent"],
|
||||
category="latent/advanced",
|
||||
description="Slices a latent tensor along a chosen dimension into equal-sized chunks and reshapes them into the batch dimension.",
|
||||
short_description="Slice latent along a dimension into batch chunks.",
|
||||
inputs=[
|
||||
io.Latent.Input("samples"),
|
||||
io.Combo.Input("dim", options=["t", "x", "y"]),
|
||||
@@ -263,6 +277,8 @@ class LatentBatch(io.ComfyNode):
|
||||
node_id="LatentBatch",
|
||||
search_aliases=["combine latents", "merge latents", "join latents"],
|
||||
category="latent/batch",
|
||||
description="Concatenates two latent tensors along the batch dimension, preserving batch index metadata.",
|
||||
short_description="Concatenate two latents along the batch dimension.",
|
||||
is_deprecated=True,
|
||||
inputs=[
|
||||
io.Latent.Input("samples1"),
|
||||
@@ -291,6 +307,8 @@ class LatentBatchSeedBehavior(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="LatentBatchSeedBehavior",
|
||||
category="latent/advanced",
|
||||
description="Controls whether each item in a latent batch receives a random or fixed noise seed during sampling.",
|
||||
short_description="Set random or fixed seed behavior for batches.",
|
||||
inputs=[
|
||||
io.Latent.Input("samples"),
|
||||
io.Combo.Input("seed_behavior", options=["random", "fixed"], default="fixed"),
|
||||
@@ -320,6 +338,8 @@ class LatentApplyOperation(io.ComfyNode):
|
||||
node_id="LatentApplyOperation",
|
||||
search_aliases=["transform latent"],
|
||||
category="latent/advanced/operations",
|
||||
description="Applies a latent operation (such as tonemap or sharpen) directly to a latent tensor.",
|
||||
short_description="Apply a latent operation to a latent tensor.",
|
||||
is_experimental=True,
|
||||
inputs=[
|
||||
io.Latent.Input("samples"),
|
||||
@@ -344,6 +364,8 @@ class LatentApplyOperationCFG(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="LatentApplyOperationCFG",
|
||||
category="latent/advanced/operations",
|
||||
description="Applies a latent operation during the CFG pre-processing stage of sampling, modifying the model's prediction before guidance is applied.",
|
||||
short_description="Apply a latent operation during CFG pre-processing.",
|
||||
is_experimental=True,
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
@@ -376,6 +398,8 @@ class LatentOperationTonemapReinhard(io.ComfyNode):
|
||||
node_id="LatentOperationTonemapReinhard",
|
||||
search_aliases=["hdr latent"],
|
||||
category="latent/advanced/operations",
|
||||
description="Creates a Reinhard tonemapping operation that compresses high-magnitude latent values to reduce blown-out artifacts.",
|
||||
short_description="Create a Reinhard tonemapping latent operation.",
|
||||
is_experimental=True,
|
||||
inputs=[
|
||||
io.Float.Input("multiplier", default=1.0, min=0.0, max=100.0, step=0.01),
|
||||
@@ -411,6 +435,8 @@ class LatentOperationSharpen(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="LatentOperationSharpen",
|
||||
category="latent/advanced/operations",
|
||||
description="Creates a sharpening operation that enhances detail in latent space using a Gaussian-based unsharp mask with configurable radius, sigma, and strength.",
|
||||
short_description="Create a Gaussian-based latent sharpening operation.",
|
||||
is_experimental=True,
|
||||
inputs=[
|
||||
io.Int.Input("sharpen_radius", default=9, min=1, max=31, step=1),
|
||||
@@ -448,6 +474,8 @@ class ReplaceVideoLatentFrames(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="ReplaceVideoLatentFrames",
|
||||
category="latent/batch",
|
||||
description="Replaces a range of frames in a destination video latent with frames from a source latent at a specified index.",
|
||||
short_description="Replace video latent frames at a given index.",
|
||||
inputs=[
|
||||
io.Latent.Input("destination", tooltip="The destination latent where frames will be replaced."),
|
||||
io.Latent.Input("source", optional=True, tooltip="The source latent providing frames to insert into the destination latent. If not provided, the destination latent is returned unchanged."),
|
||||
|
||||
@@ -31,6 +31,8 @@ class Load3D(IO.ComfyNode):
|
||||
node_id="Load3D",
|
||||
display_name="Load 3D & Animation",
|
||||
category="3d",
|
||||
description="Loads a 3D model file and renders it to produce an image, mask, normal map, camera info, recording video, and 3D file output.",
|
||||
short_description="Loads and renders a 3D model file.",
|
||||
is_experimental=True,
|
||||
inputs=[
|
||||
IO.Combo.Input("model_file", options=sorted(files), upload=IO.UploadType.model),
|
||||
@@ -81,6 +83,8 @@ class Preview3D(IO.ComfyNode):
|
||||
search_aliases=["view mesh", "3d viewer"],
|
||||
display_name="Preview 3D & Animation",
|
||||
category="3d",
|
||||
description="Previews a 3D model or file in the UI with optional camera info and background image overlay.",
|
||||
short_description="Previews a 3D model in the UI.",
|
||||
is_experimental=True,
|
||||
is_output_node=True,
|
||||
inputs=[
|
||||
|
||||
@@ -16,6 +16,8 @@ class SwitchNode(io.ComfyNode):
|
||||
node_id="ComfySwitchNode",
|
||||
display_name="Switch",
|
||||
category="logic",
|
||||
description="Routes one of two inputs to the output based on a boolean switch value, evaluating only the selected branch lazily.",
|
||||
short_description="Route one of two inputs based on a boolean.",
|
||||
is_experimental=True,
|
||||
inputs=[
|
||||
io.Boolean.Input("switch"),
|
||||
@@ -47,6 +49,8 @@ class SoftSwitchNode(io.ComfyNode):
|
||||
node_id="ComfySoftSwitchNode",
|
||||
display_name="Soft Switch",
|
||||
category="logic",
|
||||
description="Routes one of two optional inputs to the output based on a boolean, falling back to whichever input is connected if only one is provided.",
|
||||
short_description="Switch with optional fallback to connected input.",
|
||||
is_experimental=True,
|
||||
inputs=[
|
||||
io.Boolean.Input("switch"),
|
||||
@@ -102,6 +106,8 @@ class CustomComboNode(io.ComfyNode):
|
||||
node_id="CustomCombo",
|
||||
display_name="Custom Combo",
|
||||
category="utils",
|
||||
description="Provides a user-defined dropdown combo box where options are written by the user, outputting the selected string and its index.",
|
||||
short_description="User-defined dropdown outputting string and index.",
|
||||
is_experimental=True,
|
||||
inputs=[io.Combo.Input("choice", options=[])],
|
||||
outputs=[
|
||||
@@ -137,6 +143,8 @@ class DCTestNode(io.ComfyNode):
|
||||
node_id="DCTestNode",
|
||||
display_name="DCTest",
|
||||
category="logic",
|
||||
description="Test node demonstrating DynamicCombo inputs with nested sub-options that conditionally show different input types.",
|
||||
short_description="Test node for DynamicCombo nested inputs.",
|
||||
is_output_node=True,
|
||||
inputs=[io.DynamicCombo.Input("combo", options=[
|
||||
io.DynamicCombo.Option("option1", [io.String.Input("string")]),
|
||||
@@ -175,6 +183,8 @@ class AutogrowNamesTestNode(io.ComfyNode):
|
||||
node_id="AutogrowNamesTestNode",
|
||||
display_name="AutogrowNamesTest",
|
||||
category="logic",
|
||||
description="Test node demonstrating Autogrow inputs with named template slots that dynamically add float inputs.",
|
||||
short_description="Test node for Autogrow named template inputs.",
|
||||
inputs=[
|
||||
_io.Autogrow.Input("autogrow", template=template)
|
||||
],
|
||||
@@ -195,6 +205,8 @@ class AutogrowPrefixTestNode(io.ComfyNode):
|
||||
node_id="AutogrowPrefixTestNode",
|
||||
display_name="AutogrowPrefixTest",
|
||||
category="logic",
|
||||
description="Test node demonstrating Autogrow inputs with prefix-based template slots that dynamically add numbered float inputs.",
|
||||
short_description="Test node for Autogrow prefix template inputs.",
|
||||
inputs=[
|
||||
_io.Autogrow.Input("autogrow", template=template)
|
||||
],
|
||||
@@ -214,6 +226,8 @@ class ComboOutputTestNode(io.ComfyNode):
|
||||
node_id="ComboOptionTestNode",
|
||||
display_name="ComboOptionTest",
|
||||
category="logic",
|
||||
description="Test node demonstrating combo output types by passing two selected combo values through as outputs.",
|
||||
short_description="Test node for combo output passthrough.",
|
||||
inputs=[io.Combo.Input("combo", options=["option1", "option2", "option3"]),
|
||||
io.Combo.Input("combo2", options=["option4", "option5", "option6"])],
|
||||
outputs=[io.Combo.Output(), io.Combo.Output()],
|
||||
@@ -231,6 +245,8 @@ class ConvertStringToComboNode(io.ComfyNode):
|
||||
search_aliases=["string to dropdown", "text to combo"],
|
||||
display_name="Convert String to Combo",
|
||||
category="logic",
|
||||
description="Converts a string value into a combo type output so it can be used as a dropdown selection in downstream nodes.",
|
||||
short_description="Convert a string to a combo type output.",
|
||||
inputs=[io.String.Input("string")],
|
||||
outputs=[io.Combo.Output()],
|
||||
)
|
||||
@@ -247,6 +263,8 @@ class InvertBooleanNode(io.ComfyNode):
|
||||
search_aliases=["not", "toggle", "negate", "flip boolean"],
|
||||
display_name="Invert Boolean",
|
||||
category="logic",
|
||||
description="Inverts a boolean value, outputting true when input is false and vice versa.",
|
||||
short_description="Invert a boolean value.",
|
||||
inputs=[io.Boolean.Input("boolean")],
|
||||
outputs=[io.Boolean.Output()],
|
||||
)
|
||||
|
||||
@@ -32,6 +32,7 @@ class LoraLoaderBypass:

CATEGORY = "loaders"
DESCRIPTION = "Apply LoRA in bypass mode. Unlike regular LoRA, this doesn't modify model weights - instead it injects the LoRA computation during forward pass. Useful for training scenarios."
SHORT_DESCRIPTION = "Applies LoRA via forward pass injection, not weight modification."
EXPERIMENTAL = True

def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
@@ -62,6 +63,8 @@ class LoraLoaderBypassModelOnly(LoraLoaderBypass):
"strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}),
}}
RETURN_TYPES = ("MODEL",)
DESCRIPTION = "Apply LoRA in bypass mode to only the diffusion model without modifying base weights or affecting CLIP."
SHORT_DESCRIPTION = "Apply bypass LoRA to model only, no CLIP."
FUNCTION = "load_lora_model_only"

def load_lora_model_only(self, model, lora_name, strength_model):

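Likewise, a minimal sketch of the legacy class-attribute style used in the hunks directly above. Again a hypothetical example class, assuming only the V1 conventions (INPUT_TYPES, RETURN_TYPES, FUNCTION, DESCRIPTION, SHORT_DESCRIPTION) that appear elsewhere in this diff:

    class ExamplePassthroughNode:
        @classmethod
        def INPUT_TYPES(s):
            return {"required": {"model": ("MODEL",)}}

        RETURN_TYPES = ("MODEL",)
        FUNCTION = "passthrough"
        CATEGORY = "advanced/model"
        # Full tooltip text.
        DESCRIPTION = "Passes the model through unchanged; included only to illustrate the new attribute."
        # New attribute read alongside DESCRIPTION: shorter text for the node list/search.
        SHORT_DESCRIPTION = "Pass a model through unchanged."

        def passthrough(self, model):
            return (model,)
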
@@ -92,6 +92,8 @@ class LoraSave(io.ComfyNode):
search_aliases=["export lora"],
display_name="Extract and Save Lora",
category="_for_testing",
description="Extracts LoRA weights from a model or text encoder diff using SVD decomposition and saves them as a safetensors file, supporting standard and full diff modes.",
short_description="Extract and save LoRA from model diff.",
inputs=[
io.String.Input("filename_prefix", default="loras/ComfyUI_extracted_lora"),
io.Int.Input("rank", default=8, min=1, max=4096, step=1),

@@ -11,6 +11,8 @@ class LotusConditioning(io.ComfyNode):
return io.Schema(
node_id="LotusConditioning",
category="conditioning/lotus",
description="Provides precomputed null conditioning embeddings for the Lotus depth/normal estimation model, avoiding the need for a separate text encoder.",
short_description="Precomputed null conditioning for Lotus model.",
inputs=[],
outputs=[io.Conditioning.Output(display_name="conditioning")],
)

@@ -18,6 +18,8 @@ class EmptyLTXVLatentVideo(io.ComfyNode):
return io.Schema(
node_id="EmptyLTXVLatentVideo",
category="latent/video/ltxv",
description="Creates an empty LTXV video latent tensor with the specified dimensions and batch size.",
short_description="Creates an empty LTXV video latent tensor.",
inputs=[
io.Int.Input("width", default=768, min=64, max=nodes.MAX_RESOLUTION, step=32),
io.Int.Input("height", default=512, min=64, max=nodes.MAX_RESOLUTION, step=32),
@@ -42,6 +44,8 @@ class LTXVImgToVideo(io.ComfyNode):
return io.Schema(
node_id="LTXVImgToVideo",
category="conditioning/video_models",
description="Encodes an image through a VAE and sets up conditioning for LTXV image-to-video generation with adjustable strength.",
short_description="Sets up LTXV image-to-video conditioning.",
inputs=[
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),
@@ -87,6 +91,8 @@ class LTXVImgToVideoInplace(io.ComfyNode):
return io.Schema(
node_id="LTXVImgToVideoInplace",
category="conditioning/video_models",
description="Encodes an image through a VAE and injects it into an existing latent for in-place LTXV image-to-video conditioning.",
short_description="In-place LTXV image-to-video latent conditioning.",
inputs=[
io.Vae.Input("vae"),
io.Image.Input("image"),
@@ -171,6 +177,8 @@ class LTXVAddGuide(io.ComfyNode):
return io.Schema(
node_id="LTXVAddGuide",
category="conditioning/video_models",
description="Adds a guiding image or video to LTXV conditioning at a specified frame index to control video generation.",
short_description="Adds a guiding image or video to LTXV conditioning.",
inputs=[
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),
@@ -335,6 +343,8 @@ class LTXVCropGuides(io.ComfyNode):
return io.Schema(
node_id="LTXVCropGuides",
category="conditioning/video_models",
description="Removes appended keyframe guide latents from an LTXV latent and resets keyframe indices in the conditioning.",
short_description="Removes keyframe guide latents from LTXV conditioning.",
inputs=[
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),
@@ -373,6 +383,8 @@ class LTXVConditioning(io.ComfyNode):
return io.Schema(
node_id="LTXVConditioning",
category="conditioning/video_models",
description="Sets the frame rate on LTXV positive and negative conditioning for video generation.",
short_description="Sets frame rate on LTXV conditioning.",
inputs=[
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),
@@ -397,6 +409,8 @@ class ModelSamplingLTXV(io.ComfyNode):
return io.Schema(
node_id="ModelSamplingLTXV",
category="advanced/model",
description="Configures LTXV model sampling by computing a shift parameter from max_shift, base_shift, and latent token count.",
short_description="Configures LTXV model sampling shift parameters.",
inputs=[
io.Model.Input("model"),
io.Float.Input("max_shift", default=2.05, min=0.0, max=100.0, step=0.01),
@@ -442,6 +456,8 @@ class LTXVScheduler(io.ComfyNode):
return io.Schema(
node_id="LTXVScheduler",
category="sampling/custom_sampling/schedulers",
description="Generates a sigma schedule for LTXV sampling with configurable shift parameters, stretch, and terminal value.",
short_description="Generates a sigma schedule for LTXV sampling.",
inputs=[
io.Int.Input("steps", default=20, min=1, max=10000),
io.Float.Input("max_shift", default=2.05, min=0.0, max=100.0, step=0.01),
@@ -546,6 +562,8 @@ class LTXVPreprocess(io.ComfyNode):
return io.Schema(
node_id="LTXVPreprocess",
category="image",
description="Applies H.264 video compression preprocessing to images to improve LTXV generation quality.",
short_description="Applies video compression preprocessing for LTXV.",
inputs=[
io.Image.Input("image"),
io.Int.Input(
@@ -574,6 +592,8 @@ class LTXVConcatAVLatent(io.ComfyNode):
return io.Schema(
node_id="LTXVConcatAVLatent",
category="latent/video/ltxv",
description="Concatenates separate video and audio latents into a combined audio-video latent for LTXV processing.",
short_description="Concatenates video and audio latents for LTXV.",
inputs=[
io.Latent.Input("video_latent"),
io.Latent.Input("audio_latent"),
@@ -609,7 +629,8 @@ class LTXVSeparateAVLatent(io.ComfyNode):
return io.Schema(
node_id="LTXVSeparateAVLatent",
category="latent/video/ltxv",
description="LTXV Separate AV Latent",
description="Separates a combined audio-video latent into individual video and audio latents.",
short_description=None,
inputs=[
io.Latent.Input("av_latent"),
],

@@ -14,6 +14,8 @@ class LTXVAudioVAELoader(io.ComfyNode):
node_id="LTXVAudioVAELoader",
display_name="LTXV Audio VAE Loader",
category="audio",
description="Loads an LTXV Audio VAE model from a checkpoint file for audio encoding and decoding.",
short_description="Loads an LTXV Audio VAE model checkpoint.",
inputs=[
io.Combo.Input(
"ckpt_name",
@@ -38,6 +40,7 @@ class LTXVAudioVAEEncode(io.ComfyNode):
node_id="LTXVAudioVAEEncode",
display_name="LTXV Audio VAE Encode",
category="audio",
description="Encodes audio into latent representations using the LTXV Audio VAE model.",
inputs=[
io.Audio.Input("audio", tooltip="The audio to be encoded."),
io.Vae.Input(
@@ -68,6 +71,8 @@ class LTXVAudioVAEDecode(io.ComfyNode):
node_id="LTXVAudioVAEDecode",
display_name="LTXV Audio VAE Decode",
category="audio",
description="Decodes latent representations back into audio using the LTXV Audio VAE model.",
short_description="Decodes latents back to audio via LTXV Audio VAE.",
inputs=[
io.Latent.Input("samples", tooltip="The latent to be decoded."),
io.Vae.Input(
@@ -101,6 +106,8 @@ class LTXVEmptyLatentAudio(io.ComfyNode):
node_id="LTXVEmptyLatentAudio",
display_name="LTXV Empty Latent Audio",
category="latent/audio",
description="Creates an empty LTXV audio latent tensor sized according to the frame count, frame rate, and Audio VAE configuration.",
short_description="Creates an empty LTXV audio latent tensor.",
inputs=[
io.Int.Input(
"frames_number",
@@ -177,6 +184,7 @@ class LTXAVTextEncoderLoader(io.ComfyNode):
display_name="LTXV Audio Text Encoder Loader",
category="advanced/loaders",
description="[Recipes]\n\nltxav: gemma 3 12B",
short_description=None,
inputs=[
io.Combo.Input(
"text_encoder",

@@ -19,6 +19,8 @@ class LTXVLatentUpsampler:
RETURN_TYPES = ("LATENT",)
FUNCTION = "upsample_latent"
CATEGORY = "latent/video"
DESCRIPTION = "Upsample an LTXV video latent by a factor of 2 using a dedicated latent upscale model."
SHORT_DESCRIPTION = "Upsample an LTXV video latent by 2x."
EXPERIMENTAL = True

def upsample_latent(

@@ -10,6 +10,8 @@ class RenormCFG(io.ComfyNode):
return io.Schema(
node_id="RenormCFG",
category="advanced/model",
description="Applies renormalized classifier-free guidance with configurable truncation threshold and renormalization strength to control CFG output magnitude.",
short_description="Applies renormalized classifier-free guidance with truncation.",
inputs=[
io.Model.Input("model"),
io.Float.Input("cfg_trunc", default=100, min=0.0, max=100.0, step=0.01),
@@ -84,6 +86,7 @@ class CLIPTextEncodeLumina2(io.ComfyNode):
category="conditioning",
description="Encodes a system prompt and a user prompt using a CLIP model into an embedding "
"that can be used to guide the diffusion model towards generating specific images.",
short_description="Encodes system and user prompts via CLIP for Lumina2.",
inputs=[
io.Combo.Input(
"system_prompt",

@@ -13,6 +13,7 @@ class Mahiro(io.ComfyNode):
display_name="Mahiro CFG",
category="_for_testing",
description="Modify the guidance to scale more on the 'direction' of the positive prompt rather than the difference between the negative prompt.",
short_description="Scales guidance toward positive prompt direction over negative difference.",
inputs=[
io.Model.Input("model"),
],

@@ -52,6 +52,8 @@ class LatentCompositeMasked(IO.ComfyNode):
node_id="LatentCompositeMasked",
search_aliases=["overlay latent", "layer latent", "paste latent", "inpaint latent"],
category="latent",
description="Composites a source latent onto a destination latent at a specified position with optional mask and resize support.",
short_description="Composites one latent onto another with masking.",
inputs=[
IO.Latent.Input("destination"),
IO.Latent.Input("source"),
@@ -81,6 +83,8 @@ class ImageCompositeMasked(IO.ComfyNode):
node_id="ImageCompositeMasked",
search_aliases=["paste image", "overlay", "layer"],
category="image",
description="Composites a source image onto a destination image at a specified position with optional mask and resize support.",
short_description="Composites one image onto another with masking.",
inputs=[
IO.Image.Input("destination"),
IO.Image.Input("source"),
@@ -110,6 +114,8 @@ class MaskToImage(IO.ComfyNode):
search_aliases=["convert mask"],
display_name="Convert Mask to Image",
category="mask",
description="Converts a single-channel mask into a three-channel grayscale image.",
short_description=None,
inputs=[
IO.Mask.Input("mask"),
],
@@ -132,6 +138,7 @@ class ImageToMask(IO.ComfyNode):
search_aliases=["extract channel", "channel to mask"],
display_name="Convert Image to Mask",
category="mask",
description="Extracts a selected color channel from an image as a mask.",
inputs=[
IO.Image.Input("image"),
IO.Combo.Input("channel", options=["red", "green", "blue", "alpha"]),
@@ -155,6 +162,8 @@ class ImageColorToMask(IO.ComfyNode):
node_id="ImageColorToMask",
search_aliases=["color keying", "chroma key"],
category="mask",
description="Creates a mask from an image where pixels matching a specified RGB color value become white.",
short_description="Creates a mask from pixels matching a color.",
inputs=[
IO.Image.Input("image"),
IO.Int.Input("color", default=0, min=0, max=0xFFFFFF, step=1, display_mode=IO.NumberDisplay.number),
@@ -178,6 +187,8 @@ class SolidMask(IO.ComfyNode):
return IO.Schema(
node_id="SolidMask",
category="mask",
description="Creates a uniform solid mask filled with a single value at the specified dimensions.",
short_description="Creates a solid mask with a uniform value.",
inputs=[
IO.Float.Input("value", default=1.0, min=0.0, max=1.0, step=0.01),
IO.Int.Input("width", default=512, min=1, max=nodes.MAX_RESOLUTION, step=1),
@@ -201,6 +212,8 @@ class InvertMask(IO.ComfyNode):
node_id="InvertMask",
search_aliases=["reverse mask", "flip mask"],
category="mask",
description="Inverts a mask so white becomes black and vice versa.",
short_description=None,
inputs=[
IO.Mask.Input("mask"),
],
@@ -222,6 +235,8 @@ class CropMask(IO.ComfyNode):
node_id="CropMask",
search_aliases=["cut mask", "extract mask region", "mask slice"],
category="mask",
description="Crops a rectangular region from a mask at the specified position and dimensions.",
short_description="Crops a rectangular region from a mask.",
inputs=[
IO.Mask.Input("mask"),
IO.Int.Input("x", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1),
@@ -248,6 +263,8 @@ class MaskComposite(IO.ComfyNode):
node_id="MaskComposite",
search_aliases=["combine masks", "blend masks", "layer masks"],
category="mask",
description="Composites a source mask onto a destination mask at a specified position using selectable blend operations.",
short_description="Composites masks with selectable blend operations.",
inputs=[
IO.Mask.Input("destination"),
IO.Mask.Input("source"),
@@ -297,6 +314,8 @@ class FeatherMask(IO.ComfyNode):
node_id="FeatherMask",
search_aliases=["soft edge mask", "blur mask edges", "gradient mask edge"],
category="mask",
description="Applies a soft gradient feather to the edges of a mask with independent control for each side.",
short_description="Feathers mask edges with per-side control.",
inputs=[
IO.Mask.Input("mask"),
IO.Int.Input("left", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1),
@@ -345,6 +364,8 @@ class GrowMask(IO.ComfyNode):
search_aliases=["expand mask", "shrink mask"],
display_name="Grow Mask",
category="mask",
description="Expands or shrinks a mask by a specified number of pixels using morphological dilation or erosion with optional tapered corners.",
short_description="Expands or shrinks a mask by pixel amount.",
inputs=[
IO.Mask.Input("mask"),
IO.Int.Input("expand", default=0, min=-nodes.MAX_RESOLUTION, max=nodes.MAX_RESOLUTION, step=1),
@@ -382,6 +403,8 @@ class ThresholdMask(IO.ComfyNode):
node_id="ThresholdMask",
search_aliases=["binary mask"],
category="mask",
description="Converts a mask to binary by setting pixels above a threshold to white and below to black.",
short_description="Converts a mask to binary using a threshold.",
inputs=[
IO.Mask.Input("mask"),
IO.Float.Input("value", default=0.5, min=0.0, max=1.0, step=0.01),
@@ -408,7 +431,8 @@ class MaskPreview(IO.ComfyNode):
search_aliases=["show mask", "view mask", "inspect mask", "debug mask"],
display_name="Preview Mask",
category="mask",
description="Saves the input images to your ComfyUI output directory.",
description="Previews a mask in the UI by rendering it as a grayscale image.",
short_description="Previews a mask as a grayscale image.",
inputs=[
IO.Mask.Input("mask"),
],

@@ -11,6 +11,8 @@ class EmptyMochiLatentVideo(io.ComfyNode):
return io.Schema(
node_id="EmptyMochiLatentVideo",
category="latent/video",
description="Creates an empty latent tensor sized for Mochi video generation with configurable width, height, frame length, and batch size.",
short_description="Create empty latent for Mochi video generation.",
inputs=[
io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16),
io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),

@@ -60,6 +60,8 @@ class ModelSamplingDiscrete:
FUNCTION = "patch"

CATEGORY = "advanced/model"
DESCRIPTION = "Override the model's sampling method to use a discrete noise schedule with a selectable prediction type."
SHORT_DESCRIPTION = "Override model sampling to a discrete noise schedule."

def patch(self, model, sampling, zsnr):
m = model.clone()
@@ -96,6 +98,8 @@ class ModelSamplingStableCascade:
FUNCTION = "patch"

CATEGORY = "advanced/model"
DESCRIPTION = "Override the model's sampling to use Stable Cascade noise scheduling with an adjustable shift parameter."
SHORT_DESCRIPTION = "Override sampling to Stable Cascade noise scheduling."

def patch(self, model, shift):
m = model.clone()
@@ -122,6 +126,8 @@ class ModelSamplingSD3:
FUNCTION = "patch"

CATEGORY = "advanced/model"
DESCRIPTION = "Override the model's sampling to use the SD3 discrete flow noise schedule with an adjustable shift parameter."
SHORT_DESCRIPTION = "Override sampling to SD3 discrete flow schedule."

def patch(self, model, shift, multiplier=1000):
m = model.clone()
@@ -144,6 +150,8 @@ class ModelSamplingAuraFlow(ModelSamplingSD3):
"shift": ("FLOAT", {"default": 1.73, "min": 0.0, "max": 100.0, "step":0.01}),
}}

DESCRIPTION = "Override the model's sampling to use the AuraFlow discrete flow noise schedule with an adjustable shift."
SHORT_DESCRIPTION = "Override sampling to AuraFlow discrete flow schedule."
FUNCTION = "patch_aura"

def patch_aura(self, model, shift):
@@ -163,6 +171,8 @@ class ModelSamplingFlux:
FUNCTION = "patch"

CATEGORY = "advanced/model"
DESCRIPTION = "Override the model's sampling to use the Flux flow schedule with resolution-dependent shift computed from base and max shift values."
SHORT_DESCRIPTION = "Override sampling to Flux flow schedule with resolution shift."

def patch(self, model, max_shift, base_shift, width, height):
m = model.clone()
@@ -198,6 +208,8 @@ class ModelSamplingContinuousEDM:
FUNCTION = "patch"

CATEGORY = "advanced/model"
DESCRIPTION = "Override the model's sampling to use a continuous EDM noise schedule with configurable sigma range and prediction type."
SHORT_DESCRIPTION = "Override sampling to continuous EDM noise schedule."

def patch(self, model, sampling, sigma_max, sigma_min):
m = model.clone()
@@ -243,6 +255,8 @@ class ModelSamplingContinuousV:
FUNCTION = "patch"

CATEGORY = "advanced/model"
DESCRIPTION = "Override the model's sampling to use a continuous V-prediction noise schedule with configurable sigma range."
SHORT_DESCRIPTION = "Override sampling to continuous V-prediction schedule."

def patch(self, model, sampling, sigma_max, sigma_min):
m = model.clone()
@@ -269,6 +283,8 @@ class RescaleCFG:
FUNCTION = "patch"

CATEGORY = "advanced/model"
DESCRIPTION = "Apply Rescale CFG to the model, which normalizes the CFG output to match the standard deviation of the positive conditioning prediction."
SHORT_DESCRIPTION = "Normalize CFG output to match positive conditioning std."

def patch(self, model, multiplier):
def rescale_cfg(args):
@@ -310,6 +326,7 @@ class ModelComputeDtype:
FUNCTION = "patch"

CATEGORY = "advanced/debug/model"
DESCRIPTION = "Override the compute dtype used by the model during inference."

def patch(self, model, dtype):
m = model.clone()

@@ -11,6 +11,8 @@ class PatchModelAddDownscale(io.ComfyNode):
node_id="PatchModelAddDownscale",
display_name="PatchModelAddDownscale (Kohya Deep Shrink)",
category="model_patches/unet",
description="Patches the UNet to downscale internal feature maps at a specified block during a configurable sigma range, then upscale on output, implementing the Kohya Deep Shrink technique for faster generation.",
short_description="Kohya Deep Shrink: downscale UNet internals for speed.",
inputs=[
io.Model.Input("model"),
io.Int.Input("block_number", default=3, min=1, max=32, step=1),

@@ -22,6 +22,8 @@ class ModelMergeSimple:
FUNCTION = "merge"

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Merge two diffusion models using a simple ratio to blend all weights uniformly."
SHORT_DESCRIPTION = "Merge two models with a uniform blend ratio."

def merge(self, model1, model2, ratio):
m = model1.clone()
@@ -41,6 +43,8 @@ class ModelSubtract:
FUNCTION = "merge"

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Subtract one diffusion model's weights from another with an adjustable multiplier for extracting differences."
SHORT_DESCRIPTION = "Subtract model weights with adjustable multiplier."

def merge(self, model1, model2, multiplier):
m = model1.clone()
@@ -59,6 +63,8 @@ class ModelAdd:
FUNCTION = "merge"

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Add the weights of one diffusion model on top of another."
SHORT_DESCRIPTION = None

def merge(self, model1, model2):
m = model1.clone()
@@ -79,6 +85,8 @@ class CLIPMergeSimple:
FUNCTION = "merge"

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Merge two CLIP text encoder models using a simple ratio to blend all weights uniformly."
SHORT_DESCRIPTION = "Merge two CLIP models with a uniform blend ratio."

def merge(self, clip1, clip2, ratio):
m = clip1.clone()
@@ -102,6 +110,8 @@ class CLIPSubtract:
FUNCTION = "merge"

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Subtract one CLIP model's weights from another with an adjustable multiplier for extracting differences."
SHORT_DESCRIPTION = "Subtract CLIP weights with adjustable multiplier."

def merge(self, clip1, clip2, multiplier):
m = clip1.clone()
@@ -124,6 +134,8 @@ class CLIPAdd:
FUNCTION = "merge"

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Add the weights of one CLIP model on top of another."
SHORT_DESCRIPTION = None

def merge(self, clip1, clip2):
m = clip1.clone()
@@ -148,6 +160,8 @@ class ModelMergeBlocks:
FUNCTION = "merge"

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Merge two diffusion models with separate blend ratios for input, middle, and output blocks."
SHORT_DESCRIPTION = "Merge two models with per-block blend ratios."

def merge(self, model1, model2, **kwargs):
m = model1.clone()
@@ -228,6 +242,8 @@ def save_checkpoint(model, clip=None, vae=None, clip_vision=None, filename_prefi

class CheckpointSave:
SEARCH_ALIASES = ["save model", "export checkpoint", "merge save"]
DESCRIPTION = "Saves a model, CLIP, and VAE as a combined checkpoint file in safetensors format with optional workflow metadata."
SHORT_DESCRIPTION = "Saves model, CLIP, and VAE as a checkpoint."
def __init__(self):
self.output_dir = folder_paths.get_output_directory()

@@ -262,6 +278,8 @@ class CLIPSave:
OUTPUT_NODE = True

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Save a CLIP text encoder model to safetensors files, splitting by model component."
SHORT_DESCRIPTION = "Save a CLIP model to safetensors files."

def save(self, clip, filename_prefix, prompt=None, extra_pnginfo=None):
prompt_info = ""
@@ -319,6 +337,8 @@ class VAESave:
OUTPUT_NODE = True

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Save a VAE model to a safetensors file."
SHORT_DESCRIPTION = None

def save(self, vae, filename_prefix, prompt=None, extra_pnginfo=None):
full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)
@@ -354,6 +374,8 @@ class ModelSave:
OUTPUT_NODE = True

CATEGORY = "advanced/model_merging"
DESCRIPTION = "Save a diffusion model to a safetensors file."
SHORT_DESCRIPTION = None

def save(self, model, filename_prefix, prompt=None, extra_pnginfo=None):
save_checkpoint(model, filename_prefix=filename_prefix, output_dir=self.output_dir, prompt=prompt, extra_pnginfo=extra_pnginfo)

@@ -2,6 +2,8 @@ import comfy_extras.nodes_model_merging

class ModelMergeSD1(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two SD1 models with per-block weight control over input, middle, and output blocks."
SHORT_DESCRIPTION = "Merge two SD1 models with per-block control."
@classmethod
def INPUT_TYPES(s):
arg_dict = { "model1": ("MODEL",),
@@ -26,8 +28,15 @@ class ModelMergeSD1(comfy_extras.nodes_model_merging.ModelMergeBlocks):
return {"required": arg_dict}


class ModelMergeSD2(ModelMergeSD1):
DESCRIPTION = "Merge two SD2 models with per-block weight control over input, middle, and output blocks."
SHORT_DESCRIPTION = "Merge two SD2 models with per-block control."


class ModelMergeSDXL(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two SDXL models with per-block weight control over input, middle, and output blocks."
SHORT_DESCRIPTION = "Merge two SDXL models with per-block control."

@classmethod
def INPUT_TYPES(s):
@@ -54,6 +63,8 @@ class ModelMergeSDXL(comfy_extras.nodes_model_merging.ModelMergeBlocks):

class ModelMergeSD3_2B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two SD3 2B models with per-block weight control over 24 joint blocks and embedders."
SHORT_DESCRIPTION = "Merge two SD3 2B models with per-block control."

@classmethod
def INPUT_TYPES(s):
@@ -78,6 +89,8 @@ class ModelMergeSD3_2B(comfy_extras.nodes_model_merging.ModelMergeBlocks):

class ModelMergeAuraflow(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two AuraFlow models with per-block weight control over double and single layers."
SHORT_DESCRIPTION = "Merge two AuraFlow models with per-block control."

@classmethod
def INPUT_TYPES(s):
@@ -105,6 +118,8 @@ class ModelMergeAuraflow(comfy_extras.nodes_model_merging.ModelMergeBlocks):

class ModelMergeFlux1(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Flux1 models with per-block weight control over 19 double blocks and 38 single blocks."
SHORT_DESCRIPTION = "Merge two Flux1 models with per-block control."

@classmethod
def INPUT_TYPES(s):
@@ -131,6 +146,8 @@ class ModelMergeFlux1(comfy_extras.nodes_model_merging.ModelMergeBlocks):

class ModelMergeSD35_Large(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two SD3.5 Large models with per-block weight control over 38 joint blocks and embedders."
SHORT_DESCRIPTION = "Merge two SD3.5 Large models with per-block control."

@classmethod
def INPUT_TYPES(s):
@@ -154,6 +171,8 @@ class ModelMergeSD35_Large(comfy_extras.nodes_model_merging.ModelMergeBlocks):

class ModelMergeMochiPreview(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Mochi Preview models with per-block weight control over 48 blocks and embedders."
SHORT_DESCRIPTION = "Merge two Mochi Preview models with per-block control."

@classmethod
def INPUT_TYPES(s):
@@ -176,6 +195,8 @@ class ModelMergeMochiPreview(comfy_extras.nodes_model_merging.ModelMergeBlocks):

class ModelMergeLTXV(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two LTXV models with per-block weight control over 28 transformer blocks."
SHORT_DESCRIPTION = "Merge two LTXV models with per-block control."

@classmethod
def INPUT_TYPES(s):
@@ -198,6 +219,8 @@ class ModelMergeLTXV(comfy_extras.nodes_model_merging.ModelMergeBlocks):

class ModelMergeCosmos7B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Cosmos 7B models with per-block weight control over 28 blocks and embedders."
SHORT_DESCRIPTION = "Merge two Cosmos 7B models with per-block control."

@classmethod
def INPUT_TYPES(s):
@@ -222,6 +245,8 @@ class ModelMergeCosmos7B(comfy_extras.nodes_model_merging.ModelMergeBlocks):

class ModelMergeCosmos14B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Cosmos 14B models with per-block weight control over 36 blocks and embedders."
SHORT_DESCRIPTION = "Merge two Cosmos 14B models with per-block control."

@classmethod
def INPUT_TYPES(s):
@@ -247,6 +272,7 @@ class ModelMergeCosmos14B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
class ModelMergeWAN2_1(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "1.3B model has 30 blocks, 14B model has 40 blocks. Image to video model has the extra img_emb."
SHORT_DESCRIPTION = "WAN 2.1 model merging with block-level control."

@classmethod
def INPUT_TYPES(s):
@@ -270,6 +296,8 @@ class ModelMergeWAN2_1(comfy_extras.nodes_model_merging.ModelMergeBlocks):

class ModelMergeCosmosPredict2_2B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Cosmos Predict2 2B models with per-block weight control over 28 blocks and embedders."
SHORT_DESCRIPTION = "Merge two Cosmos Predict2 2B models with per-block control."

@classmethod
def INPUT_TYPES(s):
@@ -293,6 +321,8 @@ class ModelMergeCosmosPredict2_2B(comfy_extras.nodes_model_merging.ModelMergeBlo

class ModelMergeCosmosPredict2_14B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Cosmos Predict2 14B models with per-block weight control over 36 blocks and embedders."
SHORT_DESCRIPTION = "Merge two Cosmos Predict2 14B models with per-block control."

@classmethod
def INPUT_TYPES(s):
@@ -316,6 +346,8 @@ class ModelMergeCosmosPredict2_14B(comfy_extras.nodes_model_merging.ModelMergeBl

class ModelMergeQwenImage(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
DESCRIPTION = "Merge two Qwen Image models with per-block weight control over 60 transformer blocks."
SHORT_DESCRIPTION = "Merge two Qwen Image models with per-block control."

@classmethod
def INPUT_TYPES(s):
@@ -339,7 +371,7 @@ class ModelMergeQwenImage(comfy_extras.nodes_model_merging.ModelMergeBlocks):

NODE_CLASS_MAPPINGS = {
"ModelMergeSD1": ModelMergeSD1,
"ModelMergeSD2": ModelMergeSD1, #SD1 and SD2 have the same blocks
"ModelMergeSD2": ModelMergeSD2, #SD1 and SD2 have the same blocks
"ModelMergeSDXL": ModelMergeSDXL,
"ModelMergeSD3_2B": ModelMergeSD3_2B,
"ModelMergeAuraflow": ModelMergeAuraflow,

@@ -230,6 +230,8 @@ class ModelPatchLoader:
EXPERIMENTAL = True

CATEGORY = "advanced/loaders"
DESCRIPTION = "Load a model patch file such as a controlnet or style reference patch for use with compatible model nodes."
SHORT_DESCRIPTION = "Load a model patch file for controlnet or style."

def load_model_patch(self, name):
model_patch_path = folder_paths.get_full_path_or_raise("model_patches", name)
@@ -456,6 +458,8 @@ class QwenImageDiffsynthControlnet:
EXPERIMENTAL = True

CATEGORY = "advanced/loaders/qwen"
DESCRIPTION = "Apply a DiffSynth-style controlnet patch to a Qwen Image model using a VAE-encoded control image."
SHORT_DESCRIPTION = "Apply DiffSynth controlnet to a Qwen Image model."

def diffsynth_controlnet(self, model, model_patch, vae, image=None, strength=1.0, inpaint_image=None, mask=None):
model_patched = model.clone()
@@ -489,6 +493,8 @@ class ZImageFunControlnet(QwenImageDiffsynthControlnet):
"optional": {"image": ("IMAGE",), "inpaint_image": ("IMAGE",), "mask": ("MASK",)}}

CATEGORY = "advanced/loaders/zimage"
DESCRIPTION = "Apply a Z-Image Fun controlnet patch to a model with optional control image, inpaint image, and mask inputs."
SHORT_DESCRIPTION = "Apply Z-Image Fun controlnet with optional inpainting."

class UsoStyleProjectorPatch:
def __init__(self, model_patch, encoded_image):
@@ -525,6 +531,8 @@ class USOStyleReference:
EXPERIMENTAL = True

CATEGORY = "advanced/model_patches/flux"
DESCRIPTION = "Apply a USO style reference patch to a Flux model using multi-layer SigLIP features from CLIP vision output."
SHORT_DESCRIPTION = "Apply USO style reference to a Flux model."

def apply_patch(self, model, model_patch, clip_vision_output):
encoded_image = torch.stack((clip_vision_output.all_hidden_states[:, -20], clip_vision_output.all_hidden_states[:, -11], clip_vision_output.penultimate_hidden_states))

@@ -15,6 +15,8 @@ class Morphology(io.ComfyNode):
search_aliases=["erode", "dilate"],
display_name="ImageMorphology",
category="image/postprocessing",
description="Applies morphological operations to an image using a configurable kernel size.",
short_description="",
inputs=[
io.Image.Input("image"),
io.Combo.Input(
@@ -60,6 +62,8 @@ class ImageRGBToYUV(io.ComfyNode):
node_id="ImageRGBToYUV",
search_aliases=["color space conversion"],
category="image/batch",
description="Converts an RGB image to YUV (YCbCr) color space, outputting separate Y, U, and V channel images.",
short_description="Convert RGB image to YUV color space.",
inputs=[
io.Image.Input("image"),
],
@@ -82,6 +86,8 @@ class ImageYUVToRGB(io.ComfyNode):
node_id="ImageYUVToRGB",
search_aliases=["color space conversion"],
category="image/batch",
description="Converts separate Y, U, and V (YCbCr) channel images back into a single RGB image.",
short_description="Convert YUV channels back to RGB image.",
inputs=[
io.Image.Input("Y"),
io.Image.Input("U"),

@@ -14,6 +14,7 @@ class wanBlockSwap(io.ComfyNode):
node_id="wanBlockSwap",
category="",
description="NOP",
short_description=None,
inputs=[
io.Model.Input("model"),
],

@@ -32,6 +32,8 @@ class OptimalStepsScheduler(io.ComfyNode):
return io.Schema(
node_id="OptimalStepsScheduler",
category="sampling/custom_sampling/schedulers",
description="Generates an optimized noise schedule with precomputed optimal sigma levels using log-linear interpolation.",
short_description="Optimal noise schedule with precomputed sigma levels.",
inputs=[
io.Combo.Input("model_type", options=["FLUX", "Wan", "Chroma"]),
io.Int.Input("steps", default=20, min=3, max=1000),

@@ -16,6 +16,8 @@ class PerturbedAttentionGuidance(io.ComfyNode):
return io.Schema(
node_id="PerturbedAttentionGuidance",
category="model_patches/unet",
description="Applies Perturbed Attention Guidance (PAG) by replacing self-attention with identity in the middle block to compute a guidance signal that enhances structural coherence.",
short_description="Perturbed Attention Guidance for structural coherence.",
inputs=[
io.Model.Input("model"),
io.Float.Input("scale", default=3.0, min=0.0, max=100.0, step=0.01, round=0.01),

@@ -26,6 +26,8 @@ class PerpNeg(io.ComfyNode):
node_id="PerpNeg",
display_name="Perp-Neg (DEPRECATED by PerpNegGuider)",
category="_for_testing",
description="Applies perpendicular negative guidance by projecting out the component of negative conditioning parallel to positive conditioning. Deprecated in favor of PerpNegGuider.",
short_description="Perpendicular negative guidance (deprecated).",
inputs=[
io.Model.Input("model"),
io.Conditioning.Input("empty_conditioning"),
@@ -128,6 +130,8 @@ class PerpNegGuider(io.ComfyNode):
return io.Schema(
node_id="PerpNegGuider",
category="_for_testing",
description="Creates a guider that applies perpendicular negative guidance, computing positive, negative, and empty conditioning in a single batch for efficient sampling.",
short_description="Guider with perpendicular negative guidance.",
inputs=[
io.Model.Input("model"),
io.Conditioning.Input("positive"),

@@ -124,6 +124,8 @@ class PhotoMakerLoader(io.ComfyNode):
return io.Schema(
node_id="PhotoMakerLoader",
category="_for_testing/photomaker",
description="Loads a PhotoMaker model from a safetensors file for identity-preserving image generation.",
short_description="Load a PhotoMaker model from file.",
inputs=[
io.Combo.Input("photomaker_model_name", options=folder_paths.get_filename_list("photomaker")),
],
@@ -150,6 +152,8 @@ class PhotoMakerEncode(io.ComfyNode):
return io.Schema(
node_id="PhotoMakerEncode",
category="_for_testing/photomaker",
description="Encodes a reference image and text prompt using PhotoMaker to produce identity-preserving conditioning for image generation.",
short_description="Encode image and text with PhotoMaker.",
inputs=[
io.Photomaker.Input("photomaker"),
io.Image.Input("image"),

@@ -10,6 +10,7 @@ class CLIPTextEncodePixArtAlpha(io.ComfyNode):
search_aliases=["pixart prompt"],
category="advanced/conditioning",
description="Encodes text and sets the resolution conditioning for PixArt Alpha. Does not apply to PixArt Sigma.",
short_description="Encodes text with resolution conditioning for PixArt Alpha.",
inputs=[
io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION),
io.Int.Input("height", default=1024, min=0, max=nodes.MAX_RESOLUTION),

@@ -20,6 +20,8 @@ class Blend(io.ComfyNode):
return io.Schema(
node_id="ImageBlend",
category="image/postprocessing",
description="Blends two images together using a selectable blend mode and adjustable blend factor.",
short_description="Blends two images using a selected blend mode.",
inputs=[
io.Image.Input("image1"),
io.Image.Input("image2"),
@@ -77,6 +79,8 @@ class Blur(io.ComfyNode):
return io.Schema(
node_id="ImageBlur",
category="image/postprocessing",
description="Applies a Gaussian blur to an image with configurable radius and sigma.",
short_description="Applies Gaussian blur to an image.",
inputs=[
io.Image.Input("image"),
io.Int.Input("blur_radius", default=1, min=1, max=31, step=1),
@@ -112,6 +116,8 @@ class Quantize(io.ComfyNode):
return io.Schema(
node_id="ImageQuantize",
category="image/postprocessing",
description="Reduces the number of colors in an image with optional dithering.",
short_description="",
inputs=[
io.Image.Input("image"),
io.Int.Input("colors", default=256, min=1, max=256, step=1),
@@ -177,6 +183,8 @@ class Sharpen(io.ComfyNode):
return io.Schema(
node_id="ImageSharpen",
category="image/postprocessing",
description="Sharpens an image using an unsharp mask with configurable radius, sigma, and strength.",
short_description="Sharpens an image using unsharp mask.",
inputs=[
io.Image.Input("image"),
io.Int.Input("sharpen_radius", default=1, min=1, max=31, step=1),
@@ -221,6 +229,8 @@ class ImageScaleToTotalPixels(io.ComfyNode):
return io.Schema(
node_id="ImageScaleToTotalPixels",
category="image/upscaling",
description="Scales an image to a target total megapixel count while preserving aspect ratio, with configurable resolution stepping.",
short_description="Scales an image to a target megapixel count.",
inputs=[
io.Image.Input("image"),
io.Combo.Input("upscale_method", options=cls.upscale_methods),
@@ -430,6 +440,7 @@ class ResizeImageMaskNode(io.ComfyNode):
node_id="ResizeImageMaskNode",
display_name="Resize Image/Mask",
description="Resize an image or mask using various scaling methods.",
short_description=None,
category="transform",
search_aliases=["resize", "resize image", "resize mask", "scale", "scale image", "scale mask", "image resize", "change size", "dimensions", "shrink", "enlarge"],
inputs=[
@@ -565,6 +576,8 @@ class BatchImagesNode(io.ComfyNode):
node_id="BatchImagesNode",
display_name="Batch Images",
category="image",
description="Combines multiple images into a single batch, resizing them to match the first image's dimensions.",
short_description="Combines multiple images into a single batch.",
search_aliases=["batch", "image batch", "batch images", "combine images", "merge images", "stack images"],
inputs=[
io.Autogrow.Input("images", template=autogrow_template)
@@ -587,6 +600,8 @@ class BatchMasksNode(io.ComfyNode):
search_aliases=["combine masks", "stack masks", "merge masks"],
display_name="Batch Masks",
category="mask",
description="Combines multiple masks into a single batch, resizing them to match the first mask's dimensions.",
short_description="Combines multiple masks into a single batch.",
inputs=[
io.Autogrow.Input("masks", template=autogrow_template)
],
@@ -608,6 +623,8 @@ class BatchLatentsNode(io.ComfyNode):
search_aliases=["combine latents", "stack latents", "merge latents"],
display_name="Batch Latents",
category="latent",
description="Combines multiple latent tensors into a single batch, reshaping them to match the first latent's dimensions.",
short_description="Combines multiple latents into a single batch.",
inputs=[
io.Autogrow.Input("latents", template=autogrow_template)
],
@@ -632,6 +649,8 @@ class BatchImagesMasksLatentsNode(io.ComfyNode):
search_aliases=["combine batch", "merge batch", "stack inputs"],
display_name="Batch Images/Masks/Latents",
category="util",
description="Combines multiple images, masks, or latents into a single batch, automatically detecting the input type.",
short_description="Batches images, masks, or latents together.",
inputs=[
io.Autogrow.Input("inputs", template=autogrow_template)
],

@@ -16,6 +16,8 @@ class PreviewAny():
OUTPUT_NODE = True

CATEGORY = "utils"
DESCRIPTION = "Preview any input value as text, converting it to a JSON or string representation for display."
SHORT_DESCRIPTION = "Preview any input value as text."
SEARCH_ALIASES = ["show output", "inspect", "debug", "print value", "show text"]

def main(self, source=None):

@@ -11,6 +11,8 @@ class String(io.ComfyNode):
node_id="PrimitiveString",
display_name="String",
category="utils/primitive",
description="A primitive node that passes through a string value.",
short_description=None,
inputs=[
io.String.Input("value"),
],
@@ -29,6 +31,8 @@ class StringMultiline(io.ComfyNode):
node_id="PrimitiveStringMultiline",
display_name="String (Multiline)",
category="utils/primitive",
description="A primitive node that passes through a multiline string value.",
short_description=None,
inputs=[
io.String.Input("value", multiline=True),
],
@@ -47,6 +51,8 @@ class Int(io.ComfyNode):
node_id="PrimitiveInt",
display_name="Int",
category="utils/primitive",
description="A primitive node that passes through an integer value.",
short_description=None,
inputs=[
io.Int.Input("value", min=-sys.maxsize, max=sys.maxsize, control_after_generate=True),
],
@@ -65,6 +71,8 @@ class Float(io.ComfyNode):
node_id="PrimitiveFloat",
display_name="Float",
category="utils/primitive",
description="A primitive node that passes through a float value.",
short_description=None,
inputs=[
io.Float.Input("value", min=-sys.maxsize, max=sys.maxsize, step=0.1),
],
@@ -83,6 +91,8 @@ class Boolean(io.ComfyNode):
node_id="PrimitiveBoolean",
display_name="Boolean",
category="utils/primitive",
description="A primitive node that passes through a boolean value.",
short_description=None,
inputs=[
io.Boolean.Input("value"),
],

@@ -13,6 +13,8 @@ class TextEncodeQwenImageEdit(io.ComfyNode):
return io.Schema(
node_id="TextEncodeQwenImageEdit",
category="advanced/conditioning",
description="Encodes a text prompt with an optional reference image for Qwen-based image editing, producing conditioning with latent reference.",
short_description="Text and image encoding for Qwen image editing.",
inputs=[
io.Clip.Input("clip"),
io.String.Input("prompt", multiline=True, dynamic_prompts=True),
@@ -56,6 +58,8 @@ class TextEncodeQwenImageEditPlus(io.ComfyNode):
return io.Schema(
node_id="TextEncodeQwenImageEditPlus",
category="advanced/conditioning",
description="Encodes a text prompt with up to three reference images for Qwen-based multi-image editing, producing conditioning with latent references.",
short_description="Multi-image text encoding for Qwen image editing.",
inputs=[
io.Clip.Input("clip"),
io.String.Input("prompt", multiline=True, dynamic_prompts=True),
@@ -113,6 +117,8 @@ class EmptyQwenImageLayeredLatentImage(io.ComfyNode):
node_id="EmptyQwenImageLayeredLatentImage",
display_name="Empty Qwen Image Layered Latent",
category="latent/qwen",
description="Creates an empty multi-layer latent tensor for Qwen image generation with a configurable number of layers.",
short_description="Empty multi-layer latent for Qwen image generation.",
inputs=[
io.Int.Input("width", default=640, min=16, max=nodes.MAX_RESOLUTION, step=16),
io.Int.Input("height", default=640, min=16, max=nodes.MAX_RESOLUTION, step=16),

@@ -11,6 +11,8 @@ class LatentRebatch(io.ComfyNode):
node_id="RebatchLatents",
display_name="Rebatch Latents",
category="latent/batch",
description="Splits and recombines latent batches into a new batch size, handling noise masks and batch indices across differently sized inputs.",
short_description="Rebatch latents to a specified batch size.",
is_input_list=True,
inputs=[
io.Latent.Input("latents"),
@@ -114,6 +116,7 @@ class ImageRebatch(io.ComfyNode):
node_id="RebatchImages",
display_name="Rebatch Images",
category="image/batch",
description="Splits and recombines image batches into a new specified batch size.",
is_input_list=True,
inputs=[
io.Image.Input("images"),

@@ -9,6 +9,7 @@ class ScaleROPE(io.ComfyNode):
node_id="ScaleROPE",
category="advanced/model_patches",
description="Scale and shift the ROPE of the model.",
short_description=None,
is_experimental=True,
inputs=[
io.Model.Input("model"),

@@ -114,6 +114,8 @@ class SelfAttentionGuidance(io.ComfyNode):
node_id="SelfAttentionGuidance",
display_name="Self-Attention Guidance",
category="_for_testing",
description="Applies Self-Attention Guidance (SAG) which uses attention maps to create adversarially blurred images and computes a guidance signal that enhances fine details.",
short_description="Self-Attention Guidance for enhanced detail.",
inputs=[
io.Model.Input("model"),
io.Float.Input("scale", default=0.5, min=-2.0, max=5.0, step=0.01),

@@ -15,6 +15,7 @@ class TripleCLIPLoader(io.ComfyNode):
node_id="TripleCLIPLoader",
category="advanced/loaders",
description="[Recipes]\n\nsd3: clip-l, clip-g, t5",
short_description=None,
inputs=[
io.Combo.Input("clip_name1", options=folder_paths.get_filename_list("text_encoders")),
io.Combo.Input("clip_name2", options=folder_paths.get_filename_list("text_encoders")),
@@ -42,6 +43,8 @@ class EmptySD3LatentImage(io.ComfyNode):
return io.Schema(
node_id="EmptySD3LatentImage",
category="latent/sd3",
description="Creates an empty SD3 latent image tensor with the specified width, height, and batch size.",
short_description="Creates an empty SD3 latent image tensor.",
inputs=[
io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
io.Int.Input("height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
@@ -67,6 +70,8 @@ class CLIPTextEncodeSD3(io.ComfyNode):
node_id="CLIPTextEncodeSD3",
search_aliases=["sd3 prompt"],
category="advanced/conditioning",
description="Encodes separate CLIP-L, CLIP-G, and T5-XXL text prompts into SD3 conditioning with optional empty padding.",
short_description="Encodes multi-encoder text prompts for SD3.",
inputs=[
io.Clip.Input("clip"),
io.String.Input("clip_l", multiline=True, dynamic_prompts=True),
@@ -114,6 +119,8 @@ class ControlNetApplySD3(io.ComfyNode):
node_id="ControlNetApplySD3",
display_name="Apply Controlnet with VAE",
category="conditioning/controlnet",
description="Applies a ControlNet to SD3 conditioning using a VAE-encoded control image with adjustable strength and start/end percentages.",
short_description="Applies ControlNet with VAE to SD3 conditioning.",
inputs=[
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),
@@ -177,6 +184,7 @@ class SkipLayerGuidanceSD3(io.ComfyNode):
node_id="SkipLayerGuidanceSD3",
category="advanced/guidance",
description="Generic version of SkipLayerGuidance node that can be used on every DiT model.",
short_description="Skip layer guidance applicable to any DiT model.",
inputs=[
io.Model.Input("model"),
io.String.Input("layers", default="7, 8, 9", multiline=False),

Some files were not shown because too many files have changed in this diff