From ecec1310b282b6176c271ea79e81c378c58b896c Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Mon, 16 Feb 2026 14:02:17 -0800 Subject: [PATCH] wip add AI generated descriptions to all nodes --- comfy_api/latest/_io.py | 14 +++ comfy_api_nodes/nodes_bfl.py | 11 +- comfy_api_nodes/nodes_bria.py | 1 + comfy_api_nodes/nodes_bytedance.py | 6 + comfy_api_nodes/nodes_gemini.py | 4 + comfy_api_nodes/nodes_grok.py | 4 + comfy_api_nodes/nodes_hitpaw.py | 2 + comfy_api_nodes/nodes_hunyuan3d.py | 4 + comfy_api_nodes/nodes_ideogram.py | 3 + comfy_api_nodes/nodes_kling.py | 27 ++++ comfy_api_nodes/nodes_ltxv.py | 2 + comfy_api_nodes/nodes_luma.py | 6 + comfy_api_nodes/nodes_magnific.py | 4 + comfy_api_nodes/nodes_meshy.py | 9 ++ comfy_api_nodes/nodes_minimax.py | 4 + comfy_api_nodes/nodes_moonvalley.py | 7 +- comfy_api_nodes/nodes_openai.py | 6 + comfy_api_nodes/nodes_pixverse.py | 5 + comfy_api_nodes/nodes_recraft.py | 29 ++++- comfy_api_nodes/nodes_rodin.py | 5 + comfy_api_nodes/nodes_runway.py | 4 + comfy_api_nodes/nodes_sora.py | 1 + comfy_api_nodes/nodes_stability.py | 8 ++ comfy_api_nodes/nodes_topaz.py | 2 + comfy_api_nodes/nodes_tripo.py | 11 ++ comfy_api_nodes/nodes_veo2.py | 3 + comfy_api_nodes/nodes_vidu.py | 12 ++ comfy_api_nodes/nodes_wan.py | 5 + comfy_api_nodes/nodes_wavespeed.py | 2 + comfy_extras/nodes_ace.py | 9 ++ comfy_extras/nodes_advanced_samplers.py | 4 + comfy_extras/nodes_align_your_steps.py | 2 + comfy_extras/nodes_apg.py | 2 + comfy_extras/nodes_attention_multiply.py | 8 ++ comfy_extras/nodes_audio.py | 30 +++++ comfy_extras/nodes_audio_encoder.py | 4 + comfy_extras/nodes_camera_trajectory.py | 2 + comfy_extras/nodes_canny.py | 2 + comfy_extras/nodes_cfg.py | 4 + comfy_extras/nodes_chroma_radiance.py | 3 + comfy_extras/nodes_clip_sdxl.py | 4 + comfy_extras/nodes_color.py | 1 + comfy_extras/nodes_compositing.py | 6 + comfy_extras/nodes_cond.py | 4 + comfy_extras/nodes_context_windows.py | 2 + 
comfy_extras/nodes_controlnet.py | 4 + comfy_extras/nodes_cosmos.py | 5 + comfy_extras/nodes_custom_sampler.py | 71 ++++++++++- comfy_extras/nodes_dataset.py | 41 +++++++ comfy_extras/nodes_differential_diffusion.py | 2 + comfy_extras/nodes_easycache.py | 2 + comfy_extras/nodes_edit_model.py | 1 + comfy_extras/nodes_eps.py | 3 + comfy_extras/nodes_flux.py | 12 ++ comfy_extras/nodes_freelunch.py | 4 + comfy_extras/nodes_fresca.py | 1 + comfy_extras/nodes_gits.py | 2 + comfy_extras/nodes_hidream.py | 3 + comfy_extras/nodes_hooks.py | 40 ++++++ comfy_extras/nodes_hunyuan.py | 24 ++++ comfy_extras/nodes_hunyuan3d.py | 14 +++ comfy_extras/nodes_hypernetwork.py | 2 + comfy_extras/nodes_hypertile.py | 2 + comfy_extras/nodes_image_compare.py | 1 + comfy_extras/nodes_images.py | 24 +++- comfy_extras/nodes_ip2p.py | 2 + comfy_extras/nodes_kandinsky5.py | 5 + comfy_extras/nodes_latent.py | 28 +++++ comfy_extras/nodes_load_3d.py | 4 + comfy_extras/nodes_logic.py | 18 +++ comfy_extras/nodes_lora_debug.py | 3 + comfy_extras/nodes_lora_extract.py | 2 + comfy_extras/nodes_lotus.py | 2 + comfy_extras/nodes_lt.py | 23 +++- comfy_extras/nodes_lt_audio.py | 8 ++ comfy_extras/nodes_lt_upsampler.py | 2 + comfy_extras/nodes_lumina2.py | 3 + comfy_extras/nodes_mahiro.py | 1 + comfy_extras/nodes_mask.py | 26 +++- comfy_extras/nodes_mochi.py | 2 + comfy_extras/nodes_model_advanced.py | 17 +++ comfy_extras/nodes_model_downscale.py | 2 + comfy_extras/nodes_model_merging.py | 22 ++++ .../nodes_model_merging_model_specific.py | 34 ++++- comfy_extras/nodes_model_patch.py | 8 ++ comfy_extras/nodes_morphology.py | 6 + comfy_extras/nodes_nop.py | 1 + comfy_extras/nodes_optimalsteps.py | 2 + comfy_extras/nodes_pag.py | 2 + comfy_extras/nodes_perpneg.py | 4 + comfy_extras/nodes_photomaker.py | 4 + comfy_extras/nodes_pixart.py | 1 + comfy_extras/nodes_post_processing.py | 19 +++ comfy_extras/nodes_preview_any.py | 2 + comfy_extras/nodes_primitive.py | 10 ++ comfy_extras/nodes_qwen.py | 6 + 
comfy_extras/nodes_rebatch.py | 3 + comfy_extras/nodes_rope.py | 1 + comfy_extras/nodes_sag.py | 2 + comfy_extras/nodes_sd3.py | 8 ++ comfy_extras/nodes_sdupscale.py | 2 + comfy_extras/nodes_slg.py | 2 + comfy_extras/nodes_stable3d.py | 6 + comfy_extras/nodes_stable_cascade.py | 8 ++ comfy_extras/nodes_string.py | 21 ++++ comfy_extras/nodes_tcfg.py | 1 + comfy_extras/nodes_tomesd.py | 2 + comfy_extras/nodes_toolkit.py | 2 + comfy_extras/nodes_torch_compile.py | 2 + comfy_extras/nodes_train.py | 8 ++ comfy_extras/nodes_upscale_model.py | 4 + comfy_extras/nodes_video.py | 11 +- comfy_extras/nodes_video_model.py | 12 ++ comfy_extras/nodes_wan.py | 32 +++++ comfy_extras/nodes_wanmove.py | 10 ++ comfy_extras/nodes_webcam.py | 2 + comfy_extras/nodes_zimage.py | 2 + nodes.py | 116 ++++++++++++++++++ server.py | 2 + 119 files changed, 1059 insertions(+), 15 deletions(-) diff --git a/comfy_api/latest/_io.py b/comfy_api/latest/_io.py index 95d79c035..db93a7a00 100644 --- a/comfy_api/latest/_io.py +++ b/comfy_api/latest/_io.py @@ -1300,6 +1300,7 @@ class NodeInfoV1: name: str=None display_name: str=None description: str=None + short_description: str=None python_module: Any=None category: str=None output_node: bool=None @@ -1390,6 +1391,8 @@ class Schema: hidden: list[Hidden] = field(default_factory=list) description: str="" """Node description, shown as a tooltip when hovering over the node.""" + short_description: str="" + """Short node description, shown in the node list/search.""" search_aliases: list[str] = field(default_factory=list) """Alternative names for search. 
Useful for synonyms, abbreviations, or old names after renaming.""" is_input_list: bool = False @@ -1528,6 +1531,7 @@ class Schema: display_name=self.display_name, category=self.category, description=self.description, + short_description=self.short_description, output_node=self.is_output_node, deprecated=self.is_deprecated, experimental=self.is_experimental, @@ -1771,6 +1775,14 @@ class _ComfyNodeBaseInternal(_ComfyNodeInternal): cls.GET_SCHEMA() return cls._DESCRIPTION + _SHORT_DESCRIPTION = None + @final + @classproperty + def SHORT_DESCRIPTION(cls): # noqa + if cls._SHORT_DESCRIPTION is None: + cls.GET_SCHEMA() + return cls._SHORT_DESCRIPTION + _CATEGORY = None @final @classproperty @@ -1899,6 +1911,8 @@ class _ComfyNodeBaseInternal(_ComfyNodeInternal): schema.validate() if cls._DESCRIPTION is None: cls._DESCRIPTION = schema.description + if cls._SHORT_DESCRIPTION is None: + cls._SHORT_DESCRIPTION = schema.short_description if cls._CATEGORY is None: cls._CATEGORY = schema.category if cls._EXPERIMENTAL is None: diff --git a/comfy_api_nodes/nodes_bfl.py b/comfy_api_nodes/nodes_bfl.py index 61c3b4503..905084b9e 100644 --- a/comfy_api_nodes/nodes_bfl.py +++ b/comfy_api_nodes/nodes_bfl.py @@ -44,6 +44,7 @@ class FluxProUltraImageNode(IO.ComfyNode): display_name="Flux 1.1 [pro] Ultra Image", category="api node/image/BFL", description="Generates images using Flux Pro 1.1 Ultra via api based on prompt and resolution.", + short_description="Generate images with Flux Pro 1.1 Ultra API.", inputs=[ IO.String.Input( "prompt", @@ -154,13 +155,17 @@ class FluxProUltraImageNode(IO.ComfyNode): class FluxKontextProImageNode(IO.ComfyNode): + DESCRIPTION = "Edits images using Flux.1 Kontext [pro] via api based on prompt and aspect ratio." + SHORT_DESCRIPTION = "Edit images with Flux.1 Kontext [pro] API." 
+ @classmethod def define_schema(cls) -> IO.Schema: return IO.Schema( node_id=cls.NODE_ID, display_name=cls.DISPLAY_NAME, category="api node/image/BFL", - description="Edits images using Flux.1 Kontext [pro] via api based on prompt and aspect ratio.", + description=cls.DESCRIPTION, + short_description=cls.SHORT_DESCRIPTION, inputs=[ IO.String.Input( "prompt", @@ -268,6 +273,7 @@ class FluxKontextProImageNode(IO.ComfyNode): class FluxKontextMaxImageNode(FluxKontextProImageNode): DESCRIPTION = "Edits images using Flux.1 Kontext [max] via api based on prompt and aspect ratio." + SHORT_DESCRIPTION = "Edit images with Flux.1 Kontext [max] API." BFL_PATH = "/proxy/bfl/flux-kontext-max/generate" NODE_ID = "FluxKontextMaxImageNode" DISPLAY_NAME = "Flux.1 Kontext [max] Image" @@ -282,6 +288,7 @@ class FluxProExpandNode(IO.ComfyNode): display_name="Flux.1 Expand Image", category="api node/image/BFL", description="Outpaints image based on prompt.", + short_description=None, inputs=[ IO.Image.Input("image"), IO.String.Input( @@ -418,6 +425,7 @@ class FluxProFillNode(IO.ComfyNode): display_name="Flux.1 Fill Image", category="api node/image/BFL", description="Inpaints image based on mask and prompt.", + short_description=None, inputs=[ IO.Image.Input("image"), IO.Mask.Input("mask"), @@ -543,6 +551,7 @@ class Flux2ProImageNode(IO.ComfyNode): display_name=cls.DISPLAY_NAME, category="api node/image/BFL", description="Generates images synchronously based on prompt and resolution.", + short_description=None, inputs=[ IO.String.Input( "prompt", diff --git a/comfy_api_nodes/nodes_bria.py b/comfy_api_nodes/nodes_bria.py index 4044ee3ea..7bc3704b3 100644 --- a/comfy_api_nodes/nodes_bria.py +++ b/comfy_api_nodes/nodes_bria.py @@ -33,6 +33,7 @@ class BriaImageEditNode(IO.ComfyNode): display_name="Bria FIBO Image Edit", category="api node/image/Bria", description="Edit images using Bria latest model", + short_description=None, inputs=[ IO.Combo.Input("model", options=["FIBO"]), 
IO.Image.Input("image"), diff --git a/comfy_api_nodes/nodes_bytedance.py b/comfy_api_nodes/nodes_bytedance.py index 0cb5e3be8..e3eebf564 100644 --- a/comfy_api_nodes/nodes_bytedance.py +++ b/comfy_api_nodes/nodes_bytedance.py @@ -60,6 +60,7 @@ class ByteDanceImageNode(IO.ComfyNode): display_name="ByteDance Image", category="api node/image/ByteDance", description="Generate images using ByteDance models via api based on prompt", + short_description=None, inputs=[ IO.Combo.Input("model", options=["seedream-3-0-t2i-250415"]), IO.String.Input( @@ -182,6 +183,7 @@ class ByteDanceSeedreamNode(IO.ComfyNode): display_name="ByteDance Seedream 4.5", category="api node/image/ByteDance", description="Unified text-to-image generation and precise single-sentence editing at up to 4K resolution.", + short_description="Text-to-image generation and editing up to 4K.", inputs=[ IO.Combo.Input( "model", @@ -380,6 +382,7 @@ class ByteDanceTextToVideoNode(IO.ComfyNode): display_name="ByteDance Text to Video", category="api node/video/ByteDance", description="Generate video using ByteDance models via api based on prompt", + short_description=None, inputs=[ IO.Combo.Input( "model", @@ -505,6 +508,7 @@ class ByteDanceImageToVideoNode(IO.ComfyNode): display_name="ByteDance Image to Video", category="api node/video/ByteDance", description="Generate video using ByteDance models via api based on image and prompt", + short_description="Generate video from image and prompt via ByteDance API.", inputs=[ IO.Combo.Input( "model", @@ -639,6 +643,7 @@ class ByteDanceFirstLastFrameNode(IO.ComfyNode): display_name="ByteDance First-Last-Frame to Video", category="api node/video/ByteDance", description="Generate video using prompt and first and last frames.", + short_description=None, inputs=[ IO.Combo.Input( "model", @@ -784,6 +789,7 @@ class ByteDanceImageReferenceNode(IO.ComfyNode): display_name="ByteDance Reference Images to Video", category="api node/video/ByteDance", description="Generate video 
using prompt and reference images.", + short_description=None, inputs=[ IO.Combo.Input( "model", diff --git a/comfy_api_nodes/nodes_gemini.py b/comfy_api_nodes/nodes_gemini.py index 3b31caa7b..6ff242d80 100644 --- a/comfy_api_nodes/nodes_gemini.py +++ b/comfy_api_nodes/nodes_gemini.py @@ -254,6 +254,7 @@ class GeminiNode(IO.ComfyNode): description="Generate text responses with Google's Gemini AI model. " "You can provide multiple types of inputs (text, images, audio, video) " "as context for generating more relevant and meaningful responses.", + short_description="Generate text responses with Google's Gemini AI.", inputs=[ IO.String.Input( "prompt", @@ -480,6 +481,7 @@ class GeminiInputFiles(IO.ComfyNode): "The files will be read by the Gemini model when generating a response. " "The contents of the text file count toward the token limit. " "🛈 TIP: Can be chained together with other Gemini Input File nodes.", + short_description="Load and prepare input files for Gemini LLM nodes.", inputs=[ IO.Combo.Input( "file", @@ -534,6 +536,7 @@ class GeminiImage(IO.ComfyNode): display_name="Nano Banana (Google Gemini Image)", category="api node/image/Gemini", description="Edit images synchronously via Google API.", + short_description=None, inputs=[ IO.String.Input( "prompt", @@ -665,6 +668,7 @@ class GeminiImage2(IO.ComfyNode): display_name="Nano Banana Pro (Google Gemini Image)", category="api node/image/Gemini", description="Generate or edit images synchronously via Google Vertex API.", + short_description=None, inputs=[ IO.String.Input( "prompt", diff --git a/comfy_api_nodes/nodes_grok.py b/comfy_api_nodes/nodes_grok.py index da15e97ea..717f5d25b 100644 --- a/comfy_api_nodes/nodes_grok.py +++ b/comfy_api_nodes/nodes_grok.py @@ -36,6 +36,7 @@ class GrokImageNode(IO.ComfyNode): display_name="Grok Image", category="api node/image/Grok", description="Generate images using Grok based on a text prompt", + short_description=None, inputs=[ IO.Combo.Input("model", 
options=["grok-imagine-image-beta"]), IO.String.Input( @@ -137,6 +138,7 @@ class GrokImageEditNode(IO.ComfyNode): display_name="Grok Image Edit", category="api node/image/Grok", description="Modify an existing image based on a text prompt", + short_description=None, inputs=[ IO.Combo.Input("model", options=["grok-imagine-image-beta"]), IO.Image.Input("image"), @@ -226,6 +228,7 @@ class GrokVideoNode(IO.ComfyNode): display_name="Grok Video", category="api node/video/Grok", description="Generate video from a prompt or an image", + short_description=None, inputs=[ IO.Combo.Input("model", options=["grok-imagine-video-beta"]), IO.String.Input( @@ -334,6 +337,7 @@ class GrokVideoEditNode(IO.ComfyNode): display_name="Grok Video Edit", category="api node/video/Grok", description="Edit an existing video based on a text prompt.", + short_description=None, inputs=[ IO.Combo.Input("model", options=["grok-imagine-video-beta"]), IO.String.Input( diff --git a/comfy_api_nodes/nodes_hitpaw.py b/comfy_api_nodes/nodes_hitpaw.py index 488080a74..953be5f54 100644 --- a/comfy_api_nodes/nodes_hitpaw.py +++ b/comfy_api_nodes/nodes_hitpaw.py @@ -74,6 +74,7 @@ class HitPawGeneralImageEnhance(IO.ComfyNode): category="api node/image/HitPaw", description="Upscale low-resolution images to super-resolution, eliminate artifacts and noise. " f"Maximum output: {MAX_MP_GENERATIVE} megapixels.", + short_description="Upscale images to super-resolution, removing artifacts and noise.", inputs=[ IO.Combo.Input("model", options=["generative_portrait", "generative"]), IO.Image.Input("image"), @@ -205,6 +206,7 @@ class HitPawVideoEnhance(IO.ComfyNode): category="api node/video/HitPaw", description="Upscale low-resolution videos to high resolution, eliminate artifacts and noise. 
" "Prices shown are per second of video.", + short_description="Upscale videos to high resolution, removing artifacts and noise.", inputs=[ IO.DynamicCombo.Input("model", options=model_options), IO.Video.Input("video"), diff --git a/comfy_api_nodes/nodes_hunyuan3d.py b/comfy_api_nodes/nodes_hunyuan3d.py index ca002cc60..f7511f0a4 100644 --- a/comfy_api_nodes/nodes_hunyuan3d.py +++ b/comfy_api_nodes/nodes_hunyuan3d.py @@ -54,6 +54,8 @@ class TencentTextToModelNode(IO.ComfyNode): node_id="TencentTextToModelNode", display_name="Hunyuan3D: Text to Model", category="api node/3d/Tencent", + description="Generate 3D models from text prompts using Hunyuan3D Pro with configurable face count and geometry options.", + short_description="Generate 3D models from text using Hunyuan3D Pro.", inputs=[ IO.Combo.Input( "model", @@ -168,6 +170,8 @@ class TencentImageToModelNode(IO.ComfyNode): node_id="TencentImageToModelNode", display_name="Hunyuan3D: Image(s) to Model", category="api node/3d/Tencent", + description="Generate 3D models from images using Hunyuan3D Pro with optional multi-view inputs and configurable geometry.", + short_description="Generate 3D models from images using Hunyuan3D Pro.", inputs=[ IO.Combo.Input( "model", diff --git a/comfy_api_nodes/nodes_ideogram.py b/comfy_api_nodes/nodes_ideogram.py index feaf7a858..e5145c367 100644 --- a/comfy_api_nodes/nodes_ideogram.py +++ b/comfy_api_nodes/nodes_ideogram.py @@ -236,6 +236,7 @@ class IdeogramV1(IO.ComfyNode): display_name="Ideogram V1", category="api node/image/Ideogram", description="Generates images using the Ideogram V1 model.", + short_description=None, inputs=[ IO.String.Input( "prompt", @@ -361,6 +362,7 @@ class IdeogramV2(IO.ComfyNode): display_name="Ideogram V2", category="api node/image/Ideogram", description="Generates images using the Ideogram V2 model.", + short_description=None, inputs=[ IO.String.Input( "prompt", @@ -526,6 +528,7 @@ class IdeogramV3(IO.ComfyNode): category="api node/image/Ideogram", 
description="Generates images using the Ideogram V3 model. " "Supports both regular image generation from text prompts and image editing with mask.", + short_description="Generate and edit images with Ideogram V3.", inputs=[ IO.String.Input( "prompt", diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py index b89c85561..ed9dfe277 100644 --- a/comfy_api_nodes/nodes_kling.py +++ b/comfy_api_nodes/nodes_kling.py @@ -642,6 +642,7 @@ class KlingCameraControls(IO.ComfyNode): display_name="Kling Camera Controls", category="api node/video/Kling", description="Allows specifying configuration options for Kling Camera Controls and motion control effects.", + short_description="Configure Kling camera controls and motion effects.", inputs=[ IO.Combo.Input("camera_control_type", options=KlingCameraControlType), IO.Float.Input( @@ -762,6 +763,7 @@ class KlingTextToVideoNode(IO.ComfyNode): display_name="Kling Text to Video", category="api node/video/Kling", description="Kling Text to Video Node", + short_description=None, inputs=[ IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), @@ -849,6 +851,7 @@ class OmniProTextToVideoNode(IO.ComfyNode): display_name="Kling 3.0 Omni Text to Video", category="api node/video/Kling", description="Use text prompts to generate videos with the latest Kling model.", + short_description=None, inputs=[ IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]), IO.String.Input( @@ -989,6 +992,7 @@ class OmniProFirstLastFrameNode(IO.ComfyNode): display_name="Kling 3.0 Omni First-Last-Frame to Video", category="api node/video/Kling", description="Use a start frame, an optional end frame, or reference images with the latest Kling model.", + short_description="Generate video from start/end frames or reference images.", inputs=[ IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]), 
IO.String.Input( @@ -1187,6 +1191,7 @@ class OmniProImageToVideoNode(IO.ComfyNode): display_name="Kling 3.0 Omni Image to Video", category="api node/video/Kling", description="Use up to 7 reference images to generate a video with the latest Kling model.", + short_description="Generate video from up to 7 reference images.", inputs=[ IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]), IO.String.Input( @@ -1347,6 +1352,7 @@ class OmniProVideoToVideoNode(IO.ComfyNode): display_name="Kling 3.0 Omni Video to Video", category="api node/video/Kling", description="Use a video and up to 4 reference images to generate a video with the latest Kling model.", + short_description="Generate video from a video and reference images.", inputs=[ IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]), IO.String.Input( @@ -1458,6 +1464,7 @@ class OmniProEditVideoNode(IO.ComfyNode): display_name="Kling 3.0 Omni Edit Video", category="api node/video/Kling", description="Edit an existing video with the latest model from Kling.", + short_description=None, inputs=[ IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]), IO.String.Input( @@ -1565,6 +1572,7 @@ class OmniProImageNode(IO.ComfyNode): display_name="Kling 3.0 Omni Image", category="api node/image/Kling", description="Create or edit images with the latest model from Kling.", + short_description=None, inputs=[ IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-image-o1"]), IO.String.Input( @@ -1693,6 +1701,7 @@ class KlingCameraControlT2VNode(IO.ComfyNode): display_name="Kling Text to Video (Camera Control)", category="api node/video/Kling", description="Transform text into cinematic videos with professional camera movements that simulate real-world cinematography. 
Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original text.", + short_description="Generate videos from text with camera movement controls.", inputs=[ IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), @@ -1754,6 +1763,8 @@ class KlingImage2VideoNode(IO.ComfyNode): node_id="KlingImage2VideoNode", display_name="Kling Image(First Frame) to Video", category="api node/video/Kling", + description="Generate a video from a first-frame image with configurable model, mode, aspect ratio, and duration settings.", + short_description="Generate video from a first-frame reference image.", inputs=[ IO.Image.Input("start_frame", tooltip="The reference image used to generate the video."), IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), @@ -1854,6 +1865,7 @@ class KlingCameraControlI2VNode(IO.ComfyNode): display_name="Kling Image to Video (Camera Control)", category="api node/video/Kling", description="Transform still images into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original image.", + short_description="Generate videos from images with camera movement controls.", inputs=[ IO.Image.Input( "start_frame", @@ -1925,6 +1937,7 @@ class KlingStartEndFrameNode(IO.ComfyNode): display_name="Kling Start-End Frame to Video", category="api node/video/Kling", description="Generate a video sequence that transitions between your provided start and end images. 
The node creates all frames in between, producing a smooth transformation from the first frame to the last.", + short_description="Generate video transitioning between start and end frame images.", inputs=[ IO.Image.Input( "start_frame", @@ -2019,6 +2032,7 @@ class KlingVideoExtendNode(IO.ComfyNode): display_name="Kling Video Extend", category="api node/video/Kling", description="Kling Video Extend Node. Extend videos made by other Kling nodes. The video_id is created by using other Kling Nodes.", + short_description="Extend videos generated by other Kling nodes.", inputs=[ IO.String.Input( "prompt", @@ -2100,6 +2114,7 @@ class KlingDualCharacterVideoEffectNode(IO.ComfyNode): display_name="Kling Dual Character Video Effects", category="api node/video/Kling", description="Achieve different special effects when generating a video based on the effect_scene. First image will be positioned on left side, second on right side of the composite.", + short_description="Apply dual-character video effects from two images.", inputs=[ IO.Image.Input("image_left", tooltip="Left side image"), IO.Image.Input("image_right", tooltip="Right side image"), @@ -2190,6 +2205,7 @@ class KlingSingleImageVideoEffectNode(IO.ComfyNode): display_name="Kling Video Effects", category="api node/video/Kling", description="Achieve different special effects when generating a video based on the effect_scene.", + short_description="Apply special video effects to a single image.", inputs=[ IO.Image.Input( "image", @@ -2263,6 +2279,7 @@ class KlingLipSyncAudioToVideoNode(IO.ComfyNode): display_name="Kling Lip Sync Video with Audio", category="api node/video/Kling", description="Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file. When using, ensure that the audio contains clearly distinguishable vocals and that the video contains a distinct face. The audio file should not be larger than 5MB. 
The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.", + short_description="Sync video mouth movements to audio content.", inputs=[ IO.Video.Input("video"), IO.Audio.Input("audio"), @@ -2314,6 +2331,7 @@ class KlingLipSyncTextToVideoNode(IO.ComfyNode): display_name="Kling Lip Sync Video with Text", category="api node/video/Kling", description="Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.", + short_description="Sync video mouth movements to a text prompt.", inputs=[ IO.Video.Input("video"), IO.String.Input( @@ -2381,6 +2399,7 @@ class KlingVirtualTryOnNode(IO.ComfyNode): display_name="Kling Virtual Try On", category="api node/image/Kling", description="Kling Virtual Try On Node. Input a human image and a cloth image to try on the cloth on the human. You can merge multiple clothing item pictures into one image with a white background.", + short_description="Virtually try clothing onto a human image.", inputs=[ IO.Image.Input("human_image"), IO.Image.Input("cloth_image"), @@ -2448,6 +2467,7 @@ class KlingImageGenerationNode(IO.ComfyNode): display_name="Kling 3.0 Image", category="api node/image/Kling", description="Kling Image Generation Node. 
Generate an image from a text prompt with an optional reference image.", + short_description="Generate images from text with optional reference image.", inputs=[ IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), @@ -2581,6 +2601,8 @@ class TextToVideoWithAudio(IO.ComfyNode): node_id="KlingTextToVideoWithAudio", display_name="Kling 2.6 Text to Video with Audio", category="api node/video/Kling", + description="Generate a video with synchronized audio from a text prompt using the Kling v2-6 model.", + short_description="Generate video with audio from text using Kling v2-6.", inputs=[ IO.Combo.Input("model_name", options=["kling-v2-6"]), IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt."), @@ -2649,6 +2671,8 @@ class ImageToVideoWithAudio(IO.ComfyNode): node_id="KlingImageToVideoWithAudio", display_name="Kling 2.6 Image(First Frame) to Video with Audio", category="api node/video/Kling", + description="Generate a video with synchronized audio from a first-frame image and text prompt using the Kling v2-6 model.", + short_description="Generate video with audio from an image using Kling v2-6.", inputs=[ IO.Combo.Input("model_name", options=["kling-v2-6"]), IO.Image.Input("start_frame"), @@ -2719,6 +2743,8 @@ class MotionControl(IO.ComfyNode): node_id="KlingMotionControl", display_name="Kling Motion Control", category="api node/video/Kling", + description="Drive character movement and expression in video using a reference image and motion reference video.", + short_description="Control video character motion using reference image and video.", inputs=[ IO.String.Input("prompt", multiline=True), IO.Image.Input("reference_image"), @@ -2815,6 +2841,7 @@ class KlingVideoNode(IO.ComfyNode): category="api node/video/Kling", description="Generate videos with Kling V3. 
" "Supports text-to-video and image-to-video with optional storyboard multi-prompt and audio generation.", + short_description="Generate videos with Kling V3 from text or images.", inputs=[ IO.DynamicCombo.Input( "multi_shot", diff --git a/comfy_api_nodes/nodes_ltxv.py b/comfy_api_nodes/nodes_ltxv.py index c6424af92..7ac2bf4d5 100644 --- a/comfy_api_nodes/nodes_ltxv.py +++ b/comfy_api_nodes/nodes_ltxv.py @@ -52,6 +52,7 @@ class TextToVideoNode(IO.ComfyNode): display_name="LTXV Text To Video", category="api node/video/LTXV", description="Professional-quality videos with customizable duration and resolution.", + short_description=None, inputs=[ IO.Combo.Input("model", options=list(MODELS_MAP.keys())), IO.String.Input( @@ -128,6 +129,7 @@ class ImageToVideoNode(IO.ComfyNode): display_name="LTXV Image To Video", category="api node/video/LTXV", description="Professional-quality videos with customizable duration and resolution based on start image.", + short_description=None, inputs=[ IO.Image.Input("image", tooltip="First frame to be used for the video."), IO.Combo.Input("model", options=list(MODELS_MAP.keys())), diff --git a/comfy_api_nodes/nodes_luma.py b/comfy_api_nodes/nodes_luma.py index 9ed6cd299..7b9bb4eed 100644 --- a/comfy_api_nodes/nodes_luma.py +++ b/comfy_api_nodes/nodes_luma.py @@ -46,6 +46,7 @@ class LumaReferenceNode(IO.ComfyNode): display_name="Luma Reference", category="api node/image/Luma", description="Holds an image and weight for use with Luma Generate Image node.", + short_description="Image and weight input for Luma generation.", inputs=[ IO.Image.Input( "image", @@ -85,6 +86,7 @@ class LumaConceptsNode(IO.ComfyNode): display_name="Luma Concepts", category="api node/video/Luma", description="Camera Concepts for use with Luma Text to Video and Luma Image to Video nodes.", + short_description="Camera concepts for Luma video generation nodes.", inputs=[ IO.Combo.Input( "concept1", @@ -134,6 +136,7 @@ class LumaImageGenerationNode(IO.ComfyNode): 
display_name="Luma Text to Image", category="api node/image/Luma", description="Generates images synchronously based on prompt and aspect ratio.", + short_description=None, inputs=[ IO.String.Input( "prompt", @@ -278,6 +281,7 @@ class LumaImageModifyNode(IO.ComfyNode): display_name="Luma Image to Image", category="api node/image/Luma", description="Modifies images synchronously based on prompt and aspect ratio.", + short_description=None, inputs=[ IO.Image.Input( "image", @@ -371,6 +375,7 @@ class LumaTextToVideoGenerationNode(IO.ComfyNode): display_name="Luma Text to Video", category="api node/video/Luma", description="Generates videos synchronously based on prompt and output_size.", + short_description=None, inputs=[ IO.String.Input( "prompt", @@ -472,6 +477,7 @@ class LumaImageToVideoGenerationNode(IO.ComfyNode): display_name="Luma Image to Video", category="api node/video/Luma", description="Generates videos synchronously based on prompt, input images, and output_size.", + short_description=None, inputs=[ IO.String.Input( "prompt", diff --git a/comfy_api_nodes/nodes_magnific.py b/comfy_api_nodes/nodes_magnific.py index 83a581c5d..c6fc18b89 100644 --- a/comfy_api_nodes/nodes_magnific.py +++ b/comfy_api_nodes/nodes_magnific.py @@ -242,6 +242,7 @@ class MagnificImageUpscalerPreciseV2Node(IO.ComfyNode): category="api node/image/Magnific", description="High-fidelity upscaling with fine control over sharpness, grain, and detail. 
" "Maximum output: 10060×10060 pixels.", + short_description="High-fidelity upscaling with sharpness, grain, and detail control.", inputs=[ IO.Image.Input("image"), IO.Combo.Input("scale_factor", options=["2x", "4x", "8x", "16x"]), @@ -401,6 +402,7 @@ class MagnificImageStyleTransferNode(IO.ComfyNode): display_name="Magnific Image Style Transfer", category="api node/image/Magnific", description="Transfer the style from a reference image to your input image.", + short_description=None, inputs=[ IO.Image.Input("image", tooltip="The image to apply style transfer to."), IO.Image.Input("reference_image", tooltip="The reference image to extract style from."), @@ -549,6 +551,7 @@ class MagnificImageRelightNode(IO.ComfyNode): display_name="Magnific Image Relight", category="api node/image/Magnific", description="Relight an image with lighting adjustments and optional reference-based light transfer.", + short_description=None, inputs=[ IO.Image.Input("image", tooltip="The image to relight."), IO.String.Input( @@ -787,6 +790,7 @@ class MagnificImageSkinEnhancerNode(IO.ComfyNode): display_name="Magnific Image Skin Enhancer", category="api node/image/Magnific", description="Skin enhancement for portraits with multiple processing modes.", + short_description=None, inputs=[ IO.Image.Input("image", tooltip="The portrait image to enhance."), IO.Int.Input( diff --git a/comfy_api_nodes/nodes_meshy.py b/comfy_api_nodes/nodes_meshy.py index 65f6f0d2d..190242eff 100644 --- a/comfy_api_nodes/nodes_meshy.py +++ b/comfy_api_nodes/nodes_meshy.py @@ -34,6 +34,8 @@ class MeshyTextToModelNode(IO.ComfyNode): node_id="MeshyTextToModelNode", display_name="Meshy: Text to Model", category="api node/3d/Meshy", + description="Generate a 3D model from a text prompt using the Meshy API.", + short_description="Generate a 3D model from a text prompt.", inputs=[ IO.Combo.Input("model", options=["latest"]), IO.String.Input("prompt", multiline=True, default=""), @@ -146,6 +148,7 @@ class 
MeshyRefineNode(IO.ComfyNode): display_name="Meshy: Refine Draft Model", category="api node/3d/Meshy", description="Refine a previously created draft model.", + short_description=None, inputs=[ IO.Combo.Input("model", options=["latest"]), IO.Custom("MESHY_TASK_ID").Input("meshy_task_id"), @@ -239,6 +242,8 @@ class MeshyImageToModelNode(IO.ComfyNode): node_id="MeshyImageToModelNode", display_name="Meshy: Image to Model", category="api node/3d/Meshy", + description="Generate a 3D model from a single image using the Meshy API.", + short_description="Generate a 3D model from an image.", inputs=[ IO.Combo.Input("model", options=["latest"]), IO.Image.Input("image"), @@ -403,6 +408,7 @@ class MeshyMultiImageToModelNode(IO.ComfyNode): node_id="MeshyMultiImageToModelNode", display_name="Meshy: Multi-Image to Model", category="api node/3d/Meshy", + description="Generate a 3D model from multiple images using the Meshy API.", inputs=[ IO.Combo.Input("model", options=["latest"]), IO.Autogrow.Input( @@ -575,6 +581,7 @@ class MeshyRigModelNode(IO.ComfyNode): description="Provides a rigged character in standard formats. 
" "Auto-rigging is currently not suitable for untextured meshes, non-humanoid assets, " "or humanoid assets with unclear limb and body structure.", + short_description="Rig a character model for animation.", inputs=[ IO.Custom("MESHY_TASK_ID").Input("meshy_task_id"), IO.Float.Input( @@ -654,6 +661,7 @@ class MeshyAnimateModelNode(IO.ComfyNode): display_name="Meshy: Animate Model", category="api node/3d/Meshy", description="Apply a specific animation action to a previously rigged character.", + short_description=None, inputs=[ IO.Custom("MESHY_RIGGED_TASK_ID").Input("rig_task_id"), IO.Int.Input( @@ -719,6 +727,7 @@ class MeshyTextureNode(IO.ComfyNode): node_id="MeshyTextureNode", display_name="Meshy: Texture Model", category="api node/3d/Meshy", + description="Apply textures to an existing 3D model using the Meshy API.", inputs=[ IO.Combo.Input("model", options=["latest"]), IO.Custom("MESHY_TASK_ID").Input("meshy_task_id"), diff --git a/comfy_api_nodes/nodes_minimax.py b/comfy_api_nodes/nodes_minimax.py index b5d0b461f..e00012aa6 100644 --- a/comfy_api_nodes/nodes_minimax.py +++ b/comfy_api_nodes/nodes_minimax.py @@ -103,6 +103,7 @@ class MinimaxTextToVideoNode(IO.ComfyNode): display_name="MiniMax Text to Video", category="api node/video/MiniMax", description="Generates videos synchronously based on a prompt, and optional parameters.", + short_description=None, inputs=[ IO.String.Input( "prompt_text", @@ -165,6 +166,7 @@ class MinimaxImageToVideoNode(IO.ComfyNode): display_name="MiniMax Image to Video", category="api node/video/MiniMax", description="Generates videos synchronously based on an image and prompt, and optional parameters.", + short_description="Generate videos from an image, prompt, and optional parameters.", inputs=[ IO.Image.Input( "image", @@ -232,6 +234,7 @@ class MinimaxSubjectToVideoNode(IO.ComfyNode): display_name="MiniMax Subject to Video", category="api node/video/MiniMax", description="Generates videos synchronously based on an image and 
prompt, and optional parameters.", + short_description="Subject-driven video generation from image and prompt.", inputs=[ IO.Image.Input( "subject", @@ -296,6 +299,7 @@ class MinimaxHailuoVideoNode(IO.ComfyNode): display_name="MiniMax Hailuo Video", category="api node/video/MiniMax", description="Generates videos from prompt, with optional start frame using the new MiniMax Hailuo-02 model.", + short_description="Generate videos with optional start frame using Hailuo-02.", inputs=[ IO.String.Input( "prompt_text", diff --git a/comfy_api_nodes/nodes_moonvalley.py b/comfy_api_nodes/nodes_moonvalley.py index 78a230529..181e7f8ec 100644 --- a/comfy_api_nodes/nodes_moonvalley.py +++ b/comfy_api_nodes/nodes_moonvalley.py @@ -166,6 +166,7 @@ class MoonvalleyImg2VideoNode(IO.ComfyNode): display_name="Moonvalley Marey Image to Video", category="api node/video/Moonvalley Marey", description="Moonvalley Marey Image to Video Node", + short_description=None, inputs=[ IO.Image.Input( "image", @@ -290,7 +291,8 @@ class MoonvalleyVideo2VideoNode(IO.ComfyNode): node_id="MoonvalleyVideo2VideoNode", display_name="Moonvalley Marey Video to Video", category="api node/video/Moonvalley Marey", - description="", + description="Transform an input video into a new video using a text prompt and motion or pose control.", + short_description="Transform video using text prompt with motion or pose control.", inputs=[ IO.String.Input( "prompt", @@ -415,7 +417,8 @@ class MoonvalleyTxt2VideoNode(IO.ComfyNode): node_id="MoonvalleyTxt2VideoNode", display_name="Moonvalley Marey Text to Video", category="api node/video/Moonvalley Marey", - description="", + description="Generate a video from a text prompt using the Moonvalley Marey model.", + short_description="Generate video from a text prompt using Moonvalley Marey.", inputs=[ IO.String.Input( "prompt", diff --git a/comfy_api_nodes/nodes_openai.py b/comfy_api_nodes/nodes_openai.py index 332107a82..613ef87a4 100644 --- a/comfy_api_nodes/nodes_openai.py 
+++ b/comfy_api_nodes/nodes_openai.py @@ -98,6 +98,7 @@ class OpenAIDalle2(IO.ComfyNode): display_name="OpenAI DALL·E 2", category="api node/image/OpenAI", description="Generates images synchronously via OpenAI's DALL·E 2 endpoint.", + short_description=None, inputs=[ IO.String.Input( "prompt", @@ -248,6 +249,7 @@ class OpenAIDalle3(IO.ComfyNode): display_name="OpenAI DALL·E 3", category="api node/image/OpenAI", description="Generates images synchronously via OpenAI's DALL·E 3 endpoint.", + short_description=None, inputs=[ IO.String.Input( "prompt", @@ -366,6 +368,7 @@ class OpenAIGPTImage1(IO.ComfyNode): display_name="OpenAI GPT Image 1.5", category="api node/image/OpenAI", description="Generates images synchronously via OpenAI's GPT Image endpoint.", + short_description=None, inputs=[ IO.String.Input( "prompt", @@ -576,6 +579,7 @@ class OpenAIChatNode(IO.ComfyNode): display_name="OpenAI ChatGPT", category="api node/text/OpenAI", description="Generate text responses from an OpenAI model.", + short_description=None, inputs=[ IO.String.Input( "prompt", @@ -803,6 +807,7 @@ class OpenAIInputFiles(IO.ComfyNode): display_name="OpenAI ChatGPT Input Files", category="api node/text/OpenAI", description="Loads and prepares input files (text, pdf, etc.) to include as inputs for the OpenAI Chat Node. The files will be read by the OpenAI model when generating a response. 
🛈 TIP: Can be chained together with other OpenAI Input File nodes.", + short_description="Load and prepare input files for OpenAI Chat.", inputs=[ IO.Combo.Input( "file", @@ -850,6 +855,7 @@ class OpenAIChatConfig(IO.ComfyNode): display_name="OpenAI ChatGPT Advanced Options", category="api node/text/OpenAI", description="Allows specifying advanced configuration options for the OpenAI Chat Nodes.", + short_description=None, inputs=[ IO.Combo.Input( "truncation", diff --git a/comfy_api_nodes/nodes_pixverse.py b/comfy_api_nodes/nodes_pixverse.py index e17a24ae7..a95fa368f 100644 --- a/comfy_api_nodes/nodes_pixverse.py +++ b/comfy_api_nodes/nodes_pixverse.py @@ -54,6 +54,8 @@ class PixverseTemplateNode(IO.ComfyNode): node_id="PixverseTemplateNode", display_name="PixVerse Template", category="api node/video/PixVerse", + description="Select a style template for PixVerse video generation.", + short_description=None, inputs=[ IO.Combo.Input("template", options=list(pixverse_templates.keys())), ], @@ -76,6 +78,7 @@ class PixverseTextToVideoNode(IO.ComfyNode): display_name="PixVerse Text to Video", category="api node/video/PixVerse", description="Generates videos based on prompt and output_size.", + short_description=None, inputs=[ IO.String.Input( "prompt", @@ -194,6 +197,7 @@ class PixverseImageToVideoNode(IO.ComfyNode): display_name="PixVerse Image to Video", category="api node/video/PixVerse", description="Generates videos based on prompt and output_size.", + short_description=None, inputs=[ IO.Image.Input("image"), IO.String.Input( @@ -312,6 +316,7 @@ class PixverseTransitionVideoNode(IO.ComfyNode): display_name="PixVerse Transition Video", category="api node/video/PixVerse", description="Generates videos based on prompt and output_size.", + short_description=None, inputs=[ IO.Image.Input("first_frame"), IO.Image.Input("last_frame"), diff --git a/comfy_api_nodes/nodes_recraft.py b/comfy_api_nodes/nodes_recraft.py index 3a1f32263..47f7b2014 100644 --- 
a/comfy_api_nodes/nodes_recraft.py +++ b/comfy_api_nodes/nodes_recraft.py @@ -180,6 +180,7 @@ class RecraftColorRGBNode(IO.ComfyNode): display_name="Recraft Color RGB", category="api node/image/Recraft", description="Create Recraft Color by choosing specific RGB values.", + short_description=None, inputs=[ IO.Int.Input("r", default=0, min=0, max=255, tooltip="Red value of color."), IO.Int.Input("g", default=0, min=0, max=255, tooltip="Green value of color."), @@ -206,6 +207,7 @@ class RecraftControlsNode(IO.ComfyNode): display_name="Recraft Controls", category="api node/image/Recraft", description="Create Recraft Controls for customizing Recraft generation.", + short_description=None, inputs=[ IO.Custom(RecraftIO.COLOR).Input("colors", optional=True), IO.Custom(RecraftIO.COLOR).Input("background_color", optional=True), @@ -230,6 +232,7 @@ class RecraftStyleV3RealisticImageNode(IO.ComfyNode): display_name="Recraft Style - Realistic Image", category="api node/image/Recraft", description="Select realistic_image style and optional substyle.", + short_description=None, inputs=[ IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE)), ], @@ -254,7 +257,8 @@ class RecraftStyleV3DigitalIllustrationNode(RecraftStyleV3RealisticImageNode): node_id="RecraftStyleV3DigitalIllustration", display_name="Recraft Style - Digital Illustration", category="api node/image/Recraft", - description="Select realistic_image style and optional substyle.", + description="Select digital_illustration style and optional substyle.", + short_description=None, inputs=[ IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE)), ], @@ -271,9 +275,10 @@ class RecraftStyleV3VectorIllustrationNode(RecraftStyleV3RealisticImageNode): def define_schema(cls): return IO.Schema( node_id="RecraftStyleV3VectorIllustrationNode", - display_name="Recraft Style - Realistic Image", + display_name="Recraft Style - Vector Illustration", category="api node/image/Recraft", - 
description="Select realistic_image style and optional substyle.", + description="Select vector_illustration style and optional substyle.", + short_description=None, inputs=[ IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE)), ], @@ -292,7 +297,8 @@ class RecraftStyleV3LogoRasterNode(RecraftStyleV3RealisticImageNode): node_id="RecraftStyleV3LogoRaster", display_name="Recraft Style - Logo Raster", category="api node/image/Recraft", - description="Select realistic_image style and optional substyle.", + description="Select logo_raster style and optional substyle.", + short_description=None, inputs=[ IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE, include_none=False)), ], @@ -310,6 +316,7 @@ class RecraftStyleInfiniteStyleLibrary(IO.ComfyNode): display_name="Recraft Style - Infinite Style Library", category="api node/image/Recraft", description="Select style based on preexisting UUID from Recraft's Infinite Style Library.", + short_description=None, inputs=[ IO.String.Input("style_id", default="", tooltip="UUID of style from Infinite Style Library."), ], @@ -335,6 +342,7 @@ class RecraftCreateStyleNode(IO.ComfyNode): description="Create a custom style from reference images. " "Upload 1-5 images to use as style references. 
" "Total size of all images is limited to 5 MB.", + short_description="Create a custom style from 1-5 reference images.", inputs=[ IO.Combo.Input( "style", @@ -402,6 +410,7 @@ class RecraftTextToImageNode(IO.ComfyNode): display_name="Recraft Text to Image", category="api node/image/Recraft", description="Generates images synchronously based on prompt and resolution.", + short_description=None, inputs=[ IO.String.Input("prompt", multiline=True, default="", tooltip="Prompt for the image generation."), IO.Combo.Input( @@ -514,6 +523,7 @@ class RecraftImageToImageNode(IO.ComfyNode): display_name="Recraft Image to Image", category="api node/image/Recraft", description="Modify image based on prompt and strength.", + short_description=None, inputs=[ IO.Image.Input("image"), IO.String.Input("prompt", multiline=True, default="", tooltip="Prompt for the image generation."), @@ -632,6 +642,7 @@ class RecraftImageInpaintingNode(IO.ComfyNode): display_name="Recraft Image Inpainting", category="api node/image/Recraft", description="Modify image based on prompt and mask.", + short_description=None, inputs=[ IO.Image.Input("image"), IO.Mask.Input("mask"), @@ -734,6 +745,7 @@ class RecraftTextToVectorNode(IO.ComfyNode): display_name="Recraft Text to Vector", category="api node/image/Recraft", description="Generates SVG synchronously based on prompt and resolution.", + short_description=None, inputs=[ IO.String.Input("prompt", default="", tooltip="Prompt for the image generation.", multiline=True), IO.Combo.Input("substyle", options=get_v3_substyles(RecraftStyleV3.vector_illustration)), @@ -834,6 +846,7 @@ class RecraftVectorizeImageNode(IO.ComfyNode): display_name="Recraft Vectorize Image", category="api node/image/Recraft", description="Generates SVG synchronously from an input image.", + short_description=None, inputs=[ IO.Image.Input("image"), ], @@ -877,6 +890,7 @@ class RecraftReplaceBackgroundNode(IO.ComfyNode): display_name="Recraft Replace Background", category="api 
node/image/Recraft", description="Replace background on image, based on provided prompt.", + short_description=None, inputs=[ IO.Image.Input("image"), IO.String.Input("prompt", tooltip="Prompt for the image generation.", default="", multiline=True), @@ -964,6 +978,7 @@ class RecraftRemoveBackgroundNode(IO.ComfyNode): display_name="Recraft Remove Background", category="api node/image/Recraft", description="Remove background from image, and return processed image and mask.", + short_description=None, inputs=[ IO.Image.Input("image"), ], @@ -1012,8 +1027,9 @@ class RecraftCrispUpscaleNode(IO.ComfyNode): display_name="Recraft Crisp Upscale Image", category="api node/image/Recraft", description="Upscale image synchronously.\n" - "Enhances a given raster image using ‘crisp upscale’ tool, " + "Enhances a given raster image using 'crisp upscale' tool, " "increasing image resolution, making the image sharper and cleaner.", + short_description="Crisp upscale to sharpen and increase image resolution.", inputs=[ IO.Image.Input("image"), ], @@ -1058,8 +1074,9 @@ class RecraftCreativeUpscaleNode(RecraftCrispUpscaleNode): display_name="Recraft Creative Upscale Image", category="api node/image/Recraft", description="Upscale image synchronously.\n" - "Enhances a given raster image using ‘creative upscale’ tool, " + "Enhances a given raster image using 'creative upscale' tool, " "boosting resolution with a focus on refining small details and faces.", + short_description="Creative upscale focusing on small details and faces.", inputs=[ IO.Image.Input("image"), ], diff --git a/comfy_api_nodes/nodes_rodin.py b/comfy_api_nodes/nodes_rodin.py index f9cff121f..05c9498c5 100644 --- a/comfy_api_nodes/nodes_rodin.py +++ b/comfy_api_nodes/nodes_rodin.py @@ -238,6 +238,7 @@ class Rodin3D_Regular(IO.ComfyNode): display_name="Rodin 3D Generate - Regular Generate", category="api node/3d/Rodin", description=cleandoc(cls.__doc__ or ""), + short_description=None, inputs=[ IO.Image.Input("Images"), 
*COMMON_PARAMETERS, @@ -297,6 +298,7 @@ class Rodin3D_Detail(IO.ComfyNode): display_name="Rodin 3D Generate - Detail Generate", category="api node/3d/Rodin", description=cleandoc(cls.__doc__ or ""), + short_description=None, inputs=[ IO.Image.Input("Images"), *COMMON_PARAMETERS, @@ -356,6 +358,7 @@ class Rodin3D_Smooth(IO.ComfyNode): display_name="Rodin 3D Generate - Smooth Generate", category="api node/3d/Rodin", description=cleandoc(cls.__doc__ or ""), + short_description=None, inputs=[ IO.Image.Input("Images"), *COMMON_PARAMETERS, @@ -414,6 +417,7 @@ class Rodin3D_Sketch(IO.ComfyNode): display_name="Rodin 3D Generate - Sketch Generate", category="api node/3d/Rodin", description=cleandoc(cls.__doc__ or ""), + short_description=None, inputs=[ IO.Image.Input("Images"), IO.Int.Input( @@ -476,6 +480,7 @@ class Rodin3D_Gen2(IO.ComfyNode): display_name="Rodin 3D Generate - Gen-2 Generate", category="api node/3d/Rodin", description=cleandoc(cls.__doc__ or ""), + short_description=None, inputs=[ IO.Image.Input("Images"), IO.Int.Input( diff --git a/comfy_api_nodes/nodes_runway.py b/comfy_api_nodes/nodes_runway.py index 573170ba2..6364b1187 100644 --- a/comfy_api_nodes/nodes_runway.py +++ b/comfy_api_nodes/nodes_runway.py @@ -145,6 +145,7 @@ class RunwayImageToVideoNodeGen3a(IO.ComfyNode): "Before diving in, review these best practices to ensure that " "your input selections will set your generation up for success: " "https://help.runwayml.com/hc/en-us/articles/33927968552339-Creating-with-Act-One-on-Gen-3-Alpha-and-Turbo.", + short_description="Generate video from a starting frame using Gen3a Turbo.", inputs=[ IO.String.Input( "prompt", @@ -239,6 +240,7 @@ class RunwayImageToVideoNodeGen4(IO.ComfyNode): "Before diving in, review these best practices to ensure that " "your input selections will set your generation up for success: " "https://help.runwayml.com/hc/en-us/articles/37327109429011-Creating-with-Gen-4-Video.", + short_description="Generate video from a starting 
frame using Gen4 Turbo.", inputs=[ IO.String.Input( "prompt", @@ -337,6 +339,7 @@ class RunwayFirstLastFrameNode(IO.ComfyNode): "Before diving in, review these best practices to ensure that your input selections " "will set your generation up for success: " "https://help.runwayml.com/hc/en-us/articles/34170748696595-Creating-with-Keyframes-on-Gen-3.", + short_description="Generate video from first and last keyframes with a prompt.", inputs=[ IO.String.Input( "prompt", @@ -443,6 +446,7 @@ class RunwayTextToImageNode(IO.ComfyNode): category="api node/image/Runway", description="Generate an image from a text prompt using Runway's Gen 4 model. " "You can also include reference image to guide the generation.", + short_description="Generate an image from text using Runway Gen 4.", inputs=[ IO.String.Input( "prompt", diff --git a/comfy_api_nodes/nodes_sora.py b/comfy_api_nodes/nodes_sora.py index afc18bb25..768e106aa 100644 --- a/comfy_api_nodes/nodes_sora.py +++ b/comfy_api_nodes/nodes_sora.py @@ -36,6 +36,7 @@ class OpenAIVideoSora2(IO.ComfyNode): display_name="OpenAI Sora - Video", category="api node/video/Sora", description="OpenAI video and audio generation.", + short_description=None, inputs=[ IO.Combo.Input( "model", diff --git a/comfy_api_nodes/nodes_stability.py b/comfy_api_nodes/nodes_stability.py index 5665109cf..877022348 100644 --- a/comfy_api_nodes/nodes_stability.py +++ b/comfy_api_nodes/nodes_stability.py @@ -64,6 +64,7 @@ class StabilityStableImageUltraNode(IO.ComfyNode): display_name="Stability AI Stable Image Ultra", category="api node/image/Stability AI", description=cleandoc(cls.__doc__ or ""), + short_description=None, inputs=[ IO.String.Input( "prompt", @@ -197,6 +198,7 @@ class StabilityStableImageSD_3_5Node(IO.ComfyNode): display_name="Stability AI Stable Diffusion 3.5 Image", category="api node/image/Stability AI", description=cleandoc(cls.__doc__ or ""), + short_description=None, inputs=[ IO.String.Input( "prompt", @@ -352,6 +354,7 @@ class 
StabilityUpscaleConservativeNode(IO.ComfyNode): display_name="Stability AI Upscale Conservative", category="api node/image/Stability AI", description=cleandoc(cls.__doc__ or ""), + short_description=None, inputs=[ IO.Image.Input("image"), IO.String.Input( @@ -454,6 +457,7 @@ class StabilityUpscaleCreativeNode(IO.ComfyNode): display_name="Stability AI Upscale Creative", category="api node/image/Stability AI", description=cleandoc(cls.__doc__ or ""), + short_description=None, inputs=[ IO.Image.Input("image"), IO.String.Input( @@ -573,6 +577,7 @@ class StabilityUpscaleFastNode(IO.ComfyNode): display_name="Stability AI Upscale Fast", category="api node/image/Stability AI", description=cleandoc(cls.__doc__ or ""), + short_description="Quickly upscale an image to 4x its original size.", inputs=[ IO.Image.Input("image"), ], @@ -625,6 +630,7 @@ class StabilityTextToAudio(IO.ComfyNode): display_name="Stability AI Text To Audio", category="api node/audio/Stability AI", description=cleandoc(cls.__doc__ or ""), + short_description=None, inputs=[ IO.Combo.Input( "model", @@ -701,6 +707,7 @@ class StabilityAudioToAudio(IO.ComfyNode): display_name="Stability AI Audio To Audio", category="api node/audio/Stability AI", description=cleandoc(cls.__doc__ or ""), + short_description=None, inputs=[ IO.Combo.Input( "model", @@ -794,6 +801,7 @@ class StabilityAudioInpaint(IO.ComfyNode): display_name="Stability AI Audio Inpaint", category="api node/audio/Stability AI", description=cleandoc(cls.__doc__ or ""), + short_description=None, inputs=[ IO.Combo.Input( "model", diff --git a/comfy_api_nodes/nodes_topaz.py b/comfy_api_nodes/nodes_topaz.py index 8fccde25a..6339a7c77 100644 --- a/comfy_api_nodes/nodes_topaz.py +++ b/comfy_api_nodes/nodes_topaz.py @@ -49,6 +49,7 @@ class TopazImageEnhance(IO.ComfyNode): display_name="Topaz Image Enhance", category="api node/image/Topaz", description="Industry-standard upscaling and image enhancement.", + short_description=None, inputs=[ 
IO.Combo.Input("model", options=["Reimagine"]), IO.Image.Input("image"), @@ -223,6 +224,7 @@ class TopazVideoEnhance(IO.ComfyNode): display_name="Topaz Video Enhance", category="api node/video/Topaz", description="Breathe new life into video with powerful upscaling and recovery technology.", + short_description=None, inputs=[ IO.Video.Input("video"), IO.Boolean.Input("upscaler_enabled", default=True), diff --git a/comfy_api_nodes/nodes_tripo.py b/comfy_api_nodes/nodes_tripo.py index 67c7f59fc..debb2b7b4 100644 --- a/comfy_api_nodes/nodes_tripo.py +++ b/comfy_api_nodes/nodes_tripo.py @@ -80,6 +80,7 @@ class TripoTextToModelNode(IO.ComfyNode): node_id="TripoTextToModelNode", display_name="Tripo: Text to Model", category="api node/3d/Tripo", + description="Generate a 3D model from a text prompt using Tripo's API.", inputs=[ IO.String.Input("prompt", multiline=True), IO.String.Input("negative_prompt", multiline=True, optional=True), @@ -199,6 +200,7 @@ class TripoImageToModelNode(IO.ComfyNode): node_id="TripoImageToModelNode", display_name="Tripo: Image to Model", category="api node/3d/Tripo", + description="Generate a 3D model from a single image using Tripo's API.", inputs=[ IO.Image.Input("image"), IO.Combo.Input( @@ -331,6 +333,7 @@ class TripoMultiviewToModelNode(IO.ComfyNode): node_id="TripoMultiviewToModelNode", display_name="Tripo: Multiview to Model", category="api node/3d/Tripo", + description="Generate a 3D model from multiple view images using Tripo's API.", inputs=[ IO.Image.Input("image"), IO.Image.Input("image_left", optional=True), @@ -470,6 +473,7 @@ class TripoTextureNode(IO.ComfyNode): node_id="TripoTextureNode", display_name="Tripo: Texture model", category="api node/3d/Tripo", + description="Apply textures to an existing 3D model using Tripo's API.", inputs=[ IO.Custom("MODEL_TASK_ID").Input("model_task_id"), IO.Boolean.Input("texture", default=True, optional=True), @@ -538,6 +542,7 @@ class TripoRefineNode(IO.ComfyNode): display_name="Tripo: 
Refine Draft model", category="api node/3d/Tripo", description="Refine a draft model created by v1.4 Tripo models only.", + short_description=None, inputs=[ IO.Custom("MODEL_TASK_ID").Input("model_task_id", tooltip="Must be a v1.4 Tripo model"), ], @@ -577,6 +582,8 @@ class TripoRigNode(IO.ComfyNode): node_id="TripoRigNode", display_name="Tripo: Rig model", category="api node/3d/Tripo", + description="Add a skeleton rig to an existing 3D model using Tripo's API.", + short_description="Add a skeleton rig to a 3D model.", inputs=[IO.Custom("MODEL_TASK_ID").Input("original_model_task_id")], outputs=[ IO.String.Output(display_name="model_file"), # for backward compatibility only @@ -614,6 +621,8 @@ class TripoRetargetNode(IO.ComfyNode): node_id="TripoRetargetNode", display_name="Tripo: Retarget rigged model", category="api node/3d/Tripo", + description="Apply a preset animation to a rigged 3D model using Tripo's API.", + short_description="Apply a preset animation to a rigged model.", inputs=[ IO.Custom("RIG_TASK_ID").Input("original_model_task_id"), IO.Combo.Input( @@ -679,6 +688,8 @@ class TripoConversionNode(IO.ComfyNode): node_id="TripoConversionNode", display_name="Tripo: Convert model", category="api node/3d/Tripo", + description="Convert a 3D model to different formats with optional post-processing using Tripo's API.", + short_description="Convert a 3D model to different formats.", inputs=[ IO.Custom("MODEL_TASK_ID,RIG_TASK_ID,RETARGET_TASK_ID").Input("original_model_task_id"), IO.Combo.Input("format", options=["GLTF", "USDZ", "FBX", "OBJ", "STL", "3MF"]), diff --git a/comfy_api_nodes/nodes_veo2.py b/comfy_api_nodes/nodes_veo2.py index 2a202fc3b..bc63fb1e7 100644 --- a/comfy_api_nodes/nodes_veo2.py +++ b/comfy_api_nodes/nodes_veo2.py @@ -46,6 +46,7 @@ class VeoVideoGenerationNode(IO.ComfyNode): display_name="Google Veo 2 Video Generation", category="api node/video/Veo", description="Generates videos from text prompts using Google's Veo 2 API", + 
short_description=None, inputs=[ IO.String.Input( "prompt", @@ -264,6 +265,7 @@ class Veo3VideoGenerationNode(VeoVideoGenerationNode): display_name="Google Veo 3 Video Generation", category="api node/video/Veo", description="Generates videos from text prompts using Google's Veo 3 API", + short_description=None, inputs=[ IO.String.Input( "prompt", @@ -377,6 +379,7 @@ class Veo3FirstLastFrameNode(IO.ComfyNode): display_name="Google Veo 3 First-Last-Frame to Video", category="api node/video/Veo", description="Generate video using prompt and first and last frames.", + short_description=None, inputs=[ IO.String.Input( "prompt", diff --git a/comfy_api_nodes/nodes_vidu.py b/comfy_api_nodes/nodes_vidu.py index 80de14dfe..d21fd2cd3 100644 --- a/comfy_api_nodes/nodes_vidu.py +++ b/comfy_api_nodes/nodes_vidu.py @@ -72,6 +72,7 @@ class ViduTextToVideoNode(IO.ComfyNode): display_name="Vidu Text To Video Generation", category="api node/video/Vidu", description="Generate video from a text prompt", + short_description=None, inputs=[ IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"), IO.String.Input( @@ -168,6 +169,7 @@ class ViduImageToVideoNode(IO.ComfyNode): display_name="Vidu Image To Video Generation", category="api node/video/Vidu", description="Generate video from image and optional prompt", + short_description=None, inputs=[ IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"), IO.Image.Input( @@ -270,6 +272,7 @@ class ViduReferenceVideoNode(IO.ComfyNode): display_name="Vidu Reference To Video Generation", category="api node/video/Vidu", description="Generate video from multiple images and a prompt", + short_description=None, inputs=[ IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"), IO.Image.Input( @@ -383,6 +386,7 @@ class ViduStartEndToVideoNode(IO.ComfyNode): display_name="Vidu Start End To Video Generation", category="api node/video/Vidu", description="Generate a video from start and end frames and a prompt", + 
short_description=None, inputs=[ IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"), IO.Image.Input( @@ -485,6 +489,7 @@ class Vidu2TextToVideoNode(IO.ComfyNode): display_name="Vidu2 Text-to-Video Generation", category="api node/video/Vidu", description="Generate video from a text prompt", + short_description=None, inputs=[ IO.Combo.Input("model", options=["viduq2"]), IO.String.Input( @@ -576,6 +581,7 @@ class Vidu2ImageToVideoNode(IO.ComfyNode): display_name="Vidu2 Image-to-Video Generation", category="api node/video/Vidu", description="Generate a video from an image and an optional prompt.", + short_description=None, inputs=[ IO.Combo.Input("model", options=["viduq2-pro-fast", "viduq2-pro", "viduq2-turbo"]), IO.Image.Input( @@ -704,6 +710,7 @@ class Vidu2ReferenceVideoNode(IO.ComfyNode): display_name="Vidu2 Reference-to-Video Generation", category="api node/video/Vidu", description="Generate a video from multiple reference images and a prompt.", + short_description=None, inputs=[ IO.Combo.Input("model", options=["viduq2"]), IO.Autogrow.Input( @@ -837,6 +844,7 @@ class Vidu2StartEndToVideoNode(IO.ComfyNode): display_name="Vidu2 Start/End Frame-to-Video Generation", category="api node/video/Vidu", description="Generate a video from a start frame, an end frame, and a prompt.", + short_description="Generate video from start frame, end frame, and prompt.", inputs=[ IO.Combo.Input("model", options=["viduq2-pro-fast", "viduq2-pro", "viduq2-turbo"]), IO.Image.Input("first_frame"), @@ -956,6 +964,7 @@ class ViduExtendVideoNode(IO.ComfyNode): display_name="Vidu Video Extension", category="api node/video/Vidu", description="Extend an existing video by generating additional frames.", + short_description=None, inputs=[ IO.DynamicCombo.Input( "model", @@ -1126,6 +1135,7 @@ class ViduMultiFrameVideoNode(IO.ComfyNode): display_name="Vidu Multi-Frame Video Generation", category="api node/video/Vidu", description="Generate a video with multiple keyframe 
transitions.", + short_description=None, inputs=[ IO.Combo.Input("model", options=["viduq2-pro", "viduq2-turbo"]), IO.Image.Input( @@ -1272,6 +1282,7 @@ class Vidu3TextToVideoNode(IO.ComfyNode): display_name="Vidu Q3 Text-to-Video Generation", category="api node/video/Vidu", description="Generate video from a text prompt.", + short_description=None, inputs=[ IO.DynamicCombo.Input( "model", @@ -1380,6 +1391,7 @@ class Vidu3ImageToVideoNode(IO.ComfyNode): display_name="Vidu Q3 Image-to-Video Generation", category="api node/video/Vidu", description="Generate a video from an image and an optional prompt.", + short_description=None, inputs=[ IO.DynamicCombo.Input( "model", diff --git a/comfy_api_nodes/nodes_wan.py b/comfy_api_nodes/nodes_wan.py index a1355d4f1..7767f018c 100644 --- a/comfy_api_nodes/nodes_wan.py +++ b/comfy_api_nodes/nodes_wan.py @@ -175,6 +175,7 @@ class WanTextToImageApi(IO.ComfyNode): display_name="Wan Text to Image", category="api node/image/Wan", description="Generates an image based on a text prompt.", + short_description=None, inputs=[ IO.Combo.Input( "model", @@ -298,6 +299,7 @@ class WanImageToImageApi(IO.ComfyNode): category="api node/image/Wan", description="Generates an image from one or two input images and a text prompt. 
" "The output image is currently fixed at 1.6 MP, and its aspect ratio matches the input image(s).", + short_description="Generate an image from input images and a text prompt.", inputs=[ IO.Combo.Input( "model", @@ -424,6 +426,7 @@ class WanTextToVideoApi(IO.ComfyNode): display_name="Wan Text to Video", category="api node/video/Wan", description="Generates a video based on a text prompt.", + short_description=None, inputs=[ IO.Combo.Input( "model", @@ -603,6 +606,7 @@ class WanImageToVideoApi(IO.ComfyNode): display_name="Wan Image to Video", category="api node/video/Wan", description="Generates a video from the first frame and a text prompt.", + short_description=None, inputs=[ IO.Combo.Input( "model", @@ -779,6 +783,7 @@ class WanReferenceVideoApi(IO.ComfyNode): category="api node/video/Wan", description="Use the character and voice from input videos, combined with a prompt, " "to generate a new video that maintains character consistency.", + short_description="Generate character-consistent video from reference videos and prompt.", inputs=[ IO.Combo.Input("model", options=["wan2.6-r2v"]), IO.String.Input( diff --git a/comfy_api_nodes/nodes_wavespeed.py b/comfy_api_nodes/nodes_wavespeed.py index c59fafd3b..1fcf164f9 100644 --- a/comfy_api_nodes/nodes_wavespeed.py +++ b/comfy_api_nodes/nodes_wavespeed.py @@ -30,6 +30,7 @@ class WavespeedFlashVSRNode(IO.ComfyNode): category="api node/video/WaveSpeed", description="Fast, high-quality video upscaler that " "boosts resolution and restores clarity for low-resolution or blurry footage.", + short_description="Fast video upscaler that boosts resolution and restores clarity.", inputs=[ IO.Video.Input("video"), IO.Combo.Input("target_resolution", options=["720p", "1080p", "2K", "4K"]), @@ -101,6 +102,7 @@ class WavespeedImageUpscaleNode(IO.ComfyNode): display_name="WaveSpeed Image Upscale", category="api node/image/WaveSpeed", description="Boost image resolution and quality, upscaling photos to 4K or 8K for sharp, detailed 
results.", + short_description="Upscale images to 4K or 8K with enhanced quality.", inputs=[ IO.Combo.Input("model", options=["SeedVR2", "Ultimate"]), IO.Image.Input("image"), diff --git a/comfy_extras/nodes_ace.py b/comfy_extras/nodes_ace.py index 9cf84ab4d..1db36f1e1 100644 --- a/comfy_extras/nodes_ace.py +++ b/comfy_extras/nodes_ace.py @@ -12,6 +12,8 @@ class TextEncodeAceStepAudio(io.ComfyNode): return io.Schema( node_id="TextEncodeAceStepAudio", category="conditioning", + description="Encodes tags and lyrics into conditioning for ACE-Step 1.0 audio generation with adjustable lyrics strength.", + short_description="Encodes tags and lyrics for ACE-Step 1.0 audio.", inputs=[ io.Clip.Input("clip"), io.String.Input("tags", multiline=True, dynamic_prompts=True), @@ -34,6 +36,8 @@ class TextEncodeAceStepAudio15(io.ComfyNode): return io.Schema( node_id="TextEncodeAceStepAudio1.5", category="conditioning", + description="Encodes tags, lyrics, and music parameters like BPM, key, and language into conditioning for ACE-Step 1.5 audio generation.", + short_description="Encodes text and music parameters for ACE-Step 1.5.", inputs=[ io.Clip.Input("clip"), io.String.Input("tags", multiline=True, dynamic_prompts=True), @@ -68,6 +72,8 @@ class EmptyAceStepLatentAudio(io.ComfyNode): node_id="EmptyAceStepLatentAudio", display_name="Empty Ace Step 1.0 Latent Audio", category="latent/audio", + description="Creates an empty latent audio tensor for ACE-Step 1.0 with a specified duration and batch size.", + short_description="Creates an empty ACE-Step 1.0 audio latent.", inputs=[ io.Float.Input("seconds", default=120.0, min=1.0, max=1000.0, step=0.1), io.Int.Input( @@ -91,6 +97,8 @@ class EmptyAceStep15LatentAudio(io.ComfyNode): node_id="EmptyAceStep1.5LatentAudio", display_name="Empty Ace Step 1.5 Latent Audio", category="latent/audio", + description="Creates an empty latent audio tensor for ACE-Step 1.5 with a specified duration and batch size.", + short_description="Creates an 
empty ACE-Step 1.5 audio latent.", inputs=[ io.Float.Input("seconds", default=120.0, min=1.0, max=1000.0, step=0.01), io.Int.Input( @@ -115,6 +123,7 @@ class ReferenceAudio(io.ComfyNode): category="advanced/conditioning/audio", is_experimental=True, description="This node sets the reference audio for ace step 1.5", + short_description=None, inputs=[ io.Conditioning.Input("conditioning"), io.Latent.Input("latent", optional=True), diff --git a/comfy_extras/nodes_advanced_samplers.py b/comfy_extras/nodes_advanced_samplers.py index 5532ffe6a..4a1f81aec 100644 --- a/comfy_extras/nodes_advanced_samplers.py +++ b/comfy_extras/nodes_advanced_samplers.py @@ -46,6 +46,8 @@ class SamplerLCMUpscale(io.ComfyNode): return io.Schema( node_id="SamplerLCMUpscale", category="sampling/custom_sampling/samplers", + description="Sampler that progressively upscales the latent during LCM sampling steps, combining denoising with gradual resolution increase.", + short_description="LCM sampler with progressive latent upscaling.", inputs=[ io.Float.Input("scale_ratio", default=1.0, min=0.1, max=20.0, step=0.01), io.Int.Input("scale_steps", default=-1, min=-1, max=1000, step=1), @@ -93,6 +95,8 @@ class SamplerEulerCFGpp(io.ComfyNode): node_id="SamplerEulerCFGpp", display_name="SamplerEulerCFG++", category="_for_testing", # "sampling/custom_sampling/samplers" + description="Euler sampler variant using the CFG++ formulation, which modifies the denoising direction using unconditional predictions for improved guidance.", + short_description="Euler sampler using CFG++ guidance formulation.", inputs=[ io.Combo.Input("version", options=["regular", "alternative"]), ], diff --git a/comfy_extras/nodes_align_your_steps.py b/comfy_extras/nodes_align_your_steps.py index 4fc511d2c..7d83cb6bc 100644 --- a/comfy_extras/nodes_align_your_steps.py +++ b/comfy_extras/nodes_align_your_steps.py @@ -30,6 +30,8 @@ class AlignYourStepsScheduler(io.ComfyNode): node_id="AlignYourStepsScheduler", search_aliases=["AYS 
scheduler"], category="sampling/custom_sampling/schedulers", + description="Generates an optimized noise schedule using the Align Your Steps method with log-linear interpolation.", + short_description="Optimized noise schedule using Align Your Steps.", inputs=[ io.Combo.Input("model_type", options=["SD1", "SDXL", "SVD"]), io.Int.Input("steps", default=10, min=1, max=10000), diff --git a/comfy_extras/nodes_apg.py b/comfy_extras/nodes_apg.py index b9df2dcc9..cc353997e 100644 --- a/comfy_extras/nodes_apg.py +++ b/comfy_extras/nodes_apg.py @@ -17,6 +17,8 @@ class APG(io.ComfyNode): node_id="APG", display_name="Adaptive Projected Guidance", category="sampling/custom_sampling", + description="Applies Adaptive Projected Guidance to a model, decomposing CFG guidance into parallel and orthogonal components with optional momentum and norm thresholding for improved sampling quality.", + short_description="Decomposes CFG guidance with projection and normalization.", inputs=[ io.Model.Input("model"), io.Float.Input( diff --git a/comfy_extras/nodes_attention_multiply.py b/comfy_extras/nodes_attention_multiply.py index 67c4e2ed0..a99b18174 100644 --- a/comfy_extras/nodes_attention_multiply.py +++ b/comfy_extras/nodes_attention_multiply.py @@ -26,6 +26,8 @@ class UNetSelfAttentionMultiply(io.ComfyNode): return io.Schema( node_id="UNetSelfAttentionMultiply", category="_for_testing/attention_experiments", + description="Scales the query, key, value, and output weights of UNet self-attention layers by specified multipliers to experiment with attention behavior.", + short_description="Scale UNet self-attention Q/K/V/Out weights.", inputs=[ io.Model.Input("model"), io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01), @@ -49,6 +51,8 @@ class UNetCrossAttentionMultiply(io.ComfyNode): return io.Schema( node_id="UNetCrossAttentionMultiply", category="_for_testing/attention_experiments", + description="Scales the query, key, value, and output weights of UNet cross-attention 
layers by specified multipliers to experiment with text-to-image attention.", + short_description="Scale UNet cross-attention Q/K/V/Out weights.", inputs=[ io.Model.Input("model"), io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01), @@ -73,6 +77,8 @@ class CLIPAttentionMultiply(io.ComfyNode): node_id="CLIPAttentionMultiply", search_aliases=["clip attention scale", "text encoder attention"], category="_for_testing/attention_experiments", + description="Scales the query, key, value, and output projection weights of CLIP text encoder self-attention layers by specified multipliers.", + short_description="Scale CLIP text encoder attention weights.", inputs=[ io.Clip.Input("clip"), io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01), @@ -107,6 +113,8 @@ class UNetTemporalAttentionMultiply(io.ComfyNode): return io.Schema( node_id="UNetTemporalAttentionMultiply", category="_for_testing/attention_experiments", + description="Scales the output weights of UNet temporal and structural attention layers independently, allowing fine-grained control over video model attention behavior.", + short_description="Scale UNet temporal and structural attention weights.", inputs=[ io.Model.Input("model"), io.Float.Input("self_structural", default=1.0, min=0.0, max=10.0, step=0.01), diff --git a/comfy_extras/nodes_audio.py b/comfy_extras/nodes_audio.py index b63dd8e97..e1be34ef5 100644 --- a/comfy_extras/nodes_audio.py +++ b/comfy_extras/nodes_audio.py @@ -19,6 +19,8 @@ class EmptyLatentAudio(IO.ComfyNode): node_id="EmptyLatentAudio", display_name="Empty Latent Audio", category="latent/audio", + description="Creates an empty latent audio tensor with a specified duration and batch size for Stable Audio generation.", + short_description="Creates an empty latent audio tensor.", inputs=[ IO.Float.Input("seconds", default=47.6, min=1.0, max=1000.0, step=0.1), IO.Int.Input( @@ -43,6 +45,8 @@ class ConditioningStableAudio(IO.ComfyNode): return IO.Schema( 
node_id="ConditioningStableAudio", category="conditioning", + description="Sets the start time and total duration on Stable Audio positive and negative conditioning.", + short_description="Sets timing parameters on Stable Audio conditioning.", inputs=[ IO.Conditioning.Input("positive"), IO.Conditioning.Input("negative"), @@ -72,6 +76,8 @@ class VAEEncodeAudio(IO.ComfyNode): search_aliases=["audio to latent"], display_name="VAE Encode Audio", category="latent/audio", + description="Encodes an audio waveform into a latent representation using a VAE, resampling if needed.", + short_description="Encodes audio into latent via VAE.", inputs=[ IO.Audio.Input("audio"), IO.Vae.Input("vae"), @@ -115,6 +121,8 @@ class VAEDecodeAudio(IO.ComfyNode): search_aliases=["latent to audio"], display_name="VAE Decode Audio", category="latent/audio", + description="Decodes a latent representation back into an audio waveform using a VAE.", + short_description="Decodes latent into audio via VAE.", inputs=[ IO.Latent.Input("samples"), IO.Vae.Input("vae"), @@ -137,6 +145,8 @@ class VAEDecodeAudioTiled(IO.ComfyNode): search_aliases=["latent to audio"], display_name="VAE Decode Audio (Tiled)", category="latent/audio", + description="Decodes a latent representation into audio using tiled VAE decoding to reduce memory usage.", + short_description="Tiled VAE decoding of latent into audio.", inputs=[ IO.Latent.Input("samples"), IO.Vae.Input("vae"), @@ -159,6 +169,8 @@ class SaveAudio(IO.ComfyNode): search_aliases=["export flac"], display_name="Save Audio (FLAC)", category="audio", + description="Saves audio to disk in FLAC format with a configurable filename prefix.", + short_description="Saves audio to disk in FLAC format.", inputs=[ IO.Audio.Input("audio"), IO.String.Input("filename_prefix", default="audio/ComfyUI"), @@ -184,6 +196,8 @@ class SaveAudioMP3(IO.ComfyNode): search_aliases=["export mp3"], display_name="Save Audio (MP3)", category="audio", + description="Saves audio to disk in MP3 
format with configurable quality and filename prefix.", + short_description="Saves audio to disk in MP3 format.", inputs=[ IO.Audio.Input("audio"), IO.String.Input("filename_prefix", default="audio/ComfyUI"), @@ -212,6 +226,8 @@ class SaveAudioOpus(IO.ComfyNode): search_aliases=["export opus"], display_name="Save Audio (Opus)", category="audio", + description="Saves audio to disk in Opus format with configurable quality and filename prefix.", + short_description="Saves audio to disk in Opus format.", inputs=[ IO.Audio.Input("audio"), IO.String.Input("filename_prefix", default="audio/ComfyUI"), @@ -240,6 +256,8 @@ class PreviewAudio(IO.ComfyNode): search_aliases=["play audio"], display_name="Preview Audio", category="audio", + description="Plays back audio in the UI for previewing.", + short_description=None, inputs=[ IO.Audio.Input("audio"), ], @@ -300,6 +318,8 @@ class LoadAudio(IO.ComfyNode): search_aliases=["import audio", "open audio", "audio file"], display_name="Load Audio", category="audio", + description="Loads an audio or video file from disk and outputs the audio as a single Audio output.", + short_description="Loads an audio file from disk.", inputs=[ IO.Combo.Input("audio", upload=IO.UploadType.audio, options=sorted(files)), ], @@ -338,6 +358,7 @@ class RecordAudio(IO.ComfyNode): search_aliases=["microphone input", "audio capture", "voice input"], display_name="Record Audio", category="audio", + description="Records audio from a microphone input and outputs the captured audio.", inputs=[ IO.Custom("AUDIO_RECORD").Input("audio"), ], @@ -363,6 +384,7 @@ class TrimAudioDuration(IO.ComfyNode): search_aliases=["cut audio", "audio clip", "shorten audio"], display_name="Trim Audio Duration", description="Trim audio tensor into chosen time range.", + short_description=None, category="audio", inputs=[ IO.Audio.Input("audio"), @@ -416,6 +438,7 @@ class SplitAudioChannels(IO.ComfyNode): search_aliases=["stereo to mono"], display_name="Split Audio Channels", 
description="Separates the audio into left and right channels.", + short_description=None, category="audio", inputs=[ IO.Audio.Input("audio"), @@ -448,6 +471,7 @@ class JoinAudioChannels(IO.ComfyNode): node_id="JoinAudioChannels", display_name="Join Audio Channels", description="Joins left and right mono audio channels into a stereo audio.", + short_description=None, category="audio", inputs=[ IO.Audio.Input("audio_left"), @@ -517,6 +541,7 @@ class AudioConcat(IO.ComfyNode): search_aliases=["join audio", "combine audio", "append audio"], display_name="Audio Concat", description="Concatenates the audio1 to audio2 in the specified direction.", + short_description=None, category="audio", inputs=[ IO.Audio.Input("audio1"), @@ -565,6 +590,7 @@ class AudioMerge(IO.ComfyNode): search_aliases=["mix audio", "overlay audio", "layer audio"], display_name="Audio Merge", description="Combine two audio tracks by overlaying their waveforms.", + short_description=None, category="audio", inputs=[ IO.Audio.Input("audio1"), @@ -626,6 +652,8 @@ class AudioAdjustVolume(IO.ComfyNode): search_aliases=["audio gain", "loudness", "audio level"], display_name="Audio Adjust Volume", category="audio", + description="Adjusts audio volume by a specified number of decibels.", + short_description=None, inputs=[ IO.Audio.Input("audio"), IO.Int.Input( @@ -662,6 +690,8 @@ class EmptyAudio(IO.ComfyNode): search_aliases=["blank audio"], display_name="Empty Audio", category="audio", + description="Creates a silent audio clip with configurable duration, sample rate, and channel count.", + short_description="Creates a silent audio clip.", inputs=[ IO.Float.Input( "duration", diff --git a/comfy_extras/nodes_audio_encoder.py b/comfy_extras/nodes_audio_encoder.py index 13aacd41a..0a78c4356 100644 --- a/comfy_extras/nodes_audio_encoder.py +++ b/comfy_extras/nodes_audio_encoder.py @@ -11,6 +11,8 @@ class AudioEncoderLoader(io.ComfyNode): return io.Schema( node_id="AudioEncoderLoader", category="loaders", + 
description="Loads an audio encoder model from a checkpoint file for encoding audio into embeddings.", + short_description="Loads an audio encoder model from a checkpoint.", inputs=[ io.Combo.Input( "audio_encoder_name", @@ -36,6 +38,8 @@ class AudioEncoderEncode(io.ComfyNode): return io.Schema( node_id="AudioEncoderEncode", category="conditioning", + description="Encodes audio into embeddings using a loaded audio encoder model.", + short_description=None, inputs=[ io.AudioEncoder.Input("audio_encoder"), io.Audio.Input("audio"), diff --git a/comfy_extras/nodes_camera_trajectory.py b/comfy_extras/nodes_camera_trajectory.py index eb7ef363c..22086a42f 100644 --- a/comfy_extras/nodes_camera_trajectory.py +++ b/comfy_extras/nodes_camera_trajectory.py @@ -154,6 +154,8 @@ class WanCameraEmbedding(io.ComfyNode): return io.Schema( node_id="WanCameraEmbedding", category="camera", + description="Generates Plucker camera embeddings from a selected camera motion trajectory for Wan video generation.", + short_description="Generates camera embeddings for Wan video generation.", inputs=[ io.Combo.Input( "camera_pose", diff --git a/comfy_extras/nodes_canny.py b/comfy_extras/nodes_canny.py index 6e0fadca5..47aee7a57 100644 --- a/comfy_extras/nodes_canny.py +++ b/comfy_extras/nodes_canny.py @@ -12,6 +12,8 @@ class Canny(io.ComfyNode): node_id="Canny", search_aliases=["edge detection", "outline", "contour detection", "line art"], category="image/preprocessors", + description="Detects edges in an image using the Canny edge detection algorithm with configurable low and high thresholds.", + short_description="Canny edge detection on images.", inputs=[ io.Image.Input("image"), io.Float.Input("low_threshold", default=0.4, min=0.01, max=0.99, step=0.01), diff --git a/comfy_extras/nodes_cfg.py b/comfy_extras/nodes_cfg.py index 4ebb4b51e..0d319b6f6 100644 --- a/comfy_extras/nodes_cfg.py +++ b/comfy_extras/nodes_cfg.py @@ -27,6 +27,8 @@ class CFGZeroStar(io.ComfyNode): return io.Schema( 
node_id="CFGZeroStar", category="advanced/guidance", + description="Applies CFG-Zero* post-CFG correction that computes an optimal scaling factor between conditional and unconditional predictions to reduce CFG artifacts.", + short_description="CFG-Zero* guidance correction to reduce artifacts.", inputs=[ io.Model.Input("model"), ], @@ -54,6 +56,8 @@ class CFGNorm(io.ComfyNode): return io.Schema( node_id="CFGNorm", category="advanced/guidance", + description="Constrains the CFG-guided prediction norm to not exceed the conditional prediction norm, helping to prevent oversaturation at high CFG scales.", + short_description="Constrain CFG output norm to conditional prediction norm.", inputs=[ io.Model.Input("model"), io.Float.Input("strength", default=1.0, min=0.0, max=100.0, step=0.01), diff --git a/comfy_extras/nodes_chroma_radiance.py b/comfy_extras/nodes_chroma_radiance.py index 381989818..7b4430fd1 100644 --- a/comfy_extras/nodes_chroma_radiance.py +++ b/comfy_extras/nodes_chroma_radiance.py @@ -14,6 +14,8 @@ class EmptyChromaRadianceLatentImage(io.ComfyNode): return io.Schema( node_id="EmptyChromaRadianceLatentImage", category="latent/chroma_radiance", + description="Creates an empty Chroma Radiance latent image tensor with the specified width, height, and batch size.", + short_description="Creates an empty Chroma Radiance latent image.", inputs=[ io.Int.Input(id="width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16), io.Int.Input(id="height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16), @@ -35,6 +37,7 @@ class ChromaRadianceOptions(io.ComfyNode): node_id="ChromaRadianceOptions", category="model_patches/chroma_radiance", description="Allows setting advanced options for the Chroma Radiance model.", + short_description=None, inputs=[ io.Model.Input(id="model"), io.Boolean.Input( diff --git a/comfy_extras/nodes_clip_sdxl.py b/comfy_extras/nodes_clip_sdxl.py index 520ff0e3c..7c1bbef7d 100644 --- a/comfy_extras/nodes_clip_sdxl.py +++ 
b/comfy_extras/nodes_clip_sdxl.py @@ -10,6 +10,8 @@ class CLIPTextEncodeSDXLRefiner(io.ComfyNode): return io.Schema( node_id="CLIPTextEncodeSDXLRefiner", category="advanced/conditioning", + description="Encodes text for SDXL refiner models with aesthetic score and resolution conditioning parameters.", + short_description="Encodes text for SDXL refiner models.", inputs=[ io.Float.Input("ascore", default=6.0, min=0.0, max=1000.0, step=0.01), io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION), @@ -31,6 +33,8 @@ class CLIPTextEncodeSDXL(io.ComfyNode): return io.Schema( node_id="CLIPTextEncodeSDXL", category="advanced/conditioning", + description="Encodes separate G and L text prompts for SDXL models with resolution and crop conditioning parameters.", + short_description="Encodes dual text prompts for SDXL models.", inputs=[ io.Clip.Input("clip"), io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION), diff --git a/comfy_extras/nodes_color.py b/comfy_extras/nodes_color.py index 80ba121cd..16aa37d5a 100644 --- a/comfy_extras/nodes_color.py +++ b/comfy_extras/nodes_color.py @@ -10,6 +10,7 @@ class ColorToRGBInt(io.ComfyNode): display_name="Color to RGB Int", category="utils", description="Convert a color to a RGB integer value.", + short_description=None, inputs=[ io.Color.Input("color"), ], diff --git a/comfy_extras/nodes_compositing.py b/comfy_extras/nodes_compositing.py index 3bc9fccb3..1b3a81fc9 100644 --- a/comfy_extras/nodes_compositing.py +++ b/comfy_extras/nodes_compositing.py @@ -112,6 +112,8 @@ class PorterDuffImageComposite(io.ComfyNode): search_aliases=["alpha composite", "blend modes", "layer blend", "transparency blend"], display_name="Porter-Duff Image Composite", category="mask/compositing", + description="Composites two images with alpha masks using Porter-Duff blend modes.", + short_description="Composite two images using Porter-Duff blend modes.", inputs=[ io.Image.Input("source"), io.Mask.Input("source_alpha"), @@ -169,6 +171,8 @@ class SplitImageWithAlpha(io.ComfyNode):
search_aliases=["extract alpha", "separate transparency", "remove alpha"], display_name="Split Image with Alpha", category="mask/compositing", + description="Separates an RGBA image into its RGB color channels and an alpha transparency mask.", + short_description="Split RGBA image into RGB and alpha mask.", inputs=[ io.Image.Input("image"), ], @@ -193,6 +197,8 @@ class JoinImageWithAlpha(io.ComfyNode): search_aliases=["add transparency", "apply alpha", "composite alpha", "RGBA"], display_name="Join Image with Alpha", category="mask/compositing", + description="Combines an RGB image with an alpha mask to produce an RGBA image with transparency.", + short_description="Combine RGB image and alpha into RGBA.", inputs=[ io.Image.Input("image"), io.Mask.Input("alpha"), diff --git a/comfy_extras/nodes_cond.py b/comfy_extras/nodes_cond.py index 8b06e3de9..8055a15c3 100644 --- a/comfy_extras/nodes_cond.py +++ b/comfy_extras/nodes_cond.py @@ -9,6 +9,8 @@ class CLIPTextEncodeControlnet(io.ComfyNode): return io.Schema( node_id="CLIPTextEncodeControlnet", category="_for_testing/conditioning", + description="Encodes text with CLIP and attaches the result as cross-attention controlnet conditioning to existing conditioning data.", + short_description="CLIP text encode for controlnet cross-attention conditioning.", inputs=[ io.Clip.Input("clip"), io.Conditioning.Input("conditioning"), @@ -36,6 +38,8 @@ class T5TokenizerOptions(io.ComfyNode): return io.Schema( node_id="T5TokenizerOptions", category="_for_testing/conditioning", + description="Configures minimum padding and length options for T5-family tokenizers used in CLIP text encoding.", + short_description="Set T5 tokenizer padding and length options.", inputs=[ io.Clip.Input("clip"), io.Int.Input("min_padding", default=0, min=0, max=10000, step=1), diff --git a/comfy_extras/nodes_context_windows.py b/comfy_extras/nodes_context_windows.py index 3799a9004..0d9949ca8 100644 --- a/comfy_extras/nodes_context_windows.py +++ 
b/comfy_extras/nodes_context_windows.py @@ -12,6 +12,7 @@ class ContextWindowsManualNode(io.ComfyNode): display_name="Context Windows (Manual)", category="context", description="Manually set context windows.", + short_description=None, inputs=[ io.Model.Input("model", tooltip="The model to apply context windows to during sampling."), io.Int.Input("context_length", min=1, default=16, tooltip="The length of the context window."), @@ -65,6 +66,7 @@ class WanContextWindowsManualNode(ContextWindowsManualNode): schema.node_id = "WanContextWindowsManual" schema.display_name = "WAN Context Windows (Manual)" schema.description = "Manually set context windows for WAN-like models (dim=2)." + schema.short_description = None schema.inputs = [ io.Model.Input("model", tooltip="The model to apply context windows to during sampling."), io.Int.Input("context_length", min=1, max=nodes.MAX_RESOLUTION, step=4, default=81, tooltip="The length of the context window."), diff --git a/comfy_extras/nodes_controlnet.py b/comfy_extras/nodes_controlnet.py index 0c1d7f0d4..aafed6700 100644 --- a/comfy_extras/nodes_controlnet.py +++ b/comfy_extras/nodes_controlnet.py @@ -10,6 +10,8 @@ class SetUnionControlNetType(io.ComfyNode): return io.Schema( node_id="SetUnionControlNetType", category="conditioning/controlnet", + description="Sets the control type for a Union ControlNet, selecting which conditioning mode to use.", + short_description="Select control mode for Union ControlNet.", inputs=[ io.ControlNet.Input("control_net"), io.Combo.Input("type", options=["auto"] + list(UNION_CONTROLNET_TYPES.keys())), @@ -40,6 +42,8 @@ class ControlNetInpaintingAliMamaApply(io.ComfyNode): node_id="ControlNetInpaintingAliMamaApply", search_aliases=["masked controlnet"], category="conditioning/controlnet", + description="Applies an AliMama inpainting ControlNet to positive and negative conditioning using an image and mask with VAE encoding.", + short_description="Applies AliMama inpainting ControlNet with mask.", 
inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), diff --git a/comfy_extras/nodes_cosmos.py b/comfy_extras/nodes_cosmos.py index 7dd129d19..9eec6accb 100644 --- a/comfy_extras/nodes_cosmos.py +++ b/comfy_extras/nodes_cosmos.py @@ -14,6 +14,7 @@ class EmptyCosmosLatentVideo(io.ComfyNode): return io.Schema( node_id="EmptyCosmosLatentVideo", category="latent/video", + description="Creates an empty latent tensor sized for Cosmos video generation.", inputs=[ io.Int.Input("width", default=1280, min=16, max=nodes.MAX_RESOLUTION, step=16), io.Int.Input("height", default=704, min=16, max=nodes.MAX_RESOLUTION, step=16), @@ -46,6 +47,8 @@ class CosmosImageToVideoLatent(io.ComfyNode): return io.Schema( node_id="CosmosImageToVideoLatent", category="conditioning/inpaint", + description="Creates an inpainting video latent for Cosmos by encoding optional start and end images with a noise mask.", + short_description="Cosmos inpainting video latent from start/end images.", inputs=[ io.Vae.Input("vae"), io.Int.Input("width", default=1280, min=16, max=nodes.MAX_RESOLUTION, step=16), @@ -89,6 +92,8 @@ class CosmosPredict2ImageToVideoLatent(io.ComfyNode): return io.Schema( node_id="CosmosPredict2ImageToVideoLatent", category="conditioning/inpaint", + description="Creates an inpainting video latent for Cosmos Predict2 by encoding optional start and end images with Wan latent format processing.", + short_description="Cosmos Predict2 inpainting video latent from images.", inputs=[ io.Vae.Input("vae"), io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16), diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index 61a234634..4711d7a57 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -18,6 +18,8 @@ class BasicScheduler(io.ComfyNode): return io.Schema( node_id="BasicScheduler", category="sampling/custom_sampling/schedulers", + description="Generates a sigma 
schedule from a model using a selected scheduler algorithm, step count, and denoise strength.", + short_description="Generate sigma schedule from model and scheduler.", inputs=[ io.Model.Input("model"), io.Combo.Input("scheduler", options=comfy.samplers.SCHEDULER_NAMES), @@ -48,6 +50,8 @@ class KarrasScheduler(io.ComfyNode): return io.Schema( node_id="KarrasScheduler", category="sampling/custom_sampling/schedulers", + description="Generates a sigma schedule using the Karras noise schedule with configurable sigma range and rho parameter.", + short_description="Generate sigmas using Karras noise schedule.", inputs=[ io.Int.Input("steps", default=20, min=1, max=10000), io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False), @@ -70,6 +74,8 @@ class ExponentialScheduler(io.ComfyNode): return io.Schema( node_id="ExponentialScheduler", category="sampling/custom_sampling/schedulers", + description="Generates a sigma schedule using an exponential noise schedule with configurable sigma range.", + short_description="Generate sigmas using exponential noise schedule.", inputs=[ io.Int.Input("steps", default=20, min=1, max=10000), io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False), @@ -91,6 +97,8 @@ class PolyexponentialScheduler(io.ComfyNode): return io.Schema( node_id="PolyexponentialScheduler", category="sampling/custom_sampling/schedulers", + description="Generates a sigma schedule using a polyexponential noise schedule with configurable sigma range and rho parameter.", + short_description="Generate sigmas using polyexponential noise schedule.", inputs=[ io.Int.Input("steps", default=20, min=1, max=10000), io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False), @@ -113,6 +121,8 @@ class LaplaceScheduler(io.ComfyNode): return io.Schema( node_id="LaplaceScheduler", category="sampling/custom_sampling/schedulers", + description="Generates a sigma schedule using a 
Laplace distribution-based noise schedule with configurable mu and beta parameters.", + short_description="Generate sigmas using Laplace distribution schedule.", inputs=[ io.Int.Input("steps", default=20, min=1, max=10000), io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False), @@ -137,6 +147,8 @@ class SDTurboScheduler(io.ComfyNode): return io.Schema( node_id="SDTurboScheduler", category="sampling/custom_sampling/schedulers", + description="Generates a sigma schedule optimized for SD Turbo models with very few steps and adjustable denoise strength.", + short_description="Generate sigma schedule for SD Turbo models.", inputs=[ io.Model.Input("model"), io.Int.Input("steps", default=1, min=1, max=10), @@ -161,6 +173,8 @@ class BetaSamplingScheduler(io.ComfyNode): return io.Schema( node_id="BetaSamplingScheduler", category="sampling/custom_sampling/schedulers", + description="Generates a sigma schedule using a beta distribution with configurable alpha and beta shape parameters.", + short_description="Generate sigmas using beta distribution schedule.", inputs=[ io.Model.Input("model"), io.Int.Input("steps", default=20, min=1, max=10000), @@ -183,6 +197,8 @@ class VPScheduler(io.ComfyNode): return io.Schema( node_id="VPScheduler", category="sampling/custom_sampling/schedulers", + description="Generates a sigma schedule using the Variance Preserving (VP) SDE formulation with configurable beta and epsilon parameters.", + short_description="Generate sigmas using VP SDE schedule.", inputs=[ io.Int.Input("steps", default=20, min=1, max=10000), io.Float.Input("beta_d", default=19.9, min=0.0, max=5000.0, step=0.01, round=False), #TODO: fix default values @@ -205,6 +221,8 @@ class SplitSigmas(io.ComfyNode): return io.Schema( node_id="SplitSigmas", category="sampling/custom_sampling/sigmas", + description="Splits a sigma sequence into high and low portions at a specified step index for multi-pass sampling.", + short_description="Split sigmas 
into high and low at a step.", inputs=[ io.Sigmas.Input("sigmas"), io.Int.Input("step", default=0, min=0, max=10000), @@ -229,6 +247,8 @@ class SplitSigmasDenoise(io.ComfyNode): return io.Schema( node_id="SplitSigmasDenoise", category="sampling/custom_sampling/sigmas", + description="Splits a sigma sequence into high and low portions based on a denoise ratio for multi-pass sampling workflows.", + short_description="Split sigmas by denoise ratio.", inputs=[ io.Sigmas.Input("sigmas"), io.Float.Input("denoise", default=1.0, min=0.0, max=1.0, step=0.01), @@ -255,6 +275,8 @@ class FlipSigmas(io.ComfyNode): return io.Schema( node_id="FlipSigmas", category="sampling/custom_sampling/sigmas", + description="Reverses the order of a sigma sequence, useful for converting between ascending and descending noise schedules.", + short_description="Reverse the order of a sigma sequence.", inputs=[io.Sigmas.Input("sigmas")], outputs=[io.Sigmas.Output()] ) @@ -277,6 +299,8 @@ class SetFirstSigma(io.ComfyNode): return io.Schema( node_id="SetFirstSigma", category="sampling/custom_sampling/sigmas", + description="Overrides the first sigma value in a sequence with a custom value, allowing manual control of the initial noise level.", + short_description="Override the first sigma value in a sequence.", inputs=[ io.Sigmas.Input("sigmas"), io.Float.Input("sigma", default=136.0, min=0.0, max=20000.0, step=0.001, round=False), @@ -299,6 +323,8 @@ class ExtendIntermediateSigmas(io.ComfyNode): node_id="ExtendIntermediateSigmas", search_aliases=["interpolate sigmas"], category="sampling/custom_sampling/sigmas", + description="Interpolates additional intermediate sigma values between existing steps using selectable spacing within a specified sigma range.", + short_description="Interpolate additional sigma steps between existing values.", inputs=[ io.Sigmas.Input("sigmas"), io.Int.Input("steps", default=2, min=1, max=100), @@ -352,6 +378,8 @@ class SamplingPercentToSigma(io.ComfyNode): return 
io.Schema( node_id="SamplingPercentToSigma", category="sampling/custom_sampling/sigmas", + description="Converts a sampling percentage (0.0 to 1.0) to the corresponding sigma value using a model's noise schedule.", + short_description="Convert sampling percentage to sigma value.", inputs=[ io.Model.Input("model"), io.Float.Input("sampling_percent", default=0.0, min=0.0, max=1.0, step=0.0001), @@ -380,6 +408,8 @@ class KSamplerSelect(io.ComfyNode): return io.Schema( node_id="KSamplerSelect", category="sampling/custom_sampling/samplers", + description="Selects a sampler algorithm by name from the list of available samplers and outputs the sampler object.", + short_description="Select a sampler algorithm by name.", inputs=[io.Combo.Input("sampler_name", options=comfy.samplers.SAMPLER_NAMES)], outputs=[io.Sampler.Output()] ) @@ -397,6 +427,8 @@ class SamplerDPMPP_3M_SDE(io.ComfyNode): return io.Schema( node_id="SamplerDPMPP_3M_SDE", category="sampling/custom_sampling/samplers", + description="Creates a DPM++ 3M SDE sampler with configurable eta, noise scale, and GPU or CPU noise generation.", + short_description="Create a DPM++ 3M SDE sampler.", inputs=[ io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False), io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False), @@ -422,6 +454,8 @@ class SamplerDPMPP_2M_SDE(io.ComfyNode): return io.Schema( node_id="SamplerDPMPP_2M_SDE", category="sampling/custom_sampling/samplers", + description="Creates a DPM++ 2M SDE sampler with configurable solver type, eta, noise scale, and noise device.", + short_description="Create a DPM++ 2M SDE sampler.", inputs=[ io.Combo.Input("solver_type", options=['midpoint', 'heun']), io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False), @@ -449,6 +483,8 @@ class SamplerDPMPP_SDE(io.ComfyNode): return io.Schema( node_id="SamplerDPMPP_SDE", category="sampling/custom_sampling/samplers", + description="Creates a DPM++ SDE 
sampler with configurable eta, noise scale, r parameter, and noise device.", + short_description="Create a DPM++ SDE sampler.", inputs=[ io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False), io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False), @@ -475,6 +511,8 @@ class SamplerDPMPP_2S_Ancestral(io.ComfyNode): return io.Schema( node_id="SamplerDPMPP_2S_Ancestral", category="sampling/custom_sampling/samplers", + description="Creates a DPM++ 2S Ancestral sampler with configurable eta and noise scale parameters.", + short_description="Create a DPM++ 2S Ancestral sampler.", inputs=[ io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False), io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False), @@ -495,6 +533,8 @@ class SamplerEulerAncestral(io.ComfyNode): return io.Schema( node_id="SamplerEulerAncestral", category="sampling/custom_sampling/samplers", + description="Creates an Euler Ancestral sampler with configurable eta and noise scale for stochastic sampling.", + short_description="Create an Euler Ancestral stochastic sampler.", inputs=[ io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False), io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False), @@ -516,6 +556,8 @@ class SamplerEulerAncestralCFGPP(io.ComfyNode): node_id="SamplerEulerAncestralCFGPP", display_name="SamplerEulerAncestralCFG++", category="sampling/custom_sampling/samplers", + description="Creates an Euler Ancestral CFG++ sampler that applies classifier-free guidance with improved stability.", + short_description="Create an Euler Ancestral CFG++ sampler.", inputs=[ io.Float.Input("eta", default=1.0, min=0.0, max=1.0, step=0.01, round=False), io.Float.Input("s_noise", default=1.0, min=0.0, max=10.0, step=0.01, round=False), @@ -538,6 +580,8 @@ class SamplerLMS(io.ComfyNode): return io.Schema( node_id="SamplerLMS", 
category="sampling/custom_sampling/samplers", + description="Creates a Linear Multi-Step (LMS) sampler with a configurable order parameter.", + short_description="Create a Linear Multi-Step (LMS) sampler.", inputs=[io.Int.Input("order", default=4, min=1, max=100)], outputs=[io.Sampler.Output()] ) @@ -555,6 +599,8 @@ class SamplerDPMAdaptative(io.ComfyNode): return io.Schema( node_id="SamplerDPMAdaptative", category="sampling/custom_sampling/samplers", + description="Creates a DPM Adaptive sampler with configurable order, tolerances, PID coefficients, and stochastic noise parameters for adaptive step-size sampling.", + short_description="Create a DPM Adaptive step-size sampler.", inputs=[ io.Int.Input("order", default=3, min=2, max=3), io.Float.Input("rtol", default=0.05, min=0.0, max=100.0, step=0.01, round=False), @@ -586,6 +632,8 @@ class SamplerER_SDE(io.ComfyNode): return io.Schema( node_id="SamplerER_SDE", category="sampling/custom_sampling/samplers", + description="Creates an ER-SDE sampler supporting ER-SDE, reverse-time SDE, and ODE solver types with configurable stochastic strength and staging.", + short_description="Create an ER-SDE, reverse-time SDE, or ODE sampler.", inputs=[ io.Combo.Input("solver_type", options=["ER-SDE", "Reverse-time SDE", "ODE"]), io.Int.Input("max_stage", default=3, min=1, max=3), @@ -624,6 +672,8 @@ class SamplerSASolver(io.ComfyNode): node_id="SamplerSASolver", search_aliases=["sde"], category="sampling/custom_sampling/samplers", + description="Creates an SA-Solver sampler with configurable predictor/corrector orders, SDE region, and PECE mode for high-order diffusion sampling.", + short_description="Create an SA-Solver high-order diffusion sampler.", inputs=[ io.Model.Input("model"), io.Float.Input("eta", default=1.0, min=0.0, max=10.0, step=0.01, round=False), @@ -684,7 +734,8 @@ class SamplerSEEDS2(io.ComfyNode): "- solver_type=phi_2, r=1.0, eta=0.0\n\n" "exp_heun_2_x0_sde\n" "- solver_type=phi_2, r=1.0, eta=1.0, s_noise=1.0" 
- ) + ), + short_description="SEEDS2 sampler with configurable solver and SDE settings.", ) @classmethod @@ -728,6 +779,8 @@ class SamplerCustom(io.ComfyNode): return io.Schema( node_id="SamplerCustom", category="sampling/custom_sampling", + description="Runs a complete custom sampling pass by combining a model, sampler, sigmas, and conditioning with optional noise injection.", + short_description="Run custom sampling with manual sampler and sigmas.", inputs=[ io.Model.Input("model"), io.Boolean.Input("add_noise", default=True), @@ -794,6 +847,8 @@ class BasicGuider(io.ComfyNode): return io.Schema( node_id="BasicGuider", category="sampling/custom_sampling/guiders", + description="Creates a basic guider that applies a single conditioning input to guide the diffusion model without classifier-free guidance.", + short_description="Create a single-conditioning guider without CFG.", inputs=[ io.Model.Input("model"), io.Conditioning.Input("conditioning"), @@ -815,6 +870,8 @@ class CFGGuider(io.ComfyNode): return io.Schema( node_id="CFGGuider", category="sampling/custom_sampling/guiders", + description="Creates a classifier-free guidance guider that combines positive and negative conditioning with an adjustable CFG scale.", + short_description="Create a CFG guider with positive/negative conditioning.", inputs=[ io.Model.Input("model"), io.Conditioning.Input("positive"), @@ -869,6 +926,8 @@ class DualCFGGuider(io.ComfyNode): node_id="DualCFGGuider", search_aliases=["dual prompt guidance"], category="sampling/custom_sampling/guiders", + description="Creates a dual classifier-free guidance guider that blends two conditioning inputs against a negative with independent CFG scales and regular or nested styles.", + short_description="Create a dual CFG guider with two conditionings.", inputs=[ io.Model.Input("model"), io.Conditioning.Input("cond1"), @@ -897,6 +956,8 @@ class DisableNoise(io.ComfyNode): node_id="DisableNoise", search_aliases=["zero noise"], 
category="sampling/custom_sampling/noise", + description="Produces a zero-noise source that disables noise injection, useful for deterministic sampling or img2img without added noise.", + short_description="Produce zero noise to disable noise injection.", inputs=[], outputs=[io.Noise.Output()] ) @@ -914,6 +975,8 @@ class RandomNoise(io.ComfyNode): return io.Schema( node_id="RandomNoise", category="sampling/custom_sampling/noise", + description="Produces a random noise source from a seed value for use in custom sampling workflows.", + short_description="Produce seeded random noise for sampling.", inputs=[io.Int.Input("noise_seed", default=0, min=0, max=0xffffffffffffffff, control_after_generate=True)], outputs=[io.Noise.Output()] ) @@ -931,6 +994,8 @@ class SamplerCustomAdvanced(io.ComfyNode): return io.Schema( node_id="SamplerCustomAdvanced", category="sampling/custom_sampling", + description="Runs an advanced custom sampling pass using separate noise, guider, sampler, and sigmas inputs for maximum control over the diffusion process.", + short_description="Run advanced custom sampling with separate components.", inputs=[ io.Noise.Input("noise"), io.Guider.Input("guider"), @@ -985,6 +1050,8 @@ class AddNoise(io.ComfyNode): return io.Schema( node_id="AddNoise", category="_for_testing/custom_sampling/noise", + description="Adds scaled noise to a latent image using the model's noise schedule and sigma values for manual noise injection.", + short_description="Add scaled noise to a latent image.", is_experimental=True, inputs=[ io.Model.Input("model"), @@ -1035,6 +1102,8 @@ class ManualSigmas(io.ComfyNode): node_id="ManualSigmas", search_aliases=["custom noise schedule", "define sigmas"], category="_for_testing/custom_sampling", + description="Defines a custom sigma sequence by manually entering comma-separated numeric values as a text string.", + short_description="Define custom sigmas from comma-separated values.", is_experimental=True, inputs=[ 
io.String.Input("sigmas", default="1, 0.5", multiline=False) diff --git a/comfy_extras/nodes_dataset.py b/comfy_extras/nodes_dataset.py index fb9409ac3..43f20faf0 100644 --- a/comfy_extras/nodes_dataset.py +++ b/comfy_extras/nodes_dataset.py @@ -49,6 +49,8 @@ class LoadImageDataSetFromFolderNode(io.ComfyNode): node_id="LoadImageDataSetFromFolder", display_name="Load Image Dataset from Folder", category="dataset", + description="Loads all images from a selected input subfolder and outputs them as a list of image tensors.", + short_description="Loads images from a folder as a list.", is_experimental=True, inputs=[ io.Combo.Input( @@ -86,6 +88,8 @@ class LoadImageTextDataSetFromFolderNode(io.ComfyNode): node_id="LoadImageTextDataSetFromFolder", display_name="Load Image and Text Dataset from Folder", category="dataset", + description="Loads paired images and text captions from a folder, matching each image with its corresponding text file.", + short_description="Loads paired images and text captions from folder.", is_experimental=True, inputs=[ io.Combo.Input( @@ -208,6 +212,8 @@ class SaveImageDataSetToFolderNode(io.ComfyNode): node_id="SaveImageDataSetToFolder", display_name="Save Image Dataset to Folder", category="dataset", + description="Saves a list of images to a named folder in the output directory with configurable filename prefix.", + short_description="Saves image list to an output folder.", is_experimental=True, is_output_node=True, is_input_list=True, # Receive images as list @@ -247,6 +253,8 @@ class SaveImageTextDataSetToFolderNode(io.ComfyNode): node_id="SaveImageTextDataSetToFolder", display_name="Save Image and Text Dataset to Folder", category="dataset", + description="Saves paired images and text captions to a named folder in the output directory with configurable filename prefix.", + short_description="Saves paired images and text to output folder.", is_experimental=True, is_output_node=True, is_input_list=True, # Receive both images and texts as 
lists @@ -401,6 +409,8 @@ class ImageProcessingNode(io.ComfyNode): return io.Schema( node_id=cls.node_id, display_name=cls.display_name or cls.node_id, + description=getattr(cls, 'description', ''), + short_description=getattr(cls, 'short_description', ''), category="dataset/image", is_experimental=True, is_input_list=is_group, # True for group, False for individual @@ -550,6 +560,8 @@ class TextProcessingNode(io.ComfyNode): return io.Schema( node_id=cls.node_id, display_name=cls.display_name or cls.node_id, + description=getattr(cls, 'description', ''), + short_description=getattr(cls, 'short_description', ''), category="dataset/text", is_experimental=True, is_input_list=is_group, # True for group, False for individual @@ -627,6 +639,7 @@ class ResizeImagesByShorterEdgeNode(ImageProcessingNode): node_id = "ResizeImagesByShorterEdge" display_name = "Resize Images by Shorter Edge" description = "Resize images so that the shorter edge matches the specified length while preserving aspect ratio." + short_description = "Resizes images by shorter edge preserving aspect ratio." extra_inputs = [ io.Int.Input( "shorter_edge", @@ -655,6 +668,7 @@ class ResizeImagesByLongerEdgeNode(ImageProcessingNode): node_id = "ResizeImagesByLongerEdge" display_name = "Resize Images by Longer Edge" description = "Resize images so that the longer edge matches the specified length while preserving aspect ratio." + short_description = "Resizes images by longer edge preserving aspect ratio." extra_inputs = [ io.Int.Input( "longer_edge", @@ -686,6 +700,7 @@ class CenterCropImagesNode(ImageProcessingNode): node_id = "CenterCropImages" display_name = "Center Crop Images" description = "Center crop all images to the specified dimensions." 
+ short_description = None extra_inputs = [ io.Int.Input("width", default=512, min=1, max=8192, tooltip="Crop width."), io.Int.Input("height", default=512, min=1, max=8192, tooltip="Crop height."), @@ -708,6 +723,7 @@ class RandomCropImagesNode(ImageProcessingNode): description = ( "Randomly crop all images to the specified dimensions (for data augmentation)." ) + short_description = None extra_inputs = [ io.Int.Input("width", default=512, min=1, max=8192, tooltip="Crop width."), io.Int.Input("height", default=512, min=1, max=8192, tooltip="Crop height."), @@ -734,6 +750,7 @@ class NormalizeImagesNode(ImageProcessingNode): node_id = "NormalizeImages" display_name = "Normalize Images" description = "Normalize images using mean and standard deviation." + short_description = None extra_inputs = [ io.Float.Input( "mean", @@ -760,6 +777,7 @@ class AdjustBrightnessNode(ImageProcessingNode): node_id = "AdjustBrightness" display_name = "Adjust Brightness" description = "Adjust brightness of all images." + short_description = None extra_inputs = [ io.Float.Input( "factor", @@ -779,6 +797,7 @@ class AdjustContrastNode(ImageProcessingNode): node_id = "AdjustContrast" display_name = "Adjust Contrast" description = "Adjust contrast of all images." + short_description = None extra_inputs = [ io.Float.Input( "factor", @@ -798,6 +817,7 @@ class ShuffleDatasetNode(ImageProcessingNode): node_id = "ShuffleDataset" display_name = "Shuffle Image Dataset" description = "Randomly shuffle the order of images in the dataset." 
+ short_description = None is_group_process = True # Requires full list to shuffle extra_inputs = [ io.Int.Input( @@ -821,6 +841,8 @@ class ShuffleImageTextDatasetNode(io.ComfyNode): node_id="ShuffleImageTextDataset", display_name="Shuffle Image-Text Dataset", category="dataset/image", + description="Randomly shuffles paired image and text lists together using a seed, preserving their correspondence.", + short_description="Shuffles paired image-text lists together.", is_experimental=True, is_input_list=True, inputs=[ @@ -863,6 +885,7 @@ class TextToLowercaseNode(TextProcessingNode): node_id = "TextToLowercase" display_name = "Text to Lowercase" description = "Convert all texts to lowercase." + short_description = None @classmethod def _process(cls, text): @@ -873,6 +896,7 @@ class TextToUppercaseNode(TextProcessingNode): node_id = "TextToUppercase" display_name = "Text to Uppercase" description = "Convert all texts to uppercase." + short_description = None @classmethod def _process(cls, text): @@ -883,6 +907,7 @@ class TruncateTextNode(TextProcessingNode): node_id = "TruncateText" display_name = "Truncate Text" description = "Truncate all texts to a maximum length." + short_description = None extra_inputs = [ io.Int.Input( "max_length", default=77, min=1, max=10000, tooltip="Maximum text length." @@ -898,6 +923,7 @@ class AddTextPrefixNode(TextProcessingNode): node_id = "AddTextPrefix" display_name = "Add Text Prefix" description = "Add a prefix to all texts." + short_description = None extra_inputs = [ io.String.Input("prefix", default="", tooltip="Prefix to add."), ] @@ -911,6 +937,7 @@ class AddTextSuffixNode(TextProcessingNode): node_id = "AddTextSuffix" display_name = "Add Text Suffix" description = "Add a suffix to all texts." 
+ short_description = None extra_inputs = [ io.String.Input("suffix", default="", tooltip="Suffix to add."), ] @@ -924,6 +951,7 @@ class ReplaceTextNode(TextProcessingNode): node_id = "ReplaceText" display_name = "Replace Text" description = "Replace text in all texts." + short_description = None extra_inputs = [ io.String.Input("find", default="", tooltip="Text to find."), io.String.Input("replace", default="", tooltip="Text to replace with."), @@ -938,6 +966,7 @@ class StripWhitespaceNode(TextProcessingNode): node_id = "StripWhitespace" display_name = "Strip Whitespace" description = "Strip leading and trailing whitespace from all texts." + short_description = None @classmethod def _process(cls, text): @@ -953,6 +982,7 @@ class ImageDeduplicationNode(ImageProcessingNode): node_id = "ImageDeduplication" display_name = "Image Deduplication" description = "Remove duplicate or very similar images from the dataset." + short_description = None is_group_process = True # Requires full list to compare images extra_inputs = [ io.Float.Input( @@ -1023,6 +1053,7 @@ class ImageGridNode(ImageProcessingNode): node_id = "ImageGrid" display_name = "Image Grid" description = "Arrange multiple images into a grid layout." + short_description = None is_group_process = True # Requires full list to create grid is_output_list = False # Outputs single grid image extra_inputs = [ @@ -1097,6 +1128,7 @@ class MergeImageListsNode(ImageProcessingNode): node_id = "MergeImageLists" display_name = "Merge Image Lists" description = "Concatenate multiple image lists into one." + short_description = None is_group_process = True # Receives images as list @classmethod @@ -1114,6 +1146,7 @@ class MergeTextListsNode(TextProcessingNode): node_id = "MergeTextLists" display_name = "Merge Text Lists" description = "Concatenate multiple text lists into one." 
+ short_description = None is_group_process = True # Receives texts as list @classmethod @@ -1137,6 +1170,8 @@ class ResolutionBucket(io.ComfyNode): node_id="ResolutionBucket", display_name="Resolution Bucket", category="dataset", + description="Groups latents and conditioning by resolution into batched buckets for efficient training with mixed aspect ratios.", + short_description="Groups latents by resolution into training buckets.", is_experimental=True, is_input_list=True, inputs=[ @@ -1230,6 +1265,8 @@ class MakeTrainingDataset(io.ComfyNode): search_aliases=["encode dataset"], display_name="Make Training Dataset", category="dataset", + description="Encodes images with a VAE and text captions with CLIP to create paired latent and conditioning training data.", + short_description="Encodes images and text into training data.", is_experimental=True, is_input_list=True, # images and texts as lists inputs=[ @@ -1316,6 +1353,8 @@ class SaveTrainingDataset(io.ComfyNode): search_aliases=["export training data"], display_name="Save Training Dataset", category="dataset", + description="Saves encoded latent and conditioning training data to disk in sharded files with configurable shard size.", + short_description="Saves encoded training data to sharded files.", is_experimental=True, is_output_node=True, is_input_list=True, # Receive lists @@ -1417,6 +1456,8 @@ class LoadTrainingDataset(io.ComfyNode): search_aliases=["import dataset", "training data"], display_name="Load Training Dataset", category="dataset", + description="Loads a previously saved training dataset of latents and conditioning from sharded files on disk.", + short_description="Loads saved training dataset from disk.", is_experimental=True, inputs=[ io.String.Input( diff --git a/comfy_extras/nodes_differential_diffusion.py b/comfy_extras/nodes_differential_diffusion.py index 34ffb9a89..d5872c206 100644 --- a/comfy_extras/nodes_differential_diffusion.py +++ b/comfy_extras/nodes_differential_diffusion.py @@ 
-14,6 +14,8 @@ class DifferentialDiffusion(io.ComfyNode): search_aliases=["inpaint gradient", "variable denoise strength"], display_name="Differential Diffusion", category="_for_testing", + description="Enables per-pixel variable denoise strength using a mask, where mask intensity controls how much each region is denoised during sampling.", + short_description="Per-pixel variable denoise strength via mask.", inputs=[ io.Model.Input("model"), io.Float.Input( diff --git a/comfy_extras/nodes_easycache.py b/comfy_extras/nodes_easycache.py index b1912392c..5099ecd2c 100644 --- a/comfy_extras/nodes_easycache.py +++ b/comfy_extras/nodes_easycache.py @@ -363,6 +363,7 @@ class EasyCacheNode(io.ComfyNode): node_id="EasyCache", display_name="EasyCache", description="Native EasyCache implementation.", + short_description=None, category="advanced/debug/model", is_experimental=True, inputs=[ @@ -496,6 +497,7 @@ class LazyCacheNode(io.ComfyNode): node_id="LazyCache", display_name="LazyCache", description="A homebrew version of EasyCache - even 'easier' version of EasyCache to implement. Overall works worse than EasyCache, but better in some rare cases AND universal compatibility with everything in ComfyUI.", + short_description="Simpler EasyCache alternative with universal ComfyUI compatibility.", category="advanced/debug/model", is_experimental=True, inputs=[ diff --git a/comfy_extras/nodes_edit_model.py b/comfy_extras/nodes_edit_model.py index 36da66f34..055e7ef8c 100644 --- a/comfy_extras/nodes_edit_model.py +++ b/comfy_extras/nodes_edit_model.py @@ -10,6 +10,7 @@ class ReferenceLatent(io.ComfyNode): node_id="ReferenceLatent", category="advanced/conditioning/edit_models", description="This node sets the guiding latent for an edit model. 
If the model supports it you can chain multiple to set multiple reference images.", + short_description="Sets guiding latent for edit models with chaining support.", inputs=[ io.Conditioning.Input("conditioning"), io.Latent.Input("latent", optional=True), diff --git a/comfy_extras/nodes_eps.py b/comfy_extras/nodes_eps.py index 4d8061741..26be90da6 100644 --- a/comfy_extras/nodes_eps.py +++ b/comfy_extras/nodes_eps.py @@ -19,6 +19,8 @@ class EpsilonScaling(io.ComfyNode): return io.Schema( node_id="Epsilon Scaling", category="model_patches/unet", + description="Applies epsilon scaling to mitigate exposure bias in diffusion models by scaling the predicted noise after CFG, improving sample quality.", + short_description="Scale predicted noise to reduce exposure bias.", inputs=[ io.Model.Input("model"), io.Float.Input( @@ -121,6 +123,7 @@ class TemporalScoreRescaling(io.ComfyNode): "TSR - Temporal Score Rescaling (2510.01184)\n\n" "Rescaling the model's score or noise to steer the sampling diversity.\n" ), + short_description="Rescales temporal scores to control sampling diversity.", ) @classmethod diff --git a/comfy_extras/nodes_flux.py b/comfy_extras/nodes_flux.py index 12c8ed3e6..a597887f2 100644 --- a/comfy_extras/nodes_flux.py +++ b/comfy_extras/nodes_flux.py @@ -13,6 +13,8 @@ class CLIPTextEncodeFlux(io.ComfyNode): return io.Schema( node_id="CLIPTextEncodeFlux", category="advanced/conditioning/flux", + description="Encodes separate CLIP-L and T5-XXL text prompts with a guidance value into Flux conditioning.", + short_description="Encodes CLIP-L and T5-XXL prompts for Flux.", inputs=[ io.Clip.Input("clip"), io.String.Input("clip_l", multiline=True, dynamic_prompts=True), @@ -40,6 +42,8 @@ class EmptyFlux2LatentImage(io.ComfyNode): node_id="EmptyFlux2LatentImage", display_name="Empty Flux 2 Latent", category="latent", + description="Creates an empty Flux 2 latent image tensor with the specified width, height, and batch size.", + short_description="Creates an empty 
Flux 2 latent image tensor.", inputs=[ io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16), io.Int.Input("height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16), @@ -61,6 +65,8 @@ class FluxGuidance(io.ComfyNode): return io.Schema( node_id="FluxGuidance", category="advanced/conditioning/flux", + description="Sets the guidance strength value on Flux conditioning to control how closely generation follows the prompt.", + short_description="Sets guidance strength on Flux conditioning.", inputs=[ io.Conditioning.Input("conditioning"), io.Float.Input("guidance", default=3.5, min=0.0, max=100.0, step=0.1), @@ -85,6 +91,7 @@ class FluxDisableGuidance(io.ComfyNode): node_id="FluxDisableGuidance", category="advanced/conditioning/flux", description="This node completely disables the guidance embed on Flux and Flux like models", + short_description="Disables guidance embed on Flux and Flux-like models.", inputs=[ io.Conditioning.Input("conditioning"), ], @@ -129,6 +136,7 @@ class FluxKontextImageScale(io.ComfyNode): node_id="FluxKontextImageScale", category="advanced/conditioning/flux", description="This node resizes the image to one that is more optimal for flux kontext.", + short_description="Resizes images to optimal dimensions for Flux Kontext.", inputs=[ io.Image.Input("image"), ], @@ -156,6 +164,8 @@ class FluxKontextMultiReferenceLatentMethod(io.ComfyNode): node_id="FluxKontextMultiReferenceLatentMethod", display_name="Edit Model Reference Method", category="advanced/conditioning/flux", + description="Selects the method used for handling multiple reference latents in Flux Kontext edit models.", + short_description="Selects reference latent method for Flux Kontext.", inputs=[ io.Conditioning.Input("conditioning"), io.Combo.Input( @@ -214,6 +224,8 @@ class Flux2Scheduler(io.ComfyNode): return io.Schema( node_id="Flux2Scheduler", category="sampling/custom_sampling/schedulers", + description="Generates a sigma schedule for Flux 2 
sampling based on step count and image resolution.", + short_description="Generates a sigma schedule for Flux 2 sampling.", inputs=[ io.Int.Input("steps", default=20, min=1, max=4096), io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=1), diff --git a/comfy_extras/nodes_freelunch.py b/comfy_extras/nodes_freelunch.py index 3429b731e..717d25468 100644 --- a/comfy_extras/nodes_freelunch.py +++ b/comfy_extras/nodes_freelunch.py @@ -30,6 +30,8 @@ class FreeU(IO.ComfyNode): return IO.Schema( node_id="FreeU", category="model_patches/unet", + description="Applies FreeU v1 to a UNet model, boosting backbone features and filtering skip connections using Fourier transforms for improved quality.", + short_description="Applies FreeU v1 backbone boost and skip filtering.", inputs=[ IO.Model.Input("model"), IO.Float.Input("b1", default=1.1, min=0.0, max=10.0, step=0.01), @@ -77,6 +79,8 @@ class FreeU_V2(IO.ComfyNode): return IO.Schema( node_id="FreeU_V2", category="model_patches/unet", + description="Applies FreeU v2 to a UNet model with adaptive backbone scaling based on hidden state magnitude and Fourier skip filtering.", + short_description="Applies FreeU v2 with adaptive scaling.", inputs=[ IO.Model.Input("model"), IO.Float.Input("b1", default=1.3, min=0.0, max=10.0, step=0.01), diff --git a/comfy_extras/nodes_fresca.py b/comfy_extras/nodes_fresca.py index 3d590af4b..9444461bb 100644 --- a/comfy_extras/nodes_fresca.py +++ b/comfy_extras/nodes_fresca.py @@ -62,6 +62,7 @@ class FreSca(io.ComfyNode): display_name="FreSca", category="_for_testing", description="Applies frequency-dependent scaling to the guidance", + short_description=None, inputs=[ io.Model.Input("model"), io.Float.Input("scale_low", default=1.0, min=0, max=10, step=0.01, diff --git a/comfy_extras/nodes_gits.py b/comfy_extras/nodes_gits.py index 25367560a..4ffb77d0e 100644 --- a/comfy_extras/nodes_gits.py +++ b/comfy_extras/nodes_gits.py @@ -341,6 +341,8 @@ class 
GITSScheduler(io.ComfyNode): return io.Schema( node_id="GITSScheduler", category="sampling/custom_sampling/schedulers", + description="Generates a noise schedule using the GITS method with precomputed optimal sigma levels and configurable coefficient.", + short_description="Generates a GITS noise schedule with optimal sigma levels.", inputs=[ io.Float.Input("coeff", default=1.20, min=0.80, max=1.50, step=0.05), io.Int.Input("steps", default=10, min=2, max=1000), diff --git a/comfy_extras/nodes_hidream.py b/comfy_extras/nodes_hidream.py index e345fe51d..e6638b5a4 100644 --- a/comfy_extras/nodes_hidream.py +++ b/comfy_extras/nodes_hidream.py @@ -13,6 +13,7 @@ class QuadrupleCLIPLoader(io.ComfyNode): node_id="QuadrupleCLIPLoader", category="advanced/loaders", description="[Recipes]\n\nhidream: long clip-l, long clip-g, t5xxl, llama_8b_3.1_instruct", + short_description=None, inputs=[ io.Combo.Input("clip_name1", options=folder_paths.get_filename_list("text_encoders")), io.Combo.Input("clip_name2", options=folder_paths.get_filename_list("text_encoders")), @@ -40,6 +41,8 @@ class CLIPTextEncodeHiDream(io.ComfyNode): node_id="CLIPTextEncodeHiDream", search_aliases=["hidream prompt"], category="advanced/conditioning", + description="Encodes separate CLIP-L, CLIP-G, T5-XXL, and Llama text prompts into HiDream conditioning.", + short_description="Encodes multi-encoder text prompts for HiDream.", inputs=[ io.Clip.Input("clip"), io.String.Input("clip_l", multiline=True, dynamic_prompts=True), diff --git a/comfy_extras/nodes_hooks.py b/comfy_extras/nodes_hooks.py index 58e511ef5..62ed15f45 100644 --- a/comfy_extras/nodes_hooks.py +++ b/comfy_extras/nodes_hooks.py @@ -38,6 +38,8 @@ class PairConditioningSetProperties: RETURN_TYPES = ("CONDITIONING", "CONDITIONING") RETURN_NAMES = ("positive", "negative") CATEGORY = "advanced/hooks/cond pair" + DESCRIPTION = "Set properties like strength, mask, hooks, and timesteps on a positive/negative conditioning pair." 
+ SHORT_DESCRIPTION = "Set properties on a positive/negative conditioning pair." FUNCTION = "set_properties" def set_properties(self, positive_NEW, negative_NEW, @@ -73,6 +75,8 @@ class PairConditioningSetPropertiesAndCombine: RETURN_TYPES = ("CONDITIONING", "CONDITIONING") RETURN_NAMES = ("positive", "negative") CATEGORY = "advanced/hooks/cond pair" + DESCRIPTION = "Set properties on new conditioning pair and combine with existing positive/negative conditioning." + SHORT_DESCRIPTION = "Set properties on new cond pair, combine with existing." FUNCTION = "set_properties" def set_properties(self, positive, negative, positive_NEW, negative_NEW, @@ -104,6 +108,8 @@ class ConditioningSetProperties: EXPERIMENTAL = True RETURN_TYPES = ("CONDITIONING",) CATEGORY = "advanced/hooks/cond single" + DESCRIPTION = "Set properties like strength, mask, hooks, and timesteps on a single conditioning input." + SHORT_DESCRIPTION = "Set properties on a single conditioning input." FUNCTION = "set_properties" def set_properties(self, cond_NEW, @@ -136,6 +142,8 @@ class ConditioningSetPropertiesAndCombine: EXPERIMENTAL = True RETURN_TYPES = ("CONDITIONING",) CATEGORY = "advanced/hooks/cond single" + DESCRIPTION = "Set properties on new conditioning and combine it with an existing conditioning input." + SHORT_DESCRIPTION = "Set properties on new conditioning, combine with existing." FUNCTION = "set_properties" def set_properties(self, cond, cond_NEW, @@ -164,6 +172,8 @@ class PairConditioningCombine: RETURN_TYPES = ("CONDITIONING", "CONDITIONING") RETURN_NAMES = ("positive", "negative") CATEGORY = "advanced/hooks/cond pair" + DESCRIPTION = "Combine two positive/negative conditioning pairs into a single pair." 
+ SHORT_DESCRIPTION = None FUNCTION = "combine" def combine(self, positive_A, negative_A, positive_B, negative_B): @@ -191,6 +201,8 @@ class PairConditioningSetDefaultAndCombine: RETURN_TYPES = ("CONDITIONING", "CONDITIONING") RETURN_NAMES = ("positive", "negative") CATEGORY = "advanced/hooks/cond pair" + DESCRIPTION = "Set default conditioning pair and combine it with existing positive/negative conditioning and optional hooks." + SHORT_DESCRIPTION = "Set default cond pair and combine with existing." FUNCTION = "set_default_and_combine" def set_default_and_combine(self, positive, negative, positive_DEFAULT, negative_DEFAULT, @@ -217,6 +229,8 @@ class ConditioningSetDefaultAndCombine: EXPERIMENTAL = True RETURN_TYPES = ("CONDITIONING",) CATEGORY = "advanced/hooks/cond single" + DESCRIPTION = "Set default conditioning and combine it with existing conditioning input and optional hooks." + SHORT_DESCRIPTION = "Set default conditioning and combine with existing." FUNCTION = "set_default_and_combine" def set_default_and_combine(self, cond, cond_DEFAULT, @@ -244,6 +258,8 @@ class SetClipHooks: EXPERIMENTAL = True RETURN_TYPES = ("CLIP",) CATEGORY = "advanced/hooks/clip" + DESCRIPTION = "Apply hooks to a CLIP model, optionally propagating them to conditioning outputs and enabling CLIP scheduling." + SHORT_DESCRIPTION = "Apply hooks to a CLIP model with scheduling options." FUNCTION = "apply_hooks" def apply_hooks(self, clip: CLIP, schedule_clip: bool, apply_to_conds: bool, hooks: comfy.hooks.HookGroup=None): @@ -275,6 +291,8 @@ class ConditioningTimestepsRange: RETURN_TYPES = ("TIMESTEPS_RANGE", "TIMESTEPS_RANGE", "TIMESTEPS_RANGE") RETURN_NAMES = ("TIMESTEPS_RANGE", "BEFORE_RANGE", "AFTER_RANGE") CATEGORY = "advanced/hooks" + DESCRIPTION = "Define a timestep percentage range and output the range plus the complementary before and after segments." + SHORT_DESCRIPTION = "Define a timestep range with before/after complements."
FUNCTION = "create_range" def create_range(self, start_percent: float, end_percent: float): @@ -308,6 +326,8 @@ class CreateHookLora: EXPERIMENTAL = True RETURN_TYPES = ("HOOKS",) CATEGORY = "advanced/hooks/create" + DESCRIPTION = "Create a LoRA hook with separate model and CLIP strength that can be scheduled on conditioning." + SHORT_DESCRIPTION = "Create a LoRA hook with model and CLIP strength." FUNCTION = "create_hook" def create_hook(self, lora_name: str, strength_model: float, strength_clip: float, prev_hooks: comfy.hooks.HookGroup=None): @@ -353,6 +373,8 @@ class CreateHookLoraModelOnly(CreateHookLora): EXPERIMENTAL = True RETURN_TYPES = ("HOOKS",) CATEGORY = "advanced/hooks/create" + DESCRIPTION = "Create a LoRA hook that only affects the model (not CLIP) for scheduling on conditioning." + SHORT_DESCRIPTION = "Create a model-only LoRA hook." FUNCTION = "create_hook_model_only" def create_hook_model_only(self, lora_name: str, strength_model: float, prev_hooks: comfy.hooks.HookGroup=None): @@ -383,6 +405,8 @@ class CreateHookModelAsLora: EXPERIMENTAL = True RETURN_TYPES = ("HOOKS",) CATEGORY = "advanced/hooks/create" + DESCRIPTION = "Create a hook from a full checkpoint treated as a LoRA, with separate model and CLIP strength controls." + SHORT_DESCRIPTION = "Create a hook from a checkpoint treated as LoRA." FUNCTION = "create_hook" def create_hook(self, ckpt_name: str, strength_model: float, strength_clip: float, @@ -431,6 +455,8 @@ class CreateHookModelAsLoraModelOnly(CreateHookModelAsLora): EXPERIMENTAL = True RETURN_TYPES = ("HOOKS",) CATEGORY = "advanced/hooks/create" + DESCRIPTION = "Create a model-only hook from a full checkpoint treated as a LoRA, without affecting CLIP." + SHORT_DESCRIPTION = "Create a model-only hook from a checkpoint as LoRA." 
FUNCTION = "create_hook_model_only" def create_hook_model_only(self, ckpt_name: str, strength_model: float, @@ -460,6 +486,8 @@ class SetHookKeyframes: EXPERIMENTAL = True RETURN_TYPES = ("HOOKS",) CATEGORY = "advanced/hooks/scheduling" + DESCRIPTION = "Assign keyframe schedules to hooks for controlling their strength over time during sampling." + SHORT_DESCRIPTION = "Assign keyframe schedules to hooks over time." FUNCTION = "set_hook_keyframes" def set_hook_keyframes(self, hooks: comfy.hooks.HookGroup, hook_kf: comfy.hooks.HookKeyframeGroup=None): @@ -488,6 +516,8 @@ class CreateHookKeyframe: RETURN_TYPES = ("HOOK_KEYFRAMES",) RETURN_NAMES = ("HOOK_KF",) CATEGORY = "advanced/hooks/scheduling" + DESCRIPTION = "Create a single hook keyframe with a strength multiplier at a specific timestep percentage." + SHORT_DESCRIPTION = "Create a hook keyframe at a specific timestep." FUNCTION = "create_hook_keyframe" def create_hook_keyframe(self, strength_mult: float, start_percent: float, prev_hook_kf: comfy.hooks.HookKeyframeGroup=None): @@ -523,6 +553,8 @@ class CreateHookKeyframesInterpolated: RETURN_TYPES = ("HOOK_KEYFRAMES",) RETURN_NAMES = ("HOOK_KF",) CATEGORY = "advanced/hooks/scheduling" + DESCRIPTION = "Generate multiple interpolated hook keyframes between start and end strength values over a timestep range." + SHORT_DESCRIPTION = "Generate interpolated hook keyframes over a timestep range." FUNCTION = "create_hook_keyframes" def create_hook_keyframes(self, strength_start: float, strength_end: float, interpolation: str, @@ -568,6 +600,8 @@ class CreateHookKeyframesFromFloats: RETURN_TYPES = ("HOOK_KEYFRAMES",) RETURN_NAMES = ("HOOK_KF",) CATEGORY = "advanced/hooks/scheduling" + DESCRIPTION = "Create hook keyframes from a list of float values distributed evenly across a timestep percentage range." + SHORT_DESCRIPTION = "Create hook keyframes from a list of float values." 
FUNCTION = "create_hook_keyframes" def create_hook_keyframes(self, floats_strength: Union[float, list[float]], @@ -639,6 +673,8 @@ class CombineHooks: EXPERIMENTAL = True RETURN_TYPES = ("HOOKS",) CATEGORY = "advanced/hooks/combine" + DESCRIPTION = "Combine two hook groups into one." + SHORT_DESCRIPTION = None FUNCTION = "combine_hooks" def combine_hooks(self, @@ -666,6 +702,8 @@ class CombineHooksFour: EXPERIMENTAL = True RETURN_TYPES = ("HOOKS",) CATEGORY = "advanced/hooks/combine" + DESCRIPTION = "Combine up to four hook groups into one." + SHORT_DESCRIPTION = None FUNCTION = "combine_hooks" def combine_hooks(self, @@ -699,6 +737,8 @@ class CombineHooksEight: EXPERIMENTAL = True RETURN_TYPES = ("HOOKS",) CATEGORY = "advanced/hooks/combine" + DESCRIPTION = "Combine up to eight hook groups into one." + SHORT_DESCRIPTION = None FUNCTION = "combine_hooks" def combine_hooks(self, diff --git a/comfy_extras/nodes_hunyuan.py b/comfy_extras/nodes_hunyuan.py index 774da75a3..17002278e 100644 --- a/comfy_extras/nodes_hunyuan.py +++ b/comfy_extras/nodes_hunyuan.py @@ -15,6 +15,8 @@ class CLIPTextEncodeHunyuanDiT(io.ComfyNode): return io.Schema( node_id="CLIPTextEncodeHunyuanDiT", category="advanced/conditioning", + description="Encodes text using both BERT and mT5-XL tokenizers for Hunyuan DiT conditioning.", + short_description="Dual-tokenizer text encoding for Hunyuan DiT.", inputs=[ io.Clip.Input("clip"), io.String.Input("bert", multiline=True, dynamic_prompts=True), @@ -42,6 +44,8 @@ class EmptyHunyuanLatentVideo(io.ComfyNode): node_id="EmptyHunyuanLatentVideo", display_name="Empty HunyuanVideo 1.0 Latent", category="latent/video", + description="Creates an empty latent tensor sized for HunyuanVideo 1.0 video generation.", + short_description="Empty latent for HunyuanVideo 1.0 generation.", inputs=[ io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16), io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16), @@ -67,6 +71,8 
@@ class EmptyHunyuanVideo15Latent(EmptyHunyuanLatentVideo): schema = super().define_schema() schema.node_id = "EmptyHunyuanVideo15Latent" schema.display_name = "Empty HunyuanVideo 1.5 Latent" + schema.description = "Creates an empty latent tensor sized for HunyuanVideo 1.5 video generation with 16x spatial downscale." + schema.short_description = "Empty latent for HunyuanVideo 1.5 generation." return schema @classmethod @@ -82,6 +88,8 @@ class HunyuanVideo15ImageToVideo(io.ComfyNode): return io.Schema( node_id="HunyuanVideo15ImageToVideo", category="conditioning/video_models", + description="Prepares conditioning and latent for HunyuanVideo 1.5 image-to-video generation with start image and CLIP vision support.", + short_description="HunyuanVideo 1.5 image-to-video conditioning setup.", inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), @@ -131,6 +139,9 @@ class HunyuanVideo15SuperResolution(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="HunyuanVideo15SuperResolution", + category="conditioning/video_models", + description="Sets up conditioning for HunyuanVideo 1.5 super-resolution upscaling of a latent with noise augmentation and optional image guidance.", + short_description="HunyuanVideo 1.5 super-resolution latent conditioning.", inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), @@ -177,6 +188,8 @@ class LatentUpscaleModelLoader(io.ComfyNode): node_id="LatentUpscaleModelLoader", display_name="Load Latent Upscale Model", category="loaders", + description="Loads a latent upscale model from disk, supporting HunyuanVideo 720p, 1080p, and other latent upsampler architectures.", + short_description="Load a latent upscale model from file.", inputs=[ io.Combo.Input("model_name", options=folder_paths.get_filename_list("latent_upscale_models")), ], @@ -226,6 +239,8 @@ class HunyuanVideo15LatentUpscaleWithModel(io.ComfyNode): node_id="HunyuanVideo15LatentUpscaleWithModel", display_name="Hunyuan 
Video 15 Latent Upscale With Model", category="latent", + description="Upscales a video latent to a target resolution using a loaded latent upscale model and configurable upscale method.", + short_description="Upscale video latent using a latent upscale model.", inputs=[ io.LatentUpscaleModel.Input("model"), io.Latent.Input("samples"), @@ -275,6 +290,8 @@ class TextEncodeHunyuanVideo_ImageToVideo(io.ComfyNode): return io.Schema( node_id="TextEncodeHunyuanVideo_ImageToVideo", category="advanced/conditioning", + description="Encodes text with CLIP vision image embeddings for HunyuanVideo image-to-video conditioning using an interleaved template.", + short_description="Text and image encoding for HunyuanVideo image-to-video.", inputs=[ io.Clip.Input("clip"), io.ClipVisionOutput.Input("clip_vision_output"), @@ -306,6 +323,8 @@ class HunyuanImageToVideo(io.ComfyNode): return io.Schema( node_id="HunyuanImageToVideo", category="conditioning/video_models", + description="Prepares conditioning and latent for Hunyuan image-to-video generation with selectable guidance type.", + short_description="Hunyuan image-to-video conditioning with guidance options.", inputs=[ io.Conditioning.Input("positive"), io.Vae.Input("vae"), @@ -357,6 +376,8 @@ class EmptyHunyuanImageLatent(io.ComfyNode): return io.Schema( node_id="EmptyHunyuanImageLatent", category="latent", + description="Creates an empty latent tensor sized for Hunyuan image generation.", + short_description="Empty latent for Hunyuan image generation.", inputs=[ io.Int.Input("width", default=2048, min=64, max=nodes.MAX_RESOLUTION, step=32), io.Int.Input("height", default=2048, min=64, max=nodes.MAX_RESOLUTION, step=32), @@ -380,6 +401,9 @@ class HunyuanRefinerLatent(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="HunyuanRefinerLatent", + category="conditioning/video_models", + description="Prepares conditioning for a Hunyuan refiner pass by concatenating the input latent with noise augmentation settings.", + 
short_description="Hunyuan refiner conditioning with noise augmentation.", inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), diff --git a/comfy_extras/nodes_hunyuan3d.py b/comfy_extras/nodes_hunyuan3d.py index c2df3e859..71203d343 100644 --- a/comfy_extras/nodes_hunyuan3d.py +++ b/comfy_extras/nodes_hunyuan3d.py @@ -18,6 +18,8 @@ class EmptyLatentHunyuan3Dv2(IO.ComfyNode): return IO.Schema( node_id="EmptyLatentHunyuan3Dv2", category="latent/3d", + description="Creates an empty latent tensor for Hunyuan 3D v2 generation with configurable resolution and batch size.", + short_description="Empty latent for Hunyuan 3D v2 generation.", inputs=[ IO.Int.Input("resolution", default=3072, min=1, max=8192), IO.Int.Input("batch_size", default=1, min=1, max=4096, tooltip="The number of latent images in the batch."), @@ -41,6 +43,8 @@ class Hunyuan3Dv2Conditioning(IO.ComfyNode): return IO.Schema( node_id="Hunyuan3Dv2Conditioning", category="conditioning/video_models", + description="Creates positive and negative conditioning for Hunyuan 3D v2 from a CLIP vision output embedding.", + short_description="Conditioning from CLIP vision for Hunyuan 3D v2.", inputs=[ IO.ClipVisionOutput.Input("clip_vision_output"), ], @@ -66,6 +70,8 @@ class Hunyuan3Dv2ConditioningMultiView(IO.ComfyNode): return IO.Schema( node_id="Hunyuan3Dv2ConditioningMultiView", category="conditioning/video_models", + description="Creates multi-view conditioning for Hunyuan 3D v2 from up to four directional CLIP vision outputs with positional encoding.", + short_description="Multi-view conditioning for Hunyuan 3D v2.", inputs=[ IO.ClipVisionOutput.Input("front", optional=True), IO.ClipVisionOutput.Input("left", optional=True), @@ -103,6 +109,8 @@ class VAEDecodeHunyuan3D(IO.ComfyNode): return IO.Schema( node_id="VAEDecodeHunyuan3D", category="latent/3d", + description="Decodes a Hunyuan 3D latent into a voxel grid using a VAE with configurable chunk size and octree resolution.", + 
short_description="Decodes Hunyuan 3D latent into voxels.", inputs=[ IO.Latent.Input("samples"), IO.Vae.Input("vae"), @@ -425,6 +433,8 @@ class VoxelToMeshBasic(IO.ComfyNode): return IO.Schema( node_id="VoxelToMeshBasic", category="3d", + description="Converts a voxel grid to a 3D mesh using basic cube-based surface extraction with adjustable threshold.", + short_description="Converts voxels to mesh using basic extraction.", inputs=[ IO.Voxel.Input("voxel"), IO.Float.Input("threshold", default=0.6, min=-1.0, max=1.0, step=0.01), @@ -454,6 +464,8 @@ class VoxelToMesh(IO.ComfyNode): return IO.Schema( node_id="VoxelToMesh", category="3d", + description="Converts a voxel grid to a 3D mesh using selectable surface net or basic algorithm with adjustable threshold.", + short_description="Converts voxels to mesh with algorithm selection.", inputs=[ IO.Voxel.Input("voxel"), IO.Combo.Input("algorithm", options=["surface net", "basic"]), @@ -621,6 +633,8 @@ class SaveGLB(IO.ComfyNode): display_name="Save 3D Model", search_aliases=["export 3d model", "save mesh"], category="3d", + description="Saves a 3D mesh or model file to disk in GLB format with optional workflow metadata embedding.", + short_description="Saves 3D mesh or model to GLB file.", is_output_node=True, inputs=[ IO.MultiType.Input( diff --git a/comfy_extras/nodes_hypernetwork.py b/comfy_extras/nodes_hypernetwork.py index 2a6a87a81..fb32b0b74 100644 --- a/comfy_extras/nodes_hypernetwork.py +++ b/comfy_extras/nodes_hypernetwork.py @@ -103,6 +103,8 @@ class HypernetworkLoader(IO.ComfyNode): return IO.Schema( node_id="HypernetworkLoader", category="loaders", + description="Loads a hypernetwork and patches it onto a diffusion model's attention layers with adjustable strength.", + short_description="Loads and applies a hypernetwork to a model.", inputs=[ IO.Model.Input("model"), IO.Combo.Input("hypernetwork_name", options=folder_paths.get_filename_list("hypernetworks")), diff --git a/comfy_extras/nodes_hypertile.py 
b/comfy_extras/nodes_hypertile.py index 0ad5e6773..083f2a196 100644 --- a/comfy_extras/nodes_hypertile.py +++ b/comfy_extras/nodes_hypertile.py @@ -28,6 +28,8 @@ class HyperTile(io.ComfyNode): return io.Schema( node_id="HyperTile", category="model_patches/unet", + description="Patches the model to split self-attention into smaller tiles during inference, reducing memory usage and speeding up generation at higher resolutions.", + short_description="Tile self-attention for faster high-res generation.", inputs=[ io.Model.Input("model"), io.Int.Input("tile_size", default=256, min=1, max=2048), diff --git a/comfy_extras/nodes_image_compare.py b/comfy_extras/nodes_image_compare.py index 8e9f809e6..9c95f4088 100644 --- a/comfy_extras/nodes_image_compare.py +++ b/comfy_extras/nodes_image_compare.py @@ -13,6 +13,7 @@ class ImageCompare(IO.ComfyNode): node_id="ImageCompare", display_name="Image Compare", description="Compares two images side by side with a slider.", + short_description=None, category="image", is_experimental=True, is_output_node=True, diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py index cb4fb24a1..359984162 100644 --- a/comfy_extras/nodes_images.py +++ b/comfy_extras/nodes_images.py @@ -25,6 +25,8 @@ class ImageCrop(IO.ComfyNode): search_aliases=["trim"], display_name="Image Crop", category="image/transform", + description="Crops a rectangular region from an image at the specified position and dimensions.", + short_description="Crops a region from an image.", inputs=[ IO.Image.Input("image"), IO.Int.Input("width", default=512, min=1, max=nodes.MAX_RESOLUTION, step=1), @@ -54,6 +56,8 @@ class RepeatImageBatch(IO.ComfyNode): node_id="RepeatImageBatch", search_aliases=["duplicate image", "clone image"], category="image/batch", + description="Repeats an image a specified number of times to create a batch of identical images.", + short_description="Repeats an image to create a batch.", inputs=[ IO.Image.Input("image"), 
IO.Int.Input("amount", default=1, min=1, max=4096), @@ -76,6 +80,8 @@ class ImageFromBatch(IO.ComfyNode): node_id="ImageFromBatch", search_aliases=["select image", "pick from batch", "extract image"], category="image/batch", + description="Selects a contiguous range of images from a batch starting at a given index.", + short_description="Selects images from a batch by index.", inputs=[ IO.Image.Input("image"), IO.Int.Input("batch_index", default=0, min=0, max=4095), @@ -102,6 +108,8 @@ class ImageAddNoise(IO.ComfyNode): node_id="ImageAddNoise", search_aliases=["film grain"], category="image", + description="Adds random noise to an image with adjustable strength, useful for film grain effects.", + short_description="Adds random noise to an image.", inputs=[ IO.Image.Input("image"), IO.Int.Input( @@ -134,6 +142,8 @@ class SaveAnimatedWEBP(IO.ComfyNode): return IO.Schema( node_id="SaveAnimatedWEBP", category="image/animation", + description="Saves a sequence of images as an animated WEBP file with configurable FPS, quality, and compression.", + short_description="Saves images as an animated WEBP file.", inputs=[ IO.Image.Input("images"), IO.String.Input("filename_prefix", default="ComfyUI"), @@ -171,6 +181,8 @@ class SaveAnimatedPNG(IO.ComfyNode): return IO.Schema( node_id="SaveAnimatedPNG", category="image/animation", + description="Saves a sequence of images as an animated PNG (APNG) file with configurable FPS and compression level.", + short_description="Saves images as an animated PNG file.", inputs=[ IO.Image.Input("images"), IO.String.Input("filename_prefix", default="ComfyUI"), @@ -207,6 +219,7 @@ class ImageStitch(IO.ComfyNode): description="Stitches image2 to image1 in the specified direction.\n" "If image2 is not provided, returns image1 unchanged.\n" "Optional spacing can be added between images.", + short_description="Joins two images together in a specified direction.", category="image/transform", inputs=[ IO.Image.Input("image1"), @@ -379,6 +392,8 @@ 
class ResizeAndPadImage(IO.ComfyNode): node_id="ResizeAndPadImage", search_aliases=["fit to size"], category="image/transform", + description="Resizes an image to fit within target dimensions while preserving aspect ratio, then pads with a solid color to fill the target size.", + short_description="Resizes an image to fit and pads the remainder.", inputs=[ IO.Image.Input("image"), IO.Int.Input("target_width", default=512, min=1, max=nodes.MAX_RESOLUTION, step=1), @@ -430,6 +445,7 @@ class SaveSVGNode(IO.ComfyNode): node_id="SaveSVGNode", search_aliases=["export vector", "save vector graphics"], description="Save SVG files on disk.", + short_description=None, category="image/save", inputs=[ IO.SVG.Input("svg"), @@ -502,7 +518,7 @@ class GetImageSize(IO.ComfyNode): node_id="GetImageSize", search_aliases=["dimensions", "resolution", "image info"], display_name="Get Image Size", - description="Returns width and height of the image, and passes it through unchanged.", + description="Returns the width, height, and batch size of an image.", category="image", inputs=[ IO.Image.Input("image"), @@ -537,6 +553,8 @@ class ImageRotate(IO.ComfyNode): node_id="ImageRotate", search_aliases=["turn", "flip orientation"], category="image/transform", + description="Rotates an image by 90, 180, or 270 degrees.", + short_description=None, inputs=[ IO.Image.Input("image"), IO.Combo.Input("rotation", options=["none", "90 degrees", "180 degrees", "270 degrees"]), @@ -567,6 +585,8 @@ class ImageFlip(IO.ComfyNode): node_id="ImageFlip", search_aliases=["mirror", "reflect"], category="image/transform", + description="Flips an image horizontally or vertically.", + short_description=None, inputs=[ IO.Image.Input("image"), IO.Combo.Input("flip_method", options=["x-axis: vertically", "y-axis: horizontally"]), @@ -593,6 +613,8 @@ class ImageScaleToMaxDimension(IO.ComfyNode): return IO.Schema( node_id="ImageScaleToMaxDimension", category="image/upscaling", + description="Scales an image so its 
largest dimension matches the specified size while preserving aspect ratio.", + short_description="Scales image to a target max dimension size.", inputs=[ IO.Image.Input("image"), IO.Combo.Input( diff --git a/comfy_extras/nodes_ip2p.py b/comfy_extras/nodes_ip2p.py index 78f29915d..1849a69d1 100644 --- a/comfy_extras/nodes_ip2p.py +++ b/comfy_extras/nodes_ip2p.py @@ -10,6 +10,8 @@ class InstructPixToPixConditioning(io.ComfyNode): return io.Schema( node_id="InstructPixToPixConditioning", category="conditioning/instructpix2pix", + description="Prepares conditioning for InstructPix2Pix image editing by encoding the input image through a VAE and attaching it as concat latent to both positive and negative conditioning.", + short_description="Prepare conditioning for InstructPix2Pix editing.", inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), diff --git a/comfy_extras/nodes_kandinsky5.py b/comfy_extras/nodes_kandinsky5.py index 346c50cde..3c9f7aea9 100644 --- a/comfy_extras/nodes_kandinsky5.py +++ b/comfy_extras/nodes_kandinsky5.py @@ -14,6 +14,8 @@ class Kandinsky5ImageToVideo(io.ComfyNode): return io.Schema( node_id="Kandinsky5ImageToVideo", category="conditioning/video_models", + description="Sets up Kandinsky 5 image-to-video generation by creating an empty video latent and optionally encoding a start image for conditioning.", + short_description="Sets up Kandinsky 5 image-to-video conditioning.", inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), @@ -73,6 +75,7 @@ class NormalizeVideoLatentStart(io.ComfyNode): node_id="NormalizeVideoLatentStart", category="conditioning/video_models", description="Normalizes the initial frames of a video latent to match the mean and standard deviation of subsequent reference frames. 
Helps reduce differences between the starting frames and the rest of the video.", + short_description="Normalizes initial video latent frames to match reference frames.", inputs=[ io.Latent.Input("latent"), io.Int.Input("start_frame_count", default=4, min=1, max=nodes.MAX_RESOLUTION, step=1, tooltip="Number of latent frames to normalize, counted from the start"), @@ -106,6 +109,8 @@ class CLIPTextEncodeKandinsky5(io.ComfyNode): node_id="CLIPTextEncodeKandinsky5", search_aliases=["kandinsky prompt"], category="advanced/conditioning/kandinsky5", + description="Encodes separate CLIP-L and Qwen 2.5 7B text prompts into Kandinsky 5 conditioning.", + short_description="Encodes CLIP-L and Qwen prompts for Kandinsky 5.", inputs=[ io.Clip.Input("clip"), io.String.Input("clip_l", multiline=True, dynamic_prompts=True), diff --git a/comfy_extras/nodes_latent.py b/comfy_extras/nodes_latent.py index 8d2d7297a..9282c76bb 100644 --- a/comfy_extras/nodes_latent.py +++ b/comfy_extras/nodes_latent.py @@ -23,6 +23,8 @@ class LatentAdd(io.ComfyNode): node_id="LatentAdd", search_aliases=["combine latents", "sum latents"], category="latent/advanced", + description="Adds two latent tensors element-wise, automatically resizing the second to match the first.", + short_description="Add two latent tensors element-wise.", inputs=[ io.Latent.Input("samples1"), io.Latent.Input("samples2"), @@ -50,6 +52,8 @@ class LatentSubtract(io.ComfyNode): node_id="LatentSubtract", search_aliases=["difference latent", "remove features"], category="latent/advanced", + description="Subtracts one latent tensor from another element-wise, automatically resizing the second to match the first.", + short_description="Subtract one latent tensor from another.", inputs=[ io.Latent.Input("samples1"), io.Latent.Input("samples2"), @@ -77,6 +81,8 @@ class LatentMultiply(io.ComfyNode): node_id="LatentMultiply", search_aliases=["scale latent", "amplify latent", "latent gain"], category="latent/advanced", + 
description="Multiplies a latent tensor by a scalar value to scale its magnitude up or down.", + short_description="Scale a latent tensor by a multiplier.", inputs=[ io.Latent.Input("samples"), io.Float.Input("multiplier", default=1.0, min=-10.0, max=10.0, step=0.01), @@ -101,6 +107,8 @@ class LatentInterpolate(io.ComfyNode): node_id="LatentInterpolate", search_aliases=["blend latent", "mix latent", "lerp latent", "transition"], category="latent/advanced", + description="Interpolates between two latent tensors using a ratio, preserving magnitude for smoother blending than linear interpolation.", + short_description="Interpolate between two latent tensors.", inputs=[ io.Latent.Input("samples1"), io.Latent.Input("samples2"), @@ -140,6 +148,8 @@ class LatentConcat(io.ComfyNode): node_id="LatentConcat", search_aliases=["join latents", "stitch latents"], category="latent/advanced", + description="Concatenates two latent tensors along a chosen spatial or temporal dimension (x, y, or t) with optional reversal.", + short_description="Concatenate two latents along a chosen dimension.", inputs=[ io.Latent.Input("samples1"), io.Latent.Input("samples2"), @@ -180,6 +190,8 @@ class LatentCut(io.ComfyNode): node_id="LatentCut", search_aliases=["crop latent", "slice latent", "extract region"], category="latent/advanced", + description="Extracts a contiguous slice from a latent tensor along a chosen spatial or temporal dimension at a specified index and size.", + short_description="Extract a slice from a latent along a dimension.", inputs=[ io.Latent.Input("samples"), io.Combo.Input("dim", options=["x", "y", "t"]), @@ -221,6 +233,8 @@ class LatentCutToBatch(io.ComfyNode): node_id="LatentCutToBatch", search_aliases=["slice to batch", "split latent", "tile latent"], category="latent/advanced", + description="Slices a latent tensor along a chosen dimension into equal-sized chunks and reshapes them into the batch dimension.", + short_description="Slice latent along a dimension into 
batch chunks.", inputs=[ io.Latent.Input("samples"), io.Combo.Input("dim", options=["t", "x", "y"]), @@ -263,6 +277,8 @@ class LatentBatch(io.ComfyNode): node_id="LatentBatch", search_aliases=["combine latents", "merge latents", "join latents"], category="latent/batch", + description="Concatenates two latent tensors along the batch dimension, preserving batch index metadata.", + short_description="Concatenate two latents along the batch dimension.", is_deprecated=True, inputs=[ io.Latent.Input("samples1"), @@ -291,6 +307,8 @@ class LatentBatchSeedBehavior(io.ComfyNode): return io.Schema( node_id="LatentBatchSeedBehavior", category="latent/advanced", + description="Controls whether each item in a latent batch receives a random or fixed noise seed during sampling.", + short_description="Set random or fixed seed behavior for batches.", inputs=[ io.Latent.Input("samples"), io.Combo.Input("seed_behavior", options=["random", "fixed"], default="fixed"), @@ -320,6 +338,8 @@ class LatentApplyOperation(io.ComfyNode): node_id="LatentApplyOperation", search_aliases=["transform latent"], category="latent/advanced/operations", + description="Applies a latent operation (such as tonemap or sharpen) directly to a latent tensor.", + short_description="Apply a latent operation to a latent tensor.", is_experimental=True, inputs=[ io.Latent.Input("samples"), @@ -344,6 +364,8 @@ class LatentApplyOperationCFG(io.ComfyNode): return io.Schema( node_id="LatentApplyOperationCFG", category="latent/advanced/operations", + description="Applies a latent operation during the CFG pre-processing stage of sampling, modifying the model's prediction before guidance is applied.", + short_description="Apply a latent operation during CFG pre-processing.", is_experimental=True, inputs=[ io.Model.Input("model"), @@ -376,6 +398,8 @@ class LatentOperationTonemapReinhard(io.ComfyNode): node_id="LatentOperationTonemapReinhard", search_aliases=["hdr latent"], category="latent/advanced/operations", + 
description="Creates a Reinhard tonemapping operation that compresses high-magnitude latent values to reduce blown-out artifacts.", + short_description="Create a Reinhard tonemapping latent operation.", is_experimental=True, inputs=[ io.Float.Input("multiplier", default=1.0, min=0.0, max=100.0, step=0.01), @@ -411,6 +435,8 @@ class LatentOperationSharpen(io.ComfyNode): return io.Schema( node_id="LatentOperationSharpen", category="latent/advanced/operations", + description="Creates a sharpening operation that enhances detail in latent space using a Gaussian-based unsharp mask with configurable radius, sigma, and strength.", + short_description="Create a Gaussian-based latent sharpening operation.", is_experimental=True, inputs=[ io.Int.Input("sharpen_radius", default=9, min=1, max=31, step=1), @@ -448,6 +474,8 @@ class ReplaceVideoLatentFrames(io.ComfyNode): return io.Schema( node_id="ReplaceVideoLatentFrames", category="latent/batch", + description="Replaces a range of frames in a destination video latent with frames from a source latent at a specified index.", + short_description="Replace video latent frames at a given index.", inputs=[ io.Latent.Input("destination", tooltip="The destination latent where frames will be replaced."), io.Latent.Input("source", optional=True, tooltip="The source latent providing frames to insert into the destination latent. 
If not provided, the destination latent is returned unchanged."), diff --git a/comfy_extras/nodes_load_3d.py b/comfy_extras/nodes_load_3d.py index edbb5cd40..3949a2b5f 100644 --- a/comfy_extras/nodes_load_3d.py +++ b/comfy_extras/nodes_load_3d.py @@ -31,6 +31,8 @@ class Load3D(IO.ComfyNode): node_id="Load3D", display_name="Load 3D & Animation", category="3d", + description="Loads a 3D model file and renders it to produce an image, mask, normal map, camera info, recording video, and 3D file output.", + short_description="Loads and renders a 3D model file.", is_experimental=True, inputs=[ IO.Combo.Input("model_file", options=sorted(files), upload=IO.UploadType.model), @@ -81,6 +83,8 @@ class Preview3D(IO.ComfyNode): search_aliases=["view mesh", "3d viewer"], display_name="Preview 3D & Animation", category="3d", + description="Previews a 3D model or file in the UI with optional camera info and background image overlay.", + short_description="Previews a 3D model in the UI.", is_experimental=True, is_output_node=True, inputs=[ diff --git a/comfy_extras/nodes_logic.py b/comfy_extras/nodes_logic.py index c066064ac..b54046c55 100644 --- a/comfy_extras/nodes_logic.py +++ b/comfy_extras/nodes_logic.py @@ -16,6 +16,8 @@ class SwitchNode(io.ComfyNode): node_id="ComfySwitchNode", display_name="Switch", category="logic", + description="Routes one of two inputs to the output based on a boolean switch value, evaluating only the selected branch lazily.", + short_description="Route one of two inputs based on a boolean.", is_experimental=True, inputs=[ io.Boolean.Input("switch"), @@ -47,6 +49,8 @@ class SoftSwitchNode(io.ComfyNode): node_id="ComfySoftSwitchNode", display_name="Soft Switch", category="logic", + description="Routes one of two optional inputs to the output based on a boolean, falling back to whichever input is connected if only one is provided.", + short_description="Switch with optional fallback to connected input.", is_experimental=True, inputs=[ 
io.Boolean.Input("switch"), @@ -102,6 +106,8 @@ class CustomComboNode(io.ComfyNode): node_id="CustomCombo", display_name="Custom Combo", category="utils", + description="Provides a user-defined dropdown combo box where options are written by the user, outputting the selected string and its index.", + short_description="User-defined dropdown outputting string and index.", is_experimental=True, inputs=[io.Combo.Input("choice", options=[])], outputs=[ @@ -137,6 +143,8 @@ class DCTestNode(io.ComfyNode): node_id="DCTestNode", display_name="DCTest", category="logic", + description="Test node demonstrating DynamicCombo inputs with nested sub-options that conditionally show different input types.", + short_description="Test node for DynamicCombo nested inputs.", is_output_node=True, inputs=[io.DynamicCombo.Input("combo", options=[ io.DynamicCombo.Option("option1", [io.String.Input("string")]), @@ -175,6 +183,8 @@ class AutogrowNamesTestNode(io.ComfyNode): node_id="AutogrowNamesTestNode", display_name="AutogrowNamesTest", category="logic", + description="Test node demonstrating Autogrow inputs with named template slots that dynamically add float inputs.", + short_description="Test node for Autogrow named template inputs.", inputs=[ _io.Autogrow.Input("autogrow", template=template) ], @@ -195,6 +205,8 @@ class AutogrowPrefixTestNode(io.ComfyNode): node_id="AutogrowPrefixTestNode", display_name="AutogrowPrefixTest", category="logic", + description="Test node demonstrating Autogrow inputs with prefix-based template slots that dynamically add numbered float inputs.", + short_description="Test node for Autogrow prefix template inputs.", inputs=[ _io.Autogrow.Input("autogrow", template=template) ], @@ -214,6 +226,8 @@ class ComboOutputTestNode(io.ComfyNode): node_id="ComboOptionTestNode", display_name="ComboOptionTest", category="logic", + description="Test node demonstrating combo output types by passing two selected combo values through as outputs.", + short_description="Test 
node for combo output passthrough.", inputs=[io.Combo.Input("combo", options=["option1", "option2", "option3"]), io.Combo.Input("combo2", options=["option4", "option5", "option6"])], outputs=[io.Combo.Output(), io.Combo.Output()], @@ -231,6 +245,8 @@ class ConvertStringToComboNode(io.ComfyNode): search_aliases=["string to dropdown", "text to combo"], display_name="Convert String to Combo", category="logic", + description="Converts a string value into a combo type output so it can be used as a dropdown selection in downstream nodes.", + short_description="Convert a string to a combo type output.", inputs=[io.String.Input("string")], outputs=[io.Combo.Output()], ) @@ -247,6 +263,8 @@ class InvertBooleanNode(io.ComfyNode): search_aliases=["not", "toggle", "negate", "flip boolean"], display_name="Invert Boolean", category="logic", + description="Inverts a boolean value, outputting true when input is false and vice versa.", + short_description="Invert a boolean value.", inputs=[io.Boolean.Input("boolean")], outputs=[io.Boolean.Output()], ) diff --git a/comfy_extras/nodes_lora_debug.py b/comfy_extras/nodes_lora_debug.py index 937a0fbfb..3ce39a476 100644 --- a/comfy_extras/nodes_lora_debug.py +++ b/comfy_extras/nodes_lora_debug.py @@ -32,6 +32,7 @@ class LoraLoaderBypass: CATEGORY = "loaders" DESCRIPTION = "Apply LoRA in bypass mode. Unlike regular LoRA, this doesn't modify model weights - instead it injects the LoRA computation during forward pass. Useful for training scenarios." + SHORT_DESCRIPTION = "Applies LoRA via forward pass injection, not weight modification." EXPERIMENTAL = True def load_lora(self, model, clip, lora_name, strength_model, strength_clip): @@ -62,6 +63,8 @@ class LoraLoaderBypassModelOnly(LoraLoaderBypass): "strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}), }} RETURN_TYPES = ("MODEL",) + DESCRIPTION = "Apply LoRA in bypass mode to only the diffusion model without modifying base weights or affecting CLIP." 
+ SHORT_DESCRIPTION = "Apply bypass LoRA to model only, no CLIP." FUNCTION = "load_lora_model_only" def load_lora_model_only(self, model, lora_name, strength_model): diff --git a/comfy_extras/nodes_lora_extract.py b/comfy_extras/nodes_lora_extract.py index 1542d0a88..8f44cccf3 100644 --- a/comfy_extras/nodes_lora_extract.py +++ b/comfy_extras/nodes_lora_extract.py @@ -92,6 +92,8 @@ class LoraSave(io.ComfyNode): search_aliases=["export lora"], display_name="Extract and Save Lora", category="_for_testing", + description="Extracts LoRA weights from a model or text encoder diff using SVD decomposition and saves them as a safetensors file, supporting standard and full diff modes.", + short_description="Extract and save LoRA from model diff.", inputs=[ io.String.Input("filename_prefix", default="loras/ComfyUI_extracted_lora"), io.Int.Input("rank", default=8, min=1, max=4096, step=1), diff --git a/comfy_extras/nodes_lotus.py b/comfy_extras/nodes_lotus.py index 9f62ba2bf..60f02f6e8 100644 --- a/comfy_extras/nodes_lotus.py +++ b/comfy_extras/nodes_lotus.py @@ -11,6 +11,8 @@ class LotusConditioning(io.ComfyNode): return io.Schema( node_id="LotusConditioning", category="conditioning/lotus", + description="Provides precomputed null conditioning embeddings for the Lotus depth/normal estimation model, avoiding the need for a separate text encoder.", + short_description="Precomputed null conditioning for Lotus model.", inputs=[], outputs=[io.Conditioning.Output(display_name="conditioning")], ) diff --git a/comfy_extras/nodes_lt.py b/comfy_extras/nodes_lt.py index 2aec62f61..e537ab093 100644 --- a/comfy_extras/nodes_lt.py +++ b/comfy_extras/nodes_lt.py @@ -18,6 +18,8 @@ class EmptyLTXVLatentVideo(io.ComfyNode): return io.Schema( node_id="EmptyLTXVLatentVideo", category="latent/video/ltxv", + description="Creates an empty LTXV video latent tensor with the specified dimensions and batch size.", + short_description="Creates an empty LTXV video latent tensor.", inputs=[ 
io.Int.Input("width", default=768, min=64, max=nodes.MAX_RESOLUTION, step=32), io.Int.Input("height", default=512, min=64, max=nodes.MAX_RESOLUTION, step=32), @@ -42,6 +44,8 @@ class LTXVImgToVideo(io.ComfyNode): return io.Schema( node_id="LTXVImgToVideo", category="conditioning/video_models", + description="Encodes an image through a VAE and sets up conditioning for LTXV image-to-video generation with adjustable strength.", + short_description="Sets up LTXV image-to-video conditioning.", inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), @@ -87,6 +91,8 @@ class LTXVImgToVideoInplace(io.ComfyNode): return io.Schema( node_id="LTXVImgToVideoInplace", category="conditioning/video_models", + description="Encodes an image through a VAE and injects it into an existing latent for in-place LTXV image-to-video conditioning.", + short_description="In-place LTXV image-to-video latent conditioning.", inputs=[ io.Vae.Input("vae"), io.Image.Input("image"), @@ -171,6 +177,8 @@ class LTXVAddGuide(io.ComfyNode): return io.Schema( node_id="LTXVAddGuide", category="conditioning/video_models", + description="Adds a guiding image or video to LTXV conditioning at a specified frame index to control video generation.", + short_description="Adds a guiding image or video to LTXV conditioning.", inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), @@ -335,6 +343,8 @@ class LTXVCropGuides(io.ComfyNode): return io.Schema( node_id="LTXVCropGuides", category="conditioning/video_models", + description="Removes appended keyframe guide latents from an LTXV latent and resets keyframe indices in the conditioning.", + short_description="Removes keyframe guide latents from LTXV conditioning.", inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), @@ -373,6 +383,8 @@ class LTXVConditioning(io.ComfyNode): return io.Schema( node_id="LTXVConditioning", category="conditioning/video_models", + description="Sets the frame rate on 
LTXV positive and negative conditioning for video generation.", + short_description="Sets frame rate on LTXV conditioning.", inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), @@ -397,6 +409,8 @@ class ModelSamplingLTXV(io.ComfyNode): return io.Schema( node_id="ModelSamplingLTXV", category="advanced/model", + description="Configures LTXV model sampling by computing a shift parameter from max_shift, base_shift, and latent token count.", + short_description="Configures LTXV model sampling shift parameters.", inputs=[ io.Model.Input("model"), io.Float.Input("max_shift", default=2.05, min=0.0, max=100.0, step=0.01), @@ -442,6 +456,8 @@ class LTXVScheduler(io.ComfyNode): return io.Schema( node_id="LTXVScheduler", category="sampling/custom_sampling/schedulers", + description="Generates a sigma schedule for LTXV sampling with configurable shift parameters, stretch, and terminal value.", + short_description="Generates a sigma schedule for LTXV sampling.", inputs=[ io.Int.Input("steps", default=20, min=1, max=10000), io.Float.Input("max_shift", default=2.05, min=0.0, max=100.0, step=0.01), @@ -546,6 +562,8 @@ class LTXVPreprocess(io.ComfyNode): return io.Schema( node_id="LTXVPreprocess", category="image", + description="Applies H.264 video compression preprocessing to images to improve LTXV generation quality.", + short_description="Applies video compression preprocessing for LTXV.", inputs=[ io.Image.Input("image"), io.Int.Input( @@ -574,6 +592,8 @@ class LTXVConcatAVLatent(io.ComfyNode): return io.Schema( node_id="LTXVConcatAVLatent", category="latent/video/ltxv", + description="Concatenates separate video and audio latents into a combined audio-video latent for LTXV processing.", + short_description="Concatenates video and audio latents for LTXV.", inputs=[ io.Latent.Input("video_latent"), io.Latent.Input("audio_latent"), @@ -609,7 +629,8 @@ class LTXVSeparateAVLatent(io.ComfyNode): return io.Schema( node_id="LTXVSeparateAVLatent", 
category="latent/video/ltxv", - description="LTXV Separate AV Latent", + description="Separates a combined audio-video latent into individual video and audio latents.", + short_description=None, inputs=[ io.Latent.Input("av_latent"), ], diff --git a/comfy_extras/nodes_lt_audio.py b/comfy_extras/nodes_lt_audio.py index 1966fd1bf..50291bad9 100644 --- a/comfy_extras/nodes_lt_audio.py +++ b/comfy_extras/nodes_lt_audio.py @@ -14,6 +14,8 @@ class LTXVAudioVAELoader(io.ComfyNode): node_id="LTXVAudioVAELoader", display_name="LTXV Audio VAE Loader", category="audio", + description="Loads an LTXV Audio VAE model from a checkpoint file for audio encoding and decoding.", + short_description="Loads an LTXV Audio VAE model checkpoint.", inputs=[ io.Combo.Input( "ckpt_name", @@ -38,6 +40,7 @@ class LTXVAudioVAEEncode(io.ComfyNode): node_id="LTXVAudioVAEEncode", display_name="LTXV Audio VAE Encode", category="audio", + description="Encodes audio into latent representations using the LTXV Audio VAE model.", inputs=[ io.Audio.Input("audio", tooltip="The audio to be encoded."), io.Vae.Input( @@ -68,6 +71,8 @@ class LTXVAudioVAEDecode(io.ComfyNode): node_id="LTXVAudioVAEDecode", display_name="LTXV Audio VAE Decode", category="audio", + description="Decodes latent representations back into audio using the LTXV Audio VAE model.", + short_description="Decodes latents back to audio via LTXV Audio VAE.", inputs=[ io.Latent.Input("samples", tooltip="The latent to be decoded."), io.Vae.Input( @@ -101,6 +106,8 @@ class LTXVEmptyLatentAudio(io.ComfyNode): node_id="LTXVEmptyLatentAudio", display_name="LTXV Empty Latent Audio", category="latent/audio", + description="Creates an empty LTXV audio latent tensor sized according to the frame count, frame rate, and Audio VAE configuration.", + short_description="Creates an empty LTXV audio latent tensor.", inputs=[ io.Int.Input( "frames_number", @@ -177,6 +184,7 @@ class LTXAVTextEncoderLoader(io.ComfyNode): display_name="LTXV Audio Text Encoder 
Loader", category="advanced/loaders", description="[Recipes]\n\nltxav: gemma 3 12B", + short_description=None, inputs=[ io.Combo.Input( "text_encoder", diff --git a/comfy_extras/nodes_lt_upsampler.py b/comfy_extras/nodes_lt_upsampler.py index f99ba13fb..c0cd981a3 100644 --- a/comfy_extras/nodes_lt_upsampler.py +++ b/comfy_extras/nodes_lt_upsampler.py @@ -19,6 +19,8 @@ class LTXVLatentUpsampler: RETURN_TYPES = ("LATENT",) FUNCTION = "upsample_latent" CATEGORY = "latent/video" + DESCRIPTION = "Upsample an LTXV video latent by a factor of 2 using a dedicated latent upscale model." + SHORT_DESCRIPTION = "Upsample an LTXV video latent by 2x." EXPERIMENTAL = True def upsample_latent( diff --git a/comfy_extras/nodes_lumina2.py b/comfy_extras/nodes_lumina2.py index 2550475ae..c02318db1 100644 --- a/comfy_extras/nodes_lumina2.py +++ b/comfy_extras/nodes_lumina2.py @@ -10,6 +10,8 @@ class RenormCFG(io.ComfyNode): return io.Schema( node_id="RenormCFG", category="advanced/model", + description="Applies renormalized classifier-free guidance with configurable truncation threshold and renormalization strength to control CFG output magnitude.", + short_description="Applies renormalized classifier-free guidance with truncation.", inputs=[ io.Model.Input("model"), io.Float.Input("cfg_trunc", default=100, min=0.0, max=100.0, step=0.01), @@ -84,6 +86,7 @@ class CLIPTextEncodeLumina2(io.ComfyNode): category="conditioning", description="Encodes a system prompt and a user prompt using a CLIP model into an embedding " "that can be used to guide the diffusion model towards generating specific images.", + short_description="Encodes system and user prompts via CLIP for Lumina2.", inputs=[ io.Combo.Input( "system_prompt", diff --git a/comfy_extras/nodes_mahiro.py b/comfy_extras/nodes_mahiro.py index 6459ca8c1..a422ca672 100644 --- a/comfy_extras/nodes_mahiro.py +++ b/comfy_extras/nodes_mahiro.py @@ -13,6 +13,7 @@ class Mahiro(io.ComfyNode): display_name="Mahiro CFG", category="_for_testing", 
description="Modify the guidance to scale more on the 'direction' of the positive prompt rather than the difference between the negative prompt.", + short_description="Scales guidance toward positive prompt direction over negative difference.", inputs=[ io.Model.Input("model"), ], diff --git a/comfy_extras/nodes_mask.py b/comfy_extras/nodes_mask.py index 98e8fef8f..2f6bb4bc1 100644 --- a/comfy_extras/nodes_mask.py +++ b/comfy_extras/nodes_mask.py @@ -52,6 +52,8 @@ class LatentCompositeMasked(IO.ComfyNode): node_id="LatentCompositeMasked", search_aliases=["overlay latent", "layer latent", "paste latent", "inpaint latent"], category="latent", + description="Composites a source latent onto a destination latent at a specified position with optional mask and resize support.", + short_description="Composites one latent onto another with masking.", inputs=[ IO.Latent.Input("destination"), IO.Latent.Input("source"), @@ -81,6 +83,8 @@ class ImageCompositeMasked(IO.ComfyNode): node_id="ImageCompositeMasked", search_aliases=["paste image", "overlay", "layer"], category="image", + description="Composites a source image onto a destination image at a specified position with optional mask and resize support.", + short_description="Composites one image onto another with masking.", inputs=[ IO.Image.Input("destination"), IO.Image.Input("source"), @@ -110,6 +114,8 @@ class MaskToImage(IO.ComfyNode): search_aliases=["convert mask"], display_name="Convert Mask to Image", category="mask", + description="Converts a single-channel mask into a three-channel grayscale image.", + short_description=None, inputs=[ IO.Mask.Input("mask"), ], @@ -132,6 +138,7 @@ class ImageToMask(IO.ComfyNode): search_aliases=["extract channel", "channel to mask"], display_name="Convert Image to Mask", category="mask", + description="Extracts a selected color channel from an image as a mask.", inputs=[ IO.Image.Input("image"), IO.Combo.Input("channel", options=["red", "green", "blue", "alpha"]), @@ -155,6 +162,8 
@@ class ImageColorToMask(IO.ComfyNode): node_id="ImageColorToMask", search_aliases=["color keying", "chroma key"], category="mask", + description="Creates a mask from an image where pixels matching a specified RGB color value become white.", + short_description="Creates a mask from pixels matching a color.", inputs=[ IO.Image.Input("image"), IO.Int.Input("color", default=0, min=0, max=0xFFFFFF, step=1, display_mode=IO.NumberDisplay.number), @@ -178,6 +187,8 @@ class SolidMask(IO.ComfyNode): return IO.Schema( node_id="SolidMask", category="mask", + description="Creates a uniform solid mask filled with a single value at the specified dimensions.", + short_description="Creates a solid mask with a uniform value.", inputs=[ IO.Float.Input("value", default=1.0, min=0.0, max=1.0, step=0.01), IO.Int.Input("width", default=512, min=1, max=nodes.MAX_RESOLUTION, step=1), @@ -201,6 +212,8 @@ class InvertMask(IO.ComfyNode): node_id="InvertMask", search_aliases=["reverse mask", "flip mask"], category="mask", + description="Inverts a mask so white becomes black and vice versa.", + short_description=None, inputs=[ IO.Mask.Input("mask"), ], @@ -222,6 +235,8 @@ class CropMask(IO.ComfyNode): node_id="CropMask", search_aliases=["cut mask", "extract mask region", "mask slice"], category="mask", + description="Crops a rectangular region from a mask at the specified position and dimensions.", + short_description="Crops a rectangular region from a mask.", inputs=[ IO.Mask.Input("mask"), IO.Int.Input("x", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1), @@ -248,6 +263,8 @@ class MaskComposite(IO.ComfyNode): node_id="MaskComposite", search_aliases=["combine masks", "blend masks", "layer masks"], category="mask", + description="Composites a source mask onto a destination mask at a specified position using selectable blend operations.", + short_description="Composites masks with selectable blend operations.", inputs=[ IO.Mask.Input("destination"), IO.Mask.Input("source"), @@ -297,6 
+314,8 @@ class FeatherMask(IO.ComfyNode): node_id="FeatherMask", search_aliases=["soft edge mask", "blur mask edges", "gradient mask edge"], category="mask", + description="Applies a soft gradient feather to the edges of a mask with independent control for each side.", + short_description="Feathers mask edges with per-side control.", inputs=[ IO.Mask.Input("mask"), IO.Int.Input("left", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1), @@ -345,6 +364,8 @@ class GrowMask(IO.ComfyNode): search_aliases=["expand mask", "shrink mask"], display_name="Grow Mask", category="mask", + description="Expands or shrinks a mask by a specified number of pixels using morphological dilation or erosion with optional tapered corners.", + short_description="Expands or shrinks a mask by pixel amount.", inputs=[ IO.Mask.Input("mask"), IO.Int.Input("expand", default=0, min=-nodes.MAX_RESOLUTION, max=nodes.MAX_RESOLUTION, step=1), @@ -382,6 +403,8 @@ class ThresholdMask(IO.ComfyNode): node_id="ThresholdMask", search_aliases=["binary mask"], category="mask", + description="Converts a mask to binary by setting pixels above a threshold to white and below to black.", + short_description="Converts a mask to binary using a threshold.", inputs=[ IO.Mask.Input("mask"), IO.Float.Input("value", default=0.5, min=0.0, max=1.0, step=0.01), @@ -408,7 +431,8 @@ class MaskPreview(IO.ComfyNode): search_aliases=["show mask", "view mask", "inspect mask", "debug mask"], display_name="Preview Mask", category="mask", - description="Saves the input images to your ComfyUI output directory.", + description="Previews a mask in the UI by rendering it as a grayscale image.", + short_description="Previews a mask as a grayscale image.", inputs=[ IO.Mask.Input("mask"), ], diff --git a/comfy_extras/nodes_mochi.py b/comfy_extras/nodes_mochi.py index d750194fc..407d15fd2 100644 --- a/comfy_extras/nodes_mochi.py +++ b/comfy_extras/nodes_mochi.py @@ -11,6 +11,8 @@ class EmptyMochiLatentVideo(io.ComfyNode): return 
io.Schema( node_id="EmptyMochiLatentVideo", category="latent/video", + description="Creates an empty latent tensor sized for Mochi video generation with configurable width, height, frame length, and batch size.", + short_description="Create empty latent for Mochi video generation.", inputs=[ io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16), io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16), diff --git a/comfy_extras/nodes_model_advanced.py b/comfy_extras/nodes_model_advanced.py index f22b333fc..b8d90a8ac 100644 --- a/comfy_extras/nodes_model_advanced.py +++ b/comfy_extras/nodes_model_advanced.py @@ -60,6 +60,8 @@ class ModelSamplingDiscrete: FUNCTION = "patch" CATEGORY = "advanced/model" + DESCRIPTION = "Override the model's sampling method to use a discrete noise schedule with a selectable prediction type." + SHORT_DESCRIPTION = "Override model sampling to a discrete noise schedule." def patch(self, model, sampling, zsnr): m = model.clone() @@ -96,6 +98,8 @@ class ModelSamplingStableCascade: FUNCTION = "patch" CATEGORY = "advanced/model" + DESCRIPTION = "Override the model's sampling to use Stable Cascade noise scheduling with an adjustable shift parameter." + SHORT_DESCRIPTION = "Override sampling to Stable Cascade noise scheduling." def patch(self, model, shift): m = model.clone() @@ -122,6 +126,8 @@ class ModelSamplingSD3: FUNCTION = "patch" CATEGORY = "advanced/model" + DESCRIPTION = "Override the model's sampling to use the SD3 discrete flow noise schedule with an adjustable shift parameter." + SHORT_DESCRIPTION = "Override sampling to SD3 discrete flow schedule." def patch(self, model, shift, multiplier=1000): m = model.clone() @@ -144,6 +150,8 @@ class ModelSamplingAuraFlow(ModelSamplingSD3): "shift": ("FLOAT", {"default": 1.73, "min": 0.0, "max": 100.0, "step":0.01}), }} + DESCRIPTION = "Override the model's sampling to use the AuraFlow discrete flow noise schedule with an adjustable shift." 
+ SHORT_DESCRIPTION = "Override sampling to AuraFlow discrete flow schedule." FUNCTION = "patch_aura" def patch_aura(self, model, shift): @@ -163,6 +171,8 @@ class ModelSamplingFlux: FUNCTION = "patch" CATEGORY = "advanced/model" + DESCRIPTION = "Override the model's sampling to use the Flux flow schedule with resolution-dependent shift computed from base and max shift values." + SHORT_DESCRIPTION = "Override sampling to Flux flow schedule with resolution shift." def patch(self, model, max_shift, base_shift, width, height): m = model.clone() @@ -198,6 +208,8 @@ class ModelSamplingContinuousEDM: FUNCTION = "patch" CATEGORY = "advanced/model" + DESCRIPTION = "Override the model's sampling to use a continuous EDM noise schedule with configurable sigma range and prediction type." + SHORT_DESCRIPTION = "Override sampling to continuous EDM noise schedule." def patch(self, model, sampling, sigma_max, sigma_min): m = model.clone() @@ -243,6 +255,8 @@ class ModelSamplingContinuousV: FUNCTION = "patch" CATEGORY = "advanced/model" + DESCRIPTION = "Override the model's sampling to use a continuous V-prediction noise schedule with configurable sigma range." + SHORT_DESCRIPTION = "Override sampling to continuous V-prediction schedule." def patch(self, model, sampling, sigma_max, sigma_min): m = model.clone() @@ -269,6 +283,8 @@ class RescaleCFG: FUNCTION = "patch" CATEGORY = "advanced/model" + DESCRIPTION = "Apply Rescale CFG to the model, which normalizes the CFG output to match the standard deviation of the positive conditioning prediction." + SHORT_DESCRIPTION = "Normalize CFG output to match positive conditioning std." def patch(self, model, multiplier): def rescale_cfg(args): @@ -310,6 +326,7 @@ class ModelComputeDtype: FUNCTION = "patch" CATEGORY = "advanced/debug/model" + DESCRIPTION = "Override the compute dtype used by the model during inference." 
def patch(self, model, dtype): m = model.clone() diff --git a/comfy_extras/nodes_model_downscale.py b/comfy_extras/nodes_model_downscale.py index dec2ae841..a0e6aa2da 100644 --- a/comfy_extras/nodes_model_downscale.py +++ b/comfy_extras/nodes_model_downscale.py @@ -11,6 +11,8 @@ class PatchModelAddDownscale(io.ComfyNode): node_id="PatchModelAddDownscale", display_name="PatchModelAddDownscale (Kohya Deep Shrink)", category="model_patches/unet", + description="Patches the UNet to downscale internal feature maps at a specified block during a configurable sigma range, then upscale on output, implementing the Kohya Deep Shrink technique for faster generation.", + short_description="Kohya Deep Shrink: downscale UNet internals for speed.", inputs=[ io.Model.Input("model"), io.Int.Input("block_number", default=3, min=1, max=32, step=1), diff --git a/comfy_extras/nodes_model_merging.py b/comfy_extras/nodes_model_merging.py index 5384ed531..3fa9f3484 100644 --- a/comfy_extras/nodes_model_merging.py +++ b/comfy_extras/nodes_model_merging.py @@ -22,6 +22,8 @@ class ModelMergeSimple: FUNCTION = "merge" CATEGORY = "advanced/model_merging" + DESCRIPTION = "Merge two diffusion models using a simple ratio to blend all weights uniformly." + SHORT_DESCRIPTION = "Merge two models with a uniform blend ratio." def merge(self, model1, model2, ratio): m = model1.clone() @@ -41,6 +43,8 @@ class ModelSubtract: FUNCTION = "merge" CATEGORY = "advanced/model_merging" + DESCRIPTION = "Subtract one diffusion model's weights from another with an adjustable multiplier for extracting differences." + SHORT_DESCRIPTION = "Subtract model weights with adjustable multiplier." def merge(self, model1, model2, multiplier): m = model1.clone() @@ -59,6 +63,8 @@ class ModelAdd: FUNCTION = "merge" CATEGORY = "advanced/model_merging" + DESCRIPTION = "Add the weights of one diffusion model on top of another." 
+ SHORT_DESCRIPTION = None def merge(self, model1, model2): m = model1.clone() @@ -79,6 +85,8 @@ class CLIPMergeSimple: FUNCTION = "merge" CATEGORY = "advanced/model_merging" + DESCRIPTION = "Merge two CLIP text encoder models using a simple ratio to blend all weights uniformly." + SHORT_DESCRIPTION = "Merge two CLIP models with a uniform blend ratio." def merge(self, clip1, clip2, ratio): m = clip1.clone() @@ -102,6 +110,8 @@ class CLIPSubtract: FUNCTION = "merge" CATEGORY = "advanced/model_merging" + DESCRIPTION = "Subtract one CLIP model's weights from another with an adjustable multiplier for extracting differences." + SHORT_DESCRIPTION = "Subtract CLIP weights with adjustable multiplier." def merge(self, clip1, clip2, multiplier): m = clip1.clone() @@ -124,6 +134,8 @@ class CLIPAdd: FUNCTION = "merge" CATEGORY = "advanced/model_merging" + DESCRIPTION = "Add the weights of one CLIP model on top of another." + SHORT_DESCRIPTION = None def merge(self, clip1, clip2): m = clip1.clone() @@ -148,6 +160,8 @@ class ModelMergeBlocks: FUNCTION = "merge" CATEGORY = "advanced/model_merging" + DESCRIPTION = "Merge two diffusion models with separate blend ratios for input, middle, and output blocks." + SHORT_DESCRIPTION = "Merge two models with per-block blend ratios." def merge(self, model1, model2, **kwargs): m = model1.clone() @@ -228,6 +242,8 @@ def save_checkpoint(model, clip=None, vae=None, clip_vision=None, filename_prefi class CheckpointSave: SEARCH_ALIASES = ["save model", "export checkpoint", "merge save"] + DESCRIPTION = "Saves a model, CLIP, and VAE as a combined checkpoint file in safetensors format with optional workflow metadata." + SHORT_DESCRIPTION = "Saves model, CLIP, and VAE as a checkpoint." def __init__(self): self.output_dir = folder_paths.get_output_directory() @@ -262,6 +278,8 @@ class CLIPSave: OUTPUT_NODE = True CATEGORY = "advanced/model_merging" + DESCRIPTION = "Save a CLIP text encoder model to safetensors files, splitting by model component." 
+ SHORT_DESCRIPTION = "Save a CLIP model to safetensors files." def save(self, clip, filename_prefix, prompt=None, extra_pnginfo=None): prompt_info = "" @@ -319,6 +337,8 @@ class VAESave: OUTPUT_NODE = True CATEGORY = "advanced/model_merging" + DESCRIPTION = "Save a VAE model to a safetensors file." + SHORT_DESCRIPTION = None def save(self, vae, filename_prefix, prompt=None, extra_pnginfo=None): full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir) @@ -354,6 +374,8 @@ class ModelSave: OUTPUT_NODE = True CATEGORY = "advanced/model_merging" + DESCRIPTION = "Save a diffusion model to a safetensors file." + SHORT_DESCRIPTION = None def save(self, model, filename_prefix, prompt=None, extra_pnginfo=None): save_checkpoint(model, filename_prefix=filename_prefix, output_dir=self.output_dir, prompt=prompt, extra_pnginfo=extra_pnginfo) diff --git a/comfy_extras/nodes_model_merging_model_specific.py b/comfy_extras/nodes_model_merging_model_specific.py index 55eb3ccfe..7c5380789 100644 --- a/comfy_extras/nodes_model_merging_model_specific.py +++ b/comfy_extras/nodes_model_merging_model_specific.py @@ -2,6 +2,8 @@ import comfy_extras.nodes_model_merging class ModelMergeSD1(comfy_extras.nodes_model_merging.ModelMergeBlocks): CATEGORY = "advanced/model_merging/model_specific" + DESCRIPTION = "Merge two SD1 models with per-block weight control over input, middle, and output blocks." + SHORT_DESCRIPTION = "Merge two SD1 models with per-block control." @classmethod def INPUT_TYPES(s): arg_dict = { "model1": ("MODEL",), @@ -26,8 +28,15 @@ class ModelMergeSD1(comfy_extras.nodes_model_merging.ModelMergeBlocks): return {"required": arg_dict} +class ModelMergeSD2(ModelMergeSD1): + DESCRIPTION = "Merge two SD2 models with per-block weight control over input, middle, and output blocks." + SHORT_DESCRIPTION = "Merge two SD2 models with per-block control." 
+ + class ModelMergeSDXL(comfy_extras.nodes_model_merging.ModelMergeBlocks): CATEGORY = "advanced/model_merging/model_specific" + DESCRIPTION = "Merge two SDXL models with per-block weight control over input, middle, and output blocks." + SHORT_DESCRIPTION = "Merge two SDXL models with per-block control." @classmethod def INPUT_TYPES(s): @@ -54,6 +63,8 @@ class ModelMergeSDXL(comfy_extras.nodes_model_merging.ModelMergeBlocks): class ModelMergeSD3_2B(comfy_extras.nodes_model_merging.ModelMergeBlocks): CATEGORY = "advanced/model_merging/model_specific" + DESCRIPTION = "Merge two SD3 2B models with per-block weight control over 24 joint blocks and embedders." + SHORT_DESCRIPTION = "Merge two SD3 2B models with per-block control." @classmethod def INPUT_TYPES(s): @@ -78,6 +89,8 @@ class ModelMergeSD3_2B(comfy_extras.nodes_model_merging.ModelMergeBlocks): class ModelMergeAuraflow(comfy_extras.nodes_model_merging.ModelMergeBlocks): CATEGORY = "advanced/model_merging/model_specific" + DESCRIPTION = "Merge two AuraFlow models with per-block weight control over double and single layers." + SHORT_DESCRIPTION = "Merge two AuraFlow models with per-block control." @classmethod def INPUT_TYPES(s): @@ -105,6 +118,8 @@ class ModelMergeAuraflow(comfy_extras.nodes_model_merging.ModelMergeBlocks): class ModelMergeFlux1(comfy_extras.nodes_model_merging.ModelMergeBlocks): CATEGORY = "advanced/model_merging/model_specific" + DESCRIPTION = "Merge two Flux1 models with per-block weight control over 19 double blocks and 38 single blocks." + SHORT_DESCRIPTION = "Merge two Flux1 models with per-block control." @classmethod def INPUT_TYPES(s): @@ -131,6 +146,8 @@ class ModelMergeFlux1(comfy_extras.nodes_model_merging.ModelMergeBlocks): class ModelMergeSD35_Large(comfy_extras.nodes_model_merging.ModelMergeBlocks): CATEGORY = "advanced/model_merging/model_specific" + DESCRIPTION = "Merge two SD3.5 Large models with per-block weight control over 38 joint blocks and embedders." 
+ SHORT_DESCRIPTION = "Merge two SD3.5 Large models with per-block control." @classmethod def INPUT_TYPES(s): @@ -154,6 +171,8 @@ class ModelMergeSD35_Large(comfy_extras.nodes_model_merging.ModelMergeBlocks): class ModelMergeMochiPreview(comfy_extras.nodes_model_merging.ModelMergeBlocks): CATEGORY = "advanced/model_merging/model_specific" + DESCRIPTION = "Merge two Mochi Preview models with per-block weight control over 48 blocks and embedders." + SHORT_DESCRIPTION = "Merge two Mochi Preview models with per-block control." @classmethod def INPUT_TYPES(s): @@ -176,6 +195,8 @@ class ModelMergeMochiPreview(comfy_extras.nodes_model_merging.ModelMergeBlocks): class ModelMergeLTXV(comfy_extras.nodes_model_merging.ModelMergeBlocks): CATEGORY = "advanced/model_merging/model_specific" + DESCRIPTION = "Merge two LTXV models with per-block weight control over 28 transformer blocks." + SHORT_DESCRIPTION = "Merge two LTXV models with per-block control." @classmethod def INPUT_TYPES(s): @@ -198,6 +219,8 @@ class ModelMergeLTXV(comfy_extras.nodes_model_merging.ModelMergeBlocks): class ModelMergeCosmos7B(comfy_extras.nodes_model_merging.ModelMergeBlocks): CATEGORY = "advanced/model_merging/model_specific" + DESCRIPTION = "Merge two Cosmos 7B models with per-block weight control over 28 blocks and embedders." + SHORT_DESCRIPTION = "Merge two Cosmos 7B models with per-block control." @classmethod def INPUT_TYPES(s): @@ -222,6 +245,8 @@ class ModelMergeCosmos7B(comfy_extras.nodes_model_merging.ModelMergeBlocks): class ModelMergeCosmos14B(comfy_extras.nodes_model_merging.ModelMergeBlocks): CATEGORY = "advanced/model_merging/model_specific" + DESCRIPTION = "Merge two Cosmos 14B models with per-block weight control over 36 blocks and embedders." + SHORT_DESCRIPTION = "Merge two Cosmos 14B models with per-block control." 
@classmethod def INPUT_TYPES(s): @@ -247,6 +272,7 @@ class ModelMergeCosmos14B(comfy_extras.nodes_model_merging.ModelMergeBlocks): class ModelMergeWAN2_1(comfy_extras.nodes_model_merging.ModelMergeBlocks): CATEGORY = "advanced/model_merging/model_specific" DESCRIPTION = "1.3B model has 30 blocks, 14B model has 40 blocks. Image to video model has the extra img_emb." + SHORT_DESCRIPTION = "WAN 2.1 model merging with block-level control." @classmethod def INPUT_TYPES(s): @@ -270,6 +296,8 @@ class ModelMergeWAN2_1(comfy_extras.nodes_model_merging.ModelMergeBlocks): class ModelMergeCosmosPredict2_2B(comfy_extras.nodes_model_merging.ModelMergeBlocks): CATEGORY = "advanced/model_merging/model_specific" + DESCRIPTION = "Merge two Cosmos Predict2 2B models with per-block weight control over 28 blocks and embedders." + SHORT_DESCRIPTION = "Merge two Cosmos Predict2 2B models with per-block control." @classmethod def INPUT_TYPES(s): @@ -293,6 +321,8 @@ class ModelMergeCosmosPredict2_2B(comfy_extras.nodes_model_merging.ModelMergeBlo class ModelMergeCosmosPredict2_14B(comfy_extras.nodes_model_merging.ModelMergeBlocks): CATEGORY = "advanced/model_merging/model_specific" + DESCRIPTION = "Merge two Cosmos Predict2 14B models with per-block weight control over 36 blocks and embedders." + SHORT_DESCRIPTION = "Merge two Cosmos Predict2 14B models with per-block control." @classmethod def INPUT_TYPES(s): @@ -316,6 +346,8 @@ class ModelMergeCosmosPredict2_14B(comfy_extras.nodes_model_merging.ModelMergeBl class ModelMergeQwenImage(comfy_extras.nodes_model_merging.ModelMergeBlocks): CATEGORY = "advanced/model_merging/model_specific" + DESCRIPTION = "Merge two Qwen Image models with per-block weight control over 60 transformer blocks." + SHORT_DESCRIPTION = "Merge two Qwen Image models with per-block control." 
@classmethod def INPUT_TYPES(s): @@ -339,7 +371,7 @@ class ModelMergeQwenImage(comfy_extras.nodes_model_merging.ModelMergeBlocks): NODE_CLASS_MAPPINGS = { "ModelMergeSD1": ModelMergeSD1, - "ModelMergeSD2": ModelMergeSD1, #SD1 and SD2 have the same blocks + "ModelMergeSD2": ModelMergeSD2, #SD1 and SD2 have the same blocks "ModelMergeSDXL": ModelMergeSDXL, "ModelMergeSD3_2B": ModelMergeSD3_2B, "ModelMergeAuraflow": ModelMergeAuraflow, diff --git a/comfy_extras/nodes_model_patch.py b/comfy_extras/nodes_model_patch.py index 176e6bc2f..0b5850f45 100644 --- a/comfy_extras/nodes_model_patch.py +++ b/comfy_extras/nodes_model_patch.py @@ -230,6 +230,8 @@ class ModelPatchLoader: EXPERIMENTAL = True CATEGORY = "advanced/loaders" + DESCRIPTION = "Load a model patch file such as a controlnet or style reference patch for use with compatible model nodes." + SHORT_DESCRIPTION = "Load a model patch file for controlnet or style." def load_model_patch(self, name): model_patch_path = folder_paths.get_full_path_or_raise("model_patches", name) @@ -456,6 +458,8 @@ class QwenImageDiffsynthControlnet: EXPERIMENTAL = True CATEGORY = "advanced/loaders/qwen" + DESCRIPTION = "Apply a DiffSynth-style controlnet patch to a Qwen Image model using a VAE-encoded control image." + SHORT_DESCRIPTION = "Apply DiffSynth controlnet to a Qwen Image model." def diffsynth_controlnet(self, model, model_patch, vae, image=None, strength=1.0, inpaint_image=None, mask=None): model_patched = model.clone() @@ -489,6 +493,8 @@ class ZImageFunControlnet(QwenImageDiffsynthControlnet): "optional": {"image": ("IMAGE",), "inpaint_image": ("IMAGE",), "mask": ("MASK",)}} CATEGORY = "advanced/loaders/zimage" + DESCRIPTION = "Apply a Z-Image Fun controlnet patch to a model with optional control image, inpaint image, and mask inputs." + SHORT_DESCRIPTION = "Apply Z-Image Fun controlnet with optional inpainting." 
class UsoStyleProjectorPatch: def __init__(self, model_patch, encoded_image): @@ -525,6 +531,8 @@ class USOStyleReference: EXPERIMENTAL = True CATEGORY = "advanced/model_patches/flux" + DESCRIPTION = "Apply a USO style reference patch to a Flux model using multi-layer SigLIP features from CLIP vision output." + SHORT_DESCRIPTION = "Apply USO style reference to a Flux model." def apply_patch(self, model, model_patch, clip_vision_output): encoded_image = torch.stack((clip_vision_output.all_hidden_states[:, -20], clip_vision_output.all_hidden_states[:, -11], clip_vision_output.penultimate_hidden_states)) diff --git a/comfy_extras/nodes_morphology.py b/comfy_extras/nodes_morphology.py index 4ab2fb7e8..e2052349e 100644 --- a/comfy_extras/nodes_morphology.py +++ b/comfy_extras/nodes_morphology.py @@ -15,6 +15,8 @@ class Morphology(io.ComfyNode): search_aliases=["erode", "dilate"], display_name="ImageMorphology", category="image/postprocessing", + description="Applies morphological operations to an image using a configurable kernel size.", + short_description="Applies morphological operations to an image.", inputs=[ io.Image.Input("image"), io.Combo.Input( @@ -60,6 +62,8 @@ class ImageRGBToYUV(io.ComfyNode): node_id="ImageRGBToYUV", search_aliases=["color space conversion"], category="image/batch", + description="Converts an RGB image to YUV (YCbCr) color space, outputting separate Y, U, and V channel images.", + short_description="Convert RGB image to YUV color space.", inputs=[ io.Image.Input("image"), ], @@ -82,6 +86,8 @@ class ImageYUVToRGB(io.ComfyNode): node_id="ImageYUVToRGB", search_aliases=["color space conversion"], category="image/batch", + description="Converts separate Y, U, and V (YCbCr) channel images back into a single RGB image.", + short_description="Convert YUV channels back to RGB image.", inputs=[ io.Image.Input("Y"), io.Image.Input("U"), diff --git a/comfy_extras/nodes_nop.py b/comfy_extras/nodes_nop.py index 953061bcb..9a8861922 100644 --- a/comfy_extras/nodes_nop.py +++ 
b/comfy_extras/nodes_nop.py @@ -14,6 +14,7 @@ class wanBlockSwap(io.ComfyNode): node_id="wanBlockSwap", category="", description="NOP", + short_description=None, inputs=[ io.Model.Input("model"), ], diff --git a/comfy_extras/nodes_optimalsteps.py b/comfy_extras/nodes_optimalsteps.py index 73f0104d8..15e3e4b4c 100644 --- a/comfy_extras/nodes_optimalsteps.py +++ b/comfy_extras/nodes_optimalsteps.py @@ -32,6 +32,8 @@ class OptimalStepsScheduler(io.ComfyNode): return io.Schema( node_id="OptimalStepsScheduler", category="sampling/custom_sampling/schedulers", + description="Generates an optimized noise schedule with precomputed optimal sigma levels using log-linear interpolation.", + short_description="Optimal noise schedule with precomputed sigma levels.", inputs=[ io.Combo.Input("model_type", options=["FLUX", "Wan", "Chroma"]), io.Int.Input("steps", default=20, min=3, max=1000), diff --git a/comfy_extras/nodes_pag.py b/comfy_extras/nodes_pag.py index 79fea5f0c..1fd264395 100644 --- a/comfy_extras/nodes_pag.py +++ b/comfy_extras/nodes_pag.py @@ -16,6 +16,8 @@ class PerturbedAttentionGuidance(io.ComfyNode): return io.Schema( node_id="PerturbedAttentionGuidance", category="model_patches/unet", + description="Applies Perturbed Attention Guidance (PAG) by replacing self-attention with identity in the middle block to compute a guidance signal that enhances structural coherence.", + short_description="Perturbed Attention Guidance for structural coherence.", inputs=[ io.Model.Input("model"), io.Float.Input("scale", default=3.0, min=0.0, max=100.0, step=0.01, round=0.01), diff --git a/comfy_extras/nodes_perpneg.py b/comfy_extras/nodes_perpneg.py index cd068ce9c..d6079eb7a 100644 --- a/comfy_extras/nodes_perpneg.py +++ b/comfy_extras/nodes_perpneg.py @@ -26,6 +26,8 @@ class PerpNeg(io.ComfyNode): node_id="PerpNeg", display_name="Perp-Neg (DEPRECATED by PerpNegGuider)", category="_for_testing", + description="Applies perpendicular negative guidance by projecting out the component 
of negative conditioning parallel to positive conditioning. Deprecated in favor of PerpNegGuider.", + short_description="Perpendicular negative guidance (deprecated).", inputs=[ io.Model.Input("model"), io.Conditioning.Input("empty_conditioning"), @@ -128,6 +130,8 @@ class PerpNegGuider(io.ComfyNode): return io.Schema( node_id="PerpNegGuider", category="_for_testing", + description="Creates a guider that applies perpendicular negative guidance, computing positive, negative, and empty conditioning in a single batch for efficient sampling.", + short_description="Guider with perpendicular negative guidance.", inputs=[ io.Model.Input("model"), io.Conditioning.Input("positive"), diff --git a/comfy_extras/nodes_photomaker.py b/comfy_extras/nodes_photomaker.py index 228183c07..59efe04bd 100644 --- a/comfy_extras/nodes_photomaker.py +++ b/comfy_extras/nodes_photomaker.py @@ -124,6 +124,8 @@ class PhotoMakerLoader(io.ComfyNode): return io.Schema( node_id="PhotoMakerLoader", category="_for_testing/photomaker", + description="Loads a PhotoMaker model from a safetensors file for identity-preserving image generation.", + short_description="Load a PhotoMaker model from file.", inputs=[ io.Combo.Input("photomaker_model_name", options=folder_paths.get_filename_list("photomaker")), ], @@ -150,6 +152,8 @@ class PhotoMakerEncode(io.ComfyNode): return io.Schema( node_id="PhotoMakerEncode", category="_for_testing/photomaker", + description="Encodes a reference image and text prompt using PhotoMaker to produce identity-preserving conditioning for image generation.", + short_description="Encode image and text with PhotoMaker.", inputs=[ io.Photomaker.Input("photomaker"), io.Image.Input("image"), diff --git a/comfy_extras/nodes_pixart.py b/comfy_extras/nodes_pixart.py index 2f1b73e60..473ef0883 100644 --- a/comfy_extras/nodes_pixart.py +++ b/comfy_extras/nodes_pixart.py @@ -10,6 +10,7 @@ class CLIPTextEncodePixArtAlpha(io.ComfyNode): search_aliases=["pixart prompt"], 
category="advanced/conditioning", description="Encodes text and sets the resolution conditioning for PixArt Alpha. Does not apply to PixArt Sigma.", + short_description="Encodes text with resolution conditioning for PixArt Alpha.", inputs=[ io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION), io.Int.Input("height", default=1024, min=0, max=nodes.MAX_RESOLUTION), diff --git a/comfy_extras/nodes_post_processing.py b/comfy_extras/nodes_post_processing.py index 66dac10b1..0b5049b69 100644 --- a/comfy_extras/nodes_post_processing.py +++ b/comfy_extras/nodes_post_processing.py @@ -20,6 +20,8 @@ class Blend(io.ComfyNode): return io.Schema( node_id="ImageBlend", category="image/postprocessing", + description="Blends two images together using a selectable blend mode and adjustable blend factor.", + short_description="Blends two images using a selected blend mode.", inputs=[ io.Image.Input("image1"), io.Image.Input("image2"), @@ -77,6 +79,8 @@ class Blur(io.ComfyNode): return io.Schema( node_id="ImageBlur", category="image/postprocessing", + description="Applies a Gaussian blur to an image with configurable radius and sigma.", + short_description="Applies Gaussian blur to an image.", inputs=[ io.Image.Input("image"), io.Int.Input("blur_radius", default=1, min=1, max=31, step=1), @@ -112,6 +116,8 @@ class Quantize(io.ComfyNode): return io.Schema( node_id="ImageQuantize", category="image/postprocessing", + description="Reduces the number of colors in an image with optional dithering.", + short_description="Reduces the number of colors in an image.", inputs=[ io.Image.Input("image"), io.Int.Input("colors", default=256, min=1, max=256, step=1), @@ -177,6 +183,8 @@ class Sharpen(io.ComfyNode): return io.Schema( node_id="ImageSharpen", category="image/postprocessing", + description="Sharpens an image using an unsharp mask with configurable radius, sigma, and strength.", + short_description="Sharpens an image using unsharp mask.", inputs=[ io.Image.Input("image"), io.Int.Input("sharpen_radius", default=1, 
min=1, max=31, step=1), @@ -221,6 +229,8 @@ class ImageScaleToTotalPixels(io.ComfyNode): return io.Schema( node_id="ImageScaleToTotalPixels", category="image/upscaling", + description="Scales an image to a target total megapixel count while preserving aspect ratio, with configurable resolution stepping.", + short_description="Scales an image to a target megapixel count.", inputs=[ io.Image.Input("image"), io.Combo.Input("upscale_method", options=cls.upscale_methods), @@ -430,6 +440,7 @@ class ResizeImageMaskNode(io.ComfyNode): node_id="ResizeImageMaskNode", display_name="Resize Image/Mask", description="Resize an image or mask using various scaling methods.", + short_description=None, category="transform", search_aliases=["resize", "resize image", "resize mask", "scale", "scale image", "scale mask", "image resize", "change size", "dimensions", "shrink", "enlarge"], inputs=[ @@ -565,6 +576,8 @@ class BatchImagesNode(io.ComfyNode): node_id="BatchImagesNode", display_name="Batch Images", category="image", + description="Combines multiple images into a single batch, resizing them to match the first image's dimensions.", + short_description="Combines multiple images into a single batch.", search_aliases=["batch", "image batch", "batch images", "combine images", "merge images", "stack images"], inputs=[ io.Autogrow.Input("images", template=autogrow_template) @@ -587,6 +600,8 @@ class BatchMasksNode(io.ComfyNode): search_aliases=["combine masks", "stack masks", "merge masks"], display_name="Batch Masks", category="mask", + description="Combines multiple masks into a single batch, resizing them to match the first mask's dimensions.", + short_description="Combines multiple masks into a single batch.", inputs=[ io.Autogrow.Input("masks", template=autogrow_template) ], @@ -608,6 +623,8 @@ class BatchLatentsNode(io.ComfyNode): search_aliases=["combine latents", "stack latents", "merge latents"], display_name="Batch Latents", category="latent", + description="Combines multiple 
latent tensors into a single batch, reshaping them to match the first latent's dimensions.", + short_description="Combines multiple latents into a single batch.", inputs=[ io.Autogrow.Input("latents", template=autogrow_template) ], @@ -632,6 +649,8 @@ class BatchImagesMasksLatentsNode(io.ComfyNode): search_aliases=["combine batch", "merge batch", "stack inputs"], display_name="Batch Images/Masks/Latents", category="util", + description="Combines multiple images, masks, or latents into a single batch, automatically detecting the input type.", + short_description="Batches images, masks, or latents together.", inputs=[ io.Autogrow.Input("inputs", template=autogrow_template) ], diff --git a/comfy_extras/nodes_preview_any.py b/comfy_extras/nodes_preview_any.py index b0a6f279d..4ce96fd0b 100644 --- a/comfy_extras/nodes_preview_any.py +++ b/comfy_extras/nodes_preview_any.py @@ -16,6 +16,8 @@ class PreviewAny(): OUTPUT_NODE = True CATEGORY = "utils" + DESCRIPTION = "Preview any input value as text, converting it to a JSON or string representation for display." + SHORT_DESCRIPTION = "Preview any input value as text." 
SEARCH_ALIASES = ["show output", "inspect", "debug", "print value", "show text"] def main(self, source=None): diff --git a/comfy_extras/nodes_primitive.py b/comfy_extras/nodes_primitive.py index 937321800..356c52f51 100644 --- a/comfy_extras/nodes_primitive.py +++ b/comfy_extras/nodes_primitive.py @@ -11,6 +11,8 @@ class String(io.ComfyNode): node_id="PrimitiveString", display_name="String", category="utils/primitive", + description="A primitive node that passes through a string value.", + short_description=None, inputs=[ io.String.Input("value"), ], @@ -29,6 +31,8 @@ class StringMultiline(io.ComfyNode): node_id="PrimitiveStringMultiline", display_name="String (Multiline)", category="utils/primitive", + description="A primitive node that passes through a multiline string value.", + short_description=None, inputs=[ io.String.Input("value", multiline=True), ], @@ -47,6 +51,8 @@ class Int(io.ComfyNode): node_id="PrimitiveInt", display_name="Int", category="utils/primitive", + description="A primitive node that passes through an integer value.", + short_description=None, inputs=[ io.Int.Input("value", min=-sys.maxsize, max=sys.maxsize, control_after_generate=True), ], @@ -65,6 +71,8 @@ class Float(io.ComfyNode): node_id="PrimitiveFloat", display_name="Float", category="utils/primitive", + description="A primitive node that passes through a float value.", + short_description=None, inputs=[ io.Float.Input("value", min=-sys.maxsize, max=sys.maxsize, step=0.1), ], @@ -83,6 +91,8 @@ class Boolean(io.ComfyNode): node_id="PrimitiveBoolean", display_name="Boolean", category="utils/primitive", + description="A primitive node that passes through a boolean value.", + short_description=None, inputs=[ io.Boolean.Input("value"), ], diff --git a/comfy_extras/nodes_qwen.py b/comfy_extras/nodes_qwen.py index fde8fac9a..91f2f4291 100644 --- a/comfy_extras/nodes_qwen.py +++ b/comfy_extras/nodes_qwen.py @@ -13,6 +13,8 @@ class TextEncodeQwenImageEdit(io.ComfyNode): return io.Schema( 
node_id="TextEncodeQwenImageEdit", category="advanced/conditioning", + description="Encodes a text prompt with an optional reference image for Qwen-based image editing, producing conditioning with latent reference.", + short_description="Text and image encoding for Qwen image editing.", inputs=[ io.Clip.Input("clip"), io.String.Input("prompt", multiline=True, dynamic_prompts=True), @@ -56,6 +58,8 @@ class TextEncodeQwenImageEditPlus(io.ComfyNode): return io.Schema( node_id="TextEncodeQwenImageEditPlus", category="advanced/conditioning", + description="Encodes a text prompt with up to three reference images for Qwen-based multi-image editing, producing conditioning with latent references.", + short_description="Multi-image text encoding for Qwen image editing.", inputs=[ io.Clip.Input("clip"), io.String.Input("prompt", multiline=True, dynamic_prompts=True), @@ -113,6 +117,8 @@ class EmptyQwenImageLayeredLatentImage(io.ComfyNode): node_id="EmptyQwenImageLayeredLatentImage", display_name="Empty Qwen Image Layered Latent", category="latent/qwen", + description="Creates an empty multi-layer latent tensor for Qwen image generation with a configurable number of layers.", + short_description="Empty multi-layer latent for Qwen image generation.", inputs=[ io.Int.Input("width", default=640, min=16, max=nodes.MAX_RESOLUTION, step=16), io.Int.Input("height", default=640, min=16, max=nodes.MAX_RESOLUTION, step=16), diff --git a/comfy_extras/nodes_rebatch.py b/comfy_extras/nodes_rebatch.py index 5f4e82aef..2a6d8af2f 100644 --- a/comfy_extras/nodes_rebatch.py +++ b/comfy_extras/nodes_rebatch.py @@ -11,6 +11,8 @@ class LatentRebatch(io.ComfyNode): node_id="RebatchLatents", display_name="Rebatch Latents", category="latent/batch", + description="Splits and recombines latent batches into a new batch size, handling noise masks and batch indices across differently sized inputs.", + short_description="Rebatch latents to a specified batch size.", is_input_list=True, inputs=[ 
io.Latent.Input("latents"), @@ -114,6 +116,7 @@ class ImageRebatch(io.ComfyNode): node_id="RebatchImages", display_name="Rebatch Images", category="image/batch", + description="Splits and recombines image batches into a new specified batch size.", is_input_list=True, inputs=[ io.Image.Input("images"), diff --git a/comfy_extras/nodes_rope.py b/comfy_extras/nodes_rope.py index d1feb031e..6f0d01930 100644 --- a/comfy_extras/nodes_rope.py +++ b/comfy_extras/nodes_rope.py @@ -9,6 +9,7 @@ class ScaleROPE(io.ComfyNode): node_id="ScaleROPE", category="advanced/model_patches", description="Scale and shift the ROPE of the model.", + short_description=None, is_experimental=True, inputs=[ io.Model.Input("model"), diff --git a/comfy_extras/nodes_sag.py b/comfy_extras/nodes_sag.py index 0f47db30b..7eab5ea78 100644 --- a/comfy_extras/nodes_sag.py +++ b/comfy_extras/nodes_sag.py @@ -114,6 +114,8 @@ class SelfAttentionGuidance(io.ComfyNode): node_id="SelfAttentionGuidance", display_name="Self-Attention Guidance", category="_for_testing", + description="Applies Self-Attention Guidance (SAG) which uses attention maps to create adversarially blurred images and computes a guidance signal that enhances fine details.", + short_description="Self-Attention Guidance for enhanced detail.", inputs=[ io.Model.Input("model"), io.Float.Input("scale", default=0.5, min=-2.0, max=5.0, step=0.01), diff --git a/comfy_extras/nodes_sd3.py b/comfy_extras/nodes_sd3.py index 736213a47..bf1da3798 100644 --- a/comfy_extras/nodes_sd3.py +++ b/comfy_extras/nodes_sd3.py @@ -15,6 +15,7 @@ class TripleCLIPLoader(io.ComfyNode): node_id="TripleCLIPLoader", category="advanced/loaders", description="[Recipes]\n\nsd3: clip-l, clip-g, t5", + short_description=None, inputs=[ io.Combo.Input("clip_name1", options=folder_paths.get_filename_list("text_encoders")), io.Combo.Input("clip_name2", options=folder_paths.get_filename_list("text_encoders")), @@ -42,6 +43,8 @@ class EmptySD3LatentImage(io.ComfyNode): return 
io.Schema( node_id="EmptySD3LatentImage", category="latent/sd3", + description="Creates an empty SD3 latent image tensor with the specified width, height, and batch size.", + short_description="Creates an empty SD3 latent image tensor.", inputs=[ io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16), io.Int.Input("height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16), @@ -67,6 +70,8 @@ class CLIPTextEncodeSD3(io.ComfyNode): node_id="CLIPTextEncodeSD3", search_aliases=["sd3 prompt"], category="advanced/conditioning", + description="Encodes separate CLIP-L, CLIP-G, and T5-XXL text prompts into SD3 conditioning with optional empty padding.", + short_description="Encodes multi-encoder text prompts for SD3.", inputs=[ io.Clip.Input("clip"), io.String.Input("clip_l", multiline=True, dynamic_prompts=True), @@ -114,6 +119,8 @@ class ControlNetApplySD3(io.ComfyNode): node_id="ControlNetApplySD3", display_name="Apply Controlnet with VAE", category="conditioning/controlnet", + description="Applies a ControlNet to SD3 conditioning using a VAE-encoded control image with adjustable strength and start/end percentages.", + short_description="Applies ControlNet with VAE to SD3 conditioning.", inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), @@ -177,6 +184,7 @@ class SkipLayerGuidanceSD3(io.ComfyNode): node_id="SkipLayerGuidanceSD3", category="advanced/guidance", description="Generic version of SkipLayerGuidance node that can be used on every DiT model.", + short_description="Skip layer guidance applicable to any DiT model.", inputs=[ io.Model.Input("model"), io.String.Input("layers", default="7, 8, 9", multiline=False), diff --git a/comfy_extras/nodes_sdupscale.py b/comfy_extras/nodes_sdupscale.py index 31b373370..49cceacf6 100644 --- a/comfy_extras/nodes_sdupscale.py +++ b/comfy_extras/nodes_sdupscale.py @@ -10,6 +10,8 @@ class SD_4XUpscale_Conditioning(io.ComfyNode): return io.Schema( 
node_id="SD_4XUpscale_Conditioning", category="conditioning/upscale_diffusion", + description="Prepares conditioning for the Stable Diffusion 4x upscale model by resizing the input image and attaching it with noise augmentation to positive and negative conditioning.", + short_description="Prepare conditioning for SD 4x upscale model.", inputs=[ io.Image.Input("images"), io.Conditioning.Input("positive"), diff --git a/comfy_extras/nodes_slg.py b/comfy_extras/nodes_slg.py index f462faa8f..0c6a8b90b 100644 --- a/comfy_extras/nodes_slg.py +++ b/comfy_extras/nodes_slg.py @@ -18,6 +18,7 @@ class SkipLayerGuidanceDiT(io.ComfyNode): node_id="SkipLayerGuidanceDiT", category="advanced/guidance", description="Generic version of SkipLayerGuidance node that can be used on every DiT model.", + short_description="Skip layer guidance applicable to any DiT model.", is_experimental=True, inputs=[ io.Model.Input("model"), @@ -98,6 +99,7 @@ class SkipLayerGuidanceDiTSimple(io.ComfyNode): node_id="SkipLayerGuidanceDiTSimple", category="advanced/guidance", description="Simple version of the SkipLayerGuidanceDiT node that only modifies the uncond pass.", + short_description="Simplified skip layer guidance modifying only the uncond pass.", is_experimental=True, inputs=[ io.Model.Input("model"), diff --git a/comfy_extras/nodes_stable3d.py b/comfy_extras/nodes_stable3d.py index c6d8a683d..0530c24f5 100644 --- a/comfy_extras/nodes_stable3d.py +++ b/comfy_extras/nodes_stable3d.py @@ -28,6 +28,8 @@ class StableZero123_Conditioning(io.ComfyNode): return io.Schema( node_id="StableZero123_Conditioning", category="conditioning/3d_models", + description="Prepares conditioning for Stable Zero123 novel view synthesis by encoding an image with CLIP vision and VAE, combined with camera elevation and azimuth embeddings.", + short_description="Conditioning for Stable Zero123 novel view synthesis.", inputs=[ io.ClipVision.Input("clip_vision"), io.Image.Input("init_image"), @@ -66,6 +68,8 @@ class 
StableZero123_Conditioning_Batched(io.ComfyNode): return io.Schema( node_id="StableZero123_Conditioning_Batched", category="conditioning/3d_models", + description="Prepares batched conditioning for Stable Zero123 with incrementally changing elevation and azimuth angles across the batch for multi-view generation.", + short_description="Batched Stable Zero123 conditioning with angle increments.", inputs=[ io.ClipVision.Input("clip_vision"), io.Image.Input("init_image"), @@ -113,6 +117,8 @@ class SV3D_Conditioning(io.ComfyNode): return io.Schema( node_id="SV3D_Conditioning", category="conditioning/3d_models", + description="Prepares conditioning for SV3D multi-view video generation by encoding an image with CLIP vision and VAE, generating a full 360-degree azimuth orbit at a fixed elevation.", + short_description="Conditioning for SV3D 360-degree multi-view generation.", inputs=[ io.ClipVision.Input("clip_vision"), io.Image.Input("init_image"), diff --git a/comfy_extras/nodes_stable_cascade.py b/comfy_extras/nodes_stable_cascade.py index 04c0b366a..b63e342ac 100644 --- a/comfy_extras/nodes_stable_cascade.py +++ b/comfy_extras/nodes_stable_cascade.py @@ -30,6 +30,8 @@ class StableCascade_EmptyLatentImage(io.ComfyNode): return io.Schema( node_id="StableCascade_EmptyLatentImage", category="latent/stable_cascade", + description="Creates empty Stage C and Stage B latent tensors for Stable Cascade image generation with configurable compression.", + short_description="Empty Stage C and Stage B latents for Stable Cascade.", inputs=[ io.Int.Input("width", default=1024, min=256, max=nodes.MAX_RESOLUTION, step=8), io.Int.Input("height", default=1024, min=256, max=nodes.MAX_RESOLUTION, step=8), @@ -59,6 +61,8 @@ class StableCascade_StageC_VAEEncode(io.ComfyNode): return io.Schema( node_id="StableCascade_StageC_VAEEncode", category="latent/stable_cascade", + description="Encodes an image into Stable Cascade Stage C and Stage B latents using a VAE with configurable compression 
ratio.", + short_description="VAE encode image into Stable Cascade Stage C latent.", inputs=[ io.Image.Input("image"), io.Vae.Input("vae"), @@ -94,6 +98,8 @@ class StableCascade_StageB_Conditioning(io.ComfyNode): return io.Schema( node_id="StableCascade_StageB_Conditioning", category="conditioning/stable_cascade", + description="Applies Stage C prior latent to conditioning for use in Stable Cascade Stage B generation.", + short_description="Apply Stage C prior to Stage B conditioning.", inputs=[ io.Conditioning.Input("conditioning"), io.Latent.Input("stage_c"), @@ -120,6 +126,8 @@ class StableCascade_SuperResolutionControlnet(io.ComfyNode): return io.Schema( node_id="StableCascade_SuperResolutionControlnet", category="_for_testing/stable_cascade", + description="Encodes an image for Stable Cascade super-resolution ControlNet, producing controlnet input and empty Stage C and Stage B latents.", + short_description="Stable Cascade super-resolution ControlNet image encoding.", is_experimental=True, inputs=[ io.Image.Input("image"), diff --git a/comfy_extras/nodes_string.py b/comfy_extras/nodes_string.py index 8d3e65cc5..c6b3713ed 100644 --- a/comfy_extras/nodes_string.py +++ b/comfy_extras/nodes_string.py @@ -11,6 +11,8 @@ class StringConcatenate(io.ComfyNode): node_id="StringConcatenate", display_name="Concatenate", category="utils/string", + description="Joins two strings together with an optional delimiter between them.", + short_description="Joins two strings with an optional delimiter.", search_aliases=["text concat", "join text", "merge text", "combine strings", "concat", "concatenate", "append text", "combine text", "string"], inputs=[ io.String.Input("string_a", multiline=True), @@ -35,6 +37,8 @@ class StringSubstring(io.ComfyNode): search_aliases=["extract text", "text portion"], display_name="Substring", category="utils/string", + description="Extracts a portion of a string between the specified start and end indices.", + short_description="Extracts a portion 
of a string by index.", inputs=[ io.String.Input("string", multiline=True), io.Int.Input("start"), @@ -58,6 +62,8 @@ class StringLength(io.ComfyNode): search_aliases=["character count", "text size"], display_name="Length", category="utils/string", + description="Returns the number of characters in a string.", + short_description=None, inputs=[ io.String.Input("string", multiline=True), ], @@ -79,6 +85,8 @@ class CaseConverter(io.ComfyNode): search_aliases=["text case", "uppercase", "lowercase", "capitalize"], display_name="Case Converter", category="utils/string", + description="Converts a string to a selected letter case.", + short_description="Converts a string to a selected case.", inputs=[ io.String.Input("string", multiline=True), io.Combo.Input("mode", options=["UPPERCASE", "lowercase", "Capitalize", "Title Case"]), @@ -112,6 +120,8 @@ class StringTrim(io.ComfyNode): search_aliases=["clean whitespace", "remove whitespace"], display_name="Trim", category="utils/string", + description="Removes leading and/or trailing whitespace from a string.", + short_description="Removes whitespace from string edges.", inputs=[ io.String.Input("string", multiline=True), io.Combo.Input("mode", options=["Both", "Left", "Right"]), @@ -143,6 +153,8 @@ class StringReplace(io.ComfyNode): search_aliases=["find and replace", "substitute", "swap text"], display_name="Replace", category="utils/string", + description="Replaces all occurrences of a search string with a replacement string.", + short_description="Replaces all occurrences of a substring.", inputs=[ io.String.Input("string", multiline=True), io.String.Input("find", multiline=True), @@ -166,6 +178,8 @@ class StringContains(io.ComfyNode): search_aliases=["text includes", "string includes"], display_name="Contains", category="utils/string", + description="Checks whether a string contains a given substring, with optional case sensitivity.", + short_description="Checks if a string contains a substring.", inputs=[ io.String.Input("string", multiline=True), 
io.String.Input("substring", multiline=True), @@ -194,6 +208,8 @@ class StringCompare(io.ComfyNode): search_aliases=["text match", "string equals", "starts with", "ends with"], display_name="Compare", category="utils/string", + description="Compares two strings for equality, or checks if one starts with or ends with the other.", + short_description="Compares two strings using various modes.", inputs=[ io.String.Input("string_a", multiline=True), io.String.Input("string_b", multiline=True), @@ -230,6 +246,8 @@ class RegexMatch(io.ComfyNode): search_aliases=["pattern match", "text contains", "string match"], display_name="Regex Match", category="utils/string", + description="Tests whether a string matches a regular expression pattern, with configurable flags for case sensitivity, multiline, and dotall modes.", + short_description="Tests if a string matches a regex pattern.", inputs=[ io.String.Input("string", multiline=True), io.String.Input("regex_pattern", multiline=True), @@ -271,6 +289,8 @@ class RegexExtract(io.ComfyNode): search_aliases=["pattern extract", "text parser", "parse text"], display_name="Regex Extract", category="utils/string", + description="Extracts text from a string using a regular expression pattern, supporting first match, all matches, and capture group extraction.", + short_description="Extracts text using a regex pattern.", inputs=[ io.String.Input("string", multiline=True), io.String.Input("regex_pattern", multiline=True), @@ -347,6 +367,7 @@ class RegexReplace(io.ComfyNode): display_name="Regex Replace", category="utils/string", description="Find and replace text using regex patterns.", + short_description=None, inputs=[ io.String.Input("string", multiline=True), io.String.Input("regex_pattern", multiline=True), diff --git a/comfy_extras/nodes_tcfg.py b/comfy_extras/nodes_tcfg.py index 1a6767770..68c0aef0e 100644 --- a/comfy_extras/nodes_tcfg.py +++ b/comfy_extras/nodes_tcfg.py @@ -35,6 +35,7 @@ class TCFG(io.ComfyNode): 
display_name="Tangential Damping CFG", category="advanced/guidance", description="TCFG – Tangential Damping CFG (2503.18137)\n\nRefine the uncond (negative) to align with the cond (positive) for improving quality.", + short_description="Aligns negative conditioning with positive for improved quality.", inputs=[ io.Model.Input("model"), ], diff --git a/comfy_extras/nodes_tomesd.py b/comfy_extras/nodes_tomesd.py index 87bf29b8f..f2e0035c6 100644 --- a/comfy_extras/nodes_tomesd.py +++ b/comfy_extras/nodes_tomesd.py @@ -152,6 +152,8 @@ class TomePatchModel(io.ComfyNode): return io.Schema( node_id="TomePatchModel", category="model_patches/unet", + description="Applies Token Merging (ToMe) to the model, reducing the number of tokens in self-attention by merging similar ones to speed up inference.", + short_description="Token Merging (ToMe) for faster inference.", inputs=[ io.Model.Input("model"), io.Float.Input("ratio", default=0.3, min=0.0, max=1.0, step=0.01), diff --git a/comfy_extras/nodes_toolkit.py b/comfy_extras/nodes_toolkit.py index 71faf7226..e5db7853a 100644 --- a/comfy_extras/nodes_toolkit.py +++ b/comfy_extras/nodes_toolkit.py @@ -15,6 +15,8 @@ class CreateList(io.ComfyNode): node_id="CreateList", display_name="Create List", category="logic", + description="Combines multiple inputs of the same type into a single output list for batch processing.", + short_description="Combines multiple inputs into a single list.", is_input_list=True, search_aliases=["Image Iterator", "Text Iterator", "Iterator"], inputs=[io.Autogrow.Input("inputs", template=template_autogrow)], diff --git a/comfy_extras/nodes_torch_compile.py b/comfy_extras/nodes_torch_compile.py index c43e8ad63..0aed3c5ee 100644 --- a/comfy_extras/nodes_torch_compile.py +++ b/comfy_extras/nodes_torch_compile.py @@ -11,6 +11,8 @@ class TorchCompileModel(io.ComfyNode): return io.Schema( node_id="TorchCompileModel", category="_for_testing", + description="Applies torch.compile to the model using a selectable 
backend for optimized inference performance.", + short_description="Optimize model inference with torch.compile.", inputs=[ io.Model.Input("model"), io.Combo.Input( diff --git a/comfy_extras/nodes_train.py b/comfy_extras/nodes_train.py index aa2d88673..bfbe6732c 100644 --- a/comfy_extras/nodes_train.py +++ b/comfy_extras/nodes_train.py @@ -936,6 +936,8 @@ class TrainLoraNode(io.ComfyNode): node_id="TrainLoraNode", display_name="Train LoRA", category="training", + description="Trains a LoRA adapter on a diffusion model using provided latents and conditioning, with configurable optimizer, loss function, rank, and training parameters including bucket and bypass modes.", + short_description="Train a LoRA adapter on a diffusion model.", is_experimental=True, is_input_list=True, # All inputs become lists inputs=[ @@ -1270,6 +1272,8 @@ class LoraModelLoader(io.ComfyNode): node_id="LoraModelLoader", display_name="Load LoRA Model", category="loaders", + description="Applies a trained LoRA model to a diffusion model with configurable strength for inference.", + short_description="Apply trained LoRA weights to a model.", is_experimental=True, inputs=[ io.Model.Input( @@ -1322,6 +1326,8 @@ class SaveLoRA(io.ComfyNode): search_aliases=["export lora"], display_name="Save LoRA Weights", category="loaders", + description="Saves trained LoRA weights to a safetensors file with an optional step count in the filename.", + short_description="Save LoRA weights to safetensors file.", is_experimental=True, is_output_node=True, inputs=[ @@ -1366,6 +1372,8 @@ class LossGraphNode(io.ComfyNode): search_aliases=["training chart", "training visualization", "plot loss"], display_name="Plot Loss Graph", category="training", + description="Generates and displays a line graph visualization of training loss values over steps from a loss map produced by the Train LoRA node.", + short_description="Visualize training loss as a line graph.", is_experimental=True, is_output_node=True, inputs=[ diff --git 
a/comfy_extras/nodes_upscale_model.py b/comfy_extras/nodes_upscale_model.py index 97b9e948d..3daa02bed 100644 --- a/comfy_extras/nodes_upscale_model.py +++ b/comfy_extras/nodes_upscale_model.py @@ -22,6 +22,8 @@ class UpscaleModelLoader(io.ComfyNode): node_id="UpscaleModelLoader", display_name="Load Upscale Model", category="loaders", + description="Loads an image upscale model (such as ESRGAN or RealESRGAN) from a file for use with the image upscaling node.", + short_description="Load an image upscale model from file.", inputs=[ io.Combo.Input("model_name", options=folder_paths.get_filename_list("upscale_models")), ], @@ -54,6 +56,8 @@ class ImageUpscaleWithModel(io.ComfyNode): display_name="Upscale Image (using Model)", category="image/upscaling", search_aliases=["upscale", "upscaler", "upsc", "enlarge image", "super resolution", "hires", "superres", "increase resolution"], + description="Upscales an image using a loaded upscale model with automatic tiling to manage memory usage at higher resolutions.", + short_description="Upscale image using a loaded upscale model.", inputs=[ io.UpscaleModel.Input("upscale_model"), io.Image.Input("image"), diff --git a/comfy_extras/nodes_video.py b/comfy_extras/nodes_video.py index cd765a7c1..91d23f83d 100644 --- a/comfy_extras/nodes_video.py +++ b/comfy_extras/nodes_video.py @@ -18,6 +18,8 @@ class SaveWEBM(io.ComfyNode): node_id="SaveWEBM", search_aliases=["export webm"], category="image/video", + description="Saves a sequence of images as a WEBM video file with selectable codec and configurable quality.", + short_description="Saves images as a WEBM video file.", is_experimental=True, inputs=[ io.Image.Input("images"), @@ -73,7 +75,8 @@ class SaveVideo(io.ComfyNode): search_aliases=["export video"], display_name="Save Video", category="image/video", - description="Saves the input images to your ComfyUI output directory.", + description="Saves a video to your ComfyUI output directory.", + short_description=None, inputs=[ 
io.Video.Input("video", tooltip="The video to save."), io.String.Input("filename_prefix", default="video/ComfyUI", tooltip="The prefix for the file to save. This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes."), @@ -122,6 +125,7 @@ class CreateVideo(io.ComfyNode): display_name="Create Video", category="image/video", description="Create a video from images.", + short_description=None, inputs=[ io.Image.Input("images", tooltip="The images to create a video from."), io.Float.Input("fps", default=30.0, min=1.0, max=120.0, step=1.0), @@ -147,6 +151,7 @@ class GetVideoComponents(io.ComfyNode): display_name="Get Video Components", category="image/video", description="Extracts all components from a video: frames, audio, and framerate.", + short_description=None, inputs=[ io.Video.Input("video", tooltip="The video to extract components from."), ], @@ -174,6 +179,8 @@ class LoadVideo(io.ComfyNode): search_aliases=["import video", "open video", "video file"], display_name="Load Video", category="image/video", + description="Loads a video file from the input directory.", + short_description=None, inputs=[ io.Combo.Input("file", options=sorted(files), upload=io.UploadType.video), ], @@ -215,6 +222,8 @@ class VideoSlice(io.ComfyNode): "start time", ], category="image/video", + description="Slices a video to a specified start time and duration, with optional strict duration enforcement.", + short_description="Slices a video to a specified time range.", inputs=[ io.Video.Input("video"), io.Float.Input( diff --git a/comfy_extras/nodes_video_model.py b/comfy_extras/nodes_video_model.py index 0f760aa26..a4ae1ab77 100644 --- a/comfy_extras/nodes_video_model.py +++ b/comfy_extras/nodes_video_model.py @@ -16,6 +16,8 @@ class ImageOnlyCheckpointLoader: FUNCTION = "load_checkpoint" CATEGORY = "loaders/video_models" + DESCRIPTION = "Load an image-only checkpoint (such as SVD) and return the model, CLIP vision, and 
VAE components." + SHORT_DESCRIPTION = "Load an image-only video model checkpoint." def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True): ckpt_path = folder_paths.get_full_path_or_raise("checkpoints", ckpt_name) @@ -42,6 +44,8 @@ class SVD_img2vid_Conditioning: FUNCTION = "encode" CATEGORY = "conditioning/video_models" + DESCRIPTION = "Generate conditioning for SVD image-to-video by encoding an init image with CLIP vision and VAE, with motion and FPS controls." + SHORT_DESCRIPTION = "Generate SVD img2vid conditioning from an image." def encode(self, clip_vision, init_image, vae, width, height, video_frames, motion_bucket_id, fps, augmentation_level): output = clip_vision.encode_image(init_image) @@ -66,6 +70,8 @@ class VideoLinearCFGGuidance: FUNCTION = "patch" CATEGORY = "sampling/video_models" + DESCRIPTION = "Apply linear CFG guidance scaling across video frames, ramping from a minimum CFG to the full scale." + SHORT_DESCRIPTION = "Apply linear CFG scaling across video frames." def patch(self, model, min_cfg): def linear_cfg(args): @@ -90,6 +96,8 @@ class VideoTriangleCFGGuidance: FUNCTION = "patch" CATEGORY = "sampling/video_models" + DESCRIPTION = "Apply triangle-wave CFG guidance scaling across video frames, peaking at the center and tapering to the minimum CFG at the edges." + SHORT_DESCRIPTION = "Apply triangle-wave CFG scaling across video frames." def patch(self, model, min_cfg): def linear_cfg(args): @@ -109,6 +117,8 @@ class VideoTriangleCFGGuidance: class ImageOnlyCheckpointSave(comfy_extras.nodes_model_merging.CheckpointSave): CATEGORY = "advanced/model_merging" + DESCRIPTION = "Save a video model checkpoint with model, CLIP vision, and VAE components to a safetensors file." + SHORT_DESCRIPTION = "Save a video model checkpoint to a file." 
@classmethod def INPUT_TYPES(s): @@ -139,6 +149,8 @@ class ConditioningSetAreaPercentageVideo: FUNCTION = "append" CATEGORY = "conditioning" + DESCRIPTION = "Set a 3D conditioning area for video models using percentage-based spatial and temporal dimensions with position offsets." + SHORT_DESCRIPTION = "Set a 3D conditioning area for video models." def append(self, conditioning, width, height, temporal, x, y, z, strength): c = node_helpers.conditioning_set_values(conditioning, {"area": ("percentage", temporal, height, width, z, y, x), diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py index 2ff012134..4b62e7fc6 100644 --- a/comfy_extras/nodes_wan.py +++ b/comfy_extras/nodes_wan.py @@ -19,6 +19,8 @@ class WanImageToVideo(io.ComfyNode): return io.Schema( node_id="WanImageToVideo", category="conditioning/video_models", + description="Prepares conditioning and latent inputs for Wan image-to-video generation using a start image and CLIP vision.", + short_description="Prepare conditioning for Wan image-to-video generation.", inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), @@ -67,6 +69,8 @@ class WanFunControlToVideo(io.ComfyNode): return io.Schema( node_id="WanFunControlToVideo", category="conditioning/video_models", + description="Sets up conditioning for Wan Fun control-to-video generation with optional start image and control video inputs.", + short_description="Conditioning for Wan Fun control-to-video generation.", inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), @@ -120,6 +124,8 @@ class Wan22FunControlToVideo(io.ComfyNode): return io.Schema( node_id="Wan22FunControlToVideo", category="conditioning/video_models", + description="Prepares conditioning for Wan 2.2 Fun control-to-video generation with reference image and control video support.", + short_description="Conditioning for Wan 2.2 Fun control-to-video generation.", inputs=[ io.Conditioning.Input("positive"), 
io.Conditioning.Input("negative"), @@ -185,6 +191,8 @@ class WanFirstLastFrameToVideo(io.ComfyNode): return io.Schema( node_id="WanFirstLastFrameToVideo", category="conditioning/video_models", + description="Generates video conditioning from first and last frame images with optional CLIP vision guidance for Wan models.", + short_description="Video conditioning from first and last frames.", inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), @@ -257,6 +265,8 @@ class WanFunInpaintToVideo(io.ComfyNode): return io.Schema( node_id="WanFunInpaintToVideo", category="conditioning/video_models", + description="Prepares conditioning for Wan Fun inpainting-based video generation using start and end images with CLIP vision output.", + short_description="Conditioning for Wan Fun inpainting video generation.", inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), @@ -289,6 +299,8 @@ class WanVaceToVideo(io.ComfyNode): node_id="WanVaceToVideo", search_aliases=["video conditioning", "video control"], category="conditioning/video_models", + description="Sets up Wan VACE video conditioning with control video, masks, and optional reference image for guided video generation.", + short_description="Wan VACE video conditioning with control and reference inputs.", inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), @@ -376,6 +388,8 @@ class TrimVideoLatent(io.ComfyNode): return io.Schema( node_id="TrimVideoLatent", category="latent/video", + description="Trims a specified number of frames from the beginning of a video latent tensor.", + short_description="Trim frames from the start of a video latent.", inputs=[ io.Latent.Input("samples"), io.Int.Input("trim_amount", default=0, min=0, max=99999), @@ -399,6 +413,8 @@ class WanCameraImageToVideo(io.ComfyNode): return io.Schema( node_id="WanCameraImageToVideo", category="conditioning/video_models", + description="Prepares conditioning for Wan camera-controlled 
image-to-video generation with camera motion embeddings and CLIP vision.", + short_description="Wan camera-controlled image-to-video conditioning.", inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), @@ -453,6 +469,8 @@ class WanPhantomSubjectToVideo(io.ComfyNode): return io.Schema( node_id="WanPhantomSubjectToVideo", category="conditioning/video_models", + description="Generates conditioning for Wan Phantom subject-driven video creation from reference subject images.", + short_description="Wan Phantom subject-driven video conditioning.", inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), @@ -708,6 +726,8 @@ class WanTrackToVideo(io.ComfyNode): node_id="WanTrackToVideo", search_aliases=["motion tracking", "trajectory video", "point tracking", "keypoint animation"], category="conditioning/video_models", + description="Creates video conditioning from motion track data, applying trajectory-based guidance to generate tracked object movement in video.", + short_description="Video conditioning from motion track trajectory data.", inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), @@ -952,6 +972,8 @@ class WanSoundImageToVideo(io.ComfyNode): return io.Schema( node_id="WanSoundImageToVideo", category="conditioning/video_models", + description="Prepares conditioning for Wan audio-driven video generation from a reference image, audio, control video, and motion reference.", + short_description="Wan audio-driven image-to-video conditioning.", inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), @@ -985,6 +1007,8 @@ class WanSoundImageToVideoExtend(io.ComfyNode): return io.Schema( node_id="WanSoundImageToVideoExtend", category="conditioning/video_models", + description="Extends a previously generated Wan audio-driven video by continuing from an existing video latent with new audio and control inputs.", + short_description="Extend Wan audio-driven video from existing 
latent.", inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), @@ -1047,6 +1071,8 @@ class WanHuMoImageToVideo(io.ComfyNode): return io.Schema( node_id="WanHuMoImageToVideo", category="conditioning/video_models", + description="Prepares conditioning for Wan HuMo human motion video generation driven by audio input and a reference image.", + short_description="Wan HuMo audio-driven human motion video conditioning.", inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), @@ -1113,6 +1139,8 @@ class WanAnimateToVideo(io.ComfyNode): return io.Schema( node_id="WanAnimateToVideo", category="conditioning/video_models", + description="Sets up conditioning for Wan character animation video generation with face, pose, and background video inputs and motion continuation support.", + short_description="Wan character animation video conditioning with multiple inputs.", inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), @@ -1253,6 +1281,8 @@ class Wan22ImageToVideoLatent(io.ComfyNode): return io.Schema( node_id="Wan22ImageToVideoLatent", category="conditioning/inpaint", + description="Creates an inpainting latent for Wan 2.2 image-to-video generation by encoding a start image with a noise mask.", + short_description="Inpainting latent for Wan 2.2 image-to-video generation.", inputs=[ io.Vae.Input("vae"), io.Int.Input("width", default=1280, min=32, max=nodes.MAX_RESOLUTION, step=32), @@ -1303,6 +1333,8 @@ class WanInfiniteTalkToVideo(io.ComfyNode): return io.Schema( node_id="WanInfiniteTalkToVideo", category="conditioning/video_models", + description="Prepares conditioning for Wan InfiniteTalk video generation with single or dual speaker audio, motion frames, and cross-attention patching.", + short_description="Wan InfiniteTalk audio-driven talking video conditioning.", inputs=[ io.DynamicCombo.Input("mode", options=[ io.DynamicCombo.Option("single_speaker", []), diff --git 
a/comfy_extras/nodes_wanmove.py b/comfy_extras/nodes_wanmove.py index d60baf230..79135eb96 100644 --- a/comfy_extras/nodes_wanmove.py +++ b/comfy_extras/nodes_wanmove.py @@ -248,6 +248,8 @@ class WanMoveVisualizeTracks(io.ComfyNode): return io.Schema( node_id="WanMoveVisualizeTracks", category="conditioning/video_models", + description="Renders motion track paths and points as a visual overlay on video frames for preview and debugging.", + short_description="Visualize motion tracks as overlay on video frames.", inputs=[ io.Image.Input("images"), io.Tracks.Input("tracks", optional=True), @@ -284,6 +286,8 @@ class WanMoveTracksFromCoords(io.ComfyNode): return io.Schema( node_id="WanMoveTracksFromCoords", category="conditioning/video_models", + description="Converts JSON coordinate data and optional mask into a structured tracks object for Wan Move video generation.", + short_description="Convert JSON coordinates to Wan Move track data.", inputs=[ io.String.Input("track_coords", force_input=True, default="[]", optional=True), io.Mask.Input("track_mask", optional=True), @@ -326,6 +330,8 @@ class GenerateTracks(io.ComfyNode): node_id="GenerateTracks", search_aliases=["motion paths", "camera movement", "trajectory"], category="conditioning/video_models", + description="Generates motion track paths between start and end points with configurable interpolation, Bezier curves, and multi-track spread.", + short_description="Generate motion tracks between two points with interpolation.", inputs=[ io.Int.Input("width", default=832, min=16, max=4096, step=16), io.Int.Input("height", default=480, min=16, max=4096, step=16), @@ -435,6 +441,8 @@ class WanMoveConcatTrack(io.ComfyNode): return io.Schema( node_id="WanMoveConcatTrack", category="conditioning/video_models", + description="Concatenates two sets of motion tracks into a single combined tracks object along the track dimension.", + short_description="Concatenate two sets of motion tracks together.", inputs=[ 
io.Tracks.Input("tracks_1"), io.Tracks.Input("tracks_2", optional=True), @@ -464,6 +472,8 @@ class WanMoveTrackToVideo(io.ComfyNode): return io.Schema( node_id="WanMoveTrackToVideo", category="conditioning/video_models", + description="Prepares conditioning for Wan Move track-guided video generation by encoding a start image and applying trajectory-based latent feature replacement.", + short_description="Wan Move track-guided video conditioning from start image.", inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), diff --git a/comfy_extras/nodes_webcam.py b/comfy_extras/nodes_webcam.py index 6349ac017..ee9c166a7 100644 --- a/comfy_extras/nodes_webcam.py +++ b/comfy_extras/nodes_webcam.py @@ -20,6 +20,8 @@ class WebcamCapture(nodes.LoadImage): FUNCTION = "load_capture" CATEGORY = "image" + DESCRIPTION = "Capture an image from a connected webcam with optional width and height settings." + SHORT_DESCRIPTION = "Capture an image from a webcam." def load_capture(self, image, **kwargs): return super().load_image(folder_paths.get_annotated_filepath(image)) diff --git a/comfy_extras/nodes_zimage.py b/comfy_extras/nodes_zimage.py index 2ee3c43b1..80f9ffb5a 100644 --- a/comfy_extras/nodes_zimage.py +++ b/comfy_extras/nodes_zimage.py @@ -11,6 +11,8 @@ class TextEncodeZImageOmni(io.ComfyNode): return io.Schema( node_id="TextEncodeZImageOmni", category="advanced/conditioning", + description="Encodes text and optional reference images into conditioning for ZImage Omni models, supporting both CLIP vision and VAE-based image encoding.", + short_description="Encodes text and images for ZImage Omni models.", is_experimental=True, inputs=[ io.Clip.Input("clip"), diff --git a/nodes.py b/nodes.py index db5f98408..4455ecc5b 100644 --- a/nodes.py +++ b/nodes.py @@ -70,6 +70,7 @@ class CLIPTextEncode(ComfyNodeABC): CATEGORY = "conditioning" DESCRIPTION = "Encodes a text prompt using a CLIP model into an embedding that can be used to guide the diffusion model 
towards generating specific images." + SHORT_DESCRIPTION = "Encodes text prompts using CLIP for guiding diffusion models." SEARCH_ALIASES = ["text", "prompt", "text prompt", "positive prompt", "negative prompt", "encode text", "text encoder", "encode prompt"] def encode(self, clip, text): @@ -87,6 +88,8 @@ class ConditioningCombine: FUNCTION = "combine" CATEGORY = "conditioning" + DESCRIPTION = "Combines two conditioning inputs into one by appending them together." + SHORT_DESCRIPTION = None SEARCH_ALIASES = ["combine", "merge conditioning", "combine prompts", "merge prompts", "mix prompts", "add prompt"] def combine(self, conditioning_1, conditioning_2): @@ -104,6 +107,8 @@ class ConditioningAverage : FUNCTION = "addWeighted" CATEGORY = "conditioning" + DESCRIPTION = "Blends two conditioning inputs using a weighted average, allowing smooth interpolation between prompts based on a strength parameter." + SHORT_DESCRIPTION = "Blends two conditionings via weighted average interpolation." def addWeighted(self, conditioning_to, conditioning_from, conditioning_to_strength): out = [] @@ -143,6 +148,8 @@ class ConditioningConcat: FUNCTION = "concat" CATEGORY = "conditioning" + DESCRIPTION = "Concatenates conditioning tokens from one conditioning onto another, extending the token sequence to combine their effects." + SHORT_DESCRIPTION = "Concatenates conditioning tokens to combine prompt effects." def concat(self, conditioning_to, conditioning_from): out = [] @@ -176,6 +183,8 @@ class ConditioningSetArea: FUNCTION = "append" CATEGORY = "conditioning" + DESCRIPTION = "Sets a rectangular area on conditioning using pixel coordinates, allowing the prompt to apply only to a specific region of the image." + SHORT_DESCRIPTION = "Restricts conditioning to a specific pixel-coordinate area." 
def append(self, conditioning, width, height, x, y, strength): c = node_helpers.conditioning_set_values(conditioning, {"area": (height // 8, width // 8, y // 8, x // 8), @@ -197,6 +206,8 @@ class ConditioningSetAreaPercentage: FUNCTION = "append" CATEGORY = "conditioning" + DESCRIPTION = "Sets a rectangular area on conditioning using percentage-based coordinates, allowing the prompt to apply to a proportional region of the image." + SHORT_DESCRIPTION = "Restricts conditioning to a percentage-based area." def append(self, conditioning, width, height, x, y, strength): c = node_helpers.conditioning_set_values(conditioning, {"area": ("percentage", height, width, y, x), @@ -214,6 +225,8 @@ class ConditioningSetAreaStrength: FUNCTION = "append" CATEGORY = "conditioning" + DESCRIPTION = "Sets the strength of a conditioning area." + SHORT_DESCRIPTION = None def append(self, conditioning, strength): c = node_helpers.conditioning_set_values(conditioning, {"strength": strength}) @@ -234,6 +247,8 @@ class ConditioningSetMask: FUNCTION = "append" CATEGORY = "conditioning" + DESCRIPTION = "Applies a mask to conditioning so the prompt only affects the masked region, with adjustable strength and optional bounds-based area restriction." + SHORT_DESCRIPTION = "Applies a mask to limit conditioning to a region." def append(self, conditioning, mask, set_cond_area, strength): set_area_to_bounds = False @@ -257,6 +272,8 @@ class ConditioningZeroOut: FUNCTION = "zero_out" CATEGORY = "advanced/conditioning" + DESCRIPTION = "Zeros out all conditioning tensors including pooled output, producing an empty unconditional embedding." + SHORT_DESCRIPTION = "Zeros out conditioning to produce an empty embedding." 
def zero_out(self, conditioning): c = [] @@ -283,6 +300,8 @@ class ConditioningSetTimestepRange: FUNCTION = "set_range" CATEGORY = "advanced/conditioning" + DESCRIPTION = "Sets the start and end timestep percentages for conditioning, controlling during which portion of the sampling process it is active." + SHORT_DESCRIPTION = "Limits conditioning to a specific timestep range." def set_range(self, conditioning, start, end): c = node_helpers.conditioning_set_values(conditioning, {"start_percent": start, @@ -304,6 +323,7 @@ class VAEDecode: CATEGORY = "latent" DESCRIPTION = "Decodes latent images back into pixel space images." + SHORT_DESCRIPTION = None SEARCH_ALIASES = ["decode", "decode latent", "latent to image", "render latent"] def decode(self, vae, samples): @@ -329,6 +349,8 @@ class VAEDecodeTiled: FUNCTION = "decode" CATEGORY = "_for_testing" + DESCRIPTION = "Decodes latent images to pixel space using tiling to reduce memory usage, with configurable tile size, overlap, and temporal settings for video VAEs." + SHORT_DESCRIPTION = "Decodes latents to images using tiling for lower memory." def decode(self, vae, samples, tile_size, overlap=64, temporal_size=64, temporal_overlap=8): if tile_size < overlap * 4: @@ -357,6 +379,8 @@ class VAEEncode: FUNCTION = "encode" CATEGORY = "latent" + DESCRIPTION = "Encodes pixel images into latent space using a VAE model." + SHORT_DESCRIPTION = None SEARCH_ALIASES = ["encode", "encode image", "image to latent"] def encode(self, vae, pixels): @@ -376,6 +400,8 @@ class VAEEncodeTiled: FUNCTION = "encode" CATEGORY = "_for_testing" + DESCRIPTION = "Encodes pixel images into latent space using tiling to reduce memory usage, with configurable tile size, overlap, and temporal settings for video VAEs." + SHORT_DESCRIPTION = "Encodes images to latents using tiling for lower memory." 
def encode(self, vae, pixels, tile_size, overlap, temporal_size=64, temporal_overlap=8): t = vae.encode_tiled(pixels, tile_x=tile_size, tile_y=tile_size, overlap=overlap, tile_t=temporal_size, overlap_t=temporal_overlap) @@ -389,6 +415,8 @@ class VAEEncodeForInpaint: FUNCTION = "encode" CATEGORY = "latent/inpaint" + DESCRIPTION = "Encodes an image into latent space for inpainting by applying and optionally growing a mask, zeroing out masked pixel regions before encoding." + SHORT_DESCRIPTION = "Encodes images for inpainting with mask-aware encoding." def encode(self, vae, pixels, mask, grow_mask_by=6): downscale_ratio = vae.spacial_compression_encode() @@ -438,6 +466,8 @@ class InpaintModelConditioning: FUNCTION = "encode" CATEGORY = "conditioning/inpaint" + DESCRIPTION = "Prepares conditioning for inpaint models by encoding the masked image and concatenating the latent and mask information into the positive and negative conditioning." + SHORT_DESCRIPTION = "Prepares inpaint model conditioning with mask-aware latents." def encode(self, positive, negative, pixels, vae, mask, noise_mask=True): x = (pixels.shape[1] // 8) * 8 @@ -492,6 +522,8 @@ class SaveLatent: OUTPUT_NODE = True CATEGORY = "_for_testing" + DESCRIPTION = "Saves latent tensors to a safetensors file in the output directory with optional workflow metadata." + SHORT_DESCRIPTION = "Saves latent tensors to a safetensors file." def save(self, samples, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None): full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir) @@ -537,6 +569,8 @@ class LoadLatent: return {"required": {"latent": [sorted(files), ]}, } CATEGORY = "_for_testing" + DESCRIPTION = "Loads latent tensors from a previously saved safetensors file in the input directory." + SHORT_DESCRIPTION = "Loads latent tensors from a safetensors file." 
RETURN_TYPES = ("LATENT", ) FUNCTION = "load" @@ -576,6 +610,8 @@ class CheckpointLoader: FUNCTION = "load_checkpoint" CATEGORY = "advanced/loaders" + DESCRIPTION = "Loads a checkpoint using a separate model config file. Deprecated in favor of CheckpointLoaderSimple which auto-detects the config." + SHORT_DESCRIPTION = "Loads a checkpoint with a manual config file (deprecated)." DEPRECATED = True def load_checkpoint(self, config_name, ckpt_name): @@ -599,6 +635,7 @@ class CheckpointLoaderSimple: CATEGORY = "loaders" DESCRIPTION = "Loads a diffusion model checkpoint, diffusion models are used to denoise latents." + SHORT_DESCRIPTION = "Loads a diffusion model checkpoint for denoising latents." SEARCH_ALIASES = ["load model", "checkpoint", "model loader", "load checkpoint", "ckpt", "model"] def load_checkpoint(self, ckpt_name): @@ -623,6 +660,8 @@ class DiffusersLoader: FUNCTION = "load_checkpoint" CATEGORY = "advanced/loaders/deprecated" + DESCRIPTION = "Loads a diffusion model from the Hugging Face diffusers format, outputting the model, CLIP, and VAE components." + SHORT_DESCRIPTION = "Loads diffusers-format models into model, CLIP, and VAE." def load_checkpoint(self, model_path, output_vae=True, output_clip=True): for search_path in folder_paths.get_folder_paths("diffusers"): @@ -644,6 +683,8 @@ class unCLIPCheckpointLoader: FUNCTION = "load_checkpoint" CATEGORY = "loaders" + DESCRIPTION = "Loads an unCLIP checkpoint, outputting the model, CLIP, VAE, and CLIP Vision components needed for image-guided generation." + SHORT_DESCRIPTION = "Loads unCLIP checkpoints with CLIP Vision output." def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True): ckpt_path = folder_paths.get_full_path_or_raise("checkpoints", ckpt_name) @@ -660,6 +701,8 @@ class CLIPSetLastLayer: FUNCTION = "set_last_layer" CATEGORY = "conditioning" + DESCRIPTION = "Sets which CLIP layer to use as the output. 
Earlier layers (more negative values) can produce different stylistic effects." + SHORT_DESCRIPTION = "Sets which CLIP layer to use as output." def set_last_layer(self, clip, stop_at_clip_layer): clip = clip.clone() @@ -688,6 +731,7 @@ class LoraLoader: CATEGORY = "loaders" DESCRIPTION = "LoRAs are used to modify diffusion and CLIP models, altering the way in which latents are denoised such as applying styles. Multiple LoRA nodes can be linked together." + SHORT_DESCRIPTION = "Modifies diffusion and CLIP models using LoRA adjustments." SEARCH_ALIASES = ["lora", "load lora", "apply lora", "lora loader", "lora model"] def load_lora(self, model, clip, lora_name, strength_model, strength_clip): @@ -718,6 +762,8 @@ class LoraLoaderModelOnly(LoraLoader): }} RETURN_TYPES = ("MODEL",) FUNCTION = "load_lora_model_only" + DESCRIPTION = "Loads a LoRA and applies it to the diffusion model only, without modifying the CLIP model." + SHORT_DESCRIPTION = "Applies a LoRA to the diffusion model only." def load_lora_model_only(self, model, lora_name, strength_model): return (self.load_lora(model, None, lora_name, strength_model, 0)[0],) @@ -808,6 +854,8 @@ class VAELoader: FUNCTION = "load_vae" CATEGORY = "loaders" + DESCRIPTION = "Loads a VAE model for encoding and decoding images to and from latent space, supporting full VAEs, tiny autoencoders (TAESD), and video VAEs." + SHORT_DESCRIPTION = "Loads a VAE model for image encoding and decoding." #TODO: scale factor? def load_vae(self, vae_name): @@ -836,6 +884,8 @@ class ControlNetLoader: FUNCTION = "load_controlnet" CATEGORY = "loaders" + DESCRIPTION = "Loads a ControlNet model from file for guiding image generation with structural conditioning." + SHORT_DESCRIPTION = "Loads a ControlNet model from file." 
SEARCH_ALIASES = ["controlnet", "control net", "cn", "load controlnet", "controlnet loader"] def load_controlnet(self, control_net_name): @@ -855,6 +905,8 @@ class DiffControlNetLoader: FUNCTION = "load_controlnet" CATEGORY = "loaders" + DESCRIPTION = "Loads a differential ControlNet model that requires a base diffusion model for initialization." + SHORT_DESCRIPTION = "Loads a differential ControlNet with a base model." def load_controlnet(self, model, control_net_name): controlnet_path = folder_paths.get_full_path_or_raise("controlnet", control_net_name) @@ -875,6 +927,8 @@ class ControlNetApply: DEPRECATED = True CATEGORY = "conditioning/controlnet" + DESCRIPTION = "Applies a ControlNet to conditioning with an image hint and strength. Deprecated in favor of ControlNetApplyAdvanced." + SHORT_DESCRIPTION = "Applies ControlNet to conditioning (deprecated)." def apply_controlnet(self, conditioning, control_net, image, strength): if strength == 0: @@ -913,6 +967,8 @@ class ControlNetApplyAdvanced: FUNCTION = "apply_controlnet" CATEGORY = "conditioning/controlnet" + DESCRIPTION = "Applies a ControlNet to both positive and negative conditioning with an image hint, adjustable strength, and start/end percentage controls for scheduling." + SHORT_DESCRIPTION = "Applies ControlNet with strength and timestep scheduling." SEARCH_ALIASES = ["controlnet", "apply controlnet", "use controlnet", "control net"] def apply_controlnet(self, positive, negative, control_net, image, strength, start_percent, end_percent, vae=None, extra_concat=[]): @@ -954,6 +1010,8 @@ class UNETLoader: FUNCTION = "load_unet" CATEGORY = "advanced/loaders" + DESCRIPTION = "Loads a standalone diffusion model (UNET/DiT) with optional weight dtype selection including fp8 precision modes for lower memory usage." + SHORT_DESCRIPTION = "Loads a diffusion model with optional fp8 precision." 
def load_unet(self, unet_name, weight_dtype): model_options = {} @@ -984,6 +1042,7 @@ class CLIPLoader: CATEGORY = "advanced/loaders" DESCRIPTION = "[Recipes]\n\nstable_diffusion: clip-l\nstable_cascade: clip-g\nsd3: t5 xxl/ clip-g / clip-l\nstable_audio: t5 base\nmochi: t5 xxl\ncosmos: old t5 xxl\nlumina2: gemma 2 2B\nwan: umt5 xxl\n hidream: llama-3.1 (Recommend) or t5\nomnigen2: qwen vl 2.5 3B" + SHORT_DESCRIPTION = "Loads a single CLIP model with architecture-specific recipes." def load_clip(self, clip_name, type="stable_diffusion", device="default"): clip_type = getattr(comfy.sd.CLIPType, type.upper(), comfy.sd.CLIPType.STABLE_DIFFUSION) @@ -1012,6 +1071,7 @@ class DualCLIPLoader: CATEGORY = "advanced/loaders" DESCRIPTION = "[Recipes]\n\nsdxl: clip-l, clip-g\nsd3: clip-l, clip-g / clip-l, t5 / clip-g, t5\nflux: clip-l, t5\nhidream: at least one of t5 or llama, recommended t5 and llama\nhunyuan_image: qwen2.5vl 7b and byt5 small\nnewbie: gemma-3-4b-it, jina clip v2" + SHORT_DESCRIPTION = "Loads two CLIP models simultaneously with architecture recipes." def load_clip(self, clip_name1, clip_name2, type, device="default"): clip_type = getattr(comfy.sd.CLIPType, type.upper(), comfy.sd.CLIPType.STABLE_DIFFUSION) @@ -1035,6 +1095,8 @@ class CLIPVisionLoader: FUNCTION = "load_clip" CATEGORY = "loaders" + DESCRIPTION = "Loads a CLIP Vision model for encoding images into CLIP vision embeddings." + SHORT_DESCRIPTION = "Loads a CLIP Vision model for image encoding." def load_clip(self, clip_name): clip_path = folder_paths.get_full_path_or_raise("clip_vision", clip_name) @@ -1054,6 +1116,8 @@ class CLIPVisionEncode: FUNCTION = "encode" CATEGORY = "conditioning" + DESCRIPTION = "Encodes an image using a CLIP Vision model to produce a vision embedding, with optional center cropping." + SHORT_DESCRIPTION = "Encodes images into CLIP Vision embeddings." 
def encode(self, clip_vision, image, crop): crop_image = True @@ -1071,6 +1135,8 @@ class StyleModelLoader: FUNCTION = "load_style_model" CATEGORY = "loaders" + DESCRIPTION = "Loads a style model from file for applying visual styles to conditioning." + SHORT_DESCRIPTION = "Loads a style model from file." def load_style_model(self, style_model_name): style_model_path = folder_paths.get_full_path_or_raise("style_models", style_model_name) @@ -1093,6 +1159,8 @@ class StyleModelApply: FUNCTION = "apply_stylemodel" CATEGORY = "conditioning/style_model" + DESCRIPTION = "Applies a style model to conditioning using a CLIP Vision output, with adjustable strength via multiply or attention bias modes." + SHORT_DESCRIPTION = "Applies a style model to conditioning with strength control." def apply_stylemodel(self, conditioning, style_model, clip_vision_output, strength, strength_type): cond = style_model.get_cond(clip_vision_output).flatten(start_dim=0, end_dim=1).unsqueeze(dim=0) @@ -1153,6 +1221,8 @@ class unCLIPConditioning: FUNCTION = "apply_adm" CATEGORY = "conditioning" + DESCRIPTION = "Applies unCLIP image conditioning by adding CLIP vision embeddings to conditioning with adjustable strength and noise augmentation." + SHORT_DESCRIPTION = "Applies unCLIP image conditioning with vision embeddings." def apply_adm(self, conditioning, clip_vision_output, strength, noise_augmentation): if strength == 0: @@ -1170,6 +1240,7 @@ class GLIGENLoader: FUNCTION = "load_gligen" CATEGORY = "loaders" + DESCRIPTION = "Loads a GLIGEN model for spatially-grounded text-to-image generation." def load_gligen(self, gligen_name): gligen_path = folder_paths.get_full_path_or_raise("gligen", gligen_name) @@ -1192,6 +1263,8 @@ class GLIGENTextBoxApply: FUNCTION = "append" CATEGORY = "conditioning/gligen" + DESCRIPTION = "Applies GLIGEN text box conditioning to place a text-described object at a specific bounding box position in the generated image." 
+ SHORT_DESCRIPTION = "Places a text-described object at a bounding box position." def append(self, conditioning_to, clip, gligen_textbox_model, text, width, height, x, y): c = [] @@ -1226,6 +1299,7 @@ class EmptyLatentImage: CATEGORY = "latent" DESCRIPTION = "Create a new batch of empty latent images to be denoised via sampling." + SHORT_DESCRIPTION = "Creates empty latent images for denoising via sampling." SEARCH_ALIASES = ["empty", "empty latent", "new latent", "create latent", "blank latent", "blank"] def generate(self, width, height, batch_size=1): @@ -1246,6 +1320,8 @@ class LatentFromBatch: FUNCTION = "frombatch" CATEGORY = "latent/batch" + DESCRIPTION = "Extracts a contiguous range of samples from a latent batch by specifying a start index and length." + SHORT_DESCRIPTION = "Extracts a range of samples from a latent batch." def frombatch(self, samples, batch_index, length): s = samples.copy() @@ -1279,6 +1355,8 @@ class RepeatLatentBatch: FUNCTION = "repeat" CATEGORY = "latent/batch" + DESCRIPTION = "Repeats a latent batch a specified number of times to create a larger batch." + SHORT_DESCRIPTION = "Duplicates latent batch samples a specified number of times." def repeat(self, samples, amount): s = samples.copy() @@ -1311,6 +1389,8 @@ class LatentUpscale: FUNCTION = "upscale" CATEGORY = "latent" + DESCRIPTION = "Upscales latent representations to a target width and height using various interpolation methods, with optional cropping." + SHORT_DESCRIPTION = "Upscales latents to a target resolution." def upscale(self, samples, upscale_method, width, height, crop): if width == 0 and height == 0: @@ -1344,6 +1424,7 @@ class LatentUpscaleBy: FUNCTION = "upscale" CATEGORY = "latent" + DESCRIPTION = "Upscales latent representations by a relative scale factor using various interpolation methods." 
def upscale(self, samples, upscale_method, scale_by): s = samples.copy() @@ -1362,6 +1443,8 @@ class LatentRotate: FUNCTION = "rotate" CATEGORY = "latent/transform" + DESCRIPTION = "Rotates latent representations by 90, 180, or 270 degrees." + SHORT_DESCRIPTION = None def rotate(self, samples, rotation): s = samples.copy() @@ -1388,6 +1471,8 @@ class LatentFlip: FUNCTION = "flip" CATEGORY = "latent/transform" + DESCRIPTION = "Flips latent representations vertically or horizontally." + SHORT_DESCRIPTION = None def flip(self, samples, flip_method): s = samples.copy() @@ -1413,6 +1498,8 @@ class LatentComposite: FUNCTION = "composite" CATEGORY = "latent" + DESCRIPTION = "Composites one latent onto another at a specified position with optional feathered blending at the edges." + SHORT_DESCRIPTION = "Composites one latent onto another with feathering." def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0): x = x // 8 @@ -1462,6 +1549,8 @@ class LatentBlend: FUNCTION = "blend" CATEGORY = "_for_testing" + DESCRIPTION = "Blends two latent representations together using a blend factor, automatically resizing if dimensions differ." + SHORT_DESCRIPTION = "Blends two latents together using a blend factor." def blend(self, samples1, samples2, blend_factor:float, blend_mode: str="normal"): @@ -1500,6 +1589,8 @@ class LatentCrop: FUNCTION = "crop" CATEGORY = "latent/transform" + DESCRIPTION = "Crops a latent representation to a specified width, height, and position." + SHORT_DESCRIPTION = None def crop(self, samples, width, height, x, y): s = samples.copy() @@ -1530,6 +1621,8 @@ class SetLatentNoiseMask: FUNCTION = "set_mask" CATEGORY = "latent/inpaint" + DESCRIPTION = "Sets a noise mask on a latent so that sampling only adds noise within the masked region, used for inpainting workflows." + SHORT_DESCRIPTION = "Sets a noise mask on a latent for inpainting."
def set_mask(self, samples, mask): s = samples.copy() @@ -1584,6 +1677,7 @@ class KSampler: CATEGORY = "sampling" DESCRIPTION = "Uses the provided model, positive and negative conditioning to denoise the latent image." + SHORT_DESCRIPTION = "Denoises latent images using model and conditioning inputs." SEARCH_ALIASES = ["sampler", "sample", "generate", "denoise", "diffuse", "txt2img", "img2img"] def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0): @@ -1613,6 +1707,8 @@ class KSamplerAdvanced: FUNCTION = "sample" CATEGORY = "sampling" + DESCRIPTION = "Advanced sampler with fine-grained control over noise addition, start/end steps, and whether to return with leftover noise for multi-pass sampling." + SHORT_DESCRIPTION = "Advanced sampler with step range and noise controls." def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0): force_full_denoise = True @@ -1649,6 +1745,7 @@ class SaveImage: CATEGORY = "image" DESCRIPTION = "Saves the input images to your ComfyUI output directory." + SHORT_DESCRIPTION = None SEARCH_ALIASES = ["save", "save image", "export image", "output image", "write image", "download"] def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None): @@ -1687,6 +1784,8 @@ class PreviewImage(SaveImage): self.compress_level = 1 SEARCH_ALIASES = ["preview", "preview image", "show image", "view image", "display image", "image viewer"] + DESCRIPTION = "Previews images in the UI by saving them as temporary files that are not permanently stored." + SHORT_DESCRIPTION = "Previews images in the UI as temporary files." @classmethod def INPUT_TYPES(s): @@ -1706,6 +1805,8 @@ class LoadImage: } CATEGORY = "image" + DESCRIPTION = "Loads an image from the input directory, supporting animated formats and alpha channel extraction as a mask output." 
+ SHORT_DESCRIPTION = "Loads an image file with optional alpha mask output." SEARCH_ALIASES = ["load image", "open image", "import image", "image input", "upload image", "read image", "image loader"] RETURN_TYPES = ("IMAGE", "MASK") @@ -1787,6 +1888,8 @@ class LoadImageMask: } CATEGORY = "mask" + DESCRIPTION = "Loads an image and extracts a specific color channel as a mask." + SHORT_DESCRIPTION = "Loads an image channel as a mask." RETURN_TYPES = ("MASK",) FUNCTION = "load_image" @@ -1845,6 +1948,7 @@ class LoadImageOutput(LoadImage): } DESCRIPTION = "Load an image from the output folder. When the refresh button is clicked, the node will update the image list and automatically select the first image, allowing for easy iteration." + SHORT_DESCRIPTION = "Loads images from the output folder with auto-refresh." EXPERIMENTAL = True FUNCTION = "load_image" @@ -1863,6 +1967,8 @@ class ImageScale: FUNCTION = "upscale" CATEGORY = "image/upscaling" + DESCRIPTION = "Scales an image to a target width and height using various interpolation methods, with optional center cropping." + SHORT_DESCRIPTION = "Scales images to a target width and height." SEARCH_ALIASES = ["resize", "resize image", "scale image", "image resize", "zoom", "zoom in", "change size"] def upscale(self, image, upscale_method, width, height, crop): @@ -1891,6 +1997,8 @@ class ImageScaleBy: FUNCTION = "upscale" CATEGORY = "image/upscaling" + DESCRIPTION = "Scales an image by a relative factor using various interpolation methods." + SHORT_DESCRIPTION = None def upscale(self, image, upscale_method, scale_by): samples = image.movedim(-1,1) @@ -1911,6 +2019,8 @@ class ImageInvert: FUNCTION = "invert" CATEGORY = "image" + DESCRIPTION = "Inverts image colors by subtracting each pixel value from 1.0." 
+ SHORT_DESCRIPTION = None def invert(self, image): s = 1.0 - image @@ -1927,6 +2037,8 @@ class ImageBatch: FUNCTION = "batch" CATEGORY = "image" + DESCRIPTION = "Batches two images together into a single batch, automatically resizing if dimensions differ. Deprecated in favor of the general-purpose Batch node." + SHORT_DESCRIPTION = "Batches two images together (deprecated)." DEPRECATED = True def batch(self, image1, image2): @@ -1955,6 +2067,8 @@ class EmptyImage: FUNCTION = "generate" CATEGORY = "image" + DESCRIPTION = "Creates a batch of blank images filled with a specified color at the given dimensions." + SHORT_DESCRIPTION = "Creates blank images filled with a solid color." def generate(self, width, height, batch_size=1, color=0): r = torch.full([batch_size, height, width, 1], ((color >> 16) & 0xFF) / 0xFF) @@ -1982,6 +2096,8 @@ class ImagePadForOutpaint: FUNCTION = "expand_image" CATEGORY = "image" + DESCRIPTION = "Pads an image on all sides for outpainting, generating a feathered mask that blends the original image edges into the new padded area." + SHORT_DESCRIPTION = "Pads images with feathered mask for outpainting." def expand_image(self, image, left, top, right, bottom, feathering): d1, d2, d3, d4 = image.size() diff --git a/server.py b/server.py index 8882e43c4..400800f4d 100644 --- a/server.py +++ b/server.py @@ -665,6 +665,8 @@ class PromptServer(): info['name'] = node_class info['display_name'] = nodes.NODE_DISPLAY_NAME_MAPPINGS[node_class] if node_class in nodes.NODE_DISPLAY_NAME_MAPPINGS.keys() else node_class info['description'] = obj_class.DESCRIPTION if hasattr(obj_class,'DESCRIPTION') else '' + if hasattr(obj_class, 'SHORT_DESCRIPTION') and obj_class.SHORT_DESCRIPTION: + info['short_description'] = obj_class.SHORT_DESCRIPTION info['python_module'] = getattr(obj_class, "RELATIVE_PYTHON_MODULE", "nodes") info['category'] = 'sd' if hasattr(obj_class, 'OUTPUT_NODE') and obj_class.OUTPUT_NODE == True: