Files
ComfyUI_frontend/src/locales/en/nodeDefs.json
Comfy Org PR Bot eb85f99b10 1.30.1 (#6124)
Patch version increment to 1.30.1

┆Issue is synchronized with this [Notion
page](https://www.notion.so/PR-6124-1-30-1-2906d73d36508176a9a0dca7ecfbb40c)
by [Unito](https://www.unito.io)

---------

Co-authored-by: christian-byrne <72887196+christian-byrne@users.noreply.github.com>
Co-authored-by: github-actions <github-actions@github.com>
2025-10-18 04:24:04 -07:00

13494 lines
303 KiB
JSON
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

{
"AddNoise": {
"display_name": "AddNoise",
"inputs": {
"model": {
"name": "model"
},
"noise": {
"name": "noise"
},
"sigmas": {
"name": "sigmas"
},
"latent_image": {
"name": "latent_image"
}
}
},
"AlignYourStepsScheduler": {
"display_name": "AlignYourStepsScheduler",
"inputs": {
"model_type": {
"name": "model_type"
},
"steps": {
"name": "steps"
},
"denoise": {
"name": "denoise"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"APG": {
"display_name": "Adaptive Projected Guidance",
"inputs": {
"model": {
"name": "model"
},
"eta": {
"name": "eta",
"tooltip": "Controls the scale of the parallel guidance vector. Default CFG behavior at a setting of 1."
},
"norm_threshold": {
"name": "norm_threshold",
"tooltip": "Normalize guidance vector to this value, normalization disabled at a setting of 0."
},
"momentum": {
"name": "momentum",
"tooltip": "Controls a running average of guidance during diffusion, disabled at a setting of 0."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"AudioAdjustVolume": {
"display_name": "Audio Adjust Volume",
"inputs": {
"audio": {
"name": "audio"
},
"volume": {
"name": "volume",
"tooltip": "Volume adjustment in decibels (dB). 0 = no change, +6 = double, -6 = half, etc."
}
}
},
"AudioConcat": {
"display_name": "Audio Concat",
"description": "Concatenates the audio1 to audio2 in the specified direction.",
"inputs": {
"audio1": {
"name": "audio1"
},
"audio2": {
"name": "audio2"
},
"direction": {
"name": "direction",
"tooltip": "Whether to append audio2 after or before audio1."
}
}
},
"AudioEncoderEncode": {
"display_name": "AudioEncoderEncode",
"inputs": {
"audio_encoder": {
"name": "audio_encoder"
},
"audio": {
"name": "audio"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"AudioEncoderLoader": {
"display_name": "AudioEncoderLoader",
"inputs": {
"audio_encoder_name": {
"name": "audio_encoder_name"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"AudioMerge": {
"display_name": "Audio Merge",
"description": "Combine two audio tracks by overlaying their waveforms.",
"inputs": {
"audio1": {
"name": "audio1"
},
"audio2": {
"name": "audio2"
},
"merge_method": {
"name": "merge_method",
"tooltip": "The method used to combine the audio waveforms."
}
}
},
"BasicGuider": {
"display_name": "BasicGuider",
"inputs": {
"model": {
"name": "model"
},
"conditioning": {
"name": "conditioning"
}
}
},
"BasicScheduler": {
"display_name": "BasicScheduler",
"inputs": {
"model": {
"name": "model"
},
"scheduler": {
"name": "scheduler"
},
"steps": {
"name": "steps"
},
"denoise": {
"name": "denoise"
}
}
},
"BetaSamplingScheduler": {
"display_name": "BetaSamplingScheduler",
"inputs": {
"model": {
"name": "model"
},
"steps": {
"name": "steps"
},
"alpha": {
"name": "alpha"
},
"beta": {
"name": "beta"
}
}
},
"ByteDanceFirstLastFrameNode": {
"display_name": "ByteDance First-Last-Frame to Video",
"description": "Generate video using prompt and first and last frames.",
"inputs": {
"model": {
"name": "model",
"tooltip": "Model name"
},
"prompt": {
"name": "prompt",
"tooltip": "The text prompt used to generate the video."
},
"first_frame": {
"name": "first_frame",
"tooltip": "First frame to be used for the video."
},
"last_frame": {
"name": "last_frame",
"tooltip": "Last frame to be used for the video."
},
"resolution": {
"name": "resolution",
"tooltip": "The resolution of the output video."
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "The aspect ratio of the output video."
},
"duration": {
"name": "duration",
"tooltip": "The duration of the output video in seconds."
},
"seed": {
"name": "seed",
"tooltip": "Seed to use for generation."
},
"camera_fixed": {
"name": "camera_fixed",
"tooltip": "Specifies whether to fix the camera. The platform appends an instruction to fix the camera to your prompt, but does not guarantee the actual effect."
},
"watermark": {
"name": "watermark",
"tooltip": "Whether to add an \"AI generated\" watermark to the video."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"ByteDanceImageEditNode": {
"display_name": "ByteDance Image Edit",
"description": "Edit images using ByteDance models via api based on prompt",
"inputs": {
"model": {
"name": "model",
"tooltip": "Model name"
},
"image": {
"name": "image",
"tooltip": "The base image to edit"
},
"prompt": {
"name": "prompt",
"tooltip": "Instruction to edit image"
},
"seed": {
"name": "seed",
"tooltip": "Seed to use for generation"
},
"guidance_scale": {
"name": "guidance_scale",
"tooltip": "Higher value makes the image follow the prompt more closely"
},
"watermark": {
"name": "watermark",
"tooltip": "Whether to add an \"AI generated\" watermark to the image"
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"ByteDanceImageNode": {
"display_name": "ByteDance Image",
"description": "Generate images using ByteDance models via api based on prompt",
"inputs": {
"model": {
"name": "model",
"tooltip": "Model name"
},
"prompt": {
"name": "prompt",
"tooltip": "The text prompt used to generate the image"
},
"size_preset": {
"name": "size_preset",
"tooltip": "Pick a recommended size. Select Custom to use the width and height below"
},
"width": {
"name": "width",
"tooltip": "Custom width for image. Value is used only if `size_preset` is set to `Custom`"
},
"height": {
"name": "height",
"tooltip": "Custom height for image. Value is used only if `size_preset` is set to `Custom`"
},
"seed": {
"name": "seed",
"tooltip": "Seed to use for generation"
},
"guidance_scale": {
"name": "guidance_scale",
"tooltip": "Higher value makes the image follow the prompt more closely"
},
"watermark": {
"name": "watermark",
"tooltip": "Whether to add an \"AI generated\" watermark to the image"
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"ByteDanceImageReferenceNode": {
"display_name": "ByteDance Reference Images to Video",
"description": "Generate video using prompt and reference images.",
"inputs": {
"model": {
"name": "model",
"tooltip": "Model name"
},
"prompt": {
"name": "prompt",
"tooltip": "The text prompt used to generate the video."
},
"images": {
"name": "images",
"tooltip": "One to four images."
},
"resolution": {
"name": "resolution",
"tooltip": "The resolution of the output video."
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "The aspect ratio of the output video."
},
"duration": {
"name": "duration",
"tooltip": "The duration of the output video in seconds."
},
"seed": {
"name": "seed",
"tooltip": "Seed to use for generation."
},
"watermark": {
"name": "watermark",
"tooltip": "Whether to add an \"AI generated\" watermark to the video."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"ByteDanceImageToVideoNode": {
"display_name": "ByteDance Image to Video",
"description": "Generate video using ByteDance models via api based on image and prompt",
"inputs": {
"model": {
"name": "model",
"tooltip": "Model name"
},
"prompt": {
"name": "prompt",
"tooltip": "The text prompt used to generate the video."
},
"image": {
"name": "image",
"tooltip": "First frame to be used for the video."
},
"resolution": {
"name": "resolution",
"tooltip": "The resolution of the output video."
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "The aspect ratio of the output video."
},
"duration": {
"name": "duration",
"tooltip": "The duration of the output video in seconds."
},
"seed": {
"name": "seed",
"tooltip": "Seed to use for generation."
},
"camera_fixed": {
"name": "camera_fixed",
"tooltip": "Specifies whether to fix the camera. The platform appends an instruction to fix the camera to your prompt, but does not guarantee the actual effect."
},
"watermark": {
"name": "watermark",
"tooltip": "Whether to add an \"AI generated\" watermark to the video."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"ByteDanceSeedreamNode": {
"display_name": "ByteDance Seedream 4",
"description": "Unified text-to-image generation and precise single-sentence editing at up to 4K resolution.",
"inputs": {
"model": {
"name": "model",
"tooltip": "Model name"
},
"prompt": {
"name": "prompt",
"tooltip": "Text prompt for creating or editing an image."
},
"size_preset": {
"name": "size_preset",
"tooltip": "Pick a recommended size. Select Custom to use the width and height below."
},
"image": {
"name": "image",
"tooltip": "Input image(s) for image-to-image generation. List of 1-10 images for single or multi-reference generation."
},
"width": {
"name": "width",
"tooltip": "Custom width for image. Value is used only if `size_preset` is set to `Custom`"
},
"height": {
"name": "height",
"tooltip": "Custom height for image. Value is used only if `size_preset` is set to `Custom`"
},
"sequential_image_generation": {
"name": "sequential_image_generation",
"tooltip": "Group image generation mode. 'disabled' generates a single image. 'auto' lets the model decide whether to generate multiple related images (e.g., story scenes, character variations)."
},
"max_images": {
"name": "max_images",
"tooltip": "Maximum number of images to generate when sequential_image_generation='auto'. Total images (input + generated) cannot exceed 15."
},
"seed": {
"name": "seed",
"tooltip": "Seed to use for generation."
},
"watermark": {
"name": "watermark",
"tooltip": "Whether to add an \"AI generated\" watermark to the image."
},
"fail_on_partial": {
"name": "fail_on_partial",
"tooltip": "If enabled, abort execution if any requested images are missing or return an error."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"ByteDanceTextToVideoNode": {
"display_name": "ByteDance Text to Video",
"description": "Generate video using ByteDance models via api based on prompt",
"inputs": {
"model": {
"name": "model",
"tooltip": "Model name"
},
"prompt": {
"name": "prompt",
"tooltip": "The text prompt used to generate the video."
},
"resolution": {
"name": "resolution",
"tooltip": "The resolution of the output video."
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "The aspect ratio of the output video."
},
"duration": {
"name": "duration",
"tooltip": "The duration of the output video in seconds."
},
"seed": {
"name": "seed",
"tooltip": "Seed to use for generation."
},
"camera_fixed": {
"name": "camera_fixed",
"tooltip": "Specifies whether to fix the camera. The platform appends an instruction to fix the camera to your prompt, but does not guarantee the actual effect."
},
"watermark": {
"name": "watermark",
"tooltip": "Whether to add an \"AI generated\" watermark to the video."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Canny": {
"display_name": "Canny",
"inputs": {
"image": {
"name": "image"
},
"low_threshold": {
"name": "low_threshold"
},
"high_threshold": {
"name": "high_threshold"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"CaseConverter": {
"display_name": "Case Converter",
"inputs": {
"string": {
"name": "string"
},
"mode": {
"name": "mode"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"CFGGuider": {
"display_name": "CFGGuider",
"inputs": {
"model": {
"name": "model"
},
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"cfg": {
"name": "cfg"
}
}
},
"CFGNorm": {
"display_name": "CFGNorm",
"inputs": {
"model": {
"name": "model"
},
"strength": {
"name": "strength"
}
},
"outputs": {
"0": {
"name": "patched_model",
"tooltip": null
}
}
},
"CFGZeroStar": {
"display_name": "CFGZeroStar",
"inputs": {
"model": {
"name": "model"
}
},
"outputs": {
"0": {
"name": "patched_model",
"tooltip": null
}
}
},
"CheckpointLoader": {
"display_name": "Load Checkpoint With Config (DEPRECATED)",
"inputs": {
"config_name": {
"name": "config_name"
},
"ckpt_name": {
"name": "ckpt_name"
}
}
},
"CheckpointLoaderSimple": {
"display_name": "Load Checkpoint",
"description": "Loads a diffusion model checkpoint, diffusion models are used to denoise latents.",
"inputs": {
"ckpt_name": {
"name": "ckpt_name",
"tooltip": "The name of the checkpoint (model) to load."
}
},
"outputs": {
"0": {
"tooltip": "The model used for denoising latents."
},
"1": {
"tooltip": "The CLIP model used for encoding text prompts."
},
"2": {
"tooltip": "The VAE model used for encoding and decoding images to and from latent space."
}
}
},
"CheckpointSave": {
"display_name": "Save Checkpoint",
"inputs": {
"model": {
"name": "model"
},
"clip": {
"name": "clip"
},
"vae": {
"name": "vae"
},
"filename_prefix": {
"name": "filename_prefix"
}
}
},
"ChromaRadianceOptions": {
"display_name": "ChromaRadianceOptions",
"description": "Allows setting advanced options for the Chroma Radiance model.",
"inputs": {
"model": {
"name": "model"
},
"preserve_wrapper": {
"name": "preserve_wrapper",
"tooltip": "When enabled, will delegate to an existing model function wrapper if it exists. Generally should be left enabled."
},
"start_sigma": {
"name": "start_sigma",
"tooltip": "First sigma that these options will be in effect."
},
"end_sigma": {
"name": "end_sigma",
"tooltip": "Last sigma that these options will be in effect."
},
"nerf_tile_size": {
"name": "nerf_tile_size",
"tooltip": "Allows overriding the default NeRF tile size. -1 means use the default (32). 0 means use non-tiling mode (may require a lot of VRAM)."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"CLIPAttentionMultiply": {
"display_name": "CLIPAttentionMultiply",
"inputs": {
"clip": {
"name": "clip"
},
"q": {
"name": "q"
},
"k": {
"name": "k"
},
"v": {
"name": "v"
},
"out": {
"name": "out"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"CLIPLoader": {
"display_name": "Load CLIP",
"description": "[Recipes]\n\nstable_diffusion: clip-l\nstable_cascade: clip-g\nsd3: t5 xxl/ clip-g / clip-l\nstable_audio: t5 base\nmochi: t5 xxl\ncosmos: old t5 xxl\nlumina2: gemma 2 2B\nwan: umt5 xxl\nhidream: llama-3.1 (Recommend) or t5\nomnigen2: qwen vl 2.5 3B",
"inputs": {
"clip_name": {
"name": "clip_name"
},
"type": {
"name": "type"
},
"device": {
"name": "device"
}
}
},
"CLIPMergeAdd": {
"display_name": "CLIPMergeAdd",
"inputs": {
"clip1": {
"name": "clip1"
},
"clip2": {
"name": "clip2"
}
}
},
"CLIPMergeSimple": {
"display_name": "CLIPMergeSimple",
"inputs": {
"clip1": {
"name": "clip1"
},
"clip2": {
"name": "clip2"
},
"ratio": {
"name": "ratio"
}
}
},
"CLIPMergeSubtract": {
"display_name": "CLIPMergeSubtract",
"inputs": {
"clip1": {
"name": "clip1"
},
"clip2": {
"name": "clip2"
},
"multiplier": {
"name": "multiplier"
}
}
},
"CLIPSave": {
"display_name": "CLIPSave",
"inputs": {
"clip": {
"name": "clip"
},
"filename_prefix": {
"name": "filename_prefix"
}
}
},
"CLIPSetLastLayer": {
"display_name": "CLIP Set Last Layer",
"inputs": {
"clip": {
"name": "clip"
},
"stop_at_clip_layer": {
"name": "stop_at_clip_layer"
}
}
},
"CLIPTextEncode": {
"display_name": "CLIP Text Encode (Prompt)",
"description": "Encodes a text prompt using a CLIP model into an embedding that can be used to guide the diffusion model towards generating specific images.",
"inputs": {
"text": {
"name": "text",
"tooltip": "The text to be encoded."
},
"clip": {
"name": "clip",
"tooltip": "The CLIP model used for encoding the text."
}
},
"outputs": {
"0": {
"tooltip": "A conditioning containing the embedded text used to guide the diffusion model."
}
}
},
"CLIPTextEncodeControlnet": {
"display_name": "CLIPTextEncodeControlnet",
"inputs": {
"clip": {
"name": "clip"
},
"conditioning": {
"name": "conditioning"
},
"text": {
"name": "text"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"CLIPTextEncodeFlux": {
"display_name": "CLIPTextEncodeFlux",
"inputs": {
"clip": {
"name": "clip"
},
"clip_l": {
"name": "clip_l"
},
"t5xxl": {
"name": "t5xxl"
},
"guidance": {
"name": "guidance"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"CLIPTextEncodeHiDream": {
"display_name": "CLIPTextEncodeHiDream",
"inputs": {
"clip": {
"name": "clip"
},
"clip_l": {
"name": "clip_l"
},
"clip_g": {
"name": "clip_g"
},
"t5xxl": {
"name": "t5xxl"
},
"llama": {
"name": "llama"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"CLIPTextEncodeHunyuanDiT": {
"display_name": "CLIPTextEncodeHunyuanDiT",
"inputs": {
"clip": {
"name": "clip"
},
"bert": {
"name": "bert"
},
"mt5xl": {
"name": "mt5xl"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"CLIPTextEncodeLumina2": {
"display_name": "CLIP Text Encode for Lumina2",
"description": "Encodes a system prompt and a user prompt using a CLIP model into an embedding that can be used to guide the diffusion model towards generating specific images.",
"inputs": {
"system_prompt": {
"name": "system_prompt",
"tooltip": "Lumina2 provides two types of system prompts: Superior: You are an assistant designed to generate superior images with the superior degree of image-text alignment based on textual prompts or user prompts. Alignment: You are an assistant designed to generate high-quality images with the highest degree of image-text alignment based on textual prompts."
},
"user_prompt": {
"name": "user_prompt",
"tooltip": "The text to be encoded."
},
"clip": {
"name": "clip",
"tooltip": "The CLIP model used for encoding the text."
}
},
"outputs": {
"0": {
"tooltip": "A conditioning containing the embedded text used to guide the diffusion model."
}
}
},
"CLIPTextEncodePixArtAlpha": {
"display_name": "CLIPTextEncodePixArtAlpha",
"description": "Encodes text and sets the resolution conditioning for PixArt Alpha. Does not apply to PixArt Sigma.",
"inputs": {
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"text": {
"name": "text"
},
"clip": {
"name": "clip"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"CLIPTextEncodeSD3": {
"display_name": "CLIPTextEncodeSD3",
"inputs": {
"clip": {
"name": "clip"
},
"clip_l": {
"name": "clip_l"
},
"clip_g": {
"name": "clip_g"
},
"t5xxl": {
"name": "t5xxl"
},
"empty_padding": {
"name": "empty_padding"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"CLIPTextEncodeSDXL": {
"display_name": "CLIPTextEncodeSDXL",
"inputs": {
"clip": {
"name": "clip"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"crop_w": {
"name": "crop_w"
},
"crop_h": {
"name": "crop_h"
},
"target_width": {
"name": "target_width"
},
"target_height": {
"name": "target_height"
},
"text_g": {
"name": "text_g"
},
"text_l": {
"name": "text_l"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"CLIPTextEncodeSDXLRefiner": {
"display_name": "CLIPTextEncodeSDXLRefiner",
"inputs": {
"ascore": {
"name": "ascore"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"text": {
"name": "text"
},
"clip": {
"name": "clip"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"CLIPVisionEncode": {
"display_name": "CLIP Vision Encode",
"inputs": {
"clip_vision": {
"name": "clip_vision"
},
"image": {
"name": "image"
},
"crop": {
"name": "crop"
}
}
},
"CLIPVisionLoader": {
"display_name": "Load CLIP Vision",
"inputs": {
"clip_name": {
"name": "clip_name"
}
}
},
"CombineHooks2": {
"display_name": "Combine Hooks [2]",
"inputs": {
"hooks_A": {
"name": "hooks_A"
},
"hooks_B": {
"name": "hooks_B"
}
}
},
"CombineHooks4": {
"display_name": "Combine Hooks [4]",
"inputs": {
"hooks_A": {
"name": "hooks_A"
},
"hooks_B": {
"name": "hooks_B"
},
"hooks_C": {
"name": "hooks_C"
},
"hooks_D": {
"name": "hooks_D"
}
}
},
"CombineHooks8": {
"display_name": "Combine Hooks [8]",
"inputs": {
"hooks_A": {
"name": "hooks_A"
},
"hooks_B": {
"name": "hooks_B"
},
"hooks_C": {
"name": "hooks_C"
},
"hooks_D": {
"name": "hooks_D"
},
"hooks_E": {
"name": "hooks_E"
},
"hooks_F": {
"name": "hooks_F"
},
"hooks_G": {
"name": "hooks_G"
},
"hooks_H": {
"name": "hooks_H"
}
}
},
"ConditioningAverage": {
"display_name": "ConditioningAverage",
"inputs": {
"conditioning_to": {
"name": "conditioning_to"
},
"conditioning_from": {
"name": "conditioning_from"
},
"conditioning_to_strength": {
"name": "conditioning_to_strength"
}
}
},
"ConditioningCombine": {
"display_name": "Conditioning (Combine)",
"inputs": {
"conditioning_1": {
"name": "conditioning_1"
},
"conditioning_2": {
"name": "conditioning_2"
}
}
},
"ConditioningConcat": {
"display_name": "Conditioning (Concat)",
"inputs": {
"conditioning_to": {
"name": "conditioning_to"
},
"conditioning_from": {
"name": "conditioning_from"
}
}
},
"ConditioningSetArea": {
"display_name": "Conditioning (Set Area)",
"inputs": {
"conditioning": {
"name": "conditioning"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"x": {
"name": "x"
},
"y": {
"name": "y"
},
"strength": {
"name": "strength"
}
}
},
"ConditioningSetAreaPercentage": {
"display_name": "Conditioning (Set Area with Percentage)",
"inputs": {
"conditioning": {
"name": "conditioning"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"x": {
"name": "x"
},
"y": {
"name": "y"
},
"strength": {
"name": "strength"
}
}
},
"ConditioningSetAreaPercentageVideo": {
"display_name": "ConditioningSetAreaPercentageVideo",
"inputs": {
"conditioning": {
"name": "conditioning"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"temporal": {
"name": "temporal"
},
"x": {
"name": "x"
},
"y": {
"name": "y"
},
"z": {
"name": "z"
},
"strength": {
"name": "strength"
}
}
},
"ConditioningSetAreaStrength": {
"display_name": "ConditioningSetAreaStrength",
"inputs": {
"conditioning": {
"name": "conditioning"
},
"strength": {
"name": "strength"
}
}
},
"ConditioningSetDefaultCombine": {
"display_name": "Cond Set Default Combine",
"inputs": {
"cond": {
"name": "cond"
},
"cond_DEFAULT": {
"name": "cond_DEFAULT"
},
"hooks": {
"name": "hooks"
}
}
},
"ConditioningSetMask": {
"display_name": "Conditioning (Set Mask)",
"inputs": {
"conditioning": {
"name": "conditioning"
},
"mask": {
"name": "mask"
},
"strength": {
"name": "strength"
},
"set_cond_area": {
"name": "set_cond_area"
}
}
},
"ConditioningSetProperties": {
"display_name": "Cond Set Props",
"inputs": {
"cond_NEW": {
"name": "cond_NEW"
},
"strength": {
"name": "strength"
},
"set_cond_area": {
"name": "set_cond_area"
},
"mask": {
"name": "mask"
},
"hooks": {
"name": "hooks"
},
"timesteps": {
"name": "timesteps"
}
}
},
"ConditioningSetPropertiesAndCombine": {
"display_name": "Cond Set Props Combine",
"inputs": {
"cond": {
"name": "cond"
},
"cond_NEW": {
"name": "cond_NEW"
},
"strength": {
"name": "strength"
},
"set_cond_area": {
"name": "set_cond_area"
},
"mask": {
"name": "mask"
},
"hooks": {
"name": "hooks"
},
"timesteps": {
"name": "timesteps"
}
}
},
"ConditioningSetTimestepRange": {
"display_name": "ConditioningSetTimestepRange",
"inputs": {
"conditioning": {
"name": "conditioning"
},
"start": {
"name": "start"
},
"end": {
"name": "end"
}
}
},
"ConditioningStableAudio": {
"display_name": "ConditioningStableAudio",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"seconds_start": {
"name": "seconds_start"
},
"seconds_total": {
"name": "seconds_total"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
}
}
},
"ConditioningTimestepsRange": {
"display_name": "Timesteps Range",
"inputs": {
"start_percent": {
"name": "start_percent"
},
"end_percent": {
"name": "end_percent"
}
},
"outputs": {
"1": {
"name": "BEFORE_RANGE"
},
"2": {
"name": "AFTER_RANGE"
}
}
},
"ConditioningZeroOut": {
"display_name": "ConditioningZeroOut",
"inputs": {
"conditioning": {
"name": "conditioning"
}
}
},
"ContextWindowsManual": {
"display_name": "Context Windows (Manual)",
"description": "Manually set context windows.",
"inputs": {
"model": {
"name": "model",
"tooltip": "The model to apply context windows to during sampling."
},
"context_length": {
"name": "context_length",
"tooltip": "The length of the context window."
},
"context_overlap": {
"name": "context_overlap",
"tooltip": "The overlap of the context window."
},
"context_schedule": {
"name": "context_schedule",
"tooltip": "The schedule of the context window."
},
"context_stride": {
"name": "context_stride",
"tooltip": "The stride of the context window; only applicable to uniform schedules."
},
"closed_loop": {
"name": "closed_loop",
"tooltip": "Whether to close the context window loop; only applicable to looped schedules."
},
"fuse_method": {
"name": "fuse_method",
"tooltip": "The method to use to fuse the context windows."
},
"dim": {
"name": "dim",
"tooltip": "The dimension to apply the context windows to."
}
},
"outputs": {
"0": {
"tooltip": "The model with context windows applied during sampling."
}
}
},
"ControlNetApply": {
"display_name": "Apply ControlNet (OLD)",
"inputs": {
"conditioning": {
"name": "conditioning"
},
"control_net": {
"name": "control_net"
},
"image": {
"name": "image"
},
"strength": {
"name": "strength"
}
}
},
"ControlNetApplyAdvanced": {
"display_name": "Apply ControlNet",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"control_net": {
"name": "control_net"
},
"image": {
"name": "image"
},
"strength": {
"name": "strength"
},
"start_percent": {
"name": "start_percent"
},
"end_percent": {
"name": "end_percent"
},
"vae": {
"name": "vae"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
}
}
},
"ControlNetApplySD3": {
"display_name": "Apply Controlnet with VAE",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"control_net": {
"name": "control_net"
},
"vae": {
"name": "vae"
},
"image": {
"name": "image"
},
"strength": {
"name": "strength"
},
"start_percent": {
"name": "start_percent"
},
"end_percent": {
"name": "end_percent"
}
},
"outputs": {
"0": {
"name": "positive",
"tooltip": null
},
"1": {
"name": "negative",
"tooltip": null
}
}
},
"ControlNetInpaintingAliMamaApply": {
"display_name": "ControlNetInpaintingAliMamaApply",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"control_net": {
"name": "control_net"
},
"vae": {
"name": "vae"
},
"image": {
"name": "image"
},
"mask": {
"name": "mask"
},
"strength": {
"name": "strength"
},
"start_percent": {
"name": "start_percent"
},
"end_percent": {
"name": "end_percent"
}
},
"outputs": {
"0": {
"name": "positive",
"tooltip": null
},
"1": {
"name": "negative",
"tooltip": null
}
}
},
"ControlNetLoader": {
"display_name": "Load ControlNet Model",
"inputs": {
"control_net_name": {
"name": "control_net_name"
}
}
},
"CosmosImageToVideoLatent": {
"display_name": "CosmosImageToVideoLatent",
"inputs": {
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
},
"start_image": {
"name": "start_image"
},
"end_image": {
"name": "end_image"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"CosmosPredict2ImageToVideoLatent": {
"display_name": "CosmosPredict2ImageToVideoLatent",
"inputs": {
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
},
"start_image": {
"name": "start_image"
},
"end_image": {
"name": "end_image"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"CreateHookKeyframe": {
"display_name": "Create Hook Keyframe",
"inputs": {
"strength_mult": {
"name": "strength_mult"
},
"start_percent": {
"name": "start_percent"
},
"prev_hook_kf": {
"name": "prev_hook_kf"
}
},
"outputs": {
"0": {
"name": "HOOK_KF"
}
}
},
"CreateHookKeyframesFromFloats": {
"display_name": "Create Hook Keyframes From Floats",
"inputs": {
"floats_strength": {
"name": "floats_strength"
},
"start_percent": {
"name": "start_percent"
},
"end_percent": {
"name": "end_percent"
},
"print_keyframes": {
"name": "print_keyframes"
},
"prev_hook_kf": {
"name": "prev_hook_kf"
}
},
"outputs": {
"0": {
"name": "HOOK_KF"
}
}
},
"CreateHookKeyframesInterpolated": {
"display_name": "Create Hook Keyframes Interp.",
"inputs": {
"strength_start": {
"name": "strength_start"
},
"strength_end": {
"name": "strength_end"
},
"interpolation": {
"name": "interpolation"
},
"start_percent": {
"name": "start_percent"
},
"end_percent": {
"name": "end_percent"
},
"keyframes_count": {
"name": "keyframes_count"
},
"print_keyframes": {
"name": "print_keyframes"
},
"prev_hook_kf": {
"name": "prev_hook_kf"
}
},
"outputs": {
"0": {
"name": "HOOK_KF"
}
}
},
"CreateHookLora": {
"display_name": "Create Hook LoRA",
"inputs": {
"lora_name": {
"name": "lora_name"
},
"strength_model": {
"name": "strength_model"
},
"strength_clip": {
"name": "strength_clip"
},
"prev_hooks": {
"name": "prev_hooks"
}
}
},
"CreateHookLoraModelOnly": {
"display_name": "Create Hook LoRA (MO)",
"inputs": {
"lora_name": {
"name": "lora_name"
},
"strength_model": {
"name": "strength_model"
},
"prev_hooks": {
"name": "prev_hooks"
}
}
},
"CreateHookModelAsLora": {
"display_name": "Create Hook Model as LoRA",
"inputs": {
"ckpt_name": {
"name": "ckpt_name"
},
"strength_model": {
"name": "strength_model"
},
"strength_clip": {
"name": "strength_clip"
},
"prev_hooks": {
"name": "prev_hooks"
}
}
},
"CreateHookModelAsLoraModelOnly": {
"display_name": "Create Hook Model as LoRA (MO)",
"inputs": {
"ckpt_name": {
"name": "ckpt_name"
},
"strength_model": {
"name": "strength_model"
},
"prev_hooks": {
"name": "prev_hooks"
}
}
},
"CreateVideo": {
"display_name": "Create Video",
"description": "Create a video from images.",
"inputs": {
"images": {
"name": "images",
"tooltip": "The images to create a video from."
},
"fps": {
"name": "fps"
},
"audio": {
"name": "audio",
"tooltip": "The audio to add to the video."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"CropMask": {
"display_name": "CropMask",
"inputs": {
"mask": {
"name": "mask"
},
"x": {
"name": "x"
},
"y": {
"name": "y"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
}
}
},
"DiffControlNetLoader": {
"display_name": "Load ControlNet Model (diff)",
"inputs": {
"model": {
"name": "model"
},
"control_net_name": {
"name": "control_net_name"
}
}
},
"DifferentialDiffusion": {
"display_name": "Differential Diffusion",
"inputs": {
"model": {
"name": "model"
},
"strength": {
"name": "strength"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"DiffusersLoader": {
"display_name": "DiffusersLoader",
"inputs": {
"model_path": {
"name": "model_path"
}
}
},
"DisableNoise": {
"display_name": "DisableNoise"
},
"DualCFGGuider": {
"display_name": "DualCFGGuider",
"inputs": {
"model": {
"name": "model"
},
"cond1": {
"name": "cond1"
},
"cond2": {
"name": "cond2"
},
"negative": {
"name": "negative"
},
"cfg_conds": {
"name": "cfg_conds"
},
"cfg_cond2_negative": {
"name": "cfg_cond2_negative"
},
"style": {
"name": "style"
}
}
},
"DualCLIPLoader": {
"display_name": "DualCLIPLoader",
"description": "[Recipes]\n\nsdxl: clip-l, clip-g\nsd3: clip-l, clip-g / clip-l, t5 / clip-g, t5\nflux: clip-l, t5\nhidream: at least one of t5 or llama, recommended t5 and llama\nhunyuan_image: qwen2.5vl 7b and byt5 small",
"inputs": {
"clip_name1": {
"name": "clip_name1"
},
"clip_name2": {
"name": "clip_name2"
},
"type": {
"name": "type"
},
"device": {
"name": "device"
}
}
},
"EasyCache": {
"display_name": "EasyCache",
"description": "Native EasyCache implementation.",
"inputs": {
"model": {
"name": "model",
"tooltip": "The model to add EasyCache to."
},
"reuse_threshold": {
"name": "reuse_threshold",
"tooltip": "The threshold for reusing cached steps."
},
"start_percent": {
"name": "start_percent",
"tooltip": "The relative sampling step to begin use of EasyCache."
},
"end_percent": {
"name": "end_percent",
"tooltip": "The relative sampling step to end use of EasyCache."
},
"verbose": {
"name": "verbose",
"tooltip": "Whether to log verbose information."
}
},
"outputs": {
"0": {
"tooltip": "The model with EasyCache."
}
}
},
"EmptyAceStepLatentAudio": {
"display_name": "EmptyAceStepLatentAudio",
"inputs": {
"seconds": {
"name": "seconds"
},
"batch_size": {
"name": "batch_size",
"tooltip": "The number of latent images in the batch."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"EmptyAudio": {
"display_name": "Empty Audio",
"inputs": {
"duration": {
"name": "duration",
"tooltip": "Duration of the empty audio clip in seconds"
},
"sample_rate": {
"name": "sample_rate",
"tooltip": "Sample rate of the empty audio clip."
},
"channels": {
"name": "channels",
"tooltip": "Number of audio channels (1 for mono, 2 for stereo)."
}
}
},
"EmptyChromaRadianceLatentImage": {
"display_name": "EmptyChromaRadianceLatentImage",
"inputs": {
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"batch_size": {
"name": "batch_size"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"EmptyCosmosLatentVideo": {
"display_name": "EmptyCosmosLatentVideo",
"inputs": {
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"EmptyHunyuanImageLatent": {
"display_name": "EmptyHunyuanImageLatent",
"inputs": {
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"batch_size": {
"name": "batch_size"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"EmptyHunyuanLatentVideo": {
"display_name": "EmptyHunyuanLatentVideo",
"inputs": {
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"EmptyImage": {
"display_name": "EmptyImage",
"inputs": {
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"batch_size": {
"name": "batch_size"
},
"color": {
"name": "color"
}
}
},
"EmptyLatentAudio": {
"display_name": "Empty Latent Audio",
"inputs": {
"seconds": {
"name": "seconds"
},
"batch_size": {
"name": "batch_size",
"tooltip": "The number of latent images in the batch."
}
}
},
"EmptyLatentHunyuan3Dv2": {
"display_name": "EmptyLatentHunyuan3Dv2",
"inputs": {
"resolution": {
"name": "resolution"
},
"batch_size": {
"name": "batch_size",
"tooltip": "The number of latent images in the batch."
}
}
},
"EmptyLatentImage": {
"display_name": "Empty Latent Image",
"description": "Create a new batch of empty latent images to be denoised via sampling.",
"inputs": {
"width": {
"name": "width",
"tooltip": "The width of the latent images in pixels."
},
"height": {
"name": "height",
"tooltip": "The height of the latent images in pixels."
},
"batch_size": {
"name": "batch_size",
"tooltip": "The number of latent images in the batch."
}
},
"outputs": {
"0": {
"tooltip": "The empty latent image batch."
}
}
},
"EmptyLTXVLatentVideo": {
"display_name": "EmptyLTXVLatentVideo",
"inputs": {
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"EmptyMochiLatentVideo": {
"display_name": "EmptyMochiLatentVideo",
"inputs": {
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"EmptySD3LatentImage": {
"display_name": "EmptySD3LatentImage",
"inputs": {
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"batch_size": {
"name": "batch_size"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Epsilon Scaling": {
"display_name": "Epsilon Scaling",
"inputs": {
"model": {
"name": "model"
},
"scaling_factor": {
"name": "scaling_factor"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"ExponentialScheduler": {
"display_name": "ExponentialScheduler",
"inputs": {
"steps": {
"name": "steps"
},
"sigma_max": {
"name": "sigma_max"
},
"sigma_min": {
"name": "sigma_min"
}
}
},
"ExtendIntermediateSigmas": {
"display_name": "ExtendIntermediateSigmas",
"inputs": {
"sigmas": {
"name": "sigmas"
},
"steps": {
"name": "steps"
},
"start_at_sigma": {
"name": "start_at_sigma"
},
"end_at_sigma": {
"name": "end_at_sigma"
},
"spacing": {
"name": "spacing"
}
}
},
"FeatherMask": {
"display_name": "FeatherMask",
"inputs": {
"mask": {
"name": "mask"
},
"left": {
"name": "left"
},
"top": {
"name": "top"
},
"right": {
"name": "right"
},
"bottom": {
"name": "bottom"
}
}
},
"FlipSigmas": {
"display_name": "FlipSigmas",
"inputs": {
"sigmas": {
"name": "sigmas"
}
}
},
"FluxDisableGuidance": {
"display_name": "FluxDisableGuidance",
"description": "This node completely disables the guidance embed on Flux and Flux like models",
"inputs": {
"conditioning": {
"name": "conditioning"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"FluxGuidance": {
"display_name": "FluxGuidance",
"inputs": {
"conditioning": {
"name": "conditioning"
},
"guidance": {
"name": "guidance"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"FluxKontextImageScale": {
"display_name": "FluxKontextImageScale",
"description": "This node resizes the image to one that is more optimal for flux kontext.",
"inputs": {
"image": {
"name": "image"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"FluxKontextMaxImageNode": {
"display_name": "Flux.1 Kontext [max] Image",
"description": "Edits images using Flux.1 Kontext [max] via api based on prompt and aspect ratio.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation - specify what and how to edit."
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "Aspect ratio of image; must be between 1:4 and 4:1."
},
"guidance": {
"name": "guidance",
"tooltip": "Guidance strength for the image generation process"
},
"steps": {
"name": "steps",
"tooltip": "Number of steps for the image generation process"
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"prompt_upsampling": {
"name": "prompt_upsampling",
"tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result)."
},
"input_image": {
"name": "input_image"
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"FluxKontextMultiReferenceLatentMethod": {
"display_name": "FluxKontextMultiReferenceLatentMethod",
"inputs": {
"conditioning": {
"name": "conditioning"
},
"reference_latents_method": {
"name": "reference_latents_method"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"FluxKontextProImageNode": {
"display_name": "Flux.1 Kontext [pro] Image",
"description": "Edits images using Flux.1 Kontext [pro] via api based on prompt and aspect ratio.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation - specify what and how to edit."
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "Aspect ratio of image; must be between 1:4 and 4:1."
},
"guidance": {
"name": "guidance",
"tooltip": "Guidance strength for the image generation process"
},
"steps": {
"name": "steps",
"tooltip": "Number of steps for the image generation process"
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"prompt_upsampling": {
"name": "prompt_upsampling",
"tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result)."
},
"input_image": {
"name": "input_image"
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"FluxProCannyNode": {
"display_name": "Flux.1 Canny Control Image",
"description": "Generate image using a control image (canny).",
"inputs": {
"control_image": {
"name": "control_image"
},
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation"
},
"prompt_upsampling": {
"name": "prompt_upsampling",
"tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result)."
},
"canny_low_threshold": {
"name": "canny_low_threshold",
        "tooltip": "Low threshold for Canny edge detection; ignored if skip_preprocessing is True"
},
"canny_high_threshold": {
"name": "canny_high_threshold",
        "tooltip": "High threshold for Canny edge detection; ignored if skip_preprocessing is True"
},
"skip_preprocessing": {
"name": "skip_preprocessing",
"tooltip": "Whether to skip preprocessing; set to True if control_image already is canny-fied, False if it is a raw image."
},
"guidance": {
"name": "guidance",
"tooltip": "Guidance strength for the image generation process"
},
"steps": {
"name": "steps",
"tooltip": "Number of steps for the image generation process"
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"FluxProDepthNode": {
"display_name": "Flux.1 Depth Control Image",
"description": "Generate image using a control image (depth).",
"inputs": {
"control_image": {
"name": "control_image"
},
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation"
},
"prompt_upsampling": {
"name": "prompt_upsampling",
"tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result)."
},
"skip_preprocessing": {
"name": "skip_preprocessing",
"tooltip": "Whether to skip preprocessing; set to True if control_image already is depth-ified, False if it is a raw image."
},
"guidance": {
"name": "guidance",
"tooltip": "Guidance strength for the image generation process"
},
"steps": {
"name": "steps",
"tooltip": "Number of steps for the image generation process"
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"FluxProExpandNode": {
"display_name": "Flux.1 Expand Image",
"description": "Outpaints image based on prompt.",
"inputs": {
"image": {
"name": "image"
},
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation"
},
"prompt_upsampling": {
"name": "prompt_upsampling",
"tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result)."
},
"top": {
"name": "top",
"tooltip": "Number of pixels to expand at the top of the image"
},
"bottom": {
"name": "bottom",
"tooltip": "Number of pixels to expand at the bottom of the image"
},
"left": {
"name": "left",
"tooltip": "Number of pixels to expand at the left of the image"
},
"right": {
"name": "right",
"tooltip": "Number of pixels to expand at the right of the image"
},
"guidance": {
"name": "guidance",
"tooltip": "Guidance strength for the image generation process"
},
"steps": {
"name": "steps",
"tooltip": "Number of steps for the image generation process"
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"FluxProFillNode": {
"display_name": "Flux.1 Fill Image",
"description": "Inpaints image based on mask and prompt.",
"inputs": {
"image": {
"name": "image"
},
"mask": {
"name": "mask"
},
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation"
},
"prompt_upsampling": {
"name": "prompt_upsampling",
"tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result)."
},
"guidance": {
"name": "guidance",
"tooltip": "Guidance strength for the image generation process"
},
"steps": {
"name": "steps",
"tooltip": "Number of steps for the image generation process"
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"FluxProUltraImageNode": {
"display_name": "Flux 1.1 [pro] Ultra Image",
"description": "Generates images using Flux Pro 1.1 Ultra via api based on prompt and resolution.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation"
},
"prompt_upsampling": {
"name": "prompt_upsampling",
"tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result)."
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "Aspect ratio of image; must be between 1:4 and 4:1."
},
"raw": {
"name": "raw",
"tooltip": "When True, generate less processed, more natural-looking images."
},
"image_prompt": {
"name": "image_prompt"
},
"image_prompt_strength": {
"name": "image_prompt_strength",
"tooltip": "Blend between the prompt and the image prompt."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"FreeU": {
"display_name": "FreeU",
"inputs": {
"model": {
"name": "model"
},
"b1": {
"name": "b1"
},
"b2": {
"name": "b2"
},
"s1": {
"name": "s1"
},
"s2": {
"name": "s2"
}
}
},
"FreeU_V2": {
"display_name": "FreeU_V2",
"inputs": {
"model": {
"name": "model"
},
"b1": {
"name": "b1"
},
"b2": {
"name": "b2"
},
"s1": {
"name": "s1"
},
"s2": {
"name": "s2"
}
}
},
"FreSca": {
"display_name": "FreSca",
"description": "Applies frequency-dependent scaling to the guidance",
"inputs": {
"model": {
"name": "model"
},
"scale_low": {
"name": "scale_low",
"tooltip": "Scaling factor for low-frequency components"
},
"scale_high": {
"name": "scale_high",
"tooltip": "Scaling factor for high-frequency components"
},
"freq_cutoff": {
"name": "freq_cutoff",
"tooltip": "Number of frequency indices around center to consider as low-frequency"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"GeminiImageNode": {
"display_name": "Google Gemini Image",
"description": "Edit images synchronously via Google API.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Text prompt for generation"
},
"model": {
"name": "model",
"tooltip": "The Gemini model to use for generating responses."
},
"seed": {
"name": "seed",
"tooltip": "When seed is fixed to a specific value, the model makes a best effort to provide the same response for repeated requests. Deterministic output isn't guaranteed. Also, changing the model or parameter settings, such as the temperature, can cause variations in the response even when you use the same seed value. By default, a random seed value is used."
},
"images": {
"name": "images",
"tooltip": "Optional image(s) to use as context for the model. To include multiple images, you can use the Batch Images node."
},
"files": {
"name": "files",
"tooltip": "Optional file(s) to use as context for the model. Accepts inputs from the Gemini Generate Content Input Files node."
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "Defaults to matching the output image size to that of your input image, or otherwise generates 1:1 squares."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"GeminiInputFiles": {
"display_name": "Gemini Input Files",
"description": "Loads and prepares input files to include as inputs for Gemini LLM nodes. The files will be read by the Gemini model when generating a response. The contents of the text file count toward the token limit. 🛈 TIP: Can be chained together with other Gemini Input File nodes.",
"inputs": {
"file": {
"name": "file",
"tooltip": "Input files to include as context for the model. Only accepts text (.txt) and PDF (.pdf) files for now."
},
"GEMINI_INPUT_FILES": {
"name": "GEMINI_INPUT_FILES",
"tooltip": "An optional additional file(s) to batch together with the file loaded from this node. Allows chaining of input files so that a single message can include multiple input files."
}
}
},
"GeminiNode": {
"display_name": "Google Gemini",
"description": "Generate text responses with Google's Gemini AI model. You can provide multiple types of inputs (text, images, audio, video) as context for generating more relevant and meaningful responses.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Text inputs to the model, used to generate a response. You can include detailed instructions, questions, or context for the model."
},
"model": {
"name": "model",
"tooltip": "The Gemini model to use for generating responses."
},
"seed": {
"name": "seed",
"tooltip": "When seed is fixed to a specific value, the model makes a best effort to provide the same response for repeated requests. Deterministic output isn't guaranteed. Also, changing the model or parameter settings, such as the temperature, can cause variations in the response even when you use the same seed value. By default, a random seed value is used."
},
"images": {
"name": "images",
"tooltip": "Optional image(s) to use as context for the model. To include multiple images, you can use the Batch Images node."
},
"audio": {
"name": "audio",
"tooltip": "Optional audio to use as context for the model."
},
"video": {
"name": "video",
"tooltip": "Optional video to use as context for the model."
},
"files": {
"name": "files",
"tooltip": "Optional file(s) to use as context for the model. Accepts inputs from the Gemini Generate Content Input Files node."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"GetImageSize": {
"display_name": "Get Image Size",
"description": "Returns width and height of the image, and passes it through unchanged.",
"inputs": {
"image": {
"name": "image"
}
},
"outputs": {
"0": {
"name": "width"
},
"1": {
"name": "height"
},
"2": {
"name": "batch_size"
}
}
},
"GetVideoComponents": {
"display_name": "Get Video Components",
"description": "Extracts all components from a video: frames, audio, and framerate.",
"inputs": {
"video": {
"name": "video",
"tooltip": "The video to extract components from."
}
},
"outputs": {
"0": {
"name": "images",
"tooltip": null
},
"1": {
"name": "audio",
"tooltip": null
},
"2": {
"name": "fps",
"tooltip": null
}
}
},
"GITSScheduler": {
"display_name": "GITSScheduler",
"inputs": {
"coeff": {
"name": "coeff"
},
"steps": {
"name": "steps"
},
"denoise": {
"name": "denoise"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"GLIGENLoader": {
"display_name": "GLIGENLoader",
"inputs": {
"gligen_name": {
"name": "gligen_name"
}
}
},
"GLIGENTextBoxApply": {
"display_name": "GLIGENTextBoxApply",
"inputs": {
"conditioning_to": {
"name": "conditioning_to"
},
"clip": {
"name": "clip"
},
"gligen_textbox_model": {
"name": "gligen_textbox_model"
},
"text": {
"name": "text"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"x": {
"name": "x"
},
"y": {
"name": "y"
}
}
},
"GrowMask": {
"display_name": "GrowMask",
"inputs": {
"mask": {
"name": "mask"
},
"expand": {
"name": "expand"
},
"tapered_corners": {
"name": "tapered_corners"
}
}
},
"Hunyuan3Dv2Conditioning": {
"display_name": "Hunyuan3Dv2Conditioning",
"inputs": {
"clip_vision_output": {
"name": "clip_vision_output"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
}
}
},
"Hunyuan3Dv2ConditioningMultiView": {
"display_name": "Hunyuan3Dv2ConditioningMultiView",
"inputs": {
"front": {
"name": "front"
},
"left": {
"name": "left"
},
"back": {
"name": "back"
},
"right": {
"name": "right"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
}
}
},
"HunyuanImageToVideo": {
"display_name": "HunyuanImageToVideo",
"inputs": {
"positive": {
"name": "positive"
},
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
},
"guidance_type": {
"name": "guidance_type"
},
"start_image": {
"name": "start_image"
}
},
"outputs": {
"0": {
"name": "positive",
"tooltip": null
},
"1": {
"name": "latent",
"tooltip": null
}
}
},
"HunyuanRefinerLatent": {
"display_name": "HunyuanRefinerLatent",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"latent": {
"name": "latent"
},
"noise_augmentation": {
"name": "noise_augmentation"
}
},
"outputs": {
"0": {
"name": "positive",
"tooltip": null
},
"1": {
"name": "negative",
"tooltip": null
},
"2": {
"name": "latent",
"tooltip": null
}
}
},
"HypernetworkLoader": {
"display_name": "HypernetworkLoader",
"inputs": {
"model": {
"name": "model"
},
"hypernetwork_name": {
"name": "hypernetwork_name"
},
"strength": {
"name": "strength"
}
}
},
"HyperTile": {
"display_name": "HyperTile",
"inputs": {
"model": {
"name": "model"
},
"tile_size": {
"name": "tile_size"
},
"swap_size": {
"name": "swap_size"
},
"max_depth": {
"name": "max_depth"
},
"scale_depth": {
"name": "scale_depth"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"IdeogramV1": {
"display_name": "Ideogram V1",
"description": "Generates images using the Ideogram V1 model.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation"
},
"turbo": {
"name": "turbo",
"tooltip": "Whether to use turbo mode (faster generation, potentially lower quality)"
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "The aspect ratio for image generation."
},
"magic_prompt_option": {
"name": "magic_prompt_option",
"tooltip": "Determine if MagicPrompt should be used in generation"
},
"seed": {
"name": "seed"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Description of what to exclude from the image"
},
"num_images": {
"name": "num_images"
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"IdeogramV2": {
"display_name": "Ideogram V2",
"description": "Generates images using the Ideogram V2 model.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation"
},
"turbo": {
"name": "turbo",
"tooltip": "Whether to use turbo mode (faster generation, potentially lower quality)"
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "The aspect ratio for image generation. Ignored if resolution is not set to AUTO."
},
"resolution": {
"name": "resolution",
"tooltip": "The resolution for image generation. If not set to AUTO, this overrides the aspect_ratio setting."
},
"magic_prompt_option": {
"name": "magic_prompt_option",
"tooltip": "Determine if MagicPrompt should be used in generation"
},
"seed": {
"name": "seed"
},
"style_type": {
"name": "style_type",
"tooltip": "Style type for generation (V2 only)"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Description of what to exclude from the image"
},
"num_images": {
"name": "num_images"
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"IdeogramV3": {
"display_name": "Ideogram V3",
"description": "Generates images using the Ideogram V3 model. Supports both regular image generation from text prompts and image editing with mask.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation or editing"
},
"image": {
"name": "image",
"tooltip": "Optional reference image for image editing."
},
"mask": {
"name": "mask",
"tooltip": "Optional mask for inpainting (white areas will be replaced)"
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "The aspect ratio for image generation. Ignored if resolution is not set to Auto."
},
"resolution": {
"name": "resolution",
"tooltip": "The resolution for image generation. If not set to Auto, this overrides the aspect_ratio setting."
},
"magic_prompt_option": {
"name": "magic_prompt_option",
"tooltip": "Determine if MagicPrompt should be used in generation"
},
"seed": {
"name": "seed"
},
"num_images": {
"name": "num_images"
},
"rendering_speed": {
"name": "rendering_speed",
"tooltip": "Controls the trade-off between generation speed and quality"
},
"character_image": {
"name": "character_image",
"tooltip": "Image to use as character reference."
},
"character_mask": {
"name": "character_mask",
"tooltip": "Optional mask for character reference image."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"ImageAddNoise": {
"display_name": "ImageAddNoise",
"inputs": {
"image": {
"name": "image"
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"strength": {
"name": "strength"
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"ImageBatch": {
"display_name": "Batch Images",
"inputs": {
"image1": {
"name": "image1"
},
"image2": {
"name": "image2"
}
}
},
"ImageBlend": {
"display_name": "ImageBlend",
"inputs": {
"image1": {
"name": "image1"
},
"image2": {
"name": "image2"
},
"blend_factor": {
"name": "blend_factor"
},
"blend_mode": {
"name": "blend_mode"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"ImageBlur": {
"display_name": "ImageBlur",
"inputs": {
"image": {
"name": "image"
},
"blur_radius": {
"name": "blur_radius"
},
"sigma": {
"name": "sigma"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"ImageColorToMask": {
"display_name": "ImageColorToMask",
"inputs": {
"image": {
"name": "image"
},
"color": {
"name": "color"
}
}
},
"ImageCompositeMasked": {
"display_name": "ImageCompositeMasked",
"inputs": {
"destination": {
"name": "destination"
},
"source": {
"name": "source"
},
"x": {
"name": "x"
},
"y": {
"name": "y"
},
"resize_source": {
"name": "resize_source"
},
"mask": {
"name": "mask"
}
}
},
"ImageCrop": {
"display_name": "Image Crop",
"inputs": {
"image": {
"name": "image"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"x": {
"name": "x"
},
"y": {
"name": "y"
}
}
},
"ImageFlip": {
"display_name": "ImageFlip",
"inputs": {
"image": {
"name": "image"
},
"flip_method": {
"name": "flip_method"
}
}
},
"ImageFromBatch": {
"display_name": "ImageFromBatch",
"inputs": {
"image": {
"name": "image"
},
"batch_index": {
"name": "batch_index"
},
"length": {
"name": "length"
}
}
},
"ImageInvert": {
"display_name": "Invert Image",
"inputs": {
"image": {
"name": "image"
}
}
},
"ImageOnlyCheckpointLoader": {
"display_name": "Image Only Checkpoint Loader (img2vid model)",
"inputs": {
"ckpt_name": {
"name": "ckpt_name"
}
}
},
"ImageOnlyCheckpointSave": {
"display_name": "ImageOnlyCheckpointSave",
"inputs": {
"model": {
"name": "model"
},
"clip_vision": {
"name": "clip_vision"
},
"vae": {
"name": "vae"
},
"filename_prefix": {
"name": "filename_prefix"
}
}
},
"ImagePadForOutpaint": {
"display_name": "Pad Image for Outpainting",
"inputs": {
"image": {
"name": "image"
},
"left": {
"name": "left"
},
"top": {
"name": "top"
},
"right": {
"name": "right"
},
"bottom": {
"name": "bottom"
},
"feathering": {
"name": "feathering"
}
}
},
"ImageQuantize": {
"display_name": "ImageQuantize",
"inputs": {
"image": {
"name": "image"
},
"colors": {
"name": "colors"
},
"dither": {
"name": "dither"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"ImageRGBToYUV": {
"display_name": "ImageRGBToYUV",
"inputs": {
"image": {
"name": "image"
}
},
"outputs": {
"0": {
"name": "Y",
"tooltip": null
},
"1": {
"name": "U",
"tooltip": null
},
"2": {
"name": "V",
"tooltip": null
}
}
},
"ImageRotate": {
"display_name": "ImageRotate",
"inputs": {
"image": {
"name": "image"
},
"rotation": {
"name": "rotation"
}
}
},
"ImageScale": {
"display_name": "Upscale Image",
"inputs": {
"image": {
"name": "image"
},
"upscale_method": {
"name": "upscale_method"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"crop": {
"name": "crop"
}
}
},
"ImageScaleBy": {
"display_name": "Upscale Image By",
"inputs": {
"image": {
"name": "image"
},
"upscale_method": {
"name": "upscale_method"
},
"scale_by": {
"name": "scale_by"
}
}
},
"ImageScaleToMaxDimension": {
"display_name": "ImageScaleToMaxDimension",
"inputs": {
"image": {
"name": "image"
},
"upscale_method": {
"name": "upscale_method"
},
"largest_size": {
"name": "largest_size"
}
}
},
"ImageScaleToTotalPixels": {
"display_name": "ImageScaleToTotalPixels",
"inputs": {
"image": {
"name": "image"
},
"upscale_method": {
"name": "upscale_method"
},
"megapixels": {
"name": "megapixels"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"ImageSharpen": {
"display_name": "ImageSharpen",
"inputs": {
"image": {
"name": "image"
},
"sharpen_radius": {
"name": "sharpen_radius"
},
"sigma": {
"name": "sigma"
},
"alpha": {
"name": "alpha"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"ImageStitch": {
"display_name": "Image Stitch",
"description": "\nStitches image2 to image1 in the specified direction.\nIf image2 is not provided, returns image1 unchanged.\nOptional spacing can be added between images.\n",
"inputs": {
"image1": {
"name": "image1"
},
"direction": {
"name": "direction"
},
"match_image_size": {
"name": "match_image_size"
},
"spacing_width": {
"name": "spacing_width"
},
"spacing_color": {
"name": "spacing_color"
},
"image2": {
"name": "image2"
}
}
},
"ImageToMask": {
"display_name": "Convert Image to Mask",
"inputs": {
"image": {
"name": "image"
},
"channel": {
"name": "channel"
}
}
},
"ImageUpscaleWithModel": {
"display_name": "Upscale Image (using Model)",
"inputs": {
"upscale_model": {
"name": "upscale_model"
},
"image": {
"name": "image"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"ImageYUVToRGB": {
"display_name": "ImageYUVToRGB",
"inputs": {
"Y": {
"name": "Y"
},
"U": {
"name": "U"
},
"V": {
"name": "V"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"InpaintModelConditioning": {
"display_name": "InpaintModelConditioning",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"vae": {
"name": "vae"
},
"pixels": {
"name": "pixels"
},
"mask": {
"name": "mask"
},
"noise_mask": {
"name": "noise_mask",
"tooltip": "Add a noise mask to the latent so sampling will only happen within the mask. Might improve results or completely break things depending on the model."
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
},
"2": {
"name": "latent"
}
}
},
"InstructPixToPixConditioning": {
"display_name": "InstructPixToPixConditioning",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"vae": {
"name": "vae"
},
"pixels": {
"name": "pixels"
}
},
"outputs": {
"0": {
"name": "positive",
"tooltip": null
},
"1": {
"name": "negative",
"tooltip": null
},
"2": {
"name": "latent",
"tooltip": null
}
}
},
"InvertMask": {
"display_name": "InvertMask",
"inputs": {
"mask": {
"name": "mask"
}
}
},
"JoinImageWithAlpha": {
"display_name": "Join Image with Alpha",
"inputs": {
"image": {
"name": "image"
},
"alpha": {
"name": "alpha"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"KarrasScheduler": {
"display_name": "KarrasScheduler",
"inputs": {
"steps": {
"name": "steps"
},
"sigma_max": {
"name": "sigma_max"
},
"sigma_min": {
"name": "sigma_min"
},
"rho": {
"name": "rho"
}
}
},
"KlingCameraControlI2VNode": {
"display_name": "Kling Image to Video (Camera Control)",
"description": "Transform still images into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original image.",
"inputs": {
"start_frame": {
"name": "start_frame",
"tooltip": "Reference Image - URL or Base64 encoded string, cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1. Base64 should not include data:image prefix."
},
"prompt": {
"name": "prompt",
"tooltip": "Positive text prompt"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Negative text prompt"
},
"cfg_scale": {
"name": "cfg_scale"
},
"aspect_ratio": {
"name": "aspect_ratio"
},
"camera_control": {
"name": "camera_control",
"tooltip": "Can be created using the Kling Camera Controls node. Controls the camera movement and motion during the video generation."
}
},
"outputs": {
"0": {
"tooltip": null
},
"1": {
"name": "video_id",
"tooltip": null
},
"2": {
"name": "duration",
"tooltip": null
}
}
},
"KlingCameraControls": {
"display_name": "Kling Camera Controls",
"description": "Allows specifying configuration options for Kling Camera Controls and motion control effects.",
"inputs": {
"camera_control_type": {
"name": "camera_control_type"
},
"horizontal_movement": {
"name": "horizontal_movement",
        "tooltip": "Controls camera's movement along horizontal axis (x-axis). Negative indicates left, positive indicates right."
},
"vertical_movement": {
"name": "vertical_movement",
"tooltip": "Controls camera's movement along vertical axis (y-axis). Negative indicates downward, positive indicates upward."
},
"pan": {
"name": "pan",
"tooltip": "Controls camera's rotation in vertical plane (x-axis). Negative indicates downward rotation, positive indicates upward rotation."
},
"tilt": {
"name": "tilt",
"tooltip": "Controls camera's rotation in horizontal plane (y-axis). Negative indicates left rotation, positive indicates right rotation."
},
"roll": {
"name": "roll",
"tooltip": "Controls camera's rolling amount (z-axis). Negative indicates counterclockwise, positive indicates clockwise."
},
"zoom": {
"name": "zoom",
"tooltip": "Controls change in camera's focal length. Negative indicates narrower field of view, positive indicates wider field of view."
}
},
"outputs": {
"0": {
"name": "camera_control",
"tooltip": null
}
}
},
"KlingCameraControlT2VNode": {
"display_name": "Kling Text to Video (Camera Control)",
"description": "Transform text into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original text.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Positive text prompt"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Negative text prompt"
},
"cfg_scale": {
"name": "cfg_scale"
},
"aspect_ratio": {
"name": "aspect_ratio"
},
"camera_control": {
"name": "camera_control",
"tooltip": "Can be created using the Kling Camera Controls node. Controls the camera movement and motion during the video generation."
}
},
"outputs": {
"0": {
"tooltip": null
},
"1": {
"name": "video_id",
"tooltip": null
},
"2": {
"name": "duration",
"tooltip": null
}
}
},
"KlingDualCharacterVideoEffectNode": {
"display_name": "Kling Dual Character Video Effects",
"description": "Achieve different special effects when generating a video based on the effect_scene. First image will be positioned on left side, second on right side of the composite.",
"inputs": {
"image_left": {
"name": "image_left",
"tooltip": "Left side image"
},
"image_right": {
"name": "image_right",
"tooltip": "Right side image"
},
"effect_scene": {
"name": "effect_scene"
},
"model_name": {
"name": "model_name"
},
"mode": {
"name": "mode"
},
"duration": {
"name": "duration"
}
},
"outputs": {
"0": {
"tooltip": null
},
"1": {
"name": "duration",
"tooltip": null
}
}
},
"KlingImage2VideoNode": {
"display_name": "Kling Image to Video",
"description": "Kling Image to Video Node",
"inputs": {
"start_frame": {
"name": "start_frame",
"tooltip": "The reference image used to generate the video."
},
"prompt": {
"name": "prompt",
"tooltip": "Positive text prompt"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Negative text prompt"
},
"model_name": {
"name": "model_name"
},
"cfg_scale": {
"name": "cfg_scale"
},
"mode": {
"name": "mode"
},
"aspect_ratio": {
"name": "aspect_ratio"
},
"duration": {
"name": "duration"
}
},
"outputs": {
"0": {
"tooltip": null
},
"1": {
"name": "video_id",
"tooltip": null
},
"2": {
"name": "duration",
"tooltip": null
}
}
},
"KlingImageGenerationNode": {
"display_name": "Kling Image Generation",
"description": "Kling Image Generation Node. Generate an image from a text prompt with an optional reference image.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Positive text prompt"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Negative text prompt"
},
"image_type": {
"name": "image_type"
},
"image_fidelity": {
"name": "image_fidelity",
"tooltip": "Reference intensity for user-uploaded images"
},
"human_fidelity": {
"name": "human_fidelity",
"tooltip": "Subject reference similarity"
},
"model_name": {
"name": "model_name"
},
"aspect_ratio": {
"name": "aspect_ratio"
},
"n": {
"name": "n",
"tooltip": "Number of generated images"
},
"image": {
"name": "image"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"KlingLipSyncAudioToVideoNode": {
"display_name": "Kling Lip Sync Video with Audio",
"description": "Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file. When using, ensure that the audio contains clearly distinguishable vocals and that the video contains a distinct face. The audio file should not be larger than 5MB. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.",
"inputs": {
"video": {
"name": "video"
},
"audio": {
"name": "audio"
},
"voice_language": {
"name": "voice_language"
}
},
"outputs": {
"0": {
"tooltip": null
},
"1": {
"name": "video_id",
"tooltip": null
},
"2": {
"name": "duration",
"tooltip": null
}
}
},
"KlingLipSyncTextToVideoNode": {
"display_name": "Kling Lip Sync Video with Text",
"description": "Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.",
"inputs": {
"video": {
"name": "video"
},
"text": {
"name": "text",
"tooltip": "Text Content for Lip-Sync Video Generation. Required when mode is text2video. Maximum length is 120 characters."
},
"voice": {
"name": "voice"
},
"voice_speed": {
"name": "voice_speed",
"tooltip": "Speech Rate. Valid range: 0.8~2.0, accurate to one decimal place."
}
},
"outputs": {
"0": {
"tooltip": null
},
"1": {
"name": "video_id",
"tooltip": null
},
"2": {
"name": "duration",
"tooltip": null
}
}
},
"KlingSingleImageVideoEffectNode": {
"display_name": "Kling Video Effects",
"description": "Achieve different special effects when generating a video based on the effect_scene.",
"inputs": {
"image": {
"name": "image",
        "tooltip": "Reference Image. URL or Base64 encoded string (without data:image prefix). File size cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1"
},
"effect_scene": {
"name": "effect_scene"
},
"model_name": {
"name": "model_name"
},
"duration": {
"name": "duration"
}
},
"outputs": {
"0": {
"tooltip": null
},
"1": {
"name": "video_id",
"tooltip": null
},
"2": {
"name": "duration",
"tooltip": null
}
}
},
"KlingStartEndFrameNode": {
"display_name": "Kling Start-End Frame to Video",
"description": "Generate a video sequence that transitions between your provided start and end images. The node creates all frames in between, producing a smooth transformation from the first frame to the last.",
"inputs": {
"start_frame": {
"name": "start_frame",
"tooltip": "Reference Image - URL or Base64 encoded string, cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1. Base64 should not include data:image prefix."
},
"end_frame": {
"name": "end_frame",
"tooltip": "Reference Image - End frame control. URL or Base64 encoded string, cannot exceed 10MB, resolution not less than 300*300px. Base64 should not include data:image prefix."
},
"prompt": {
"name": "prompt",
"tooltip": "Positive text prompt"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Negative text prompt"
},
"cfg_scale": {
"name": "cfg_scale"
},
"aspect_ratio": {
"name": "aspect_ratio"
},
"mode": {
"name": "mode",
"tooltip": "The configuration to use for the video generation following the format: mode / duration / model_name."
}
},
"outputs": {
"0": {
"tooltip": null
},
"1": {
"name": "video_id",
"tooltip": null
},
"2": {
"name": "duration",
"tooltip": null
}
}
},
"KlingTextToVideoNode": {
"display_name": "Kling Text to Video",
"description": "Kling Text to Video Node",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Positive text prompt"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Negative text prompt"
},
"cfg_scale": {
"name": "cfg_scale"
},
"aspect_ratio": {
"name": "aspect_ratio"
},
"mode": {
"name": "mode",
"tooltip": "The configuration to use for the video generation following the format: mode / duration / model_name."
}
},
"outputs": {
"0": {
"tooltip": null
},
"1": {
"name": "video_id",
"tooltip": null
},
"2": {
"name": "duration",
"tooltip": null
}
}
},
"KlingVideoExtendNode": {
"display_name": "Kling Video Extend",
"description": "Kling Video Extend Node. Extend videos made by other Kling nodes. The video_id is created by using other Kling Nodes.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Positive text prompt for guiding the video extension"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Negative text prompt for elements to avoid in the extended video"
},
"cfg_scale": {
"name": "cfg_scale"
},
"video_id": {
"name": "video_id",
"tooltip": "The ID of the video to be extended. Supports videos generated by text-to-video, image-to-video, and previous video extension operations. Cannot exceed 3 minutes total duration after extension."
}
},
"outputs": {
"0": {
"tooltip": null
},
"1": {
"name": "video_id",
"tooltip": null
},
"2": {
"name": "duration",
"tooltip": null
}
}
},
"KlingVirtualTryOnNode": {
"display_name": "Kling Virtual Try On",
"description": "Kling Virtual Try On Node. Input a human image and a cloth image to try on the cloth on the human. You can merge multiple clothing item pictures into one image with a white background.",
"inputs": {
"human_image": {
"name": "human_image"
},
"cloth_image": {
"name": "cloth_image"
},
"model_name": {
"name": "model_name"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"KSampler": {
"display_name": "KSampler",
"description": "Uses the provided model, positive and negative conditioning to denoise the latent image.",
"inputs": {
"model": {
"name": "model",
"tooltip": "The model used for denoising the input latent."
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"steps": {
"name": "steps",
"tooltip": "The number of steps used in the denoising process."
},
"cfg": {
"name": "cfg",
"tooltip": "The Classifier-Free Guidance scale balances creativity and adherence to the prompt. Higher values result in images more closely matching the prompt however too high values will negatively impact quality."
},
"sampler_name": {
"name": "sampler_name",
"tooltip": "The algorithm used when sampling, this can affect the quality, speed, and style of the generated output."
},
"scheduler": {
"name": "scheduler",
"tooltip": "The scheduler controls how noise is gradually removed to form the image."
},
"positive": {
"name": "positive",
"tooltip": "The conditioning describing the attributes you want to include in the image."
},
"negative": {
"name": "negative",
"tooltip": "The conditioning describing the attributes you want to exclude from the image."
},
"latent_image": {
"name": "latent_image",
"tooltip": "The latent image to denoise."
},
"denoise": {
"name": "denoise",
"tooltip": "The amount of denoising applied, lower values will maintain the structure of the initial image allowing for image to image sampling."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": "The denoised latent."
}
}
},
"KSamplerAdvanced": {
"display_name": "KSampler (Advanced)",
"inputs": {
"model": {
"name": "model"
},
"add_noise": {
"name": "add_noise"
},
"noise_seed": {
"name": "noise_seed"
},
"steps": {
"name": "steps"
},
"cfg": {
"name": "cfg"
},
"sampler_name": {
"name": "sampler_name"
},
"scheduler": {
"name": "scheduler"
},
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"latent_image": {
"name": "latent_image"
},
"start_at_step": {
"name": "start_at_step"
},
"end_at_step": {
"name": "end_at_step"
},
"return_with_leftover_noise": {
"name": "return_with_leftover_noise"
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"KSamplerSelect": {
"display_name": "KSamplerSelect",
"inputs": {
"sampler_name": {
"name": "sampler_name"
}
}
},
"LaplaceScheduler": {
"display_name": "LaplaceScheduler",
"inputs": {
"steps": {
"name": "steps"
},
"sigma_max": {
"name": "sigma_max"
},
"sigma_min": {
"name": "sigma_min"
},
"mu": {
"name": "mu"
},
"beta": {
"name": "beta"
}
}
},
"LatentAdd": {
"display_name": "LatentAdd",
"inputs": {
"samples1": {
"name": "samples1"
},
"samples2": {
"name": "samples2"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"LatentApplyOperation": {
"display_name": "LatentApplyOperation",
"inputs": {
"samples": {
"name": "samples"
},
"operation": {
"name": "operation"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"LatentApplyOperationCFG": {
"display_name": "LatentApplyOperationCFG",
"inputs": {
"model": {
"name": "model"
},
"operation": {
"name": "operation"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"LatentBatch": {
"display_name": "LatentBatch",
"inputs": {
"samples1": {
"name": "samples1"
},
"samples2": {
"name": "samples2"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"LatentBatchSeedBehavior": {
"display_name": "LatentBatchSeedBehavior",
"inputs": {
"samples": {
"name": "samples"
},
"seed_behavior": {
"name": "seed_behavior"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"LatentBlend": {
"display_name": "Latent Blend",
"inputs": {
"samples1": {
"name": "samples1"
},
"samples2": {
"name": "samples2"
},
"blend_factor": {
"name": "blend_factor"
}
}
},
"LatentComposite": {
"display_name": "Latent Composite",
"inputs": {
"samples_to": {
"name": "samples_to"
},
"samples_from": {
"name": "samples_from"
},
"x": {
"name": "x"
},
"y": {
"name": "y"
},
"feather": {
"name": "feather"
}
}
},
"LatentCompositeMasked": {
"display_name": "LatentCompositeMasked",
"inputs": {
"destination": {
"name": "destination"
},
"source": {
"name": "source"
},
"x": {
"name": "x"
},
"y": {
"name": "y"
},
"resize_source": {
"name": "resize_source"
},
"mask": {
"name": "mask"
}
}
},
"LatentConcat": {
"display_name": "LatentConcat",
"inputs": {
"samples1": {
"name": "samples1"
},
"samples2": {
"name": "samples2"
},
"dim": {
"name": "dim"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"LatentCrop": {
"display_name": "Crop Latent",
"inputs": {
"samples": {
"name": "samples"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"x": {
"name": "x"
},
"y": {
"name": "y"
}
}
},
"LatentCut": {
"display_name": "LatentCut",
"inputs": {
"samples": {
"name": "samples"
},
"dim": {
"name": "dim"
},
"index": {
"name": "index"
},
"amount": {
"name": "amount"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"LatentFlip": {
"display_name": "Flip Latent",
"inputs": {
"samples": {
"name": "samples"
},
"flip_method": {
"name": "flip_method"
}
}
},
"LatentFromBatch": {
"display_name": "Latent From Batch",
"inputs": {
"samples": {
"name": "samples"
},
"batch_index": {
"name": "batch_index"
},
"length": {
"name": "length"
}
}
},
"LatentInterpolate": {
"display_name": "LatentInterpolate",
"inputs": {
"samples1": {
"name": "samples1"
},
"samples2": {
"name": "samples2"
},
"ratio": {
"name": "ratio"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"LatentMultiply": {
"display_name": "LatentMultiply",
"inputs": {
"samples": {
"name": "samples"
},
"multiplier": {
"name": "multiplier"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"LatentOperationSharpen": {
"display_name": "LatentOperationSharpen",
"inputs": {
"sharpen_radius": {
"name": "sharpen_radius"
},
"sigma": {
"name": "sigma"
},
"alpha": {
"name": "alpha"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"LatentOperationTonemapReinhard": {
"display_name": "LatentOperationTonemapReinhard",
"inputs": {
"multiplier": {
"name": "multiplier"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"LatentRotate": {
"display_name": "Rotate Latent",
"inputs": {
"samples": {
"name": "samples"
},
"rotation": {
"name": "rotation"
}
}
},
"LatentSubtract": {
"display_name": "LatentSubtract",
"inputs": {
"samples1": {
"name": "samples1"
},
"samples2": {
"name": "samples2"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"LatentUpscale": {
"display_name": "Upscale Latent",
"inputs": {
"samples": {
"name": "samples"
},
"upscale_method": {
"name": "upscale_method"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"crop": {
"name": "crop"
}
}
},
"LatentUpscaleBy": {
"display_name": "Upscale Latent By",
"inputs": {
"samples": {
"name": "samples"
},
"upscale_method": {
"name": "upscale_method"
},
"scale_by": {
"name": "scale_by"
}
}
},
"LazyCache": {
"display_name": "LazyCache",
"description": "A homebrew version of EasyCache - even 'easier' version of EasyCache to implement. Overall works worse than EasyCache, but better in some rare cases AND universal compatibility with everything in ComfyUI.",
"inputs": {
"model": {
"name": "model",
"tooltip": "The model to add LazyCache to."
},
"reuse_threshold": {
"name": "reuse_threshold",
"tooltip": "The threshold for reusing cached steps."
},
"start_percent": {
"name": "start_percent",
"tooltip": "The relative sampling step to begin use of LazyCache."
},
"end_percent": {
"name": "end_percent",
"tooltip": "The relative sampling step to end use of LazyCache."
},
"verbose": {
"name": "verbose",
"tooltip": "Whether to log verbose information."
}
},
"outputs": {
"0": {
"tooltip": "The model with LazyCache."
}
}
},
"Load3D": {
"display_name": "Load 3D",
"inputs": {
"model_file": {
"name": "model_file"
},
"image": {
"name": "image"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"clear": {},
"upload 3d model": {},
"upload extra resources": {}
},
"outputs": {
"0": {
"name": "image"
},
"1": {
"name": "mask"
},
"2": {
"name": "mesh_path"
},
"3": {
"name": "normal"
},
"4": {
"name": "lineart"
},
"5": {
"name": "camera_info"
},
"6": {
"name": "recording_video"
}
}
},
"Load3DAnimation": {
"display_name": "Load 3D - Animation",
"inputs": {
"model_file": {
"name": "model_file"
},
"image": {
"name": "image"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"clear": {},
"upload 3d model": {},
"upload extra resources": {}
},
"outputs": {
"0": {
"name": "image"
},
"1": {
"name": "mask"
},
"2": {
"name": "mesh_path"
},
"3": {
"name": "normal"
},
"4": {
"name": "camera_info"
},
"5": {
"name": "recording_video"
}
}
},
"LoadAudio": {
"display_name": "Load Audio",
"inputs": {
"audio": {
"name": "audio"
},
"audioUI": {
"name": "audioUI"
},
"upload": {
"name": "choose file to upload"
}
}
},
"LoadImage": {
"display_name": "Load Image",
"inputs": {
"image": {
"name": "image"
},
"upload": {
"name": "choose file to upload"
}
}
},
"LoadImageMask": {
"display_name": "Load Image (as Mask)",
"inputs": {
"image": {
"name": "image"
},
"channel": {
"name": "channel"
},
"upload": {
"name": "choose file to upload"
}
}
},
"LoadImageOutput": {
"display_name": "Load Image (from Outputs)",
"description": "Load an image from the output folder. When the refresh button is clicked, the node will update the image list and automatically select the first image, allowing for easy iteration.",
"inputs": {
"image": {
"name": "image"
},
"Auto-refresh after generation": {},
"refresh": {},
"upload": {
"name": "choose file to upload"
}
}
},
"LoadImageSetFromFolderNode": {
"display_name": "Load Image Dataset from Folder",
"description": "Loads a batch of images from a directory for training.",
"inputs": {
"folder": {
"name": "folder",
"tooltip": "The folder to load images from."
},
"resize_method": {
"name": "resize_method"
}
}
},
"LoadImageTextSetFromFolderNode": {
"display_name": "Load Image and Text Dataset from Folder",
"description": "Loads a batch of images and caption from a directory for training.",
"inputs": {
"folder": {
"name": "folder",
"tooltip": "The folder to load images from."
},
"clip": {
"name": "clip",
"tooltip": "The CLIP model used for encoding the text."
},
"resize_method": {
"name": "resize_method"
},
"width": {
"name": "width",
"tooltip": "The width to resize the images to. -1 means use the original width."
},
"height": {
"name": "height",
"tooltip": "The height to resize the images to. -1 means use the original height."
}
}
},
"LoadLatent": {
"display_name": "LoadLatent",
"inputs": {
"latent": {
"name": "latent"
}
}
},
"LoadVideo": {
"display_name": "Load Video",
"inputs": {
"file": {
"name": "file"
},
"upload": {
"name": "choose file to upload"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"LoraLoader": {
"display_name": "Load LoRA",
"description": "LoRAs are used to modify diffusion and CLIP models, altering the way in which latents are denoised such as applying styles. Multiple LoRA nodes can be linked together.",
"inputs": {
"model": {
"name": "model",
"tooltip": "The diffusion model the LoRA will be applied to."
},
"clip": {
"name": "clip",
"tooltip": "The CLIP model the LoRA will be applied to."
},
"lora_name": {
"name": "lora_name",
"tooltip": "The name of the LoRA."
},
"strength_model": {
"name": "strength_model",
"tooltip": "How strongly to modify the diffusion model. This value can be negative."
},
"strength_clip": {
"name": "strength_clip",
"tooltip": "How strongly to modify the CLIP model. This value can be negative."
}
},
"outputs": {
"0": {
"tooltip": "The modified diffusion model."
},
"1": {
"tooltip": "The modified CLIP model."
}
}
},
"LoraLoaderModelOnly": {
"display_name": "LoraLoaderModelOnly",
"description": "LoRAs are used to modify diffusion and CLIP models, altering the way in which latents are denoised such as applying styles. Multiple LoRA nodes can be linked together.",
"inputs": {
"model": {
"name": "model"
},
"lora_name": {
"name": "lora_name"
},
"strength_model": {
"name": "strength_model"
}
},
"outputs": {
"0": {
"tooltip": "The modified diffusion model."
}
}
},
"LoraModelLoader": {
"display_name": "Load LoRA Model",
"description": "Load Trained LoRA weights from Train LoRA node.",
"inputs": {
"model": {
"name": "model",
"tooltip": "The diffusion model the LoRA will be applied to."
},
"lora": {
"name": "lora",
"tooltip": "The LoRA model to apply to the diffusion model."
},
"strength_model": {
"name": "strength_model",
"tooltip": "How strongly to modify the diffusion model. This value can be negative."
}
},
"outputs": {
"0": {
"tooltip": "The modified diffusion model."
}
}
},
"LoraSave": {
"display_name": "Extract and Save Lora",
"inputs": {
"filename_prefix": {
"name": "filename_prefix"
},
"rank": {
"name": "rank"
},
"lora_type": {
"name": "lora_type"
},
"bias_diff": {
"name": "bias_diff"
},
"model_diff": {
"name": "model_diff",
"tooltip": "The ModelSubtract output to be converted to a lora."
},
"text_encoder_diff": {
"name": "text_encoder_diff",
"tooltip": "The CLIPSubtract output to be converted to a lora."
}
}
},
"LossGraphNode": {
"display_name": "Plot Loss Graph",
"description": "Plots the loss graph and saves it to the output directory.",
"inputs": {
"loss": {
"name": "loss"
},
"filename_prefix": {
"name": "filename_prefix"
}
}
},
"LotusConditioning": {
"display_name": "LotusConditioning",
"outputs": {
"0": {
"name": "conditioning",
"tooltip": null
}
}
},
"LTXVAddGuide": {
"display_name": "LTXVAddGuide",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"vae": {
"name": "vae"
},
"latent": {
"name": "latent"
},
"image": {
"name": "image",
"tooltip": "Image or video to condition the latent video on. Must be 8*n + 1 frames. If the video is not 8*n + 1 frames, it will be cropped to the nearest 8*n + 1 frames."
},
"frame_idx": {
"name": "frame_idx",
"tooltip": "Frame index to start the conditioning at. For single-frame images or videos with 1-8 frames, any frame_idx value is acceptable. For videos with 9+ frames, frame_idx must be divisible by 8, otherwise it will be rounded down to the nearest multiple of 8. Negative values are counted from the end of the video."
},
"strength": {
"name": "strength"
}
},
"outputs": {
"0": {
"name": "positive",
"tooltip": null
},
"1": {
"name": "negative",
"tooltip": null
},
"2": {
"name": "latent",
"tooltip": null
}
}
},
"LTXVConditioning": {
"display_name": "LTXVConditioning",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"frame_rate": {
"name": "frame_rate"
}
},
"outputs": {
"0": {
"name": "positive",
"tooltip": null
},
"1": {
"name": "negative",
"tooltip": null
}
}
},
"LTXVCropGuides": {
"display_name": "LTXVCropGuides",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"latent": {
"name": "latent"
}
},
"outputs": {
"0": {
"name": "positive",
"tooltip": null
},
"1": {
"name": "negative",
"tooltip": null
},
"2": {
"name": "latent",
"tooltip": null
}
}
},
"LTXVImgToVideo": {
"display_name": "LTXVImgToVideo",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"vae": {
"name": "vae"
},
"image": {
"name": "image"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
},
"strength": {
"name": "strength"
}
},
"outputs": {
"0": {
"name": "positive",
"tooltip": null
},
"1": {
"name": "negative",
"tooltip": null
},
"2": {
"name": "latent",
"tooltip": null
}
}
},
"LTXVPreprocess": {
"display_name": "LTXVPreprocess",
"inputs": {
"image": {
"name": "image"
},
"img_compression": {
"name": "img_compression",
"tooltip": "Amount of compression to apply on image."
}
},
"outputs": {
"0": {
"name": "output_image",
"tooltip": null
}
}
},
"LTXVScheduler": {
"display_name": "LTXVScheduler",
"inputs": {
"steps": {
"name": "steps"
},
"max_shift": {
"name": "max_shift"
},
"base_shift": {
"name": "base_shift"
},
"stretch": {
"name": "stretch",
"tooltip": "Stretch the sigmas to be in the range [terminal, 1]."
},
"terminal": {
"name": "terminal",
"tooltip": "The terminal value of the sigmas after stretching."
},
"latent": {
"name": "latent"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"LumaConceptsNode": {
"display_name": "Luma Concepts",
"description": "Holds one or more Camera Concepts for use with Luma Text to Video and Luma Image to Video nodes.",
"inputs": {
"concept1": {
"name": "concept1"
},
"concept2": {
"name": "concept2"
},
"concept3": {
"name": "concept3"
},
"concept4": {
"name": "concept4"
},
"luma_concepts": {
"name": "luma_concepts",
"tooltip": "Optional Camera Concepts to add to the ones chosen here."
}
},
"outputs": {
"0": {
"name": "luma_concepts",
"tooltip": null
}
}
},
"LumaImageModifyNode": {
"display_name": "Luma Image to Image",
"description": "Modifies images synchronously based on prompt and aspect ratio.",
"inputs": {
"image": {
"name": "image"
},
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation"
},
"image_weight": {
"name": "image_weight",
"tooltip": "Weight of the image; the closer to 1.0, the less the image will be modified."
},
"model": {
"name": "model"
},
"seed": {
"name": "seed",
"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"LumaImageNode": {
"display_name": "Luma Text to Image",
"description": "Generates images synchronously based on prompt and aspect ratio.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation"
},
"model": {
"name": "model"
},
"aspect_ratio": {
"name": "aspect_ratio"
},
"seed": {
"name": "seed",
"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed."
},
"style_image_weight": {
"name": "style_image_weight",
"tooltip": "Weight of style image. Ignored if no style_image provided."
},
"image_luma_ref": {
"name": "image_luma_ref",
"tooltip": "Luma Reference node connection to influence generation with input images; up to 4 images can be considered."
},
"style_image": {
"name": "style_image",
"tooltip": "Style reference image; only 1 image will be used."
},
"character_image": {
"name": "character_image",
"tooltip": "Character reference images; can be a batch of multiple, up to 4 images can be considered."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"LumaImageToVideoNode": {
"display_name": "Luma Image to Video",
"description": "Generates videos synchronously based on prompt, input images, and output_size.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the video generation"
},
"model": {
"name": "model"
},
"resolution": {
"name": "resolution"
},
"duration": {
"name": "duration"
},
"loop": {
"name": "loop"
},
"seed": {
"name": "seed",
"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed."
},
"first_image": {
"name": "first_image",
"tooltip": "First frame of generated video."
},
"last_image": {
"name": "last_image",
"tooltip": "Last frame of generated video."
},
"luma_concepts": {
"name": "luma_concepts",
"tooltip": "Optional Camera Concepts to dictate camera motion via the Luma Concepts node."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"LumaReferenceNode": {
"display_name": "Luma Reference",
"description": "Holds an image and weight for use with Luma Generate Image node.",
"inputs": {
"image": {
"name": "image",
"tooltip": "Image to use as reference."
},
"weight": {
"name": "weight",
"tooltip": "Weight of image reference."
},
"luma_ref": {
"name": "luma_ref"
}
},
"outputs": {
"0": {
"name": "luma_ref",
"tooltip": null
}
}
},
"LumaVideoNode": {
"display_name": "Luma Text to Video",
"description": "Generates videos synchronously based on prompt and output_size.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the video generation"
},
"model": {
"name": "model"
},
"aspect_ratio": {
"name": "aspect_ratio"
},
"resolution": {
"name": "resolution"
},
"duration": {
"name": "duration"
},
"loop": {
"name": "loop"
},
"seed": {
"name": "seed",
"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed."
},
"luma_concepts": {
"name": "luma_concepts",
"tooltip": "Optional Camera Concepts to dictate camera motion via the Luma Concepts node."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Mahiro": {
"display_name": "Mahiro is so cute that she deserves a better guidance function!! (。・ω・。)",
"description": "Modify the guidance to scale more on the 'direction' of the positive prompt rather than the difference between the negative prompt.",
"inputs": {
"model": {
"name": "model"
}
},
"outputs": {
"0": {
"name": "patched_model",
"tooltip": null
}
}
},
"MaskComposite": {
"display_name": "MaskComposite",
"inputs": {
"destination": {
"name": "destination"
},
"source": {
"name": "source"
},
"x": {
"name": "x"
},
"y": {
"name": "y"
},
"operation": {
"name": "operation"
}
}
},
"MaskPreview": {
"display_name": "MaskPreview",
"description": "Saves the input images to your ComfyUI output directory.",
"inputs": {
"mask": {
"name": "mask"
}
}
},
"MaskToImage": {
"display_name": "Convert Mask to Image",
"inputs": {
"mask": {
"name": "mask"
}
}
},
"MinimaxHailuoVideoNode": {
"display_name": "MiniMax Hailuo Video",
"description": "Generates videos from prompt, with optional start frame using the new MiniMax Hailuo-02 model.",
"inputs": {
"prompt_text": {
"name": "prompt_text",
"tooltip": "Text prompt to guide the video generation."
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"first_frame_image": {
"name": "first_frame_image",
"tooltip": "Optional image to use as the first frame to generate a video."
},
"prompt_optimizer": {
"name": "prompt_optimizer",
"tooltip": "Optimize prompt to improve generation quality when needed."
},
"duration": {
"name": "duration",
"tooltip": "The length of the output video in seconds."
},
"resolution": {
"name": "resolution",
"tooltip": "The dimensions of the video display. 1080p is 1920x1080, 768p is 1366x768."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"MinimaxImageToVideoNode": {
"display_name": "MiniMax Image to Video",
"description": "Generates videos synchronously based on an image and prompt, and optional parameters using MiniMax's API.",
"inputs": {
"image": {
"name": "image",
"tooltip": "Image to use as first frame of video generation"
},
"prompt_text": {
"name": "prompt_text",
"tooltip": "Text prompt to guide the video generation"
},
"model": {
"name": "model",
"tooltip": "Model to use for video generation"
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"MinimaxTextToVideoNode": {
"display_name": "MiniMax Text to Video",
"description": "Generates videos synchronously based on a prompt, and optional parameters using MiniMax's API.",
"inputs": {
"prompt_text": {
"name": "prompt_text",
"tooltip": "Text prompt to guide the video generation"
},
"model": {
"name": "model",
"tooltip": "Model to use for video generation"
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"ModelComputeDtype": {
"display_name": "ModelComputeDtype",
"inputs": {
"model": {
"name": "model"
},
"dtype": {
"name": "dtype"
}
}
},
"ModelMergeAdd": {
"display_name": "ModelMergeAdd",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
}
}
},
"ModelMergeAuraflow": {
"display_name": "ModelMergeAuraflow",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"init_x_linear_": {
"name": "init_x_linear."
},
"positional_encoding": {
"name": "positional_encoding"
},
"cond_seq_linear_": {
"name": "cond_seq_linear."
},
"register_tokens": {
"name": "register_tokens"
},
"t_embedder_": {
"name": "t_embedder."
},
"double_layers_0_": {
"name": "double_layers.0."
},
"double_layers_1_": {
"name": "double_layers.1."
},
"double_layers_2_": {
"name": "double_layers.2."
},
"double_layers_3_": {
"name": "double_layers.3."
},
"single_layers_0_": {
"name": "single_layers.0."
},
"single_layers_1_": {
"name": "single_layers.1."
},
"single_layers_2_": {
"name": "single_layers.2."
},
"single_layers_3_": {
"name": "single_layers.3."
},
"single_layers_4_": {
"name": "single_layers.4."
},
"single_layers_5_": {
"name": "single_layers.5."
},
"single_layers_6_": {
"name": "single_layers.6."
},
"single_layers_7_": {
"name": "single_layers.7."
},
"single_layers_8_": {
"name": "single_layers.8."
},
"single_layers_9_": {
"name": "single_layers.9."
},
"single_layers_10_": {
"name": "single_layers.10."
},
"single_layers_11_": {
"name": "single_layers.11."
},
"single_layers_12_": {
"name": "single_layers.12."
},
"single_layers_13_": {
"name": "single_layers.13."
},
"single_layers_14_": {
"name": "single_layers.14."
},
"single_layers_15_": {
"name": "single_layers.15."
},
"single_layers_16_": {
"name": "single_layers.16."
},
"single_layers_17_": {
"name": "single_layers.17."
},
"single_layers_18_": {
"name": "single_layers.18."
},
"single_layers_19_": {
"name": "single_layers.19."
},
"single_layers_20_": {
"name": "single_layers.20."
},
"single_layers_21_": {
"name": "single_layers.21."
},
"single_layers_22_": {
"name": "single_layers.22."
},
"single_layers_23_": {
"name": "single_layers.23."
},
"single_layers_24_": {
"name": "single_layers.24."
},
"single_layers_25_": {
"name": "single_layers.25."
},
"single_layers_26_": {
"name": "single_layers.26."
},
"single_layers_27_": {
"name": "single_layers.27."
},
"single_layers_28_": {
"name": "single_layers.28."
},
"single_layers_29_": {
"name": "single_layers.29."
},
"single_layers_30_": {
"name": "single_layers.30."
},
"single_layers_31_": {
"name": "single_layers.31."
},
"modF_": {
"name": "modF."
},
"final_linear_": {
"name": "final_linear."
}
}
},
"ModelMergeBlocks": {
"display_name": "ModelMergeBlocks",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"input": {
"name": "input"
},
"middle": {
"name": "middle"
},
"out": {
"name": "out"
}
}
},
"ModelMergeCosmos14B": {
"display_name": "ModelMergeCosmos14B",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"pos_embedder_": {
"name": "pos_embedder."
},
"extra_pos_embedder_": {
"name": "extra_pos_embedder."
},
"x_embedder_": {
"name": "x_embedder."
},
"t_embedder_": {
"name": "t_embedder."
},
"affline_norm_": {
"name": "affline_norm."
},
"blocks_block0_": {
"name": "blocks.block0."
},
"blocks_block1_": {
"name": "blocks.block1."
},
"blocks_block2_": {
"name": "blocks.block2."
},
"blocks_block3_": {
"name": "blocks.block3."
},
"blocks_block4_": {
"name": "blocks.block4."
},
"blocks_block5_": {
"name": "blocks.block5."
},
"blocks_block6_": {
"name": "blocks.block6."
},
"blocks_block7_": {
"name": "blocks.block7."
},
"blocks_block8_": {
"name": "blocks.block8."
},
"blocks_block9_": {
"name": "blocks.block9."
},
"blocks_block10_": {
"name": "blocks.block10."
},
"blocks_block11_": {
"name": "blocks.block11."
},
"blocks_block12_": {
"name": "blocks.block12."
},
"blocks_block13_": {
"name": "blocks.block13."
},
"blocks_block14_": {
"name": "blocks.block14."
},
"blocks_block15_": {
"name": "blocks.block15."
},
"blocks_block16_": {
"name": "blocks.block16."
},
"blocks_block17_": {
"name": "blocks.block17."
},
"blocks_block18_": {
"name": "blocks.block18."
},
"blocks_block19_": {
"name": "blocks.block19."
},
"blocks_block20_": {
"name": "blocks.block20."
},
"blocks_block21_": {
"name": "blocks.block21."
},
"blocks_block22_": {
"name": "blocks.block22."
},
"blocks_block23_": {
"name": "blocks.block23."
},
"blocks_block24_": {
"name": "blocks.block24."
},
"blocks_block25_": {
"name": "blocks.block25."
},
"blocks_block26_": {
"name": "blocks.block26."
},
"blocks_block27_": {
"name": "blocks.block27."
},
"blocks_block28_": {
"name": "blocks.block28."
},
"blocks_block29_": {
"name": "blocks.block29."
},
"blocks_block30_": {
"name": "blocks.block30."
},
"blocks_block31_": {
"name": "blocks.block31."
},
"blocks_block32_": {
"name": "blocks.block32."
},
"blocks_block33_": {
"name": "blocks.block33."
},
"blocks_block34_": {
"name": "blocks.block34."
},
"blocks_block35_": {
"name": "blocks.block35."
},
"final_layer_": {
"name": "final_layer."
}
}
},
"ModelMergeCosmos7B": {
"display_name": "ModelMergeCosmos7B",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"pos_embedder_": {
"name": "pos_embedder."
},
"extra_pos_embedder_": {
"name": "extra_pos_embedder."
},
"x_embedder_": {
"name": "x_embedder."
},
"t_embedder_": {
"name": "t_embedder."
},
"affline_norm_": {
"name": "affline_norm."
},
"blocks_block0_": {
"name": "blocks.block0."
},
"blocks_block1_": {
"name": "blocks.block1."
},
"blocks_block2_": {
"name": "blocks.block2."
},
"blocks_block3_": {
"name": "blocks.block3."
},
"blocks_block4_": {
"name": "blocks.block4."
},
"blocks_block5_": {
"name": "blocks.block5."
},
"blocks_block6_": {
"name": "blocks.block6."
},
"blocks_block7_": {
"name": "blocks.block7."
},
"blocks_block8_": {
"name": "blocks.block8."
},
"blocks_block9_": {
"name": "blocks.block9."
},
"blocks_block10_": {
"name": "blocks.block10."
},
"blocks_block11_": {
"name": "blocks.block11."
},
"blocks_block12_": {
"name": "blocks.block12."
},
"blocks_block13_": {
"name": "blocks.block13."
},
"blocks_block14_": {
"name": "blocks.block14."
},
"blocks_block15_": {
"name": "blocks.block15."
},
"blocks_block16_": {
"name": "blocks.block16."
},
"blocks_block17_": {
"name": "blocks.block17."
},
"blocks_block18_": {
"name": "blocks.block18."
},
"blocks_block19_": {
"name": "blocks.block19."
},
"blocks_block20_": {
"name": "blocks.block20."
},
"blocks_block21_": {
"name": "blocks.block21."
},
"blocks_block22_": {
"name": "blocks.block22."
},
"blocks_block23_": {
"name": "blocks.block23."
},
"blocks_block24_": {
"name": "blocks.block24."
},
"blocks_block25_": {
"name": "blocks.block25."
},
"blocks_block26_": {
"name": "blocks.block26."
},
"blocks_block27_": {
"name": "blocks.block27."
},
"final_layer_": {
"name": "final_layer."
}
}
},
"ModelMergeCosmosPredict2_14B": {
"display_name": "ModelMergeCosmosPredict2_14B",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"pos_embedder_": {
"name": "pos_embedder."
},
"x_embedder_": {
"name": "x_embedder."
},
"t_embedder_": {
"name": "t_embedder."
},
"t_embedding_norm_": {
"name": "t_embedding_norm."
},
"blocks_0_": {
"name": "blocks.0."
},
"blocks_1_": {
"name": "blocks.1."
},
"blocks_2_": {
"name": "blocks.2."
},
"blocks_3_": {
"name": "blocks.3."
},
"blocks_4_": {
"name": "blocks.4."
},
"blocks_5_": {
"name": "blocks.5."
},
"blocks_6_": {
"name": "blocks.6."
},
"blocks_7_": {
"name": "blocks.7."
},
"blocks_8_": {
"name": "blocks.8."
},
"blocks_9_": {
"name": "blocks.9."
},
"blocks_10_": {
"name": "blocks.10."
},
"blocks_11_": {
"name": "blocks.11."
},
"blocks_12_": {
"name": "blocks.12."
},
"blocks_13_": {
"name": "blocks.13."
},
"blocks_14_": {
"name": "blocks.14."
},
"blocks_15_": {
"name": "blocks.15."
},
"blocks_16_": {
"name": "blocks.16."
},
"blocks_17_": {
"name": "blocks.17."
},
"blocks_18_": {
"name": "blocks.18."
},
"blocks_19_": {
"name": "blocks.19."
},
"blocks_20_": {
"name": "blocks.20."
},
"blocks_21_": {
"name": "blocks.21."
},
"blocks_22_": {
"name": "blocks.22."
},
"blocks_23_": {
"name": "blocks.23."
},
"blocks_24_": {
"name": "blocks.24."
},
"blocks_25_": {
"name": "blocks.25."
},
"blocks_26_": {
"name": "blocks.26."
},
"blocks_27_": {
"name": "blocks.27."
},
"blocks_28_": {
"name": "blocks.28."
},
"blocks_29_": {
"name": "blocks.29."
},
"blocks_30_": {
"name": "blocks.30."
},
"blocks_31_": {
"name": "blocks.31."
},
"blocks_32_": {
"name": "blocks.32."
},
"blocks_33_": {
"name": "blocks.33."
},
"blocks_34_": {
"name": "blocks.34."
},
"blocks_35_": {
"name": "blocks.35."
},
"final_layer_": {
"name": "final_layer."
}
}
},
"ModelMergeCosmosPredict2_2B": {
"display_name": "ModelMergeCosmosPredict2_2B",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"pos_embedder_": {
"name": "pos_embedder."
},
"x_embedder_": {
"name": "x_embedder."
},
"t_embedder_": {
"name": "t_embedder."
},
"t_embedding_norm_": {
"name": "t_embedding_norm."
},
"blocks_0_": {
"name": "blocks.0."
},
"blocks_1_": {
"name": "blocks.1."
},
"blocks_2_": {
"name": "blocks.2."
},
"blocks_3_": {
"name": "blocks.3."
},
"blocks_4_": {
"name": "blocks.4."
},
"blocks_5_": {
"name": "blocks.5."
},
"blocks_6_": {
"name": "blocks.6."
},
"blocks_7_": {
"name": "blocks.7."
},
"blocks_8_": {
"name": "blocks.8."
},
"blocks_9_": {
"name": "blocks.9."
},
"blocks_10_": {
"name": "blocks.10."
},
"blocks_11_": {
"name": "blocks.11."
},
"blocks_12_": {
"name": "blocks.12."
},
"blocks_13_": {
"name": "blocks.13."
},
"blocks_14_": {
"name": "blocks.14."
},
"blocks_15_": {
"name": "blocks.15."
},
"blocks_16_": {
"name": "blocks.16."
},
"blocks_17_": {
"name": "blocks.17."
},
"blocks_18_": {
"name": "blocks.18."
},
"blocks_19_": {
"name": "blocks.19."
},
"blocks_20_": {
"name": "blocks.20."
},
"blocks_21_": {
"name": "blocks.21."
},
"blocks_22_": {
"name": "blocks.22."
},
"blocks_23_": {
"name": "blocks.23."
},
"blocks_24_": {
"name": "blocks.24."
},
"blocks_25_": {
"name": "blocks.25."
},
"blocks_26_": {
"name": "blocks.26."
},
"blocks_27_": {
"name": "blocks.27."
},
"final_layer_": {
"name": "final_layer."
}
}
},
"ModelMergeFlux1": {
"display_name": "ModelMergeFlux1",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"img_in_": {
"name": "img_in."
},
"time_in_": {
"name": "time_in."
},
"guidance_in": {
"name": "guidance_in"
},
"vector_in_": {
"name": "vector_in."
},
"txt_in_": {
"name": "txt_in."
},
"double_blocks_0_": {
"name": "double_blocks.0."
},
"double_blocks_1_": {
"name": "double_blocks.1."
},
"double_blocks_2_": {
"name": "double_blocks.2."
},
"double_blocks_3_": {
"name": "double_blocks.3."
},
"double_blocks_4_": {
"name": "double_blocks.4."
},
"double_blocks_5_": {
"name": "double_blocks.5."
},
"double_blocks_6_": {
"name": "double_blocks.6."
},
"double_blocks_7_": {
"name": "double_blocks.7."
},
"double_blocks_8_": {
"name": "double_blocks.8."
},
"double_blocks_9_": {
"name": "double_blocks.9."
},
"double_blocks_10_": {
"name": "double_blocks.10."
},
"double_blocks_11_": {
"name": "double_blocks.11."
},
"double_blocks_12_": {
"name": "double_blocks.12."
},
"double_blocks_13_": {
"name": "double_blocks.13."
},
"double_blocks_14_": {
"name": "double_blocks.14."
},
"double_blocks_15_": {
"name": "double_blocks.15."
},
"double_blocks_16_": {
"name": "double_blocks.16."
},
"double_blocks_17_": {
"name": "double_blocks.17."
},
"double_blocks_18_": {
"name": "double_blocks.18."
},
"single_blocks_0_": {
"name": "single_blocks.0."
},
"single_blocks_1_": {
"name": "single_blocks.1."
},
"single_blocks_2_": {
"name": "single_blocks.2."
},
"single_blocks_3_": {
"name": "single_blocks.3."
},
"single_blocks_4_": {
"name": "single_blocks.4."
},
"single_blocks_5_": {
"name": "single_blocks.5."
},
"single_blocks_6_": {
"name": "single_blocks.6."
},
"single_blocks_7_": {
"name": "single_blocks.7."
},
"single_blocks_8_": {
"name": "single_blocks.8."
},
"single_blocks_9_": {
"name": "single_blocks.9."
},
"single_blocks_10_": {
"name": "single_blocks.10."
},
"single_blocks_11_": {
"name": "single_blocks.11."
},
"single_blocks_12_": {
"name": "single_blocks.12."
},
"single_blocks_13_": {
"name": "single_blocks.13."
},
"single_blocks_14_": {
"name": "single_blocks.14."
},
"single_blocks_15_": {
"name": "single_blocks.15."
},
"single_blocks_16_": {
"name": "single_blocks.16."
},
"single_blocks_17_": {
"name": "single_blocks.17."
},
"single_blocks_18_": {
"name": "single_blocks.18."
},
"single_blocks_19_": {
"name": "single_blocks.19."
},
"single_blocks_20_": {
"name": "single_blocks.20."
},
"single_blocks_21_": {
"name": "single_blocks.21."
},
"single_blocks_22_": {
"name": "single_blocks.22."
},
"single_blocks_23_": {
"name": "single_blocks.23."
},
"single_blocks_24_": {
"name": "single_blocks.24."
},
"single_blocks_25_": {
"name": "single_blocks.25."
},
"single_blocks_26_": {
"name": "single_blocks.26."
},
"single_blocks_27_": {
"name": "single_blocks.27."
},
"single_blocks_28_": {
"name": "single_blocks.28."
},
"single_blocks_29_": {
"name": "single_blocks.29."
},
"single_blocks_30_": {
"name": "single_blocks.30."
},
"single_blocks_31_": {
"name": "single_blocks.31."
},
"single_blocks_32_": {
"name": "single_blocks.32."
},
"single_blocks_33_": {
"name": "single_blocks.33."
},
"single_blocks_34_": {
"name": "single_blocks.34."
},
"single_blocks_35_": {
"name": "single_blocks.35."
},
"single_blocks_36_": {
"name": "single_blocks.36."
},
"single_blocks_37_": {
"name": "single_blocks.37."
},
"final_layer_": {
"name": "final_layer."
}
}
},
"ModelMergeLTXV": {
"display_name": "ModelMergeLTXV",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"patchify_proj_": {
"name": "patchify_proj."
},
"adaln_single_": {
"name": "adaln_single."
},
"caption_projection_": {
"name": "caption_projection."
},
"transformer_blocks_0_": {
"name": "transformer_blocks.0."
},
"transformer_blocks_1_": {
"name": "transformer_blocks.1."
},
"transformer_blocks_2_": {
"name": "transformer_blocks.2."
},
"transformer_blocks_3_": {
"name": "transformer_blocks.3."
},
"transformer_blocks_4_": {
"name": "transformer_blocks.4."
},
"transformer_blocks_5_": {
"name": "transformer_blocks.5."
},
"transformer_blocks_6_": {
"name": "transformer_blocks.6."
},
"transformer_blocks_7_": {
"name": "transformer_blocks.7."
},
"transformer_blocks_8_": {
"name": "transformer_blocks.8."
},
"transformer_blocks_9_": {
"name": "transformer_blocks.9."
},
"transformer_blocks_10_": {
"name": "transformer_blocks.10."
},
"transformer_blocks_11_": {
"name": "transformer_blocks.11."
},
"transformer_blocks_12_": {
"name": "transformer_blocks.12."
},
"transformer_blocks_13_": {
"name": "transformer_blocks.13."
},
"transformer_blocks_14_": {
"name": "transformer_blocks.14."
},
"transformer_blocks_15_": {
"name": "transformer_blocks.15."
},
"transformer_blocks_16_": {
"name": "transformer_blocks.16."
},
"transformer_blocks_17_": {
"name": "transformer_blocks.17."
},
"transformer_blocks_18_": {
"name": "transformer_blocks.18."
},
"transformer_blocks_19_": {
"name": "transformer_blocks.19."
},
"transformer_blocks_20_": {
"name": "transformer_blocks.20."
},
"transformer_blocks_21_": {
"name": "transformer_blocks.21."
},
"transformer_blocks_22_": {
"name": "transformer_blocks.22."
},
"transformer_blocks_23_": {
"name": "transformer_blocks.23."
},
"transformer_blocks_24_": {
"name": "transformer_blocks.24."
},
"transformer_blocks_25_": {
"name": "transformer_blocks.25."
},
"transformer_blocks_26_": {
"name": "transformer_blocks.26."
},
"transformer_blocks_27_": {
"name": "transformer_blocks.27."
},
"scale_shift_table": {
"name": "scale_shift_table"
},
"proj_out_": {
"name": "proj_out."
}
}
},
"ModelMergeMochiPreview": {
"display_name": "ModelMergeMochiPreview",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"pos_frequencies_": {
"name": "pos_frequencies."
},
"t_embedder_": {
"name": "t_embedder."
},
"t5_y_embedder_": {
"name": "t5_y_embedder."
},
"t5_yproj_": {
"name": "t5_yproj."
},
"blocks_0_": {
"name": "blocks.0."
},
"blocks_1_": {
"name": "blocks.1."
},
"blocks_2_": {
"name": "blocks.2."
},
"blocks_3_": {
"name": "blocks.3."
},
"blocks_4_": {
"name": "blocks.4."
},
"blocks_5_": {
"name": "blocks.5."
},
"blocks_6_": {
"name": "blocks.6."
},
"blocks_7_": {
"name": "blocks.7."
},
"blocks_8_": {
"name": "blocks.8."
},
"blocks_9_": {
"name": "blocks.9."
},
"blocks_10_": {
"name": "blocks.10."
},
"blocks_11_": {
"name": "blocks.11."
},
"blocks_12_": {
"name": "blocks.12."
},
"blocks_13_": {
"name": "blocks.13."
},
"blocks_14_": {
"name": "blocks.14."
},
"blocks_15_": {
"name": "blocks.15."
},
"blocks_16_": {
"name": "blocks.16."
},
"blocks_17_": {
"name": "blocks.17."
},
"blocks_18_": {
"name": "blocks.18."
},
"blocks_19_": {
"name": "blocks.19."
},
"blocks_20_": {
"name": "blocks.20."
},
"blocks_21_": {
"name": "blocks.21."
},
"blocks_22_": {
"name": "blocks.22."
},
"blocks_23_": {
"name": "blocks.23."
},
"blocks_24_": {
"name": "blocks.24."
},
"blocks_25_": {
"name": "blocks.25."
},
"blocks_26_": {
"name": "blocks.26."
},
"blocks_27_": {
"name": "blocks.27."
},
"blocks_28_": {
"name": "blocks.28."
},
"blocks_29_": {
"name": "blocks.29."
},
"blocks_30_": {
"name": "blocks.30."
},
"blocks_31_": {
"name": "blocks.31."
},
"blocks_32_": {
"name": "blocks.32."
},
"blocks_33_": {
"name": "blocks.33."
},
"blocks_34_": {
"name": "blocks.34."
},
"blocks_35_": {
"name": "blocks.35."
},
"blocks_36_": {
"name": "blocks.36."
},
"blocks_37_": {
"name": "blocks.37."
},
"blocks_38_": {
"name": "blocks.38."
},
"blocks_39_": {
"name": "blocks.39."
},
"blocks_40_": {
"name": "blocks.40."
},
"blocks_41_": {
"name": "blocks.41."
},
"blocks_42_": {
"name": "blocks.42."
},
"blocks_43_": {
"name": "blocks.43."
},
"blocks_44_": {
"name": "blocks.44."
},
"blocks_45_": {
"name": "blocks.45."
},
"blocks_46_": {
"name": "blocks.46."
},
"blocks_47_": {
"name": "blocks.47."
},
"final_layer_": {
"name": "final_layer."
}
}
},
"ModelMergeQwenImage": {
"display_name": "ModelMergeQwenImage",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"pos_embeds_": {
"name": "pos_embeds."
},
"img_in_": {
"name": "img_in."
},
"txt_norm_": {
"name": "txt_norm."
},
"txt_in_": {
"name": "txt_in."
},
"time_text_embed_": {
"name": "time_text_embed."
},
"transformer_blocks_0_": {
"name": "transformer_blocks.0."
},
"transformer_blocks_1_": {
"name": "transformer_blocks.1."
},
"transformer_blocks_2_": {
"name": "transformer_blocks.2."
},
"transformer_blocks_3_": {
"name": "transformer_blocks.3."
},
"transformer_blocks_4_": {
"name": "transformer_blocks.4."
},
"transformer_blocks_5_": {
"name": "transformer_blocks.5."
},
"transformer_blocks_6_": {
"name": "transformer_blocks.6."
},
"transformer_blocks_7_": {
"name": "transformer_blocks.7."
},
"transformer_blocks_8_": {
"name": "transformer_blocks.8."
},
"transformer_blocks_9_": {
"name": "transformer_blocks.9."
},
"transformer_blocks_10_": {
"name": "transformer_blocks.10."
},
"transformer_blocks_11_": {
"name": "transformer_blocks.11."
},
"transformer_blocks_12_": {
"name": "transformer_blocks.12."
},
"transformer_blocks_13_": {
"name": "transformer_blocks.13."
},
"transformer_blocks_14_": {
"name": "transformer_blocks.14."
},
"transformer_blocks_15_": {
"name": "transformer_blocks.15."
},
"transformer_blocks_16_": {
"name": "transformer_blocks.16."
},
"transformer_blocks_17_": {
"name": "transformer_blocks.17."
},
"transformer_blocks_18_": {
"name": "transformer_blocks.18."
},
"transformer_blocks_19_": {
"name": "transformer_blocks.19."
},
"transformer_blocks_20_": {
"name": "transformer_blocks.20."
},
"transformer_blocks_21_": {
"name": "transformer_blocks.21."
},
"transformer_blocks_22_": {
"name": "transformer_blocks.22."
},
"transformer_blocks_23_": {
"name": "transformer_blocks.23."
},
"transformer_blocks_24_": {
"name": "transformer_blocks.24."
},
"transformer_blocks_25_": {
"name": "transformer_blocks.25."
},
"transformer_blocks_26_": {
"name": "transformer_blocks.26."
},
"transformer_blocks_27_": {
"name": "transformer_blocks.27."
},
"transformer_blocks_28_": {
"name": "transformer_blocks.28."
},
"transformer_blocks_29_": {
"name": "transformer_blocks.29."
},
"transformer_blocks_30_": {
"name": "transformer_blocks.30."
},
"transformer_blocks_31_": {
"name": "transformer_blocks.31."
},
"transformer_blocks_32_": {
"name": "transformer_blocks.32."
},
"transformer_blocks_33_": {
"name": "transformer_blocks.33."
},
"transformer_blocks_34_": {
"name": "transformer_blocks.34."
},
"transformer_blocks_35_": {
"name": "transformer_blocks.35."
},
"transformer_blocks_36_": {
"name": "transformer_blocks.36."
},
"transformer_blocks_37_": {
"name": "transformer_blocks.37."
},
"transformer_blocks_38_": {
"name": "transformer_blocks.38."
},
"transformer_blocks_39_": {
"name": "transformer_blocks.39."
},
"transformer_blocks_40_": {
"name": "transformer_blocks.40."
},
"transformer_blocks_41_": {
"name": "transformer_blocks.41."
},
"transformer_blocks_42_": {
"name": "transformer_blocks.42."
},
"transformer_blocks_43_": {
"name": "transformer_blocks.43."
},
"transformer_blocks_44_": {
"name": "transformer_blocks.44."
},
"transformer_blocks_45_": {
"name": "transformer_blocks.45."
},
"transformer_blocks_46_": {
"name": "transformer_blocks.46."
},
"transformer_blocks_47_": {
"name": "transformer_blocks.47."
},
"transformer_blocks_48_": {
"name": "transformer_blocks.48."
},
"transformer_blocks_49_": {
"name": "transformer_blocks.49."
},
"transformer_blocks_50_": {
"name": "transformer_blocks.50."
},
"transformer_blocks_51_": {
"name": "transformer_blocks.51."
},
"transformer_blocks_52_": {
"name": "transformer_blocks.52."
},
"transformer_blocks_53_": {
"name": "transformer_blocks.53."
},
"transformer_blocks_54_": {
"name": "transformer_blocks.54."
},
"transformer_blocks_55_": {
"name": "transformer_blocks.55."
},
"transformer_blocks_56_": {
"name": "transformer_blocks.56."
},
"transformer_blocks_57_": {
"name": "transformer_blocks.57."
},
"transformer_blocks_58_": {
"name": "transformer_blocks.58."
},
"transformer_blocks_59_": {
"name": "transformer_blocks.59."
},
"proj_out_": {
"name": "proj_out."
}
}
},
"ModelMergeSD1": {
"display_name": "ModelMergeSD1",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"time_embed_": {
"name": "time_embed."
},
"label_emb_": {
"name": "label_emb."
},
"input_blocks_0_": {
"name": "input_blocks.0."
},
"input_blocks_1_": {
"name": "input_blocks.1."
},
"input_blocks_2_": {
"name": "input_blocks.2."
},
"input_blocks_3_": {
"name": "input_blocks.3."
},
"input_blocks_4_": {
"name": "input_blocks.4."
},
"input_blocks_5_": {
"name": "input_blocks.5."
},
"input_blocks_6_": {
"name": "input_blocks.6."
},
"input_blocks_7_": {
"name": "input_blocks.7."
},
"input_blocks_8_": {
"name": "input_blocks.8."
},
"input_blocks_9_": {
"name": "input_blocks.9."
},
"input_blocks_10_": {
"name": "input_blocks.10."
},
"input_blocks_11_": {
"name": "input_blocks.11."
},
"middle_block_0_": {
"name": "middle_block.0."
},
"middle_block_1_": {
"name": "middle_block.1."
},
"middle_block_2_": {
"name": "middle_block.2."
},
"output_blocks_0_": {
"name": "output_blocks.0."
},
"output_blocks_1_": {
"name": "output_blocks.1."
},
"output_blocks_2_": {
"name": "output_blocks.2."
},
"output_blocks_3_": {
"name": "output_blocks.3."
},
"output_blocks_4_": {
"name": "output_blocks.4."
},
"output_blocks_5_": {
"name": "output_blocks.5."
},
"output_blocks_6_": {
"name": "output_blocks.6."
},
"output_blocks_7_": {
"name": "output_blocks.7."
},
"output_blocks_8_": {
"name": "output_blocks.8."
},
"output_blocks_9_": {
"name": "output_blocks.9."
},
"output_blocks_10_": {
"name": "output_blocks.10."
},
"output_blocks_11_": {
"name": "output_blocks.11."
},
"out_": {
"name": "out."
}
}
},
"ModelMergeSD2": {
"display_name": "ModelMergeSD2",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"time_embed_": {
"name": "time_embed."
},
"label_emb_": {
"name": "label_emb."
},
"input_blocks_0_": {
"name": "input_blocks.0."
},
"input_blocks_1_": {
"name": "input_blocks.1."
},
"input_blocks_2_": {
"name": "input_blocks.2."
},
"input_blocks_3_": {
"name": "input_blocks.3."
},
"input_blocks_4_": {
"name": "input_blocks.4."
},
"input_blocks_5_": {
"name": "input_blocks.5."
},
"input_blocks_6_": {
"name": "input_blocks.6."
},
"input_blocks_7_": {
"name": "input_blocks.7."
},
"input_blocks_8_": {
"name": "input_blocks.8."
},
"input_blocks_9_": {
"name": "input_blocks.9."
},
"input_blocks_10_": {
"name": "input_blocks.10."
},
"input_blocks_11_": {
"name": "input_blocks.11."
},
"middle_block_0_": {
"name": "middle_block.0."
},
"middle_block_1_": {
"name": "middle_block.1."
},
"middle_block_2_": {
"name": "middle_block.2."
},
"output_blocks_0_": {
"name": "output_blocks.0."
},
"output_blocks_1_": {
"name": "output_blocks.1."
},
"output_blocks_2_": {
"name": "output_blocks.2."
},
"output_blocks_3_": {
"name": "output_blocks.3."
},
"output_blocks_4_": {
"name": "output_blocks.4."
},
"output_blocks_5_": {
"name": "output_blocks.5."
},
"output_blocks_6_": {
"name": "output_blocks.6."
},
"output_blocks_7_": {
"name": "output_blocks.7."
},
"output_blocks_8_": {
"name": "output_blocks.8."
},
"output_blocks_9_": {
"name": "output_blocks.9."
},
"output_blocks_10_": {
"name": "output_blocks.10."
},
"output_blocks_11_": {
"name": "output_blocks.11."
},
"out_": {
"name": "out."
}
}
},
"ModelMergeSD3_2B": {
"display_name": "ModelMergeSD3_2B",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"pos_embed_": {
"name": "pos_embed."
},
"x_embedder_": {
"name": "x_embedder."
},
"context_embedder_": {
"name": "context_embedder."
},
"y_embedder_": {
"name": "y_embedder."
},
"t_embedder_": {
"name": "t_embedder."
},
"joint_blocks_0_": {
"name": "joint_blocks.0."
},
"joint_blocks_1_": {
"name": "joint_blocks.1."
},
"joint_blocks_2_": {
"name": "joint_blocks.2."
},
"joint_blocks_3_": {
"name": "joint_blocks.3."
},
"joint_blocks_4_": {
"name": "joint_blocks.4."
},
"joint_blocks_5_": {
"name": "joint_blocks.5."
},
"joint_blocks_6_": {
"name": "joint_blocks.6."
},
"joint_blocks_7_": {
"name": "joint_blocks.7."
},
"joint_blocks_8_": {
"name": "joint_blocks.8."
},
"joint_blocks_9_": {
"name": "joint_blocks.9."
},
"joint_blocks_10_": {
"name": "joint_blocks.10."
},
"joint_blocks_11_": {
"name": "joint_blocks.11."
},
"joint_blocks_12_": {
"name": "joint_blocks.12."
},
"joint_blocks_13_": {
"name": "joint_blocks.13."
},
"joint_blocks_14_": {
"name": "joint_blocks.14."
},
"joint_blocks_15_": {
"name": "joint_blocks.15."
},
"joint_blocks_16_": {
"name": "joint_blocks.16."
},
"joint_blocks_17_": {
"name": "joint_blocks.17."
},
"joint_blocks_18_": {
"name": "joint_blocks.18."
},
"joint_blocks_19_": {
"name": "joint_blocks.19."
},
"joint_blocks_20_": {
"name": "joint_blocks.20."
},
"joint_blocks_21_": {
"name": "joint_blocks.21."
},
"joint_blocks_22_": {
"name": "joint_blocks.22."
},
"joint_blocks_23_": {
"name": "joint_blocks.23."
},
"final_layer_": {
"name": "final_layer."
}
}
},
"ModelMergeSD35_Large": {
"display_name": "ModelMergeSD35_Large",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"pos_embed_": {
"name": "pos_embed."
},
"x_embedder_": {
"name": "x_embedder."
},
"context_embedder_": {
"name": "context_embedder."
},
"y_embedder_": {
"name": "y_embedder."
},
"t_embedder_": {
"name": "t_embedder."
},
"joint_blocks_0_": {
"name": "joint_blocks.0."
},
"joint_blocks_1_": {
"name": "joint_blocks.1."
},
"joint_blocks_2_": {
"name": "joint_blocks.2."
},
"joint_blocks_3_": {
"name": "joint_blocks.3."
},
"joint_blocks_4_": {
"name": "joint_blocks.4."
},
"joint_blocks_5_": {
"name": "joint_blocks.5."
},
"joint_blocks_6_": {
"name": "joint_blocks.6."
},
"joint_blocks_7_": {
"name": "joint_blocks.7."
},
"joint_blocks_8_": {
"name": "joint_blocks.8."
},
"joint_blocks_9_": {
"name": "joint_blocks.9."
},
"joint_blocks_10_": {
"name": "joint_blocks.10."
},
"joint_blocks_11_": {
"name": "joint_blocks.11."
},
"joint_blocks_12_": {
"name": "joint_blocks.12."
},
"joint_blocks_13_": {
"name": "joint_blocks.13."
},
"joint_blocks_14_": {
"name": "joint_blocks.14."
},
"joint_blocks_15_": {
"name": "joint_blocks.15."
},
"joint_blocks_16_": {
"name": "joint_blocks.16."
},
"joint_blocks_17_": {
"name": "joint_blocks.17."
},
"joint_blocks_18_": {
"name": "joint_blocks.18."
},
"joint_blocks_19_": {
"name": "joint_blocks.19."
},
"joint_blocks_20_": {
"name": "joint_blocks.20."
},
"joint_blocks_21_": {
"name": "joint_blocks.21."
},
"joint_blocks_22_": {
"name": "joint_blocks.22."
},
"joint_blocks_23_": {
"name": "joint_blocks.23."
},
"joint_blocks_24_": {
"name": "joint_blocks.24."
},
"joint_blocks_25_": {
"name": "joint_blocks.25."
},
"joint_blocks_26_": {
"name": "joint_blocks.26."
},
"joint_blocks_27_": {
"name": "joint_blocks.27."
},
"joint_blocks_28_": {
"name": "joint_blocks.28."
},
"joint_blocks_29_": {
"name": "joint_blocks.29."
},
"joint_blocks_30_": {
"name": "joint_blocks.30."
},
"joint_blocks_31_": {
"name": "joint_blocks.31."
},
"joint_blocks_32_": {
"name": "joint_blocks.32."
},
"joint_blocks_33_": {
"name": "joint_blocks.33."
},
"joint_blocks_34_": {
"name": "joint_blocks.34."
},
"joint_blocks_35_": {
"name": "joint_blocks.35."
},
"joint_blocks_36_": {
"name": "joint_blocks.36."
},
"joint_blocks_37_": {
"name": "joint_blocks.37."
},
"final_layer_": {
"name": "final_layer."
}
}
},
"ModelMergeSDXL": {
"display_name": "ModelMergeSDXL",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"time_embed_": {
"name": "time_embed."
},
"label_emb_": {
"name": "label_emb."
},
"input_blocks_0": {
"name": "input_blocks.0"
},
"input_blocks_1": {
"name": "input_blocks.1"
},
"input_blocks_2": {
"name": "input_blocks.2"
},
"input_blocks_3": {
"name": "input_blocks.3"
},
"input_blocks_4": {
"name": "input_blocks.4"
},
"input_blocks_5": {
"name": "input_blocks.5"
},
"input_blocks_6": {
"name": "input_blocks.6"
},
"input_blocks_7": {
"name": "input_blocks.7"
},
"input_blocks_8": {
"name": "input_blocks.8"
},
"middle_block_0": {
"name": "middle_block.0"
},
"middle_block_1": {
"name": "middle_block.1"
},
"middle_block_2": {
"name": "middle_block.2"
},
"output_blocks_0": {
"name": "output_blocks.0"
},
"output_blocks_1": {
"name": "output_blocks.1"
},
"output_blocks_2": {
"name": "output_blocks.2"
},
"output_blocks_3": {
"name": "output_blocks.3"
},
"output_blocks_4": {
"name": "output_blocks.4"
},
"output_blocks_5": {
"name": "output_blocks.5"
},
"output_blocks_6": {
"name": "output_blocks.6"
},
"output_blocks_7": {
"name": "output_blocks.7"
},
"output_blocks_8": {
"name": "output_blocks.8"
},
"out_": {
"name": "out."
}
}
},
"ModelMergeSimple": {
"display_name": "ModelMergeSimple",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"ratio": {
"name": "ratio"
}
}
},
"ModelMergeSubtract": {
"display_name": "ModelMergeSubtract",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"multiplier": {
"name": "multiplier"
}
}
},
"ModelMergeWAN2_1": {
"display_name": "ModelMergeWAN2_1",
"description": "1.3B model has 30 blocks, 14B model has 40 blocks. Image to video model has the extra img_emb.",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"patch_embedding_": {
"name": "patch_embedding."
},
"time_embedding_": {
"name": "time_embedding."
},
"time_projection_": {
"name": "time_projection."
},
"text_embedding_": {
"name": "text_embedding."
},
"img_emb_": {
"name": "img_emb."
},
"blocks_0_": {
"name": "blocks.0."
},
"blocks_1_": {
"name": "blocks.1."
},
"blocks_2_": {
"name": "blocks.2."
},
"blocks_3_": {
"name": "blocks.3."
},
"blocks_4_": {
"name": "blocks.4."
},
"blocks_5_": {
"name": "blocks.5."
},
"blocks_6_": {
"name": "blocks.6."
},
"blocks_7_": {
"name": "blocks.7."
},
"blocks_8_": {
"name": "blocks.8."
},
"blocks_9_": {
"name": "blocks.9."
},
"blocks_10_": {
"name": "blocks.10."
},
"blocks_11_": {
"name": "blocks.11."
},
"blocks_12_": {
"name": "blocks.12."
},
"blocks_13_": {
"name": "blocks.13."
},
"blocks_14_": {
"name": "blocks.14."
},
"blocks_15_": {
"name": "blocks.15."
},
"blocks_16_": {
"name": "blocks.16."
},
"blocks_17_": {
"name": "blocks.17."
},
"blocks_18_": {
"name": "blocks.18."
},
"blocks_19_": {
"name": "blocks.19."
},
"blocks_20_": {
"name": "blocks.20."
},
"blocks_21_": {
"name": "blocks.21."
},
"blocks_22_": {
"name": "blocks.22."
},
"blocks_23_": {
"name": "blocks.23."
},
"blocks_24_": {
"name": "blocks.24."
},
"blocks_25_": {
"name": "blocks.25."
},
"blocks_26_": {
"name": "blocks.26."
},
"blocks_27_": {
"name": "blocks.27."
},
"blocks_28_": {
"name": "blocks.28."
},
"blocks_29_": {
"name": "blocks.29."
},
"blocks_30_": {
"name": "blocks.30."
},
"blocks_31_": {
"name": "blocks.31."
},
"blocks_32_": {
"name": "blocks.32."
},
"blocks_33_": {
"name": "blocks.33."
},
"blocks_34_": {
"name": "blocks.34."
},
"blocks_35_": {
"name": "blocks.35."
},
"blocks_36_": {
"name": "blocks.36."
},
"blocks_37_": {
"name": "blocks.37."
},
"blocks_38_": {
"name": "blocks.38."
},
"blocks_39_": {
"name": "blocks.39."
},
"head_": {
"name": "head."
}
}
},
"ModelPatchLoader": {
"display_name": "ModelPatchLoader",
"inputs": {
"name": {
"name": "name"
}
}
},
"ModelSamplingAuraFlow": {
"display_name": "ModelSamplingAuraFlow",
"inputs": {
"model": {
"name": "model"
},
"shift": {
"name": "shift"
}
}
},
"ModelSamplingContinuousEDM": {
"display_name": "ModelSamplingContinuousEDM",
"inputs": {
"model": {
"name": "model"
},
"sampling": {
"name": "sampling"
},
"sigma_max": {
"name": "sigma_max"
},
"sigma_min": {
"name": "sigma_min"
}
}
},
"ModelSamplingContinuousV": {
"display_name": "ModelSamplingContinuousV",
"inputs": {
"model": {
"name": "model"
},
"sampling": {
"name": "sampling"
},
"sigma_max": {
"name": "sigma_max"
},
"sigma_min": {
"name": "sigma_min"
}
}
},
"ModelSamplingDiscrete": {
"display_name": "ModelSamplingDiscrete",
"inputs": {
"model": {
"name": "model"
},
"sampling": {
"name": "sampling"
},
"zsnr": {
"name": "zsnr"
}
}
},
"ModelSamplingFlux": {
"display_name": "ModelSamplingFlux",
"inputs": {
"model": {
"name": "model"
},
"max_shift": {
"name": "max_shift"
},
"base_shift": {
"name": "base_shift"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
}
}
},
"ModelSamplingLTXV": {
"display_name": "ModelSamplingLTXV",
"inputs": {
"model": {
"name": "model"
},
"max_shift": {
"name": "max_shift"
},
"base_shift": {
"name": "base_shift"
},
"latent": {
"name": "latent"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"ModelSamplingSD3": {
"display_name": "ModelSamplingSD3",
"inputs": {
"model": {
"name": "model"
},
"shift": {
"name": "shift"
}
}
},
"ModelSamplingStableCascade": {
"display_name": "ModelSamplingStableCascade",
"inputs": {
"model": {
"name": "model"
},
"shift": {
"name": "shift"
}
}
},
"ModelSave": {
"display_name": "ModelSave",
"inputs": {
"model": {
"name": "model"
},
"filename_prefix": {
"name": "filename_prefix"
}
}
},
"MoonvalleyImg2VideoNode": {
"display_name": "Moonvalley Marey Image to Video",
"description": "Moonvalley Marey Image to Video Node",
"inputs": {
"image": {
"name": "image",
"tooltip": "The reference image used to generate the video"
},
"prompt": {
"name": "prompt"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Negative prompt text"
},
"resolution": {
"name": "resolution",
"tooltip": "Resolution of the output video"
},
"prompt_adherence": {
"name": "prompt_adherence",
"tooltip": "Guidance scale for generation control"
},
"seed": {
"name": "seed",
"tooltip": "Random seed value"
},
"steps": {
"name": "steps",
"tooltip": "Number of denoising steps"
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"MoonvalleyTxt2VideoNode": {
"display_name": "Moonvalley Marey Text to Video",
"inputs": {
"prompt": {
"name": "prompt"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Negative prompt text"
},
"resolution": {
"name": "resolution",
"tooltip": "Resolution of the output video"
},
"prompt_adherence": {
"name": "prompt_adherence",
"tooltip": "Guidance scale for generation control"
},
"seed": {
"name": "seed",
"tooltip": "Random seed value"
},
"steps": {
"name": "steps",
"tooltip": "Inference steps"
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"MoonvalleyVideo2VideoNode": {
"display_name": "Moonvalley Marey Video to Video",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Describes the video to generate"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Negative prompt text"
},
"seed": {
"name": "seed",
"tooltip": "Random seed value"
},
"video": {
"name": "video",
"tooltip": "The reference video used to generate the output video. Must be at least 5 seconds long. Videos longer than 5s will be automatically trimmed. Only MP4 format supported."
},
"steps": {
"name": "steps",
"tooltip": "Number of inference steps"
},
"control_type": {
"name": "control_type"
},
"motion_intensity": {
"name": "motion_intensity",
"tooltip": "Only used if control_type is 'Motion Transfer'"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Morphology": {
"display_name": "ImageMorphology",
"inputs": {
"image": {
"name": "image"
},
"operation": {
"name": "operation"
},
"kernel_size": {
"name": "kernel_size"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"OpenAIChatConfig": {
"display_name": "OpenAI ChatGPT Advanced Options",
"description": "Allows specifying advanced configuration options for the OpenAI Chat Nodes.",
"inputs": {
"truncation": {
"name": "truncation",
        "tooltip": "The truncation strategy to use for the model response. auto: If the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items in the middle of the conversation. disabled: If a model response will exceed the context window size for a model, the request will fail with a 400 error"
},
"max_output_tokens": {
"name": "max_output_tokens",
"tooltip": "An upper bound for the number of tokens that can be generated for a response, including visible output tokens"
},
"instructions": {
"name": "instructions",
"tooltip": "Instructions for the model on how to generate the response"
}
}
},
"OpenAIChatNode": {
"display_name": "OpenAI ChatGPT",
"description": "Generate text responses from an OpenAI model.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Text inputs to the model, used to generate a response."
},
"persist_context": {
"name": "persist_context",
"tooltip": "Persist chat context between calls (multi-turn conversation)"
},
"model": {
"name": "model",
"tooltip": "The model used to generate the response"
},
"images": {
"name": "images",
"tooltip": "Optional image(s) to use as context for the model. To include multiple images, you can use the Batch Images node."
},
"files": {
"name": "files",
"tooltip": "Optional file(s) to use as context for the model. Accepts inputs from the OpenAI Chat Input Files node."
},
"advanced_options": {
"name": "advanced_options",
"tooltip": "Optional configuration for the model. Accepts inputs from the OpenAI Chat Advanced Options node."
}
}
},
"OpenAIDalle2": {
"display_name": "OpenAI DALL·E 2",
"description": "Generates images synchronously via OpenAI's DALL·E 2 endpoint.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Text prompt for DALL·E"
},
"seed": {
"name": "seed",
"tooltip": "not implemented yet in backend"
},
"size": {
"name": "size",
"tooltip": "Image size"
},
"n": {
"name": "n",
"tooltip": "How many images to generate"
},
"image": {
"name": "image",
"tooltip": "Optional reference image for image editing."
},
"mask": {
"name": "mask",
"tooltip": "Optional mask for inpainting (white areas will be replaced)"
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"OpenAIDalle3": {
"display_name": "OpenAI DALL·E 3",
"description": "Generates images synchronously via OpenAI's DALL·E 3 endpoint.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Text prompt for DALL·E"
},
"seed": {
"name": "seed",
"tooltip": "not implemented yet in backend"
},
"quality": {
"name": "quality",
"tooltip": "Image quality"
},
"style": {
"name": "style",
"tooltip": "Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images."
},
"size": {
"name": "size",
"tooltip": "Image size"
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"OpenAIGPTImage1": {
"display_name": "OpenAI GPT Image 1",
"description": "Generates images synchronously via OpenAI's GPT Image 1 endpoint.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Text prompt for GPT Image 1"
},
"seed": {
"name": "seed",
"tooltip": "not implemented yet in backend"
},
"quality": {
"name": "quality",
"tooltip": "Image quality, affects cost and generation time."
},
"background": {
"name": "background",
"tooltip": "Return image with or without background"
},
"size": {
"name": "size",
"tooltip": "Image size"
},
"n": {
"name": "n",
"tooltip": "How many images to generate"
},
"image": {
"name": "image",
"tooltip": "Optional reference image for image editing."
},
"mask": {
"name": "mask",
"tooltip": "Optional mask for inpainting (white areas will be replaced)"
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"OpenAIInputFiles": {
"display_name": "OpenAI ChatGPT Input Files",
"description": "Loads and prepares input files (text, pdf, etc.) to include as inputs for the OpenAI Chat Node. The files will be read by the OpenAI model when generating a response. 🛈 TIP: Can be chained together with other OpenAI Input File nodes.",
"inputs": {
"file": {
"name": "file",
"tooltip": "Input files to include as context for the model. Only accepts text (.txt) and PDF (.pdf) files for now."
},
"OPENAI_INPUT_FILES": {
"name": "OPENAI_INPUT_FILES",
"tooltip": "An optional additional file(s) to batch together with the file loaded from this node. Allows chaining of input files so that a single message can include multiple input files."
}
}
},
"OpenAIVideoSora2": {
"display_name": "OpenAI Sora - Video",
"description": "OpenAI video and audio generation.",
"inputs": {
"model": {
"name": "model"
},
"prompt": {
"name": "prompt",
"tooltip": "Guiding text; may be empty if an input image is present."
},
"size": {
"name": "size"
},
"duration": {
"name": "duration"
},
"image": {
"name": "image"
},
"seed": {
"name": "seed",
"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"OptimalStepsScheduler": {
"display_name": "OptimalStepsScheduler",
"inputs": {
"model_type": {
"name": "model_type"
},
"steps": {
"name": "steps"
},
"denoise": {
"name": "denoise"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"PairConditioningCombine": {
"display_name": "Cond Pair Combine",
"inputs": {
"positive_A": {
"name": "positive_A"
},
"negative_A": {
"name": "negative_A"
},
"positive_B": {
"name": "positive_B"
},
"negative_B": {
"name": "negative_B"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
}
}
},
"PairConditioningSetDefaultCombine": {
"display_name": "Cond Pair Set Default Combine",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"positive_DEFAULT": {
"name": "positive_DEFAULT"
},
"negative_DEFAULT": {
"name": "negative_DEFAULT"
},
"hooks": {
"name": "hooks"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
}
}
},
"PairConditioningSetProperties": {
"display_name": "Cond Pair Set Props",
"inputs": {
"positive_NEW": {
"name": "positive_NEW"
},
"negative_NEW": {
"name": "negative_NEW"
},
"strength": {
"name": "strength"
},
"set_cond_area": {
"name": "set_cond_area"
},
"mask": {
"name": "mask"
},
"hooks": {
"name": "hooks"
},
"timesteps": {
"name": "timesteps"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
}
}
},
"PairConditioningSetPropertiesAndCombine": {
"display_name": "Cond Pair Set Props Combine",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"positive_NEW": {
"name": "positive_NEW"
},
"negative_NEW": {
"name": "negative_NEW"
},
"strength": {
"name": "strength"
},
"set_cond_area": {
"name": "set_cond_area"
},
"mask": {
"name": "mask"
},
"hooks": {
"name": "hooks"
},
"timesteps": {
"name": "timesteps"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
}
}
},
"PatchModelAddDownscale": {
"display_name": "PatchModelAddDownscale (Kohya Deep Shrink)",
"inputs": {
"model": {
"name": "model"
},
"block_number": {
"name": "block_number"
},
"downscale_factor": {
"name": "downscale_factor"
},
"start_percent": {
"name": "start_percent"
},
"end_percent": {
"name": "end_percent"
},
"downscale_after_skip": {
"name": "downscale_after_skip"
},
"downscale_method": {
"name": "downscale_method"
},
"upscale_method": {
"name": "upscale_method"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"PerpNeg": {
"display_name": "Perp-Neg (DEPRECATED by PerpNegGuider)",
"inputs": {
"model": {
"name": "model"
},
"empty_conditioning": {
"name": "empty_conditioning"
},
"neg_scale": {
"name": "neg_scale"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"PerpNegGuider": {
"display_name": "PerpNegGuider",
"inputs": {
"model": {
"name": "model"
},
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"empty_conditioning": {
"name": "empty_conditioning"
},
"cfg": {
"name": "cfg"
},
"neg_scale": {
"name": "neg_scale"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"PerturbedAttentionGuidance": {
"display_name": "PerturbedAttentionGuidance",
"inputs": {
"model": {
"name": "model"
},
"scale": {
"name": "scale"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"PhotoMakerEncode": {
"display_name": "PhotoMakerEncode",
"inputs": {
"photomaker": {
"name": "photomaker"
},
"image": {
"name": "image"
},
"clip": {
"name": "clip"
},
"text": {
"name": "text"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"PhotoMakerLoader": {
"display_name": "PhotoMakerLoader",
"inputs": {
"photomaker_model_name": {
"name": "photomaker_model_name"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Pikadditions": {
"display_name": "Pikadditions (Video Object Insertion)",
"description": "Add any object or image into your video. Upload a video and specify what you'd like to add to create a seamlessly integrated result.",
"inputs": {
"video": {
"name": "video",
"tooltip": "The video to add an image to."
},
"image": {
"name": "image",
"tooltip": "The image to add to the video."
},
"prompt_text": {
"name": "prompt_text"
},
"negative_prompt": {
"name": "negative_prompt"
},
"seed": {
"name": "seed"
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Pikaffects": {
"display_name": "Pikaffects (Video Effects)",
"description": "Generate a video with a specific Pikaffect. Supported Pikaffects: Cake-ify, Crumble, Crush, Decapitate, Deflate, Dissolve, Explode, Eye-pop, Inflate, Levitate, Melt, Peel, Poke, Squish, Ta-da, Tear",
"inputs": {
"image": {
"name": "image",
"tooltip": "The reference image to apply the Pikaffect to."
},
"pikaffect": {
"name": "pikaffect"
},
"prompt_text": {
"name": "prompt_text"
},
"negative_prompt": {
"name": "negative_prompt"
},
"seed": {
"name": "seed"
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"PikaImageToVideoNode2_2": {
"display_name": "Pika Image to Video",
"description": "Sends an image and prompt to the Pika API v2.2 to generate a video.",
"inputs": {
"image": {
"name": "image",
"tooltip": "The image to convert to video"
},
"prompt_text": {
"name": "prompt_text"
},
"negative_prompt": {
"name": "negative_prompt"
},
"seed": {
"name": "seed"
},
"resolution": {
"name": "resolution"
},
"duration": {
"name": "duration"
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"PikaScenesV2_2": {
"display_name": "Pika Scenes (Video Image Composition)",
"description": "Combine your images to create a video with the objects in them. Upload multiple images as ingredients and generate a high-quality video that incorporates all of them.",
"inputs": {
"prompt_text": {
"name": "prompt_text"
},
"negative_prompt": {
"name": "negative_prompt"
},
"seed": {
"name": "seed"
},
"resolution": {
"name": "resolution"
},
"duration": {
"name": "duration"
},
"ingredients_mode": {
"name": "ingredients_mode"
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "Aspect ratio (width / height)"
},
"image_ingredient_1": {
"name": "image_ingredient_1",
"tooltip": "Image that will be used as ingredient to create a video."
},
"image_ingredient_2": {
"name": "image_ingredient_2",
"tooltip": "Image that will be used as ingredient to create a video."
},
"image_ingredient_3": {
"name": "image_ingredient_3",
"tooltip": "Image that will be used as ingredient to create a video."
},
"image_ingredient_4": {
"name": "image_ingredient_4",
"tooltip": "Image that will be used as ingredient to create a video."
},
"image_ingredient_5": {
"name": "image_ingredient_5",
"tooltip": "Image that will be used as ingredient to create a video."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"PikaStartEndFrameNode2_2": {
"display_name": "Pika Start and End Frame to Video",
"description": "Generate a video by combining your first and last frame. Upload two images to define the start and end points, and let the AI create a smooth transition between them.",
"inputs": {
"image_start": {
"name": "image_start",
"tooltip": "The first image to combine."
},
"image_end": {
"name": "image_end",
"tooltip": "The last image to combine."
},
"prompt_text": {
"name": "prompt_text"
},
"negative_prompt": {
"name": "negative_prompt"
},
"seed": {
"name": "seed"
},
"resolution": {
"name": "resolution"
},
"duration": {
"name": "duration"
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Pikaswaps": {
"display_name": "Pika Swaps (Video Object Replacement)",
"description": "Swap out any object or region of your video with a new image or object. Define areas to replace either with a mask or coordinates.",
"inputs": {
"video": {
"name": "video",
"tooltip": "The video to swap an object in."
},
"image": {
"name": "image",
"tooltip": "The image used to replace the masked object in the video."
},
"mask": {
"name": "mask",
"tooltip": "Use the mask to define areas in the video to replace."
},
"prompt_text": {
"name": "prompt_text"
},
"negative_prompt": {
"name": "negative_prompt"
},
"seed": {
"name": "seed"
},
"region_to_modify": {
"name": "region_to_modify",
"tooltip": "Plaintext description of the object / region to modify."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"PikaTextToVideoNode2_2": {
"display_name": "Pika Text to Video",
"description": "Sends a text prompt to the Pika API v2.2 to generate a video.",
"inputs": {
"prompt_text": {
"name": "prompt_text"
},
"negative_prompt": {
"name": "negative_prompt"
},
"seed": {
"name": "seed"
},
"resolution": {
"name": "resolution"
},
"duration": {
"name": "duration"
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "Aspect ratio (width / height)"
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"PixverseImageToVideoNode": {
"display_name": "PixVerse Image to Video",
"description": "Generates videos based on prompt and output_size.",
"inputs": {
"image": {
"name": "image"
},
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the video generation"
},
"quality": {
"name": "quality"
},
"duration_seconds": {
"name": "duration_seconds"
},
"motion_mode": {
"name": "motion_mode"
},
"seed": {
"name": "seed",
"tooltip": "Seed for video generation."
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "An optional text description of undesired elements on an image."
},
"pixverse_template": {
"name": "pixverse_template",
"tooltip": "An optional template to influence style of generation, created by the PixVerse Template node."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"PixverseTemplateNode": {
"display_name": "PixVerse Template",
"inputs": {
"template": {
"name": "template"
}
},
"outputs": {
"0": {
"name": "pixverse_template",
"tooltip": null
}
}
},
"PixverseTextToVideoNode": {
"display_name": "PixVerse Text to Video",
"description": "Generates videos based on prompt and output_size.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the video generation"
},
"aspect_ratio": {
"name": "aspect_ratio"
},
"quality": {
"name": "quality"
},
"duration_seconds": {
"name": "duration_seconds"
},
"motion_mode": {
"name": "motion_mode"
},
"seed": {
"name": "seed",
"tooltip": "Seed for video generation."
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "An optional text description of undesired elements on an image."
},
"pixverse_template": {
"name": "pixverse_template",
"tooltip": "An optional template to influence style of generation, created by the PixVerse Template node."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"PixverseTransitionVideoNode": {
"display_name": "PixVerse Transition Video",
"description": "Generates videos based on prompt and output_size.",
"inputs": {
"first_frame": {
"name": "first_frame"
},
"last_frame": {
"name": "last_frame"
},
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the video generation"
},
"quality": {
"name": "quality"
},
"duration_seconds": {
"name": "duration_seconds"
},
"motion_mode": {
"name": "motion_mode"
},
"seed": {
"name": "seed",
"tooltip": "Seed for video generation."
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "An optional text description of undesired elements on an image."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"PolyexponentialScheduler": {
"display_name": "PolyexponentialScheduler",
"inputs": {
"steps": {
"name": "steps"
},
"sigma_max": {
"name": "sigma_max"
},
"sigma_min": {
"name": "sigma_min"
},
"rho": {
"name": "rho"
}
}
},
"PorterDuffImageComposite": {
"display_name": "Porter-Duff Image Composite",
"inputs": {
"source": {
"name": "source"
},
"source_alpha": {
"name": "source_alpha"
},
"destination": {
"name": "destination"
},
"destination_alpha": {
"name": "destination_alpha"
},
"mode": {
"name": "mode"
}
},
"outputs": {
"0": {
"tooltip": null
},
"1": {
"tooltip": null
}
}
},
"Preview3D": {
"display_name": "Preview 3D",
"inputs": {
"model_file": {
"name": "model_file"
},
"camera_info": {
"name": "camera_info"
},
"image": {
"name": "image"
}
}
},
"Preview3DAnimation": {
"display_name": "Preview 3D - Animation",
"inputs": {
"model_file": {
"name": "model_file"
},
"camera_info": {
"name": "camera_info"
},
"image": {
"name": "image"
}
}
},
"PreviewAny": {
"display_name": "Preview Any",
"inputs": {
"source": {
"name": "source"
},
"preview": {}
}
},
"PreviewAudio": {
"display_name": "Preview Audio",
"inputs": {
"audio": {
"name": "audio"
},
"audioUI": {
"name": "audioUI"
}
}
},
"PreviewImage": {
"display_name": "Preview Image",
    "description": "Previews the input images without saving them to your ComfyUI output directory.",
"inputs": {
"images": {
"name": "images"
}
}
},
"PrimitiveBoolean": {
"display_name": "Boolean",
"inputs": {
"value": {
"name": "value"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"PrimitiveFloat": {
"display_name": "Float",
"inputs": {
"value": {
"name": "value"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"PrimitiveInt": {
"display_name": "Int",
"inputs": {
"value": {
"name": "value"
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"PrimitiveString": {
"display_name": "String",
"inputs": {
"value": {
"name": "value"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"PrimitiveStringMultiline": {
"display_name": "String (Multiline)",
"inputs": {
"value": {
"name": "value"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"QuadrupleCLIPLoader": {
"display_name": "QuadrupleCLIPLoader",
"description": "[Recipes]\n\nhidream: long clip-l, long clip-g, t5xxl, llama_8b_3.1_instruct",
"inputs": {
"clip_name1": {
"name": "clip_name1"
},
"clip_name2": {
"name": "clip_name2"
},
"clip_name3": {
"name": "clip_name3"
},
"clip_name4": {
"name": "clip_name4"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"QwenImageDiffsynthControlnet": {
"display_name": "QwenImageDiffsynthControlnet",
"inputs": {
"model": {
"name": "model"
},
"model_patch": {
"name": "model_patch"
},
"vae": {
"name": "vae"
},
"image": {
"name": "image"
},
"strength": {
"name": "strength"
},
"mask": {
"name": "mask"
}
}
},
"RandomNoise": {
"display_name": "RandomNoise",
"inputs": {
"noise_seed": {
"name": "noise_seed"
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"RebatchImages": {
"display_name": "Rebatch Images",
"inputs": {
"images": {
"name": "images"
},
"batch_size": {
"name": "batch_size"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"RebatchLatents": {
"display_name": "Rebatch Latents",
"inputs": {
"latents": {
"name": "latents"
},
"batch_size": {
"name": "batch_size"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"RecordAudio": {
"display_name": "Record Audio",
"inputs": {
"audio": {
"name": "audio"
}
}
},
"RecraftColorRGB": {
"display_name": "Recraft Color RGB",
"description": "Create Recraft Color by choosing specific RGB values.",
"inputs": {
"r": {
"name": "r",
"tooltip": "Red value of color."
},
"g": {
"name": "g",
"tooltip": "Green value of color."
},
"b": {
"name": "b",
"tooltip": "Blue value of color."
},
"recraft_color": {
"name": "recraft_color"
}
},
"outputs": {
"0": {
"name": "recraft_color"
}
}
},
"RecraftControls": {
"display_name": "Recraft Controls",
"description": "Create Recraft Controls for customizing Recraft generation.",
"inputs": {
"colors": {
"name": "colors"
},
"background_color": {
"name": "background_color"
}
},
"outputs": {
"0": {
"name": "recraft_controls"
}
}
},
"RecraftCreativeUpscaleNode": {
"display_name": "Recraft Creative Upscale Image",
"description": "Upscale image synchronously.\nEnhances a given raster image using creative upscale tool, boosting resolution with a focus on refining small details and faces.",
"inputs": {
"image": {
"name": "image"
}
}
},
"RecraftCrispUpscaleNode": {
"display_name": "Recraft Crisp Upscale Image",
"description": "Upscale image synchronously.\nEnhances a given raster image using crisp upscale tool, increasing image resolution, making the image sharper and cleaner.",
"inputs": {
"image": {
"name": "image"
}
}
},
"RecraftImageInpaintingNode": {
"display_name": "Recraft Image Inpainting",
"description": "Modify image based on prompt and mask.",
"inputs": {
"image": {
"name": "image"
},
"mask": {
"name": "mask"
},
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation."
},
"n": {
"name": "n",
"tooltip": "The number of images to generate."
},
"seed": {
"name": "seed",
"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed."
},
"recraft_style": {
"name": "recraft_style"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "An optional text description of undesired elements on an image."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"RecraftImageToImageNode": {
"display_name": "Recraft Image to Image",
"description": "Modify image based on prompt and strength.",
"inputs": {
"image": {
"name": "image"
},
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation."
},
"n": {
"name": "n",
"tooltip": "The number of images to generate."
},
"strength": {
"name": "strength",
"tooltip": "Defines the difference with the original image, should lie in [0, 1], where 0 means almost identical, and 1 means miserable similarity."
},
"seed": {
"name": "seed",
"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed."
},
"recraft_style": {
"name": "recraft_style"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "An optional text description of undesired elements on an image."
},
"recraft_controls": {
"name": "recraft_controls",
"tooltip": "Optional additional controls over the generation via the Recraft Controls node."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"RecraftRemoveBackgroundNode": {
"display_name": "Recraft Remove Background",
"description": "Remove background from image, and return processed image and mask.",
"inputs": {
"image": {
"name": "image"
}
}
},
"RecraftReplaceBackgroundNode": {
"display_name": "Recraft Replace Background",
"description": "Replace background on image, based on provided prompt.",
"inputs": {
"image": {
"name": "image"
},
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation."
},
"n": {
"name": "n",
"tooltip": "The number of images to generate."
},
"seed": {
"name": "seed",
"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed."
},
"recraft_style": {
"name": "recraft_style"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "An optional text description of undesired elements on an image."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"RecraftStyleV3DigitalIllustration": {
"display_name": "Recraft Style - Digital Illustration",
    "description": "Select digital_illustration style and optional substyle.",
"inputs": {
"substyle": {
"name": "substyle"
}
},
"outputs": {
"0": {
"name": "recraft_style"
}
}
},
"RecraftStyleV3InfiniteStyleLibrary": {
"display_name": "Recraft Style - Infinite Style Library",
"description": "Select style based on preexisting UUID from Recraft's Infinite Style Library.",
"inputs": {
"style_id": {
"name": "style_id",
"tooltip": "UUID of style from Infinite Style Library."
}
},
"outputs": {
"0": {
"name": "recraft_style"
}
}
},
"RecraftStyleV3LogoRaster": {
"display_name": "Recraft Style - Logo Raster",
    "description": "Select logo_raster style and optional substyle.",
"inputs": {
"substyle": {
"name": "substyle"
}
},
"outputs": {
"0": {
"name": "recraft_style"
}
}
},
"RecraftStyleV3RealisticImage": {
"display_name": "Recraft Style - Realistic Image",
"description": "Select realistic_image style and optional substyle.",
"inputs": {
"substyle": {
"name": "substyle"
}
},
"outputs": {
"0": {
"name": "recraft_style"
}
}
},
"RecraftTextToImageNode": {
"display_name": "Recraft Text to Image",
"description": "Generates images synchronously based on prompt and resolution.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation."
},
"size": {
"name": "size",
"tooltip": "The size of the generated image."
},
"n": {
"name": "n",
"tooltip": "The number of images to generate."
},
"seed": {
"name": "seed",
"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed."
},
"recraft_style": {
"name": "recraft_style"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "An optional text description of undesired elements on an image."
},
"recraft_controls": {
"name": "recraft_controls",
"tooltip": "Optional additional controls over the generation via the Recraft Controls node."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"RecraftTextToVectorNode": {
"display_name": "Recraft Text to Vector",
"description": "Generates SVG synchronously based on prompt and resolution.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation."
},
"substyle": {
"name": "substyle"
},
"size": {
"name": "size",
"tooltip": "The size of the generated image."
},
"n": {
"name": "n",
"tooltip": "The number of images to generate."
},
"seed": {
"name": "seed",
"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed."
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "An optional text description of undesired elements on an image."
},
"recraft_controls": {
"name": "recraft_controls",
"tooltip": "Optional additional controls over the generation via the Recraft Controls node."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"RecraftVectorizeImageNode": {
"display_name": "Recraft Vectorize Image",
"description": "Generates SVG synchronously from an input image.",
"inputs": {
"image": {
"name": "image"
}
}
},
"ReferenceLatent": {
"display_name": "ReferenceLatent",
"description": "This node sets the guiding latent for an edit model. If the model supports it you can chain multiple to set multiple reference images.",
"inputs": {
"conditioning": {
"name": "conditioning"
},
"latent": {
"name": "latent"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"RegexExtract": {
"display_name": "Regex Extract",
"inputs": {
"string": {
"name": "string"
},
"regex_pattern": {
"name": "regex_pattern"
},
"mode": {
"name": "mode"
},
"case_insensitive": {
"name": "case_insensitive"
},
"multiline": {
"name": "multiline"
},
"dotall": {
"name": "dotall"
},
"group_index": {
"name": "group_index"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"RegexMatch": {
"display_name": "Regex Match",
"inputs": {
"string": {
"name": "string"
},
"regex_pattern": {
"name": "regex_pattern"
},
"case_insensitive": {
"name": "case_insensitive"
},
"multiline": {
"name": "multiline"
},
"dotall": {
"name": "dotall"
}
},
"outputs": {
"0": {
"name": "matches",
"tooltip": null
}
}
},
"RegexReplace": {
"display_name": "Regex Replace",
"description": "Find and replace text using regex patterns.",
"inputs": {
"string": {
"name": "string"
},
"regex_pattern": {
"name": "regex_pattern"
},
"replace": {
"name": "replace"
},
"case_insensitive": {
"name": "case_insensitive"
},
"multiline": {
"name": "multiline"
},
"dotall": {
"name": "dotall",
"tooltip": "When enabled, the dot (.) character will match any character including newline characters. When disabled, dots won't match newlines."
},
"count": {
"name": "count",
"tooltip": "Maximum number of replacements to make. Set to 0 to replace all occurrences (default). Set to 1 to replace only the first match, 2 for the first two matches, etc."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"RenormCFG": {
"display_name": "RenormCFG",
"inputs": {
"model": {
"name": "model"
},
"cfg_trunc": {
"name": "cfg_trunc"
},
"renorm_cfg": {
"name": "renorm_cfg"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"RepeatImageBatch": {
"display_name": "RepeatImageBatch",
"inputs": {
"image": {
"name": "image"
},
"amount": {
"name": "amount"
}
}
},
"RepeatLatentBatch": {
"display_name": "Repeat Latent Batch",
"inputs": {
"samples": {
"name": "samples"
},
"amount": {
"name": "amount"
}
}
},
"RescaleCFG": {
"display_name": "RescaleCFG",
"inputs": {
"model": {
"name": "model"
},
"multiplier": {
"name": "multiplier"
}
}
},
"ResizeAndPadImage": {
"display_name": "ResizeAndPadImage",
"inputs": {
"image": {
"name": "image"
},
"target_width": {
"name": "target_width"
},
"target_height": {
"name": "target_height"
},
"padding_color": {
"name": "padding_color"
},
"interpolation": {
"name": "interpolation"
}
}
},
"Rodin3D_Detail": {
"display_name": "Rodin 3D Generate - Detail Generate",
"description": "Generate 3D Assets using Rodin API",
"inputs": {
"Images": {
"name": "Images"
},
"Seed": {
"name": "Seed"
},
"Material_Type": {
"name": "Material_Type"
},
"Polygon_count": {
"name": "Polygon_count"
}
},
"outputs": {
"0": {
"name": "3D Model Path",
"tooltip": null
}
}
},
"Rodin3D_Gen2": {
"display_name": "Rodin 3D Generate - Gen-2 Generate",
"description": "Generate 3D Assets using Rodin API",
"inputs": {
"Images": {
"name": "Images"
},
"TAPose": {
"name": "TAPose"
},
"Seed": {
"name": "Seed"
},
"Material_Type": {
"name": "Material_Type"
},
"Polygon_count": {
"name": "Polygon_count"
}
},
"outputs": {
"0": {
"name": "3D Model Path",
"tooltip": null
}
}
},
"Rodin3D_Regular": {
"display_name": "Rodin 3D Generate - Regular Generate",
"description": "Generate 3D Assets using Rodin API",
"inputs": {
"Images": {
"name": "Images"
},
"Seed": {
"name": "Seed"
},
"Material_Type": {
"name": "Material_Type"
},
"Polygon_count": {
"name": "Polygon_count"
}
},
"outputs": {
"0": {
"name": "3D Model Path",
"tooltip": null
}
}
},
"Rodin3D_Sketch": {
"display_name": "Rodin 3D Generate - Sketch Generate",
"description": "Generate 3D Assets using Rodin API",
"inputs": {
"Images": {
"name": "Images"
},
"Seed": {
"name": "Seed"
}
},
"outputs": {
"0": {
"name": "3D Model Path",
"tooltip": null
}
}
},
"Rodin3D_Smooth": {
"display_name": "Rodin 3D Generate - Smooth Generate",
"description": "Generate 3D Assets using Rodin API",
"inputs": {
"Images": {
"name": "Images"
},
"Seed": {
"name": "Seed"
},
"Material_Type": {
"name": "Material_Type"
},
"Polygon_count": {
"name": "Polygon_count"
}
},
"outputs": {
"0": {
"name": "3D Model Path",
"tooltip": null
}
}
},
"RunwayFirstLastFrameNode": {
"display_name": "Runway First-Last-Frame to Video",
"description": "Upload first and last keyframes, draft a prompt, and generate a video. More complex transitions, such as cases where the Last frame is completely different from the First frame, may benefit from the longer 10s duration. This would give the generation more time to smoothly transition between the two inputs. Before diving in, review these best practices to ensure that your input selections will set your generation up for success: https://help.runwayml.com/hc/en-us/articles/34170748696595-Creating-with-Keyframes-on-Gen-3.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Text prompt for the generation"
},
"start_frame": {
"name": "start_frame",
"tooltip": "Start frame to be used for the video"
},
"end_frame": {
"name": "end_frame",
"tooltip": "End frame to be used for the video. Supported for gen3a_turbo only."
},
"duration": {
"name": "duration"
},
"ratio": {
"name": "ratio"
},
"seed": {
"name": "seed",
"tooltip": "Random seed for generation"
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"RunwayImageToVideoNodeGen3a": {
"display_name": "Runway Image to Video (Gen3a Turbo)",
"description": "Generate a video from a single starting frame using Gen3a Turbo model. Before diving in, review these best practices to ensure that your input selections will set your generation up for success: https://help.runwayml.com/hc/en-us/articles/33927968552339-Creating-with-Act-One-on-Gen-3-Alpha-and-Turbo.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Text prompt for the generation"
},
"start_frame": {
"name": "start_frame",
"tooltip": "Start frame to be used for the video"
},
"duration": {
"name": "duration"
},
"ratio": {
"name": "ratio"
},
"seed": {
"name": "seed",
"tooltip": "Random seed for generation"
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"RunwayImageToVideoNodeGen4": {
"display_name": "Runway Image to Video (Gen4 Turbo)",
"description": "Generate a video from a single starting frame using Gen4 Turbo model. Before diving in, review these best practices to ensure that your input selections will set your generation up for success: https://help.runwayml.com/hc/en-us/articles/37327109429011-Creating-with-Gen-4-Video.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Text prompt for the generation"
},
"start_frame": {
"name": "start_frame",
"tooltip": "Start frame to be used for the video"
},
"duration": {
"name": "duration"
},
"ratio": {
"name": "ratio"
},
"seed": {
"name": "seed",
"tooltip": "Random seed for generation"
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"RunwayTextToImageNode": {
"display_name": "Runway Text to Image",
    "description": "Generate an image from a text prompt using Runway's Gen 4 model. You can also include a reference image to guide the generation.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Text prompt for the generation"
},
"ratio": {
"name": "ratio"
},
"reference_image": {
"name": "reference_image",
"tooltip": "Optional reference image to guide the generation"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"SamplerCustom": {
"display_name": "SamplerCustom",
"inputs": {
"model": {
"name": "model"
},
"add_noise": {
"name": "add_noise"
},
"noise_seed": {
"name": "noise_seed"
},
"cfg": {
"name": "cfg"
},
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"sampler": {
"name": "sampler"
},
"sigmas": {
"name": "sigmas"
},
"latent_image": {
"name": "latent_image"
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"name": "output"
},
"1": {
"name": "denoised_output"
}
}
},
"SamplerCustomAdvanced": {
"display_name": "SamplerCustomAdvanced",
"inputs": {
"noise": {
"name": "noise"
},
"guider": {
"name": "guider"
},
"sampler": {
"name": "sampler"
},
"sigmas": {
"name": "sigmas"
},
"latent_image": {
"name": "latent_image"
}
},
"outputs": {
"0": {
"name": "output"
},
"1": {
"name": "denoised_output"
}
}
},
"SamplerDPMAdaptative": {
"display_name": "SamplerDPMAdaptative",
"inputs": {
"order": {
"name": "order"
},
"rtol": {
"name": "rtol"
},
"atol": {
"name": "atol"
},
"h_init": {
"name": "h_init"
},
"pcoeff": {
"name": "pcoeff"
},
"icoeff": {
"name": "icoeff"
},
"dcoeff": {
"name": "dcoeff"
},
"accept_safety": {
"name": "accept_safety"
},
"eta": {
"name": "eta"
},
"s_noise": {
"name": "s_noise"
}
}
},
"SamplerDPMPP_2M_SDE": {
"display_name": "SamplerDPMPP_2M_SDE",
"inputs": {
"solver_type": {
"name": "solver_type"
},
"eta": {
"name": "eta"
},
"s_noise": {
"name": "s_noise"
},
"noise_device": {
"name": "noise_device"
}
}
},
"SamplerDPMPP_2S_Ancestral": {
"display_name": "SamplerDPMPP_2S_Ancestral",
"inputs": {
"eta": {
"name": "eta"
},
"s_noise": {
"name": "s_noise"
}
}
},
"SamplerDPMPP_3M_SDE": {
"display_name": "SamplerDPMPP_3M_SDE",
"inputs": {
"eta": {
"name": "eta"
},
"s_noise": {
"name": "s_noise"
},
"noise_device": {
"name": "noise_device"
}
}
},
"SamplerDPMPP_SDE": {
"display_name": "SamplerDPMPP_SDE",
"inputs": {
"eta": {
"name": "eta"
},
"s_noise": {
"name": "s_noise"
},
"r": {
"name": "r"
},
"noise_device": {
"name": "noise_device"
}
}
},
"SamplerER_SDE": {
"display_name": "SamplerER_SDE",
"inputs": {
"solver_type": {
"name": "solver_type"
},
"max_stage": {
"name": "max_stage"
},
"eta": {
"name": "eta",
"tooltip": "Stochastic strength of reverse-time SDE.\nWhen eta=0, it reduces to deterministic ODE. This setting doesn't apply to ER-SDE solver type."
},
"s_noise": {
"name": "s_noise"
}
}
},
"SamplerEulerAncestral": {
"display_name": "SamplerEulerAncestral",
"inputs": {
"eta": {
"name": "eta"
},
"s_noise": {
"name": "s_noise"
}
}
},
"SamplerEulerAncestralCFGPP": {
"display_name": "SamplerEulerAncestralCFG++",
"inputs": {
"eta": {
"name": "eta"
},
"s_noise": {
"name": "s_noise"
}
}
},
"SamplerEulerCFGpp": {
"display_name": "SamplerEulerCFG++",
"inputs": {
"version": {
"name": "version"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"SamplerLCMUpscale": {
"display_name": "SamplerLCMUpscale",
"inputs": {
"scale_ratio": {
"name": "scale_ratio"
},
"scale_steps": {
"name": "scale_steps"
},
"upscale_method": {
"name": "upscale_method"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"SamplerLMS": {
"display_name": "SamplerLMS",
"inputs": {
"order": {
"name": "order"
}
}
},
"SamplerSASolver": {
"display_name": "SamplerSASolver",
"inputs": {
"model": {
"name": "model"
},
"eta": {
"name": "eta"
},
"sde_start_percent": {
"name": "sde_start_percent"
},
"sde_end_percent": {
"name": "sde_end_percent"
},
"s_noise": {
"name": "s_noise"
},
"predictor_order": {
"name": "predictor_order"
},
"corrector_order": {
"name": "corrector_order"
},
"use_pece": {
"name": "use_pece"
},
"simple_order_2": {
"name": "simple_order_2"
}
}
},
"SamplingPercentToSigma": {
"display_name": "SamplingPercentToSigma",
"inputs": {
"model": {
"name": "model"
},
"sampling_percent": {
"name": "sampling_percent"
},
"return_actual_sigma": {
"name": "return_actual_sigma",
"tooltip": "Return the actual sigma value instead of the value used for interval checks.\nThis only affects results at 0.0 and 1.0."
}
},
"outputs": {
"0": {
"name": "sigma_value"
}
}
},
"SaveAnimatedPNG": {
"display_name": "SaveAnimatedPNG",
"inputs": {
"images": {
"name": "images"
},
"filename_prefix": {
"name": "filename_prefix"
},
"fps": {
"name": "fps"
},
"compress_level": {
"name": "compress_level"
}
}
},
"SaveAnimatedWEBP": {
"display_name": "SaveAnimatedWEBP",
"inputs": {
"images": {
"name": "images"
},
"filename_prefix": {
"name": "filename_prefix"
},
"fps": {
"name": "fps"
},
"lossless": {
"name": "lossless"
},
"quality": {
"name": "quality"
},
"method": {
"name": "method"
}
}
},
"SaveAudio": {
"display_name": "Save Audio (FLAC)",
"inputs": {
"audio": {
"name": "audio"
},
"filename_prefix": {
"name": "filename_prefix"
},
"audioUI": {
"name": "audioUI"
}
}
},
"SaveAudioMP3": {
"display_name": "Save Audio (MP3)",
"inputs": {
"audio": {
"name": "audio"
},
"filename_prefix": {
"name": "filename_prefix"
},
"quality": {
"name": "quality"
},
"audioUI": {
"name": "audioUI"
}
}
},
"SaveAudioOpus": {
"display_name": "Save Audio (Opus)",
"inputs": {
"audio": {
"name": "audio"
},
"filename_prefix": {
"name": "filename_prefix"
},
"quality": {
"name": "quality"
},
"audioUI": {
"name": "audioUI"
}
}
},
"SaveGLB": {
"display_name": "SaveGLB",
"inputs": {
"mesh": {
"name": "mesh"
},
"filename_prefix": {
"name": "filename_prefix"
},
"image": {
"name": "image"
}
}
},
"SaveImage": {
"display_name": "Save Image",
"description": "Saves the input images to your ComfyUI output directory.",
"inputs": {
"images": {
"name": "images",
"tooltip": "The images to save."
},
"filename_prefix": {
"name": "filename_prefix",
"tooltip": "The prefix for the file to save. This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes."
}
}
},
"SaveImageWebsocket": {
"display_name": "SaveImageWebsocket",
"inputs": {
"images": {
"name": "images"
}
}
},
"SaveLatent": {
"display_name": "SaveLatent",
"inputs": {
"samples": {
"name": "samples"
},
"filename_prefix": {
"name": "filename_prefix"
}
}
},
"SaveLoRANode": {
"display_name": "Save LoRA Weights",
"inputs": {
"lora": {
"name": "lora",
"tooltip": "The LoRA model to save. Do not use the model with LoRA layers."
},
"prefix": {
"name": "prefix",
"tooltip": "The prefix to use for the saved LoRA file."
},
"steps": {
"name": "steps",
        "tooltip": "Optional: The number of steps the LoRA has been trained for, used to name the saved file."
}
}
},
"SaveSVGNode": {
"display_name": "SaveSVGNode",
"description": "Save SVG files on disk.",
"inputs": {
"svg": {
"name": "svg"
},
"filename_prefix": {
"name": "filename_prefix",
"tooltip": "The prefix for the file to save. This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes."
}
}
},
"SaveVideo": {
"display_name": "Save Video",
    "description": "Saves the input video to your ComfyUI output directory.",
"inputs": {
"video": {
"name": "video",
"tooltip": "The video to save."
},
"filename_prefix": {
"name": "filename_prefix",
"tooltip": "The prefix for the file to save. This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes."
},
"format": {
"name": "format",
"tooltip": "The format to save the video as."
},
"codec": {
"name": "codec",
"tooltip": "The codec to use for the video."
}
}
},
"SaveWEBM": {
"display_name": "SaveWEBM",
"inputs": {
"images": {
"name": "images"
},
"filename_prefix": {
"name": "filename_prefix"
},
"codec": {
"name": "codec"
},
"fps": {
"name": "fps"
},
"crf": {
"name": "crf",
        "tooltip": "Higher crf means lower quality with a smaller file size, lower crf means higher quality with a larger file size."
}
}
},
"SD_4XUpscale_Conditioning": {
"display_name": "SD_4XUpscale_Conditioning",
"inputs": {
"images": {
"name": "images"
},
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"scale_ratio": {
"name": "scale_ratio"
},
"noise_augmentation": {
"name": "noise_augmentation"
}
},
"outputs": {
"0": {
"name": "positive",
"tooltip": null
},
"1": {
"name": "negative",
"tooltip": null
},
"2": {
"name": "latent",
"tooltip": null
}
}
},
"SDTurboScheduler": {
"display_name": "SDTurboScheduler",
"inputs": {
"model": {
"name": "model"
},
"steps": {
"name": "steps"
},
"denoise": {
"name": "denoise"
}
}
},
"SelfAttentionGuidance": {
"display_name": "Self-Attention Guidance",
"inputs": {
"model": {
"name": "model"
},
"scale": {
"name": "scale"
},
"blur_sigma": {
"name": "blur_sigma"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"SetClipHooks": {
"display_name": "Set CLIP Hooks",
"inputs": {
"clip": {
"name": "clip"
},
"apply_to_conds": {
"name": "apply_to_conds"
},
"schedule_clip": {
"name": "schedule_clip"
},
"hooks": {
"name": "hooks"
}
}
},
"SetFirstSigma": {
"display_name": "SetFirstSigma",
"inputs": {
"sigmas": {
"name": "sigmas"
},
"sigma": {
"name": "sigma"
}
}
},
"SetHookKeyframes": {
"display_name": "Set Hook Keyframes",
"inputs": {
"hooks": {
"name": "hooks"
},
"hook_kf": {
"name": "hook_kf"
}
}
},
"SetLatentNoiseMask": {
"display_name": "Set Latent Noise Mask",
"inputs": {
"samples": {
"name": "samples"
},
"mask": {
"name": "mask"
}
}
},
"SetUnionControlNetType": {
"display_name": "SetUnionControlNetType",
"inputs": {
"control_net": {
"name": "control_net"
},
"type": {
"name": "type"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"SkipLayerGuidanceDiT": {
"display_name": "SkipLayerGuidanceDiT",
"description": "Generic version of SkipLayerGuidance node that can be used on every DiT model.",
"inputs": {
"model": {
"name": "model"
},
"double_layers": {
"name": "double_layers"
},
"single_layers": {
"name": "single_layers"
},
"scale": {
"name": "scale"
},
"start_percent": {
"name": "start_percent"
},
"end_percent": {
"name": "end_percent"
},
"rescaling_scale": {
"name": "rescaling_scale"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"SkipLayerGuidanceDiTSimple": {
"display_name": "SkipLayerGuidanceDiTSimple",
"description": "Simple version of the SkipLayerGuidanceDiT node that only modifies the uncond pass.",
"inputs": {
"model": {
"name": "model"
},
"double_layers": {
"name": "double_layers"
},
"single_layers": {
"name": "single_layers"
},
"start_percent": {
"name": "start_percent"
},
"end_percent": {
"name": "end_percent"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"SkipLayerGuidanceSD3": {
"display_name": "SkipLayerGuidanceSD3",
"description": "Generic version of SkipLayerGuidance node that can be used on every DiT model.",
"inputs": {
"model": {
"name": "model"
},
"layers": {
"name": "layers"
},
"scale": {
"name": "scale"
},
"start_percent": {
"name": "start_percent"
},
"end_percent": {
"name": "end_percent"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"SolidMask": {
"display_name": "SolidMask",
"inputs": {
"value": {
"name": "value"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
}
}
},
"SplitAudioChannels": {
"display_name": "Split Audio Channels",
"description": "Separates the audio into left and right channels.",
"inputs": {
"audio": {
"name": "audio"
}
},
"outputs": {
"0": {
"name": "left"
},
"1": {
"name": "right"
}
}
},
"SplitImageWithAlpha": {
"display_name": "Split Image with Alpha",
"inputs": {
"image": {
"name": "image"
}
},
"outputs": {
"0": {
"tooltip": null
},
"1": {
"tooltip": null
}
}
},
"SplitSigmas": {
"display_name": "SplitSigmas",
"inputs": {
"sigmas": {
"name": "sigmas"
},
"step": {
"name": "step"
}
},
"outputs": {
"0": {
"name": "high_sigmas"
},
"1": {
"name": "low_sigmas"
}
}
},
"SplitSigmasDenoise": {
"display_name": "SplitSigmasDenoise",
"inputs": {
"sigmas": {
"name": "sigmas"
},
"denoise": {
"name": "denoise"
}
},
"outputs": {
"0": {
"name": "high_sigmas"
},
"1": {
"name": "low_sigmas"
}
}
},
"StabilityAudioInpaint": {
"display_name": "Stability AI Audio Inpaint",
"description": "Transforms part of existing audio sample using text instructions.",
"inputs": {
"model": {
"name": "model"
},
"prompt": {
"name": "prompt"
},
"audio": {
"name": "audio",
"tooltip": "Audio must be between 6 and 190 seconds long."
},
"duration": {
"name": "duration",
"tooltip": "Controls the duration in seconds of the generated audio."
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for generation."
},
"steps": {
"name": "steps",
"tooltip": "Controls the number of sampling steps."
},
"mask_start": {
"name": "mask_start"
},
"mask_end": {
"name": "mask_end"
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"StabilityAudioToAudio": {
"display_name": "Stability AI Audio To Audio",
"description": "Transforms existing audio samples into new high-quality compositions using text instructions.",
"inputs": {
"model": {
"name": "model"
},
"prompt": {
"name": "prompt"
},
"audio": {
"name": "audio",
"tooltip": "Audio must be between 6 and 190 seconds long."
},
"duration": {
"name": "duration",
"tooltip": "Controls the duration in seconds of the generated audio."
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for generation."
},
"steps": {
"name": "steps",
"tooltip": "Controls the number of sampling steps."
},
"strength": {
"name": "strength",
"tooltip": "Parameter controls how much influence the audio parameter has on the generated audio."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"StabilityStableImageSD_3_5Node": {
"display_name": "Stability AI Stable Diffusion 3.5 Image",
"description": "Generates images synchronously based on prompt and resolution.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "What you wish to see in the output image. A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results."
},
"model": {
"name": "model"
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "Aspect ratio of generated image."
},
"style_preset": {
"name": "style_preset",
"tooltip": "Optional desired style of generated image."
},
"cfg_scale": {
"name": "cfg_scale",
"tooltip": "How strictly the diffusion process adheres to the prompt text (higher values keep your image closer to your prompt)"
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"image": {
"name": "image"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Keywords of what you do not wish to see in the output image. This is an advanced feature."
},
"image_denoise": {
"name": "image_denoise",
"tooltip": "Denoise of input image; 0.0 yields image identical to input, 1.0 is as if no image was provided at all."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"StabilityStableImageUltraNode": {
"display_name": "Stability AI Stable Image Ultra",
"description": "Generates images synchronously based on prompt and resolution.",
"inputs": {
"prompt": {
"name": "prompt",
        "tooltip": "What you wish to see in the output image. A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results. To control the weight of a given word use the format `(word:weight)`, where `word` is the word you'd like to control the weight of and `weight` is a value between 0 and 1. For example: `The sky was a crisp (blue:0.3) and (green:0.8)` would convey a sky that was blue and green, but more green than blue."
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "Aspect ratio of generated image."
},
"style_preset": {
"name": "style_preset",
"tooltip": "Optional desired style of generated image."
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"image": {
"name": "image"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "A blurb of text describing what you do not wish to see in the output image. This is an advanced feature."
},
"image_denoise": {
"name": "image_denoise",
"tooltip": "Denoise of input image; 0.0 yields image identical to input, 1.0 is as if no image was provided at all."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"StabilityTextToAudio": {
"display_name": "Stability AI Text To Audio",
"description": "Generates high-quality music and sound effects from text descriptions.",
"inputs": {
"model": {
"name": "model"
},
"prompt": {
"name": "prompt"
},
"duration": {
"name": "duration",
"tooltip": "Controls the duration in seconds of the generated audio."
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for generation."
},
"steps": {
"name": "steps",
"tooltip": "Controls the number of sampling steps."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"StabilityUpscaleConservativeNode": {
"display_name": "Stability AI Upscale Conservative",
"description": "Upscale image with minimal alterations to 4K resolution.",
"inputs": {
"image": {
"name": "image"
},
"prompt": {
"name": "prompt",
"tooltip": "What you wish to see in the output image. A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results."
},
"creativity": {
"name": "creativity",
"tooltip": "Controls the likelihood of creating additional details not heavily conditioned by the init image."
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Keywords of what you do not wish to see in the output image. This is an advanced feature."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"StabilityUpscaleCreativeNode": {
"display_name": "Stability AI Upscale Creative",
    "description": "Upscale image with creative enhancements to 4K resolution.",
"inputs": {
"image": {
"name": "image"
},
"prompt": {
"name": "prompt",
"tooltip": "What you wish to see in the output image. A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results."
},
"creativity": {
"name": "creativity",
"tooltip": "Controls the likelihood of creating additional details not heavily conditioned by the init image."
},
"style_preset": {
"name": "style_preset",
"tooltip": "Optional desired style of generated image."
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Keywords of what you do not wish to see in the output image. This is an advanced feature."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"StabilityUpscaleFastNode": {
"display_name": "Stability AI Upscale Fast",
"description": "Quickly upscales an image via Stability API call to 4x its original size; intended for upscaling low-quality/compressed images.",
"inputs": {
"image": {
"name": "image"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"StableCascade_EmptyLatentImage": {
"display_name": "StableCascade_EmptyLatentImage",
"inputs": {
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"compression": {
"name": "compression"
},
"batch_size": {
"name": "batch_size"
}
},
"outputs": {
"0": {
"name": "stage_c",
"tooltip": null
},
"1": {
"name": "stage_b",
"tooltip": null
}
}
},
"StableCascade_StageB_Conditioning": {
"display_name": "StableCascade_StageB_Conditioning",
"inputs": {
"conditioning": {
"name": "conditioning"
},
"stage_c": {
"name": "stage_c"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"StableCascade_StageC_VAEEncode": {
"display_name": "StableCascade_StageC_VAEEncode",
"inputs": {
"image": {
"name": "image"
},
"vae": {
"name": "vae"
},
"compression": {
"name": "compression"
}
},
"outputs": {
"0": {
"name": "stage_c",
"tooltip": null
},
"1": {
"name": "stage_b",
"tooltip": null
}
}
},
"StableCascade_SuperResolutionControlnet": {
"display_name": "StableCascade_SuperResolutionControlnet",
"inputs": {
"image": {
"name": "image"
},
"vae": {
"name": "vae"
}
},
"outputs": {
"0": {
"name": "controlnet_input",
"tooltip": null
},
"1": {
"name": "stage_c",
"tooltip": null
},
"2": {
"name": "stage_b",
"tooltip": null
}
}
},
"StableZero123_Conditioning": {
"display_name": "StableZero123_Conditioning",
"inputs": {
"clip_vision": {
"name": "clip_vision"
},
"init_image": {
"name": "init_image"
},
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"batch_size": {
"name": "batch_size"
},
"elevation": {
"name": "elevation"
},
"azimuth": {
"name": "azimuth"
}
},
"outputs": {
"0": {
"name": "positive",
"tooltip": null
},
"1": {
"name": "negative",
"tooltip": null
},
"2": {
"name": "latent",
"tooltip": null
}
}
},
"StableZero123_Conditioning_Batched": {
"display_name": "StableZero123_Conditioning_Batched",
"inputs": {
"clip_vision": {
"name": "clip_vision"
},
"init_image": {
"name": "init_image"
},
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"batch_size": {
"name": "batch_size"
},
"elevation": {
"name": "elevation"
},
"azimuth": {
"name": "azimuth"
},
"elevation_batch_increment": {
"name": "elevation_batch_increment"
},
"azimuth_batch_increment": {
"name": "azimuth_batch_increment"
}
},
"outputs": {
"0": {
"name": "positive",
"tooltip": null
},
"1": {
"name": "negative",
"tooltip": null
},
"2": {
"name": "latent",
"tooltip": null
}
}
},
"StringCompare": {
"display_name": "Compare",
"inputs": {
"string_a": {
"name": "string_a"
},
"string_b": {
"name": "string_b"
},
"mode": {
"name": "mode"
},
"case_sensitive": {
"name": "case_sensitive"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"StringConcatenate": {
"display_name": "Concatenate",
"inputs": {
"string_a": {
"name": "string_a"
},
"string_b": {
"name": "string_b"
},
"delimiter": {
"name": "delimiter"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"StringContains": {
"display_name": "Contains",
"inputs": {
"string": {
"name": "string"
},
"substring": {
"name": "substring"
},
"case_sensitive": {
"name": "case_sensitive"
}
},
"outputs": {
"0": {
"name": "contains",
"tooltip": null
}
}
},
"StringLength": {
"display_name": "Length",
"inputs": {
"string": {
"name": "string"
}
},
"outputs": {
"0": {
"name": "length",
"tooltip": null
}
}
},
"StringReplace": {
"display_name": "Replace",
"inputs": {
"string": {
"name": "string"
},
"find": {
"name": "find"
},
"replace": {
"name": "replace"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"StringSubstring": {
"display_name": "Substring",
"inputs": {
"string": {
"name": "string"
},
"start": {
"name": "start"
},
"end": {
"name": "end"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"StringTrim": {
"display_name": "Trim",
"inputs": {
"string": {
"name": "string"
},
"mode": {
"name": "mode"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"StyleModelApply": {
"display_name": "Apply Style Model",
"inputs": {
"conditioning": {
"name": "conditioning"
},
"style_model": {
"name": "style_model"
},
"clip_vision_output": {
"name": "clip_vision_output"
},
"strength": {
"name": "strength"
},
"strength_type": {
"name": "strength_type"
}
}
},
"StyleModelLoader": {
"display_name": "Load Style Model",
"inputs": {
"style_model_name": {
"name": "style_model_name"
}
}
},
"SV3D_Conditioning": {
"display_name": "SV3D_Conditioning",
"inputs": {
"clip_vision": {
"name": "clip_vision"
},
"init_image": {
"name": "init_image"
},
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"video_frames": {
"name": "video_frames"
},
"elevation": {
"name": "elevation"
}
},
"outputs": {
"0": {
"name": "positive",
"tooltip": null
},
"1": {
"name": "negative",
"tooltip": null
},
"2": {
"name": "latent",
"tooltip": null
}
}
},
"SVD_img2vid_Conditioning": {
"display_name": "SVD_img2vid_Conditioning",
"inputs": {
"clip_vision": {
"name": "clip_vision"
},
"init_image": {
"name": "init_image"
},
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"video_frames": {
"name": "video_frames"
},
"motion_bucket_id": {
"name": "motion_bucket_id"
},
"fps": {
"name": "fps"
},
"augmentation_level": {
"name": "augmentation_level"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
},
"2": {
"name": "latent"
}
}
},
"T5TokenizerOptions": {
"display_name": "T5TokenizerOptions",
"inputs": {
"clip": {
"name": "clip"
},
"min_padding": {
"name": "min_padding"
},
"min_length": {
"name": "min_length"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"TCFG": {
"display_name": "Tangential Damping CFG",
"description": "TCFG Tangential Damping CFG (2503.18137)\n\nRefine the uncond (negative) to align with the cond (positive) for improving quality.",
"inputs": {
"model": {
"name": "model"
}
},
"outputs": {
"0": {
"name": "patched_model",
"tooltip": null
}
}
},
"TemporalScoreRescaling": {
"display_name": "TSR - Temporal Score Rescaling",
"description": "[Post-CFG Function]\nTSR - Temporal Score Rescaling (2510.01184)\n\nRescaling the model's score or noise to steer the sampling diversity.\n",
"inputs": {
"model": {
"name": "model"
},
"tsr_k": {
"name": "tsr_k",
"tooltip": "Controls the rescaling strength.\nLower k produces more detailed results; higher k produces smoother results in image generation. Setting k = 1 disables rescaling."
},
"tsr_sigma": {
"name": "tsr_sigma",
"tooltip": "Controls how early rescaling takes effect.\nLarger values take effect earlier."
}
},
"outputs": {
"0": {
"name": "patched_model",
"tooltip": null
}
}
},
"TextEncodeAceStepAudio": {
"display_name": "TextEncodeAceStepAudio",
"inputs": {
"clip": {
"name": "clip"
},
"tags": {
"name": "tags"
},
"lyrics": {
"name": "lyrics"
},
"lyrics_strength": {
"name": "lyrics_strength"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"TextEncodeHunyuanVideo_ImageToVideo": {
"display_name": "TextEncodeHunyuanVideo_ImageToVideo",
"inputs": {
"clip": {
"name": "clip"
},
"clip_vision_output": {
"name": "clip_vision_output"
},
"prompt": {
"name": "prompt"
},
"image_interleave": {
"name": "image_interleave",
"tooltip": "How much the image influences things vs the text prompt. Higher number means more influence from the text prompt."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"TextEncodeQwenImageEdit": {
"display_name": "TextEncodeQwenImageEdit",
"inputs": {
"clip": {
"name": "clip"
},
"prompt": {
"name": "prompt"
},
"vae": {
"name": "vae"
},
"image": {
"name": "image"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"TextEncodeQwenImageEditPlus": {
"display_name": "TextEncodeQwenImageEditPlus",
"inputs": {
"clip": {
"name": "clip"
},
"prompt": {
"name": "prompt"
},
"vae": {
"name": "vae"
},
"image1": {
"name": "image1"
},
"image2": {
"name": "image2"
},
"image3": {
"name": "image3"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"ThresholdMask": {
"display_name": "ThresholdMask",
"inputs": {
"mask": {
"name": "mask"
},
"value": {
"name": "value"
}
}
},
"TomePatchModel": {
"display_name": "TomePatchModel",
"inputs": {
"model": {
"name": "model"
},
"ratio": {
"name": "ratio"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"TorchCompileModel": {
"display_name": "TorchCompileModel",
"inputs": {
"model": {
"name": "model"
},
"backend": {
"name": "backend"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"TrainLoraNode": {
"display_name": "Train LoRA",
"inputs": {
"model": {
"name": "model",
"tooltip": "The model to train the LoRA on."
},
"latents": {
"name": "latents",
"tooltip": "The Latents to use for training, serve as dataset/input of the model."
},
"positive": {
"name": "positive",
"tooltip": "The positive conditioning to use for training."
},
"batch_size": {
"name": "batch_size",
"tooltip": "The batch size to use for training."
},
"grad_accumulation_steps": {
"name": "grad_accumulation_steps",
"tooltip": "The number of gradient accumulation steps to use for training."
},
"steps": {
"name": "steps",
"tooltip": "The number of steps to train the LoRA for."
},
"learning_rate": {
"name": "learning_rate",
"tooltip": "The learning rate to use for training."
},
"rank": {
"name": "rank",
"tooltip": "The rank of the LoRA layers."
},
"optimizer": {
"name": "optimizer",
"tooltip": "The optimizer to use for training."
},
"loss_function": {
"name": "loss_function",
"tooltip": "The loss function to use for training."
},
"seed": {
"name": "seed",
"tooltip": "The seed to use for training (used in generator for LoRA weight initialization and noise sampling)"
},
"training_dtype": {
"name": "training_dtype",
"tooltip": "The dtype to use for training."
},
"lora_dtype": {
"name": "lora_dtype",
"tooltip": "The dtype to use for lora."
},
"algorithm": {
"name": "algorithm",
"tooltip": "The algorithm to use for training."
},
"gradient_checkpointing": {
"name": "gradient_checkpointing",
"tooltip": "Use gradient checkpointing for training."
},
"existing_lora": {
"name": "existing_lora",
"tooltip": "The existing LoRA to append to. Set to None for new LoRA."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"name": "model_with_lora"
},
"1": {
"name": "lora"
},
"2": {
"name": "loss"
},
"3": {
"name": "steps"
}
}
},
"TrimAudioDuration": {
"display_name": "Trim Audio Duration",
"description": "Trim audio tensor into chosen time range.",
"inputs": {
"audio": {
"name": "audio"
},
"start_index": {
"name": "start_index",
"tooltip": "Start time in seconds, can be negative to count from the end (supports sub-seconds)."
},
"duration": {
"name": "duration",
"tooltip": "Duration in seconds"
}
}
},
"TrimVideoLatent": {
"display_name": "TrimVideoLatent",
"inputs": {
"samples": {
"name": "samples"
},
"trim_amount": {
"name": "trim_amount"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"TripleCLIPLoader": {
"display_name": "TripleCLIPLoader",
"description": "[Recipes]\n\nsd3: clip-l, clip-g, t5",
"inputs": {
"clip_name1": {
"name": "clip_name1"
},
"clip_name2": {
"name": "clip_name2"
},
"clip_name3": {
"name": "clip_name3"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"TripoConversionNode": {
"display_name": "Tripo: Convert model",
"inputs": {
"original_model_task_id": {
"name": "original_model_task_id"
},
"format": {
"name": "format"
},
"quad": {
"name": "quad"
},
"face_limit": {
"name": "face_limit"
},
"texture_size": {
"name": "texture_size"
},
"texture_format": {
"name": "texture_format"
}
}
},
"TripoImageToModelNode": {
"display_name": "Tripo: Image to Model",
"inputs": {
"image": {
"name": "image"
},
"model_version": {
"name": "model_version",
"tooltip": "The model version to use for generation"
},
"style": {
"name": "style"
},
"texture": {
"name": "texture"
},
"pbr": {
"name": "pbr"
},
"model_seed": {
"name": "model_seed"
},
"orientation": {
"name": "orientation"
},
"texture_seed": {
"name": "texture_seed"
},
"texture_quality": {
"name": "texture_quality"
},
"texture_alignment": {
"name": "texture_alignment"
},
"face_limit": {
"name": "face_limit"
},
"quad": {
"name": "quad"
}
},
"outputs": {
"0": {
"name": "model_file"
},
"1": {
"name": "model task_id"
}
}
},
"TripoMultiviewToModelNode": {
"display_name": "Tripo: Multiview to Model",
"inputs": {
"image": {
"name": "image"
},
"image_left": {
"name": "image_left"
},
"image_back": {
"name": "image_back"
},
"image_right": {
"name": "image_right"
},
"model_version": {
"name": "model_version",
"tooltip": "The model version to use for generation"
},
"orientation": {
"name": "orientation"
},
"texture": {
"name": "texture"
},
"pbr": {
"name": "pbr"
},
"model_seed": {
"name": "model_seed"
},
"texture_seed": {
"name": "texture_seed"
},
"texture_quality": {
"name": "texture_quality"
},
"texture_alignment": {
"name": "texture_alignment"
},
"face_limit": {
"name": "face_limit"
},
"quad": {
"name": "quad"
}
},
"outputs": {
"0": {
"name": "model_file"
},
"1": {
"name": "model task_id"
}
}
},
"TripoRefineNode": {
"display_name": "Tripo: Refine Draft model",
    "description": "Refine a draft model; only models created by Tripo v1.4 are supported.",
"inputs": {
"model_task_id": {
"name": "model_task_id",
"tooltip": "Must be a v1.4 Tripo model"
}
},
"outputs": {
"0": {
"name": "model_file"
},
"1": {
"name": "model task_id"
}
}
},
"TripoRetargetNode": {
"display_name": "Tripo: Retarget rigged model",
"inputs": {
"original_model_task_id": {
"name": "original_model_task_id"
},
"animation": {
"name": "animation"
}
},
"outputs": {
"0": {
"name": "model_file"
},
"1": {
"name": "retarget task_id"
}
}
},
"TripoRigNode": {
"display_name": "Tripo: Rig model",
"inputs": {
"original_model_task_id": {
"name": "original_model_task_id"
}
},
"outputs": {
"0": {
"name": "model_file"
},
"1": {
"name": "rig task_id"
}
}
},
"TripoTextToModelNode": {
"display_name": "Tripo: Text to Model",
"inputs": {
"prompt": {
"name": "prompt"
},
"negative_prompt": {
"name": "negative_prompt"
},
"model_version": {
"name": "model_version"
},
"style": {
"name": "style"
},
"texture": {
"name": "texture"
},
"pbr": {
"name": "pbr"
},
"image_seed": {
"name": "image_seed"
},
"model_seed": {
"name": "model_seed"
},
"texture_seed": {
"name": "texture_seed"
},
"texture_quality": {
"name": "texture_quality"
},
"face_limit": {
"name": "face_limit"
},
"quad": {
"name": "quad"
}
},
"outputs": {
"0": {
"name": "model_file"
},
"1": {
"name": "model task_id"
}
}
},
"TripoTextureNode": {
"display_name": "Tripo: Texture model",
"inputs": {
"model_task_id": {
"name": "model_task_id"
},
"texture": {
"name": "texture"
},
"pbr": {
"name": "pbr"
},
"texture_seed": {
"name": "texture_seed"
},
"texture_quality": {
"name": "texture_quality"
},
"texture_alignment": {
"name": "texture_alignment"
}
},
"outputs": {
"0": {
"name": "model_file"
},
"1": {
"name": "model task_id"
}
}
},
"unCLIPCheckpointLoader": {
"display_name": "unCLIPCheckpointLoader",
"inputs": {
"ckpt_name": {
"name": "ckpt_name"
}
}
},
"unCLIPConditioning": {
"display_name": "unCLIPConditioning",
"inputs": {
"conditioning": {
"name": "conditioning"
},
"clip_vision_output": {
"name": "clip_vision_output"
},
"strength": {
"name": "strength"
},
"noise_augmentation": {
"name": "noise_augmentation"
}
}
},
"UNetCrossAttentionMultiply": {
"display_name": "UNetCrossAttentionMultiply",
"inputs": {
"model": {
"name": "model"
},
"q": {
"name": "q"
},
"k": {
"name": "k"
},
"v": {
"name": "v"
},
"out": {
"name": "out"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"UNETLoader": {
"display_name": "Load Diffusion Model",
"inputs": {
"unet_name": {
"name": "unet_name"
},
"weight_dtype": {
"name": "weight_dtype"
}
}
},
"UNetSelfAttentionMultiply": {
"display_name": "UNetSelfAttentionMultiply",
"inputs": {
"model": {
"name": "model"
},
"q": {
"name": "q"
},
"k": {
"name": "k"
},
"v": {
"name": "v"
},
"out": {
"name": "out"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"UNetTemporalAttentionMultiply": {
"display_name": "UNetTemporalAttentionMultiply",
"inputs": {
"model": {
"name": "model"
},
"self_structural": {
"name": "self_structural"
},
"self_temporal": {
"name": "self_temporal"
},
"cross_structural": {
"name": "cross_structural"
},
"cross_temporal": {
"name": "cross_temporal"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"UpscaleModelLoader": {
"display_name": "Load Upscale Model",
"inputs": {
"model_name": {
"name": "model_name"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"USOStyleReference": {
"display_name": "USOStyleReference",
"inputs": {
"model": {
"name": "model"
},
"model_patch": {
"name": "model_patch"
},
"clip_vision_output": {
"name": "clip_vision_output"
}
}
},
"VAEDecode": {
"display_name": "VAE Decode",
"description": "Decodes latent images back into pixel space images.",
"inputs": {
"samples": {
"name": "samples",
"tooltip": "The latent to be decoded."
},
"vae": {
"name": "vae",
"tooltip": "The VAE model used for decoding the latent."
}
},
"outputs": {
"0": {
"tooltip": "The decoded image."
}
}
},
"VAEDecodeAudio": {
"display_name": "VAE Decode Audio",
"inputs": {
"samples": {
"name": "samples"
},
"vae": {
"name": "vae"
}
}
},
"VAEDecodeHunyuan3D": {
"display_name": "VAEDecodeHunyuan3D",
"inputs": {
"samples": {
"name": "samples"
},
"vae": {
"name": "vae"
},
"num_chunks": {
"name": "num_chunks"
},
"octree_resolution": {
"name": "octree_resolution"
}
}
},
"VAEDecodeTiled": {
"display_name": "VAE Decode (Tiled)",
"inputs": {
"samples": {
"name": "samples"
},
"vae": {
"name": "vae"
},
"tile_size": {
"name": "tile_size"
},
"overlap": {
"name": "overlap"
},
"temporal_size": {
"name": "temporal_size",
"tooltip": "Only used for video VAEs: Amount of frames to decode at a time."
},
"temporal_overlap": {
"name": "temporal_overlap",
"tooltip": "Only used for video VAEs: Amount of frames to overlap."
}
}
},
"VAEEncode": {
"display_name": "VAE Encode",
"inputs": {
"pixels": {
"name": "pixels"
},
"vae": {
"name": "vae"
}
}
},
"VAEEncodeAudio": {
"display_name": "VAE Encode Audio",
"inputs": {
"audio": {
"name": "audio"
},
"vae": {
"name": "vae"
}
}
},
"VAEEncodeForInpaint": {
"display_name": "VAE Encode (for Inpainting)",
"inputs": {
"pixels": {
"name": "pixels"
},
"vae": {
"name": "vae"
},
"mask": {
"name": "mask"
},
"grow_mask_by": {
"name": "grow_mask_by"
}
}
},
"VAEEncodeTiled": {
"display_name": "VAE Encode (Tiled)",
"inputs": {
"pixels": {
"name": "pixels"
},
"vae": {
"name": "vae"
},
"tile_size": {
"name": "tile_size"
},
"overlap": {
"name": "overlap"
},
"temporal_size": {
"name": "temporal_size",
"tooltip": "Only used for video VAEs: Amount of frames to encode at a time."
},
"temporal_overlap": {
"name": "temporal_overlap",
"tooltip": "Only used for video VAEs: Amount of frames to overlap."
}
}
},
"VAELoader": {
"display_name": "Load VAE",
"inputs": {
"vae_name": {
"name": "vae_name"
}
}
},
"VAESave": {
"display_name": "VAESave",
"inputs": {
"vae": {
"name": "vae"
},
"filename_prefix": {
"name": "filename_prefix"
}
}
},
"Veo3VideoGenerationNode": {
"display_name": "Google Veo 3 Video Generation",
"description": "Generates videos from text prompts using Google's Veo 3 API",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Text description of the video"
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "Aspect ratio of the output video"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Negative text prompt to guide what to avoid in the video"
},
"duration_seconds": {
"name": "duration_seconds",
"tooltip": "Duration of the output video in seconds (Veo 3 only supports 8 seconds)"
},
"enhance_prompt": {
"name": "enhance_prompt",
"tooltip": "Whether to enhance the prompt with AI assistance"
},
"person_generation": {
"name": "person_generation",
"tooltip": "Whether to allow generating people in the video"
},
"seed": {
"name": "seed",
"tooltip": "Seed for video generation (0 for random)"
},
"image": {
"name": "image",
"tooltip": "Optional reference image to guide video generation"
},
"model": {
"name": "model",
"tooltip": "Veo 3 model to use for video generation"
},
"generate_audio": {
"name": "generate_audio",
"tooltip": "Generate audio for the video. Supported by all Veo 3 models."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"VeoVideoGenerationNode": {
"display_name": "Google Veo 2 Video Generation",
"description": "Generates videos from text prompts using Google's Veo 2 API",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Text description of the video"
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "Aspect ratio of the output video"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Negative text prompt to guide what to avoid in the video"
},
"duration_seconds": {
"name": "duration_seconds",
"tooltip": "Duration of the output video in seconds"
},
"enhance_prompt": {
"name": "enhance_prompt",
"tooltip": "Whether to enhance the prompt with AI assistance"
},
"person_generation": {
"name": "person_generation",
"tooltip": "Whether to allow generating people in the video"
},
"seed": {
"name": "seed",
"tooltip": "Seed for video generation (0 for random)"
},
"image": {
"name": "image",
"tooltip": "Optional reference image to guide video generation"
},
"model": {
"name": "model",
"tooltip": "Veo 2 model to use for video generation"
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"VideoLinearCFGGuidance": {
"display_name": "VideoLinearCFGGuidance",
"inputs": {
"model": {
"name": "model"
},
"min_cfg": {
"name": "min_cfg"
}
}
},
"VideoTriangleCFGGuidance": {
"display_name": "VideoTriangleCFGGuidance",
"inputs": {
"model": {
"name": "model"
},
"min_cfg": {
"name": "min_cfg"
}
}
},
"ViduImageToVideoNode": {
"display_name": "Vidu Image To Video Generation",
"description": "Generate video from image and optional prompt",
"inputs": {
"model": {
"name": "model",
"tooltip": "Model name"
},
"image": {
"name": "image",
"tooltip": "An image to be used as the start frame of the generated video"
},
"prompt": {
"name": "prompt",
"tooltip": "A textual description for video generation"
},
"duration": {
"name": "duration",
"tooltip": "Duration of the output video in seconds"
},
"seed": {
"name": "seed",
"tooltip": "Seed for video generation (0 for random)"
},
"resolution": {
"name": "resolution",
"tooltip": "Supported values may vary by model & duration"
},
"movement_amplitude": {
"name": "movement_amplitude",
"tooltip": "The movement amplitude of objects in the frame"
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"ViduReferenceVideoNode": {
"display_name": "Vidu Reference To Video Generation",
"description": "Generate video from multiple images and prompt",
"inputs": {
"model": {
"name": "model",
"tooltip": "Model name"
},
"images": {
"name": "images",
"tooltip": "Images to use as references to generate a video with consistent subjects (max 7 images)."
},
"prompt": {
"name": "prompt",
"tooltip": "A textual description for video generation"
},
"duration": {
"name": "duration",
"tooltip": "Duration of the output video in seconds"
},
"seed": {
"name": "seed",
"tooltip": "Seed for video generation (0 for random)"
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "The aspect ratio of the output video"
},
"resolution": {
"name": "resolution",
"tooltip": "Supported values may vary by model & duration"
},
"movement_amplitude": {
"name": "movement_amplitude",
"tooltip": "The movement amplitude of objects in the frame"
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"ViduStartEndToVideoNode": {
"display_name": "Vidu Start End To Video Generation",
"description": "Generate a video from start and end frames and a prompt",
"inputs": {
"model": {
"name": "model",
"tooltip": "Model name"
},
"first_frame": {
"name": "first_frame",
"tooltip": "Start frame"
},
"end_frame": {
"name": "end_frame",
"tooltip": "End frame"
},
"prompt": {
"name": "prompt",
"tooltip": "A textual description for video generation"
},
"duration": {
"name": "duration",
"tooltip": "Duration of the output video in seconds"
},
"seed": {
"name": "seed",
"tooltip": "Seed for video generation (0 for random)"
},
"resolution": {
"name": "resolution",
"tooltip": "Supported values may vary by model & duration"
},
"movement_amplitude": {
"name": "movement_amplitude",
"tooltip": "The movement amplitude of objects in the frame"
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"ViduTextToVideoNode": {
"display_name": "Vidu Text To Video Generation",
"description": "Generate video from text prompt",
"inputs": {
"model": {
"name": "model",
"tooltip": "Model name"
},
"prompt": {
"name": "prompt",
"tooltip": "A textual description for video generation"
},
"duration": {
"name": "duration",
"tooltip": "Duration of the output video in seconds"
},
"seed": {
"name": "seed",
"tooltip": "Seed for video generation (0 for random)"
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "The aspect ratio of the output video"
},
"resolution": {
"name": "resolution",
"tooltip": "Supported values may vary by model & duration"
},
"movement_amplitude": {
"name": "movement_amplitude",
"tooltip": "The movement amplitude of objects in the frame"
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"VoxelToMesh": {
"display_name": "VoxelToMesh",
"inputs": {
"voxel": {
"name": "voxel"
},
"algorithm": {
"name": "algorithm"
},
"threshold": {
"name": "threshold"
}
}
},
"VoxelToMeshBasic": {
"display_name": "VoxelToMeshBasic",
"inputs": {
"voxel": {
"name": "voxel"
},
"threshold": {
"name": "threshold"
}
}
},
"VPScheduler": {
"display_name": "VPScheduler",
"inputs": {
"steps": {
"name": "steps"
},
"beta_d": {
"name": "beta_d"
},
"beta_min": {
"name": "beta_min"
},
"eps_s": {
"name": "eps_s"
}
}
},
"Wan22FunControlToVideo": {
"display_name": "Wan22FunControlToVideo",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
},
"ref_image": {
"name": "ref_image"
},
"control_video": {
"name": "control_video"
}
},
"outputs": {
"0": {
"name": "positive",
"tooltip": null
},
"1": {
"name": "negative",
"tooltip": null
},
"2": {
"name": "latent",
"tooltip": null
}
}
},
"Wan22ImageToVideoLatent": {
"display_name": "Wan22ImageToVideoLatent",
"inputs": {
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
},
"start_image": {
"name": "start_image"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanAnimateToVideo": {
"display_name": "WanAnimateToVideo",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
},
"continue_motion_max_frames": {
"name": "continue_motion_max_frames"
},
"video_frame_offset": {
"name": "video_frame_offset",
"tooltip": "The amount of frames to seek in all the input videos. Used for generating longer videos by chunk. Connect to the video_frame_offset output of the previous node for extending a video."
},
"clip_vision_output": {
"name": "clip_vision_output"
},
"reference_image": {
"name": "reference_image"
},
"face_video": {
"name": "face_video"
},
"pose_video": {
"name": "pose_video"
},
"background_video": {
"name": "background_video"
},
"character_mask": {
"name": "character_mask"
},
"continue_motion": {
"name": "continue_motion"
}
},
"outputs": {
"0": {
"name": "positive",
"tooltip": null
},
"1": {
"name": "negative",
"tooltip": null
},
"2": {
"name": "latent",
"tooltip": null
},
"3": {
"name": "trim_latent",
"tooltip": null
},
"4": {
"name": "trim_image",
"tooltip": null
},
"5": {
"name": "video_frame_offset",
"tooltip": null
}
}
},
"WanCameraEmbedding": {
"display_name": "WanCameraEmbedding",
"inputs": {
"camera_pose": {
"name": "camera_pose"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"speed": {
"name": "speed"
},
"fx": {
"name": "fx"
},
"fy": {
"name": "fy"
},
"cx": {
"name": "cx"
},
"cy": {
"name": "cy"
}
},
"outputs": {
"0": {
"name": "camera_embedding",
"tooltip": null
},
"1": {
"name": "width",
"tooltip": null
},
"2": {
"name": "height",
"tooltip": null
},
"3": {
"name": "length",
"tooltip": null
}
}
},
"WanCameraImageToVideo": {
"display_name": "WanCameraImageToVideo",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
},
"clip_vision_output": {
"name": "clip_vision_output"
},
"start_image": {
"name": "start_image"
},
"camera_conditions": {
"name": "camera_conditions"
}
},
"outputs": {
"0": {
"name": "positive",
"tooltip": null
},
"1": {
"name": "negative",
"tooltip": null
},
"2": {
"name": "latent",
"tooltip": null
}
}
},
"WanContextWindowsManual": {
"display_name": "WAN Context Windows (Manual)",
"description": "Manually set context windows for WAN-like models (dim=2).",
"inputs": {
"model": {
"name": "model",
"tooltip": "The model to apply context windows to during sampling."
},
"context_length": {
"name": "context_length",
"tooltip": "The length of the context window."
},
"context_overlap": {
"name": "context_overlap",
"tooltip": "The overlap of the context window."
},
"context_schedule": {
"name": "context_schedule",
        "tooltip": "The schedule used to generate the context windows."
},
"context_stride": {
"name": "context_stride",
"tooltip": "The stride of the context window; only applicable to uniform schedules."
},
"closed_loop": {
"name": "closed_loop",
"tooltip": "Whether to close the context window loop; only applicable to looped schedules."
},
"fuse_method": {
"name": "fuse_method",
"tooltip": "The method to use to fuse the context windows."
}
},
"outputs": {
"0": {
"tooltip": "The model with context windows applied during sampling."
}
}
},
"WanFirstLastFrameToVideo": {
"display_name": "WanFirstLastFrameToVideo",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
},
"clip_vision_start_image": {
"name": "clip_vision_start_image"
},
"clip_vision_end_image": {
"name": "clip_vision_end_image"
},
"start_image": {
"name": "start_image"
},
"end_image": {
"name": "end_image"
}
},
"outputs": {
"0": {
"name": "positive",
"tooltip": null
},
"1": {
"name": "negative",
"tooltip": null
},
"2": {
"name": "latent",
"tooltip": null
}
}
},
"WanFunControlToVideo": {
"display_name": "WanFunControlToVideo",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
},
"clip_vision_output": {
"name": "clip_vision_output"
},
"start_image": {
"name": "start_image"
},
"control_video": {
"name": "control_video"
}
},
"outputs": {
"0": {
"name": "positive",
"tooltip": null
},
"1": {
"name": "negative",
"tooltip": null
},
"2": {
"name": "latent",
"tooltip": null
}
}
},
"WanFunInpaintToVideo": {
"display_name": "WanFunInpaintToVideo",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
},
"clip_vision_output": {
"name": "clip_vision_output"
},
"start_image": {
"name": "start_image"
},
"end_image": {
"name": "end_image"
}
},
"outputs": {
"0": {
"name": "positive",
"tooltip": null
},
"1": {
"name": "negative",
"tooltip": null
},
"2": {
"name": "latent",
"tooltip": null
}
}
},
"WanHuMoImageToVideo": {
"display_name": "WanHuMoImageToVideo",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
},
"audio_encoder_output": {
"name": "audio_encoder_output"
},
"ref_image": {
"name": "ref_image"
}
},
"outputs": {
"0": {
"name": "positive",
"tooltip": null
},
"1": {
"name": "negative",
"tooltip": null
},
"2": {
"name": "latent",
"tooltip": null
}
}
},
"WanImageToImageApi": {
"display_name": "Wan Image to Image",
"description": "Generates an image from one or two input images and a text prompt. The output image is currently fixed at 1.6 MP; its aspect ratio matches the input image(s).",
"inputs": {
"model": {
"name": "model",
"tooltip": "Model to use."
},
"image": {
"name": "image",
"tooltip": "Single-image editing or multi-image fusion, maximum 2 images."
},
"prompt": {
"name": "prompt",
"tooltip": "Prompt used to describe the elements and visual features, supports English/Chinese."
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Negative text prompt to guide what to avoid."
},
"seed": {
"name": "seed",
"tooltip": "Seed to use for generation."
},
"watermark": {
"name": "watermark",
"tooltip": "Whether to add an \"AI generated\" watermark to the result."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanImageToVideo": {
"display_name": "WanImageToVideo",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
},
"clip_vision_output": {
"name": "clip_vision_output"
},
"start_image": {
"name": "start_image"
}
},
"outputs": {
"0": {
"name": "positive",
"tooltip": null
},
"1": {
"name": "negative",
"tooltip": null
},
"2": {
"name": "latent",
"tooltip": null
}
}
},
"WanImageToVideoApi": {
"display_name": "Wan Image to Video",
"description": "Generates video based on the first frame and text prompt.",
"inputs": {
"model": {
"name": "model",
"tooltip": "Model to use."
},
"image": {
"name": "image"
},
"prompt": {
"name": "prompt",
        "tooltip": "Prompt used to describe the elements and visual features; supports English/Chinese."
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Negative text prompt to guide what to avoid."
},
"resolution": {
"name": "resolution"
},
"duration": {
"name": "duration",
"tooltip": "Available durations: 5 and 10 seconds"
},
"audio": {
"name": "audio",
        "tooltip": "Audio must contain a clear, loud voice, without extraneous noise or background music."
},
"seed": {
"name": "seed",
"tooltip": "Seed to use for generation."
},
"generate_audio": {
"name": "generate_audio",
"tooltip": "If there is no audio input, generate audio automatically."
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "Whether to enhance the prompt with AI assistance."
},
"watermark": {
"name": "watermark",
"tooltip": "Whether to add an \"AI generated\" watermark to the result."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanPhantomSubjectToVideo": {
"display_name": "WanPhantomSubjectToVideo",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
},
"images": {
"name": "images"
}
},
"outputs": {
"0": {
"name": "positive",
"tooltip": null
},
"1": {
"name": "negative_text",
"tooltip": null
},
"2": {
"name": "negative_img_text",
"tooltip": null
},
"3": {
"name": "latent",
"tooltip": null
}
}
},
"WanSoundImageToVideo": {
"display_name": "WanSoundImageToVideo",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
},
"audio_encoder_output": {
"name": "audio_encoder_output"
},
"ref_image": {
"name": "ref_image"
},
"control_video": {
"name": "control_video"
},
"ref_motion": {
"name": "ref_motion"
}
},
"outputs": {
"0": {
"name": "positive",
"tooltip": null
},
"1": {
"name": "negative",
"tooltip": null
},
"2": {
"name": "latent",
"tooltip": null
}
}
},
"WanSoundImageToVideoExtend": {
"display_name": "WanSoundImageToVideoExtend",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"vae": {
"name": "vae"
},
"length": {
"name": "length"
},
"video_latent": {
"name": "video_latent"
},
"audio_encoder_output": {
"name": "audio_encoder_output"
},
"ref_image": {
"name": "ref_image"
},
"control_video": {
"name": "control_video"
}
},
"outputs": {
"0": {
"name": "positive",
"tooltip": null
},
"1": {
"name": "negative",
"tooltip": null
},
"2": {
"name": "latent",
"tooltip": null
}
}
},
"WanTextToImageApi": {
"display_name": "Wan Text to Image",
"description": "Generates image based on text prompt.",
"inputs": {
"model": {
"name": "model",
"tooltip": "Model to use."
},
"prompt": {
"name": "prompt",
        "tooltip": "Prompt used to describe the elements and visual features; supports English/Chinese."
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Negative text prompt to guide what to avoid."
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"seed": {
"name": "seed",
"tooltip": "Seed to use for generation."
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "Whether to enhance the prompt with AI assistance."
},
"watermark": {
"name": "watermark",
"tooltip": "Whether to add an \"AI generated\" watermark to the result."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanTextToVideoApi": {
"display_name": "Wan Text to Video",
"description": "Generates video based on text prompt.",
"inputs": {
"model": {
"name": "model",
"tooltip": "Model to use."
},
"prompt": {
"name": "prompt",
        "tooltip": "Prompt used to describe the elements and visual features; supports English/Chinese."
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Negative text prompt to guide what to avoid."
},
"size": {
"name": "size"
},
"duration": {
"name": "duration",
"tooltip": "Available durations: 5 and 10 seconds"
},
"audio": {
"name": "audio",
        "tooltip": "Audio must contain a clear, loud voice, without extraneous noise or background music."
},
"seed": {
"name": "seed",
"tooltip": "Seed to use for generation."
},
"generate_audio": {
"name": "generate_audio",
"tooltip": "If there is no audio input, generate audio automatically."
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "Whether to enhance the prompt with AI assistance."
},
"watermark": {
"name": "watermark",
"tooltip": "Whether to add an \"AI generated\" watermark to the result."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanTrackToVideo": {
"display_name": "WanTrackToVideo",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"vae": {
"name": "vae"
},
"tracks": {
"name": "tracks"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
},
"temperature": {
"name": "temperature"
},
"topk": {
"name": "topk"
},
"start_image": {
"name": "start_image"
},
"clip_vision_output": {
"name": "clip_vision_output"
}
},
"outputs": {
"0": {
"name": "positive",
"tooltip": null
},
"1": {
"name": "negative",
"tooltip": null
},
"2": {
"name": "latent",
"tooltip": null
}
}
},
"WanVaceToVideo": {
"display_name": "WanVaceToVideo",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
},
"strength": {
"name": "strength"
},
"control_video": {
"name": "control_video"
},
"control_masks": {
"name": "control_masks"
},
"reference_image": {
"name": "reference_image"
}
},
"outputs": {
"0": {
"name": "positive",
"tooltip": null
},
"1": {
"name": "negative",
"tooltip": null
},
"2": {
"name": "latent",
"tooltip": null
},
"3": {
"name": "trim_latent",
"tooltip": null
}
}
},
"WebcamCapture": {
"display_name": "Webcam Capture",
"inputs": {
"image": {
"name": "image"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"capture_on_queue": {
"name": "capture_on_queue"
},
"waiting for camera___": {}
}
}
}