ComfyUI_frontend/src/locales/en/nodeDefs.json
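
The JSON below is a flat map from a node's class name (for example "KSampler") to its English display strings: an optional display_name and description, plus per-input and per-output entries keyed by input name or output index, each carrying an optional name and tooltip. As a rough sketch of how an entry might be looked up, the types and helper below are illustrative assumptions, not the project's actual API:

// Illustrative sketch only: PortLocale/NodeDefLocale are assumed shapes
// mirroring the entries in this file, not ComfyUI_frontend's real types.
interface PortLocale {
  name?: string
  tooltip?: string
}

interface NodeDefLocale {
  display_name?: string
  description?: string
  inputs?: Record<string, PortLocale>
  outputs?: Record<string, PortLocale>
}

// Return the tooltip for a node input, falling back to the localized name
// and finally to the raw input key when nothing is defined.
function inputTooltip(
  defs: Record<string, NodeDefLocale>,
  nodeClass: string,
  inputKey: string
): string {
  const entry = defs[nodeClass]?.inputs?.[inputKey]
  return entry?.tooltip ?? entry?.name ?? inputKey
}

// Hypothetical usage, with the parsed contents of this file as `defs`:
//   inputTooltip(defs, 'KSampler', 'seed')
//   // -> "The random seed used for creating the noise."
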
{
"AddNoise": {
"display_name": "AddNoise",
"inputs": {
"model": {
"name": "model"
},
"noise": {
"name": "noise"
},
"sigmas": {
"name": "sigmas"
},
"latent_image": {
"name": "latent_image"
}
}
},
"AlignYourStepsScheduler": {
"display_name": "AlignYourStepsScheduler",
"inputs": {
"model_type": {
"name": "model_type"
},
"steps": {
"name": "steps"
},
"denoise": {
"name": "denoise"
}
}
},
"BasicGuider": {
"display_name": "BasicGuider",
"inputs": {
"model": {
"name": "model"
},
"conditioning": {
"name": "conditioning"
}
}
},
"BasicScheduler": {
"display_name": "BasicScheduler",
"inputs": {
"model": {
"name": "model"
},
"scheduler": {
"name": "scheduler"
},
"steps": {
"name": "steps"
},
"denoise": {
"name": "denoise"
}
}
},
"BetaSamplingScheduler": {
"display_name": "BetaSamplingScheduler",
"inputs": {
"model": {
"name": "model"
},
"steps": {
"name": "steps"
},
"alpha": {
"name": "alpha"
},
"beta": {
"name": "beta"
}
}
},
"Canny": {
"display_name": "Canny",
"inputs": {
"image": {
"name": "image"
},
"low_threshold": {
"name": "low_threshold"
},
"high_threshold": {
"name": "high_threshold"
}
}
},
"CFGGuider": {
"display_name": "CFGGuider",
"inputs": {
"model": {
"name": "model"
},
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"cfg": {
"name": "cfg"
}
}
},
"CFGZeroStar": {
"display_name": "CFGZeroStar",
"inputs": {
"model": {
"name": "model"
}
},
"outputs": {
"0": {
"name": "patched_model"
}
}
},
"CheckpointLoader": {
"display_name": "Load Checkpoint With Config (DEPRECATED)",
"inputs": {
"config_name": {
"name": "config_name"
},
"ckpt_name": {
"name": "ckpt_name"
}
}
},
"CheckpointLoaderSimple": {
"display_name": "Load Checkpoint",
"description": "Loads a diffusion model checkpoint, diffusion models are used to denoise latents.",
"inputs": {
"ckpt_name": {
"name": "ckpt_name",
"tooltip": "The name of the checkpoint (model) to load."
}
},
"outputs": {
"0": {
"tooltip": "The model used for denoising latents."
},
"1": {
"tooltip": "The CLIP model used for encoding text prompts."
},
"2": {
"tooltip": "The VAE model used for encoding and decoding images to and from latent space."
}
}
},
"CheckpointSave": {
"display_name": "Save Checkpoint",
"inputs": {
"model": {
"name": "model"
},
"clip": {
"name": "clip"
},
"vae": {
"name": "vae"
},
"filename_prefix": {
"name": "filename_prefix"
}
}
},
"CLIPAttentionMultiply": {
"display_name": "CLIPAttentionMultiply",
"inputs": {
"clip": {
"name": "clip"
},
"q": {
"name": "q"
},
"k": {
"name": "k"
},
"v": {
"name": "v"
},
"out": {
"name": "out"
}
}
},
"CLIPLoader": {
"display_name": "Load CLIP",
"description": "[Recipes]\n\nstable_diffusion: clip-l\nstable_cascade: clip-g\nsd3: t5 xxl/ clip-g / clip-l\nstable_audio: t5 base\nmochi: t5 xxl\ncosmos: old t5 xxl\nlumina2: gemma 2 2B\nwan: umt5 xxl\n hidream: llama-3.1 (Recommend) or t5",
"inputs": {
"clip_name": {
"name": "clip_name"
},
"type": {
"name": "type"
},
"device": {
"name": "device"
}
}
},
"CLIPMergeAdd": {
"display_name": "CLIPMergeAdd",
"inputs": {
"clip1": {
"name": "clip1"
},
"clip2": {
"name": "clip2"
}
}
},
"CLIPMergeSimple": {
"display_name": "CLIPMergeSimple",
"inputs": {
"clip1": {
"name": "clip1"
},
"clip2": {
"name": "clip2"
},
"ratio": {
"name": "ratio"
}
}
},
"CLIPMergeSubtract": {
"display_name": "CLIPMergeSubtract",
"inputs": {
"clip1": {
"name": "clip1"
},
"clip2": {
"name": "clip2"
},
"multiplier": {
"name": "multiplier"
}
}
},
"CLIPSave": {
"display_name": "CLIPSave",
"inputs": {
"clip": {
"name": "clip"
},
"filename_prefix": {
"name": "filename_prefix"
}
}
},
"CLIPSetLastLayer": {
"display_name": "CLIP Set Last Layer",
"inputs": {
"clip": {
"name": "clip"
},
"stop_at_clip_layer": {
"name": "stop_at_clip_layer"
}
}
},
"CLIPTextEncode": {
"display_name": "CLIP Text Encode (Prompt)",
"description": "Encodes a text prompt using a CLIP model into an embedding that can be used to guide the diffusion model towards generating specific images.",
"inputs": {
"text": {
"name": "text",
"tooltip": "The text to be encoded."
},
"clip": {
"name": "clip",
"tooltip": "The CLIP model used for encoding the text."
}
},
"outputs": {
"0": {
"tooltip": "A conditioning containing the embedded text used to guide the diffusion model."
}
}
},
"CLIPTextEncodeControlnet": {
"display_name": "CLIPTextEncodeControlnet",
"inputs": {
"clip": {
"name": "clip"
},
"conditioning": {
"name": "conditioning"
},
"text": {
"name": "text"
}
}
},
"CLIPTextEncodeFlux": {
"display_name": "CLIPTextEncodeFlux",
"inputs": {
"clip": {
"name": "clip"
},
"clip_l": {
"name": "clip_l"
},
"t5xxl": {
"name": "t5xxl"
},
"guidance": {
"name": "guidance"
}
}
},
"CLIPTextEncodeHiDream": {
"display_name": "CLIPTextEncodeHiDream",
"inputs": {
"clip": {
"name": "clip"
},
"clip_l": {
"name": "clip_l"
},
"clip_g": {
"name": "clip_g"
},
"t5xxl": {
"name": "t5xxl"
},
"llama": {
"name": "llama"
}
}
},
"CLIPTextEncodeHunyuanDiT": {
"display_name": "CLIPTextEncodeHunyuanDiT",
"inputs": {
"clip": {
"name": "clip"
},
"bert": {
"name": "bert"
},
"mt5xl": {
"name": "mt5xl"
}
}
},
"CLIPTextEncodeLumina2": {
"display_name": "CLIP Text Encode for Lumina2",
"description": "Encodes a system prompt and a user prompt using a CLIP model into an embedding that can be used to guide the diffusion model towards generating specific images.",
"inputs": {
"system_prompt": {
"name": "system_prompt",
"tooltip": "Lumina2 provide two types of system prompts:Superior: You are an assistant designed to generate superior images with the superior degree of image-text alignment based on textual prompts or user prompts. Alignment: You are an assistant designed to generate high-quality images with the highest degree of image-text alignment based on textual prompts."
},
"user_prompt": {
"name": "user_prompt",
"tooltip": "The text to be encoded."
},
"clip": {
"name": "clip",
"tooltip": "The CLIP model used for encoding the text."
}
},
"outputs": {
"0": {
"tooltip": "A conditioning containing the embedded text used to guide the diffusion model."
}
}
},
"CLIPTextEncodePixArtAlpha": {
"display_name": "CLIPTextEncodePixArtAlpha",
"description": "Encodes text and sets the resolution conditioning for PixArt Alpha. Does not apply to PixArt Sigma.",
"inputs": {
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"text": {
"name": "text"
},
"clip": {
"name": "clip"
}
}
},
"CLIPTextEncodeSD3": {
"display_name": "CLIPTextEncodeSD3",
"inputs": {
"clip": {
"name": "clip"
},
"clip_l": {
"name": "clip_l"
},
"clip_g": {
"name": "clip_g"
},
"t5xxl": {
"name": "t5xxl"
},
"empty_padding": {
"name": "empty_padding"
}
}
},
"CLIPTextEncodeSDXL": {
"display_name": "CLIPTextEncodeSDXL",
"inputs": {
"clip": {
"name": "clip"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"crop_w": {
"name": "crop_w"
},
"crop_h": {
"name": "crop_h"
},
"target_width": {
"name": "target_width"
},
"target_height": {
"name": "target_height"
},
"text_g": {
"name": "text_g"
},
"text_l": {
"name": "text_l"
}
}
},
"CLIPTextEncodeSDXLRefiner": {
"display_name": "CLIPTextEncodeSDXLRefiner",
"inputs": {
"ascore": {
"name": "ascore"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"text": {
"name": "text"
},
"clip": {
"name": "clip"
}
}
},
"CLIPVisionEncode": {
"display_name": "CLIP Vision Encode",
"inputs": {
"clip_vision": {
"name": "clip_vision"
},
"image": {
"name": "image"
},
"crop": {
"name": "crop"
}
}
},
"CLIPVisionLoader": {
"display_name": "Load CLIP Vision",
"inputs": {
"clip_name": {
"name": "clip_name"
}
}
},
"CombineHooks2": {
"display_name": "Combine Hooks [2]",
"inputs": {
"hooks_A": {
"name": "hooks_A"
},
"hooks_B": {
"name": "hooks_B"
}
}
},
"CombineHooks4": {
"display_name": "Combine Hooks [4]",
"inputs": {
"hooks_A": {
"name": "hooks_A"
},
"hooks_B": {
"name": "hooks_B"
},
"hooks_C": {
"name": "hooks_C"
},
"hooks_D": {
"name": "hooks_D"
}
}
},
"CombineHooks8": {
"display_name": "Combine Hooks [8]",
"inputs": {
"hooks_A": {
"name": "hooks_A"
},
"hooks_B": {
"name": "hooks_B"
},
"hooks_C": {
"name": "hooks_C"
},
"hooks_D": {
"name": "hooks_D"
},
"hooks_E": {
"name": "hooks_E"
},
"hooks_F": {
"name": "hooks_F"
},
"hooks_G": {
"name": "hooks_G"
},
"hooks_H": {
"name": "hooks_H"
}
}
},
"ConditioningAverage": {
"display_name": "ConditioningAverage",
"inputs": {
"conditioning_to": {
"name": "conditioning_to"
},
"conditioning_from": {
"name": "conditioning_from"
},
"conditioning_to_strength": {
"name": "conditioning_to_strength"
}
}
},
"ConditioningCombine": {
"display_name": "Conditioning (Combine)",
"inputs": {
"conditioning_1": {
"name": "conditioning_1"
},
"conditioning_2": {
"name": "conditioning_2"
}
}
},
"ConditioningConcat": {
"display_name": "Conditioning (Concat)",
"inputs": {
"conditioning_to": {
"name": "conditioning_to"
},
"conditioning_from": {
"name": "conditioning_from"
}
}
},
"ConditioningSetArea": {
"display_name": "Conditioning (Set Area)",
"inputs": {
"conditioning": {
"name": "conditioning"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"x": {
"name": "x"
},
"y": {
"name": "y"
},
"strength": {
"name": "strength"
}
}
},
"ConditioningSetAreaPercentage": {
"display_name": "Conditioning (Set Area with Percentage)",
"inputs": {
"conditioning": {
"name": "conditioning"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"x": {
"name": "x"
},
"y": {
"name": "y"
},
"strength": {
"name": "strength"
}
}
},
"ConditioningSetAreaPercentageVideo": {
"display_name": "ConditioningSetAreaPercentageVideo",
"inputs": {
"conditioning": {
"name": "conditioning"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"temporal": {
"name": "temporal"
},
"x": {
"name": "x"
},
"y": {
"name": "y"
},
"z": {
"name": "z"
},
"strength": {
"name": "strength"
}
}
},
"ConditioningSetAreaStrength": {
"display_name": "ConditioningSetAreaStrength",
"inputs": {
"conditioning": {
"name": "conditioning"
},
"strength": {
"name": "strength"
}
}
},
"ConditioningSetDefaultCombine": {
"display_name": "Cond Set Default Combine",
"inputs": {
"cond": {
"name": "cond"
},
"cond_DEFAULT": {
"name": "cond_DEFAULT"
},
"hooks": {
"name": "hooks"
}
}
},
"ConditioningSetMask": {
"display_name": "Conditioning (Set Mask)",
"inputs": {
"conditioning": {
"name": "conditioning"
},
"mask": {
"name": "mask"
},
"strength": {
"name": "strength"
},
"set_cond_area": {
"name": "set_cond_area"
}
}
},
"ConditioningSetProperties": {
"display_name": "Cond Set Props",
"inputs": {
"cond_NEW": {
"name": "cond_NEW"
},
"strength": {
"name": "strength"
},
"set_cond_area": {
"name": "set_cond_area"
},
"mask": {
"name": "mask"
},
"hooks": {
"name": "hooks"
},
"timesteps": {
"name": "timesteps"
}
}
},
"ConditioningSetPropertiesAndCombine": {
"display_name": "Cond Set Props Combine",
"inputs": {
"cond": {
"name": "cond"
},
"cond_NEW": {
"name": "cond_NEW"
},
"strength": {
"name": "strength"
},
"set_cond_area": {
"name": "set_cond_area"
},
"mask": {
"name": "mask"
},
"hooks": {
"name": "hooks"
},
"timesteps": {
"name": "timesteps"
}
}
},
"ConditioningSetTimestepRange": {
"display_name": "ConditioningSetTimestepRange",
"inputs": {
"conditioning": {
"name": "conditioning"
},
"start": {
"name": "start"
},
"end": {
"name": "end"
}
}
},
"ConditioningStableAudio": {
"display_name": "ConditioningStableAudio",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"seconds_start": {
"name": "seconds_start"
},
"seconds_total": {
"name": "seconds_total"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
}
}
},
"ConditioningTimestepsRange": {
"display_name": "Timesteps Range",
"inputs": {
"start_percent": {
"name": "start_percent"
},
"end_percent": {
"name": "end_percent"
}
},
"outputs": {
"1": {
"name": "BEFORE_RANGE"
},
"2": {
"name": "AFTER_RANGE"
}
}
},
"ConditioningZeroOut": {
"display_name": "ConditioningZeroOut",
"inputs": {
"conditioning": {
"name": "conditioning"
}
}
},
"ControlNetApply": {
"display_name": "Apply ControlNet (OLD)",
"inputs": {
"conditioning": {
"name": "conditioning"
},
"control_net": {
"name": "control_net"
},
"image": {
"name": "image"
},
"strength": {
"name": "strength"
}
}
},
"ControlNetApplyAdvanced": {
"display_name": "Apply ControlNet",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"control_net": {
"name": "control_net"
},
"image": {
"name": "image"
},
"strength": {
"name": "strength"
},
"start_percent": {
"name": "start_percent"
},
"end_percent": {
"name": "end_percent"
},
"vae": {
"name": "vae"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
}
}
},
"ControlNetApplySD3": {
"display_name": "Apply Controlnet with VAE",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"control_net": {
"name": "control_net"
},
"vae": {
"name": "vae"
},
"image": {
"name": "image"
},
"strength": {
"name": "strength"
},
"start_percent": {
"name": "start_percent"
},
"end_percent": {
"name": "end_percent"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
}
}
},
"ControlNetInpaintingAliMamaApply": {
"display_name": "ControlNetInpaintingAliMamaApply",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"control_net": {
"name": "control_net"
},
"vae": {
"name": "vae"
},
"image": {
"name": "image"
},
"mask": {
"name": "mask"
},
"strength": {
"name": "strength"
},
"start_percent": {
"name": "start_percent"
},
"end_percent": {
"name": "end_percent"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
}
}
},
"ControlNetLoader": {
"display_name": "Load ControlNet Model",
"inputs": {
"control_net_name": {
"name": "control_net_name"
}
}
},
"CosmosImageToVideoLatent": {
"display_name": "CosmosImageToVideoLatent",
"inputs": {
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
},
"start_image": {
"name": "start_image"
},
"end_image": {
"name": "end_image"
}
}
},
"CreateHookKeyframe": {
"display_name": "Create Hook Keyframe",
"inputs": {
"strength_mult": {
"name": "strength_mult"
},
"start_percent": {
"name": "start_percent"
},
"prev_hook_kf": {
"name": "prev_hook_kf"
}
},
"outputs": {
"0": {
"name": "HOOK_KF"
}
}
},
"CreateHookKeyframesFromFloats": {
"display_name": "Create Hook Keyframes From Floats",
"inputs": {
"floats_strength": {
"name": "floats_strength"
},
"start_percent": {
"name": "start_percent"
},
"end_percent": {
"name": "end_percent"
},
"print_keyframes": {
"name": "print_keyframes"
},
"prev_hook_kf": {
"name": "prev_hook_kf"
}
},
"outputs": {
"0": {
"name": "HOOK_KF"
}
}
},
"CreateHookKeyframesInterpolated": {
"display_name": "Create Hook Keyframes Interp.",
"inputs": {
"strength_start": {
"name": "strength_start"
},
"strength_end": {
"name": "strength_end"
},
"interpolation": {
"name": "interpolation"
},
"start_percent": {
"name": "start_percent"
},
"end_percent": {
"name": "end_percent"
},
"keyframes_count": {
"name": "keyframes_count"
},
"print_keyframes": {
"name": "print_keyframes"
},
"prev_hook_kf": {
"name": "prev_hook_kf"
}
},
"outputs": {
"0": {
"name": "HOOK_KF"
}
}
},
"CreateHookLora": {
"display_name": "Create Hook LoRA",
"inputs": {
"lora_name": {
"name": "lora_name"
},
"strength_model": {
"name": "strength_model"
},
"strength_clip": {
"name": "strength_clip"
},
"prev_hooks": {
"name": "prev_hooks"
}
}
},
"CreateHookLoraModelOnly": {
"display_name": "Create Hook LoRA (MO)",
"inputs": {
"lora_name": {
"name": "lora_name"
},
"strength_model": {
"name": "strength_model"
},
"prev_hooks": {
"name": "prev_hooks"
}
}
},
"CreateHookModelAsLora": {
"display_name": "Create Hook Model as LoRA",
"inputs": {
"ckpt_name": {
"name": "ckpt_name"
},
"strength_model": {
"name": "strength_model"
},
"strength_clip": {
"name": "strength_clip"
},
"prev_hooks": {
"name": "prev_hooks"
}
}
},
"CreateHookModelAsLoraModelOnly": {
"display_name": "Create Hook Model as LoRA (MO)",
"inputs": {
"ckpt_name": {
"name": "ckpt_name"
},
"strength_model": {
"name": "strength_model"
},
"prev_hooks": {
"name": "prev_hooks"
}
}
},
"CreateVideo": {
"display_name": "Create Video",
"description": "Create a video from images.",
"inputs": {
"images": {
"name": "images",
"tooltip": "The images to create a video from."
},
"fps": {
"name": "fps"
},
"audio": {
"name": "audio",
"tooltip": "The audio to add to the video."
}
}
},
"CropMask": {
"display_name": "CropMask",
"inputs": {
"mask": {
"name": "mask"
},
"x": {
"name": "x"
},
"y": {
"name": "y"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
}
}
},
"DiffControlNetLoader": {
"display_name": "Load ControlNet Model (diff)",
"inputs": {
"model": {
"name": "model"
},
"control_net_name": {
"name": "control_net_name"
}
}
},
"DifferentialDiffusion": {
"display_name": "Differential Diffusion",
"inputs": {
"model": {
"name": "model"
}
}
},
"DiffusersLoader": {
"display_name": "DiffusersLoader",
"inputs": {
"model_path": {
"name": "model_path"
}
}
},
"DisableNoise": {
"display_name": "DisableNoise"
},
"DualCFGGuider": {
"display_name": "DualCFGGuider",
"inputs": {
"model": {
"name": "model"
},
"cond1": {
"name": "cond1"
},
"cond2": {
"name": "cond2"
},
"negative": {
"name": "negative"
},
"cfg_conds": {
"name": "cfg_conds"
},
"cfg_cond2_negative": {
"name": "cfg_cond2_negative"
}
}
},
"DualCLIPLoader": {
"display_name": "DualCLIPLoader",
"description": "[Recipes]\n\nsdxl: clip-l, clip-g\nsd3: clip-l, clip-g / clip-l, t5 / clip-g, t5\nflux: clip-l, t5\nhidream: at least one of t5 or llama, recommended t5 and llama",
"inputs": {
"clip_name1": {
"name": "clip_name1"
},
"clip_name2": {
"name": "clip_name2"
},
"type": {
"name": "type"
},
"device": {
"name": "device"
}
}
},
"EmptyCosmosLatentVideo": {
"display_name": "EmptyCosmosLatentVideo",
"inputs": {
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
}
}
},
"EmptyHunyuanLatentVideo": {
"display_name": "EmptyHunyuanLatentVideo",
"inputs": {
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
}
}
},
"EmptyImage": {
"display_name": "EmptyImage",
"inputs": {
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"batch_size": {
"name": "batch_size"
},
"color": {
"name": "color"
}
}
},
"EmptyLatentAudio": {
"display_name": "EmptyLatentAudio",
"inputs": {
"seconds": {
"name": "seconds"
},
"batch_size": {
"name": "batch_size",
"tooltip": "The number of latent images in the batch."
}
}
},
"EmptyLatentHunyuan3Dv2": {
"display_name": "EmptyLatentHunyuan3Dv2",
"inputs": {
"resolution": {
"name": "resolution"
},
"batch_size": {
"name": "batch_size",
"tooltip": "The number of latent images in the batch."
}
}
},
"EmptyLatentImage": {
"display_name": "Empty Latent Image",
"description": "Create a new batch of empty latent images to be denoised via sampling.",
"inputs": {
"width": {
"name": "width",
"tooltip": "The width of the latent images in pixels."
},
"height": {
"name": "height",
"tooltip": "The height of the latent images in pixels."
},
"batch_size": {
"name": "batch_size",
"tooltip": "The number of latent images in the batch."
}
},
"outputs": {
"0": {
"tooltip": "The empty latent image batch."
}
}
},
"EmptyLTXVLatentVideo": {
"display_name": "EmptyLTXVLatentVideo",
"inputs": {
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
}
}
},
"EmptyMochiLatentVideo": {
"display_name": "EmptyMochiLatentVideo",
"inputs": {
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
}
}
},
"EmptySD3LatentImage": {
"display_name": "EmptySD3LatentImage",
"inputs": {
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"batch_size": {
"name": "batch_size"
}
}
},
"ExponentialScheduler": {
"display_name": "ExponentialScheduler",
"inputs": {
"steps": {
"name": "steps"
},
"sigma_max": {
"name": "sigma_max"
},
"sigma_min": {
"name": "sigma_min"
}
}
},
"ExtendIntermediateSigmas": {
"display_name": "ExtendIntermediateSigmas",
"inputs": {
"sigmas": {
"name": "sigmas"
},
"steps": {
"name": "steps"
},
"start_at_sigma": {
"name": "start_at_sigma"
},
"end_at_sigma": {
"name": "end_at_sigma"
},
"spacing": {
"name": "spacing"
}
}
},
"FeatherMask": {
"display_name": "FeatherMask",
"inputs": {
"mask": {
"name": "mask"
},
"left": {
"name": "left"
},
"top": {
"name": "top"
},
"right": {
"name": "right"
},
"bottom": {
"name": "bottom"
}
}
},
"FlipSigmas": {
"display_name": "FlipSigmas",
"inputs": {
"sigmas": {
"name": "sigmas"
}
}
},
"FluxDisableGuidance": {
"display_name": "FluxDisableGuidance",
"description": "This node completely disables the guidance embed on Flux and Flux like models",
"inputs": {
"conditioning": {
"name": "conditioning"
}
}
},
"FluxGuidance": {
"display_name": "FluxGuidance",
"inputs": {
"conditioning": {
"name": "conditioning"
},
"guidance": {
"name": "guidance"
}
}
},
"FluxProCannyNode": {
"display_name": "Flux.1 Canny Control Image",
"description": "Generate image using a control image (canny).",
"inputs": {
"control_image": {
"name": "control_image"
},
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation"
},
"prompt_upsampling": {
"name": "prompt_upsampling",
"tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result)."
},
"canny_low_threshold": {
"name": "canny_low_threshold",
"tooltip": "Low threshold for Canny edge detection; ignored if skip_processing is True"
},
"canny_high_threshold": {
"name": "canny_high_threshold",
"tooltip": "High threshold for Canny edge detection; ignored if skip_processing is True"
},
"skip_preprocessing": {
"name": "skip_preprocessing",
"tooltip": "Whether to skip preprocessing; set to True if control_image already is canny-fied, False if it is a raw image."
},
"guidance": {
"name": "guidance",
"tooltip": "Guidance strength for the image generation process"
},
"steps": {
"name": "steps",
"tooltip": "Number of steps for the image generation process"
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"FluxProDepthNode": {
"display_name": "Flux.1 Depth Control Image",
"description": "Generate image using a control image (depth).",
"inputs": {
"control_image": {
"name": "control_image"
},
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation"
},
"prompt_upsampling": {
"name": "prompt_upsampling",
"tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result)."
},
"skip_preprocessing": {
"name": "skip_preprocessing",
"tooltip": "Whether to skip preprocessing; set to True if control_image already is depth-ified, False if it is a raw image."
},
"guidance": {
"name": "guidance",
"tooltip": "Guidance strength for the image generation process"
},
"steps": {
"name": "steps",
"tooltip": "Number of steps for the image generation process"
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"FluxProExpandNode": {
"display_name": "Flux.1 Expand Image",
"description": "Outpaints image based on prompt.",
"inputs": {
"image": {
"name": "image"
},
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation"
},
"prompt_upsampling": {
"name": "prompt_upsampling",
"tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result)."
},
"top": {
"name": "top",
"tooltip": "Number of pixels to expand at the top of the image"
},
"bottom": {
"name": "bottom",
"tooltip": "Number of pixels to expand at the bottom of the image"
},
"left": {
"name": "left",
"tooltip": "Number of pixels to expand at the left side of the image"
},
"right": {
"name": "right",
"tooltip": "Number of pixels to expand at the right side of the image"
},
"guidance": {
"name": "guidance",
"tooltip": "Guidance strength for the image generation process"
},
"steps": {
"name": "steps",
"tooltip": "Number of steps for the image generation process"
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"FluxProFillNode": {
"display_name": "Flux.1 Fill Image",
"description": "Inpaints image based on mask and prompt.",
"inputs": {
"image": {
"name": "image"
},
"mask": {
"name": "mask"
},
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation"
},
"prompt_upsampling": {
"name": "prompt_upsampling",
"tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result)."
},
"guidance": {
"name": "guidance",
"tooltip": "Guidance strength for the image generation process"
},
"steps": {
"name": "steps",
"tooltip": "Number of steps for the image generation process"
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"FluxProUltraImageNode": {
"display_name": "Flux 1.1 [pro] Ultra Image",
"description": "Generates images using Flux Pro 1.1 Ultra via api based on prompt and resolution.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation"
},
"prompt_upsampling": {
"name": "prompt_upsampling",
"tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result)."
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "Aspect ratio of image; must be between 1:4 and 4:1."
},
"raw": {
"name": "raw",
"tooltip": "When True, generate less processed, more natural-looking images."
},
"image_prompt": {
"name": "image_prompt"
},
"image_prompt_strength": {
"name": "image_prompt_strength",
"tooltip": "Blend between the prompt and the image prompt."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"FreeU": {
"display_name": "FreeU",
"inputs": {
"model": {
"name": "model"
},
"b1": {
"name": "b1"
},
"b2": {
"name": "b2"
},
"s1": {
"name": "s1"
},
"s2": {
"name": "s2"
}
}
},
"FreeU_V2": {
"display_name": "FreeU_V2",
"inputs": {
"model": {
"name": "model"
},
"b1": {
"name": "b1"
},
"b2": {
"name": "b2"
},
"s1": {
"name": "s1"
},
"s2": {
"name": "s2"
}
}
},
"FreSca": {
"display_name": "FreSca",
"description": "Applies frequency-dependent scaling to the guidance",
"inputs": {
"model": {
"name": "model"
},
"scale_low": {
"name": "scale_low",
"tooltip": "Scaling factor for low-frequency components"
},
"scale_high": {
"name": "scale_high",
"tooltip": "Scaling factor for high-frequency components"
},
"freq_cutoff": {
"name": "freq_cutoff",
"tooltip": "Number of frequency indices around center to consider as low-frequency"
}
}
},
"GetVideoComponents": {
"display_name": "Get Video Components",
"description": "Extracts all components from a video: frames, audio, and framerate.",
"inputs": {
"video": {
"name": "video",
"tooltip": "The video to extract components from."
}
},
"outputs": {
"0": {
"name": "images"
},
"1": {
"name": "audio"
},
"2": {
"name": "fps"
}
}
},
"GITSScheduler": {
"display_name": "GITSScheduler",
"inputs": {
"coeff": {
"name": "coeff"
},
"steps": {
"name": "steps"
},
"denoise": {
"name": "denoise"
}
}
},
"GLIGENLoader": {
"display_name": "GLIGENLoader",
"inputs": {
"gligen_name": {
"name": "gligen_name"
}
}
},
"GLIGENTextBoxApply": {
"display_name": "GLIGENTextBoxApply",
"inputs": {
"conditioning_to": {
"name": "conditioning_to"
},
"clip": {
"name": "clip"
},
"gligen_textbox_model": {
"name": "gligen_textbox_model"
},
"text": {
"name": "text"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"x": {
"name": "x"
},
"y": {
"name": "y"
}
}
},
"GrowMask": {
"display_name": "GrowMask",
"inputs": {
"mask": {
"name": "mask"
},
"expand": {
"name": "expand"
},
"tapered_corners": {
"name": "tapered_corners"
}
}
},
"Hunyuan3Dv2Conditioning": {
"display_name": "Hunyuan3Dv2Conditioning",
"inputs": {
"clip_vision_output": {
"name": "clip_vision_output"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
}
}
},
"Hunyuan3Dv2ConditioningMultiView": {
"display_name": "Hunyuan3Dv2ConditioningMultiView",
"inputs": {
"front": {
"name": "front"
},
"left": {
"name": "left"
},
"back": {
"name": "back"
},
"right": {
"name": "right"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
}
}
},
"HunyuanImageToVideo": {
"display_name": "HunyuanImageToVideo",
"inputs": {
"positive": {
"name": "positive"
},
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
},
"guidance_type": {
"name": "guidance_type"
},
"start_image": {
"name": "start_image"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "latent"
}
}
},
"HypernetworkLoader": {
"display_name": "HypernetworkLoader",
"inputs": {
"model": {
"name": "model"
},
"hypernetwork_name": {
"name": "hypernetwork_name"
},
"strength": {
"name": "strength"
}
}
},
"HyperTile": {
"display_name": "HyperTile",
"inputs": {
"model": {
"name": "model"
},
"tile_size": {
"name": "tile_size"
},
"swap_size": {
"name": "swap_size"
},
"max_depth": {
"name": "max_depth"
},
"scale_depth": {
"name": "scale_depth"
}
}
},
"IdeogramV1": {
"display_name": "Ideogram V1",
"description": "Generates images synchronously using the Ideogram V1 model.\n\nImages links are available for a limited period of time; if you would like to keep the image, you must download it.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation"
},
"turbo": {
"name": "turbo",
"tooltip": "Whether to use turbo mode (faster generation, potentially lower quality)"
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "The aspect ratio for image generation."
},
"magic_prompt_option": {
"name": "magic_prompt_option",
"tooltip": "Determine if MagicPrompt should be used in generation"
},
"seed": {
"name": "seed"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Description of what to exclude from the image"
},
"num_images": {
"name": "num_images"
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"IdeogramV2": {
"display_name": "Ideogram V2",
"description": "Generates images synchronously using the Ideogram V2 model.\n\nImages links are available for a limited period of time; if you would like to keep the image, you must download it.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation"
},
"turbo": {
"name": "turbo",
"tooltip": "Whether to use turbo mode (faster generation, potentially lower quality)"
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "The aspect ratio for image generation. Ignored if resolution is not set to AUTO."
},
"resolution": {
"name": "resolution",
"tooltip": "The resolution for image generation. If not set to AUTO, this overrides the aspect_ratio setting."
},
"magic_prompt_option": {
"name": "magic_prompt_option",
"tooltip": "Determine if MagicPrompt should be used in generation"
},
"seed": {
"name": "seed"
},
"style_type": {
"name": "style_type",
"tooltip": "Style type for generation (V2 only)"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Description of what to exclude from the image"
},
"num_images": {
"name": "num_images"
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"IdeogramV3": {
"display_name": "Ideogram V3",
"description": "Generates images synchronously using the Ideogram V3 model.\n\nSupports both regular image generation from text prompts and image editing with mask.\nImages links are available for a limited period of time; if you would like to keep the image, you must download it.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation or editing"
},
"image": {
"name": "image",
"tooltip": "Optional reference image for image editing."
},
"mask": {
"name": "mask",
"tooltip": "Optional mask for inpainting (white areas will be replaced)"
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "The aspect ratio for image generation. Ignored if resolution is not set to Auto."
},
"resolution": {
"name": "resolution",
"tooltip": "The resolution for image generation. If not set to Auto, this overrides the aspect_ratio setting."
},
"magic_prompt_option": {
"name": "magic_prompt_option",
"tooltip": "Determine if MagicPrompt should be used in generation"
},
"seed": {
"name": "seed"
},
"num_images": {
"name": "num_images"
},
"rendering_speed": {
"name": "rendering_speed",
"tooltip": "Controls the trade-off between generation speed and quality"
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"ImageBatch": {
"display_name": "Batch Images",
"inputs": {
"image1": {
"name": "image1"
},
"image2": {
"name": "image2"
}
}
},
"ImageBlend": {
"display_name": "Image Blend",
"inputs": {
"image1": {
"name": "image1"
},
"image2": {
"name": "image2"
},
"blend_factor": {
"name": "blend_factor"
},
"blend_mode": {
"name": "blend_mode"
}
}
},
"ImageBlur": {
"display_name": "Image Blur",
"inputs": {
"image": {
"name": "image"
},
"blur_radius": {
"name": "blur_radius"
},
"sigma": {
"name": "sigma"
}
}
},
"ImageColorToMask": {
"display_name": "ImageColorToMask",
"inputs": {
"image": {
"name": "image"
},
"color": {
"name": "color"
}
}
},
"ImageCompositeMasked": {
"display_name": "ImageCompositeMasked",
"inputs": {
"destination": {
"name": "destination"
},
"source": {
"name": "source"
},
"x": {
"name": "x"
},
"y": {
"name": "y"
},
"resize_source": {
"name": "resize_source"
},
"mask": {
"name": "mask"
}
}
},
"ImageCrop": {
"display_name": "Image Crop",
"inputs": {
"image": {
"name": "image"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"x": {
"name": "x"
},
"y": {
"name": "y"
}
}
},
"ImageFromBatch": {
"display_name": "ImageFromBatch",
"inputs": {
"image": {
"name": "image"
},
"batch_index": {
"name": "batch_index"
},
"length": {
"name": "length"
}
}
},
"ImageInvert": {
"display_name": "Invert Image",
"inputs": {
"image": {
"name": "image"
}
}
},
"ImageOnlyCheckpointLoader": {
"display_name": "Image Only Checkpoint Loader (img2vid model)",
"inputs": {
"ckpt_name": {
"name": "ckpt_name"
}
}
},
"ImageOnlyCheckpointSave": {
"display_name": "ImageOnlyCheckpointSave",
"inputs": {
"model": {
"name": "model"
},
"clip_vision": {
"name": "clip_vision"
},
"vae": {
"name": "vae"
},
"filename_prefix": {
"name": "filename_prefix"
}
}
},
"ImagePadForOutpaint": {
"display_name": "Pad Image for Outpainting",
"inputs": {
"image": {
"name": "image"
},
"left": {
"name": "left"
},
"top": {
"name": "top"
},
"right": {
"name": "right"
},
"bottom": {
"name": "bottom"
},
"feathering": {
"name": "feathering"
}
}
},
"ImageQuantize": {
"display_name": "Image Quantize",
"inputs": {
"image": {
"name": "image"
},
"colors": {
"name": "colors"
},
"dither": {
"name": "dither"
}
}
},
"ImageRGBToYUV": {
"display_name": "ImageRGBToYUV",
"inputs": {
"image": {
"name": "image"
}
},
"outputs": {
"0": {
"name": "Y"
},
"1": {
"name": "U"
},
"2": {
"name": "V"
}
}
},
"ImageScale": {
"display_name": "Upscale Image",
"inputs": {
"image": {
"name": "image"
},
"upscale_method": {
"name": "upscale_method"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"crop": {
"name": "crop"
}
}
},
"ImageScaleBy": {
"display_name": "Upscale Image By",
"inputs": {
"image": {
"name": "image"
},
"upscale_method": {
"name": "upscale_method"
},
"scale_by": {
"name": "scale_by"
}
}
},
"ImageScaleToTotalPixels": {
"display_name": "Scale Image to Total Pixels",
"inputs": {
"image": {
"name": "image"
},
"upscale_method": {
"name": "upscale_method"
},
"megapixels": {
"name": "megapixels"
}
}
},
"ImageSharpen": {
"display_name": "Image Sharpen",
"inputs": {
"image": {
"name": "image"
},
"sharpen_radius": {
"name": "sharpen_radius"
},
"sigma": {
"name": "sigma"
},
"alpha": {
"name": "alpha"
}
}
},
"ImageToMask": {
"display_name": "Convert Image to Mask",
"inputs": {
"image": {
"name": "image"
},
"channel": {
"name": "channel"
}
}
},
"ImageUpscaleWithModel": {
"display_name": "Upscale Image (using Model)",
"inputs": {
"upscale_model": {
"name": "upscale_model"
},
"image": {
"name": "image"
}
}
},
"ImageYUVToRGB": {
"display_name": "ImageYUVToRGB",
"inputs": {
"Y": {
"name": "Y"
},
"U": {
"name": "U"
},
"V": {
"name": "V"
}
}
},
"InpaintModelConditioning": {
"display_name": "InpaintModelConditioning",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"vae": {
"name": "vae"
},
"pixels": {
"name": "pixels"
},
"mask": {
"name": "mask"
},
"noise_mask": {
"name": "noise_mask",
"tooltip": "Add a noise mask to the latent so sampling will only happen within the mask. Might improve results or completely break things depending on the model."
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
},
"2": {
"name": "latent"
}
}
},
"InstructPixToPixConditioning": {
"display_name": "InstructPixToPixConditioning",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"vae": {
"name": "vae"
},
"pixels": {
"name": "pixels"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
},
"2": {
"name": "latent"
}
}
},
"InvertMask": {
"display_name": "InvertMask",
"inputs": {
"mask": {
"name": "mask"
}
}
},
"JoinImageWithAlpha": {
"display_name": "Join Image with Alpha",
"inputs": {
"image": {
"name": "image"
},
"alpha": {
"name": "alpha"
}
}
},
"KarrasScheduler": {
"display_name": "KarrasScheduler",
"inputs": {
"steps": {
"name": "steps"
},
"sigma_max": {
"name": "sigma_max"
},
"sigma_min": {
"name": "sigma_min"
},
"rho": {
"name": "rho"
}
}
},
"KlingCameraControlI2VNode": {
"display_name": "Kling Image to Video (Camera Control)",
"description": "Transform still images into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original image.",
"inputs": {
"start_frame": {
"name": "start_frame",
"tooltip": "Reference Image - URL or Base64 encoded string, cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1. Base64 should not include data:image prefix."
},
"prompt": {
"name": "prompt",
"tooltip": "Positive text prompt"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Negative text prompt"
},
"cfg_scale": {
"name": "cfg_scale"
},
"aspect_ratio": {
"name": "aspect_ratio"
},
"camera_control": {
"name": "camera_control",
"tooltip": "Can be created using the Kling Camera Controls node. Controls the camera movement and motion during the video generation."
}
},
"outputs": {
"1": {
"name": "video_id"
},
"2": {
"name": "duration"
}
}
},
"KlingCameraControls": {
"display_name": "Kling Camera Controls",
"description": "Allows specifying configuration options for Kling Camera Controls and motion control effects.",
"inputs": {
"camera_control_type": {
"name": "camera_control_type"
},
"horizontal_movement": {
"name": "horizontal_movement",
"tooltip": "Controls camera's movement along horizontal axis (x-axis). Negative indicates left, positive indicates right"
},
"vertical_movement": {
"name": "vertical_movement",
"tooltip": "Controls camera's movement along vertical axis (y-axis). Negative indicates downward, positive indicates upward."
},
"pan": {
"name": "pan",
"tooltip": "Controls camera's rotation in vertical plane (x-axis). Negative indicates downward rotation, positive indicates upward rotation."
},
"tilt": {
"name": "tilt",
"tooltip": "Controls camera's rotation in horizontal plane (y-axis). Negative indicates left rotation, positive indicates right rotation."
},
"roll": {
"name": "roll",
"tooltip": "Controls camera's rolling amount (z-axis). Negative indicates counterclockwise, positive indicates clockwise."
},
"zoom": {
"name": "zoom",
"tooltip": "Controls change in camera's focal length. Negative indicates narrower field of view, positive indicates wider field of view."
}
},
"outputs": {
"0": {
"name": "camera_control"
}
}
},
"KlingCameraControlT2VNode": {
"display_name": "Kling Text to Video (Camera Control)",
"description": "Transform text into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original text.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Positive text prompt"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Negative text prompt"
},
"cfg_scale": {
"name": "cfg_scale"
},
"aspect_ratio": {
"name": "aspect_ratio"
},
"camera_control": {
"name": "camera_control",
"tooltip": "Can be created using the Kling Camera Controls node. Controls the camera movement and motion during the video generation."
}
},
"outputs": {
"1": {
"name": "video_id"
},
"2": {
"name": "duration"
}
}
},
"KlingDualCharacterVideoEffectNode": {
"display_name": "Kling Dual Character Video Effects",
"description": "Achieve different special effects when generating a video based on the effect_scene. First image will be positioned on left side, second on right side of the composite.",
"inputs": {
"image_left": {
"name": "image_left",
"tooltip": "Left side image"
},
"image_right": {
"name": "image_right",
"tooltip": "Right side image"
},
"effect_scene": {
"name": "effect_scene"
},
"model_name": {
"name": "model_name"
},
"mode": {
"name": "mode"
},
"duration": {
"name": "duration"
}
},
"outputs": {
"1": {
"name": "duration"
}
}
},
"KlingImage2VideoNode": {
"display_name": "Kling Image to Video",
"description": "Kling Image to Video Node",
"inputs": {
"start_frame": {
"name": "start_frame",
"tooltip": "Reference Image - URL or Base64 encoded string, cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1. Base64 should not include data:image prefix."
},
"prompt": {
"name": "prompt",
"tooltip": "Positive text prompt"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Negative text prompt"
},
"model_name": {
"name": "model_name"
},
"cfg_scale": {
"name": "cfg_scale"
},
"mode": {
"name": "mode"
},
"aspect_ratio": {
"name": "aspect_ratio"
},
"duration": {
"name": "duration"
}
},
"outputs": {
"1": {
"name": "video_id"
},
"2": {
"name": "duration"
}
}
},
"KlingImageGenerationNode": {
"display_name": "Kling Image Generation",
"description": "Kling Image Generation Node. Generate an image from a text prompt with an optional reference image.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Positive text prompt"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Negative text prompt"
},
"image_type": {
"name": "image_type"
},
"image_fidelity": {
"name": "image_fidelity",
"tooltip": "Reference intensity for user-uploaded images"
},
"human_fidelity": {
"name": "human_fidelity",
"tooltip": "Subject reference similarity"
},
"model_name": {
"name": "model_name"
},
"aspect_ratio": {
"name": "aspect_ratio"
},
"n": {
"name": "n",
"tooltip": "Number of generated images"
},
"image": {
"name": "image"
}
}
},
"KlingLipSyncAudioToVideoNode": {
"display_name": "Kling Lip Sync Video with Audio",
"description": "Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file.",
"inputs": {
"video": {
"name": "video"
},
"audio": {
"name": "audio"
},
"voice_language": {
"name": "voice_language"
}
},
"outputs": {
"1": {
"name": "video_id"
},
"2": {
"name": "duration"
}
}
},
"KlingLipSyncTextToVideoNode": {
"display_name": "Kling Lip Sync Video with Text",
"description": "Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt.",
"inputs": {
"video": {
"name": "video"
},
"text": {
"name": "text",
"tooltip": "Text Content for Lip-Sync Video Generation. Required when mode is text2video. Maximum length is 120 characters."
},
"voice": {
"name": "voice"
},
"voice_speed": {
"name": "voice_speed",
"tooltip": "Speech Rate. Valid range: 0.8~2.0, accurate to one decimal place."
}
},
"outputs": {
"1": {
"name": "video_id"
},
"2": {
"name": "duration"
}
}
},
"KlingSingleImageVideoEffectNode": {
"display_name": "Kling Video Effects",
"description": "Achieve different special effects when generating a video based on the effect_scene.",
"inputs": {
"image": {
"name": "image",
"tooltip": " Reference Image. URL or Base64 encoded string (without data:image prefix). File size cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1"
},
"effect_scene": {
"name": "effect_scene"
},
"model_name": {
"name": "model_name"
},
"duration": {
"name": "duration"
}
},
"outputs": {
"1": {
"name": "video_id"
},
"2": {
"name": "duration"
}
}
},
"KlingStartEndFrameNode": {
"display_name": "Kling Start-End Frame to Video",
"description": "Generate a video sequence that transitions between your provided start and end images. The node creates all frames in between, producing a smooth transformation from the first frame to the last.",
"inputs": {
"start_frame": {
"name": "start_frame",
"tooltip": "Reference Image - URL or Base64 encoded string, cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1. Base64 should not include data:image prefix."
},
"end_frame": {
"name": "end_frame",
"tooltip": "Reference Image - End frame control. URL or Base64 encoded string, cannot exceed 10MB, resolution not less than 300*300px. Base64 should not include data:image prefix."
},
"prompt": {
"name": "prompt",
"tooltip": "Positive text prompt"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Negative text prompt"
},
"cfg_scale": {
"name": "cfg_scale"
},
"aspect_ratio": {
"name": "aspect_ratio"
},
"mode": {
"name": "mode",
"tooltip": "The configuration to use for the video generation following the format: mode / duration / model_name."
}
},
"outputs": {
"1": {
"name": "video_id"
},
"2": {
"name": "duration"
}
}
},
"KlingTextToVideoNode": {
"display_name": "Kling Text to Video",
"description": "Kling Text to Video Node",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Positive text prompt"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Negative text prompt"
},
"cfg_scale": {
"name": "cfg_scale"
},
"aspect_ratio": {
"name": "aspect_ratio"
},
"mode": {
"name": "mode",
"tooltip": "The configuration to use for the video generation following the format: mode / duration / model_name."
}
},
"outputs": {
"1": {
"name": "video_id"
},
"2": {
"name": "duration"
}
}
},
"KlingVideoExtendNode": {
"display_name": "Kling Video Extend",
"description": "Kling Video Extend Node. Extend videos made by other Kling nodes. The video_id is created by using other Kling Nodes.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Positive text prompt for guiding the video extension"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Negative text prompt for elements to avoid in the extended video"
},
"cfg_scale": {
"name": "cfg_scale"
},
"video_id": {
"name": "video_id",
"tooltip": "The ID of the video to be extended. Supports videos generated by text-to-video, image-to-video, and previous video extension operations. Cannot exceed 3 minutes total duration after extension."
}
},
"outputs": {
"1": {
"name": "video_id"
},
"2": {
"name": "duration"
}
}
},
"KlingVirtualTryOnNode": {
"display_name": "Kling Virtual Try On",
"description": "Kling Virtual Try On Node. Input a human image and a cloth image to try on the cloth on the human.",
"inputs": {
"human_image": {
"name": "human_image"
},
"cloth_image": {
"name": "cloth_image"
},
"model_name": {
"name": "model_name"
}
}
},
"KSampler": {
"display_name": "KSampler",
"description": "Uses the provided model, positive and negative conditioning to denoise the latent image.",
"inputs": {
"model": {
"name": "model",
"tooltip": "The model used for denoising the input latent."
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"steps": {
"name": "steps",
"tooltip": "The number of steps used in the denoising process."
},
"cfg": {
"name": "cfg",
"tooltip": "The Classifier-Free Guidance scale balances creativity and adherence to the prompt. Higher values result in images more closely matching the prompt however too high values will negatively impact quality."
},
"sampler_name": {
"name": "sampler_name",
"tooltip": "The algorithm used when sampling, this can affect the quality, speed, and style of the generated output."
},
"scheduler": {
"name": "scheduler",
"tooltip": "The scheduler controls how noise is gradually removed to form the image."
},
"positive": {
"name": "positive",
"tooltip": "The conditioning describing the attributes you want to include in the image."
},
"negative": {
"name": "negative",
"tooltip": "The conditioning describing the attributes you want to exclude from the image."
},
"latent_image": {
"name": "latent_image",
"tooltip": "The latent image to denoise."
},
"denoise": {
"name": "denoise",
"tooltip": "The amount of denoising applied, lower values will maintain the structure of the initial image allowing for image to image sampling."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": "The denoised latent."
}
}
},
"KSamplerAdvanced": {
"display_name": "KSampler (Advanced)",
"inputs": {
"model": {
"name": "model"
},
"add_noise": {
"name": "add_noise"
},
"noise_seed": {
"name": "noise_seed"
},
"steps": {
"name": "steps"
},
"cfg": {
"name": "cfg"
},
"sampler_name": {
"name": "sampler_name"
},
"scheduler": {
"name": "scheduler"
},
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"latent_image": {
"name": "latent_image"
},
"start_at_step": {
"name": "start_at_step"
},
"end_at_step": {
"name": "end_at_step"
},
"return_with_leftover_noise": {
"name": "return_with_leftover_noise"
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"KSamplerSelect": {
"display_name": "KSamplerSelect",
"inputs": {
"sampler_name": {
"name": "sampler_name"
}
}
},
"LaplaceScheduler": {
"display_name": "LaplaceScheduler",
"inputs": {
"steps": {
"name": "steps"
},
"sigma_max": {
"name": "sigma_max"
},
"sigma_min": {
"name": "sigma_min"
},
"mu": {
"name": "mu"
},
"beta": {
"name": "beta"
}
}
},
"LatentAdd": {
"display_name": "LatentAdd",
"inputs": {
"samples1": {
"name": "samples1"
},
"samples2": {
"name": "samples2"
}
}
},
"LatentApplyOperation": {
"display_name": "LatentApplyOperation",
"inputs": {
"samples": {
"name": "samples"
},
"operation": {
"name": "operation"
}
}
},
"LatentApplyOperationCFG": {
"display_name": "LatentApplyOperationCFG",
"inputs": {
"model": {
"name": "model"
},
"operation": {
"name": "operation"
}
}
},
"LatentBatch": {
"display_name": "LatentBatch",
"inputs": {
"samples1": {
"name": "samples1"
},
"samples2": {
"name": "samples2"
}
}
},
"LatentBatchSeedBehavior": {
"display_name": "LatentBatchSeedBehavior",
"inputs": {
"samples": {
"name": "samples"
},
"seed_behavior": {
"name": "seed_behavior"
}
}
},
"LatentBlend": {
"display_name": "Latent Blend",
"inputs": {
"samples1": {
"name": "samples1"
},
"samples2": {
"name": "samples2"
},
"blend_factor": {
"name": "blend_factor"
}
}
},
"LatentComposite": {
"display_name": "Latent Composite",
"inputs": {
"samples_to": {
"name": "samples_to"
},
"samples_from": {
"name": "samples_from"
},
"x": {
"name": "x"
},
"y": {
"name": "y"
},
"feather": {
"name": "feather"
}
}
},
"LatentCompositeMasked": {
"display_name": "LatentCompositeMasked",
"inputs": {
"destination": {
"name": "destination"
},
"source": {
"name": "source"
},
"x": {
"name": "x"
},
"y": {
"name": "y"
},
"resize_source": {
"name": "resize_source"
},
"mask": {
"name": "mask"
}
}
},
"LatentCrop": {
"display_name": "Crop Latent",
"inputs": {
"samples": {
"name": "samples"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"x": {
"name": "x"
},
"y": {
"name": "y"
}
}
},
"LatentFlip": {
"display_name": "Flip Latent",
"inputs": {
"samples": {
"name": "samples"
},
"flip_method": {
"name": "flip_method"
}
}
},
"LatentFromBatch": {
"display_name": "Latent From Batch",
"inputs": {
"samples": {
"name": "samples"
},
"batch_index": {
"name": "batch_index"
},
"length": {
"name": "length"
}
}
},
"LatentInterpolate": {
"display_name": "LatentInterpolate",
"inputs": {
"samples1": {
"name": "samples1"
},
"samples2": {
"name": "samples2"
},
"ratio": {
"name": "ratio"
}
}
},
"LatentMultiply": {
"display_name": "LatentMultiply",
"inputs": {
"samples": {
"name": "samples"
},
"multiplier": {
"name": "multiplier"
}
}
},
"LatentOperationSharpen": {
"display_name": "LatentOperationSharpen",
"inputs": {
"sharpen_radius": {
"name": "sharpen_radius"
},
"sigma": {
"name": "sigma"
},
"alpha": {
"name": "alpha"
}
}
},
"LatentOperationTonemapReinhard": {
"display_name": "LatentOperationTonemapReinhard",
"inputs": {
"multiplier": {
"name": "multiplier"
}
}
},
"LatentRotate": {
"display_name": "Rotate Latent",
"inputs": {
"samples": {
"name": "samples"
},
"rotation": {
"name": "rotation"
}
}
},
"LatentSubtract": {
"display_name": "LatentSubtract",
"inputs": {
"samples1": {
"name": "samples1"
},
"samples2": {
"name": "samples2"
}
}
},
"LatentUpscale": {
"display_name": "Upscale Latent",
"inputs": {
"samples": {
"name": "samples"
},
"upscale_method": {
"name": "upscale_method"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"crop": {
"name": "crop"
}
}
},
"LatentUpscaleBy": {
"display_name": "Upscale Latent By",
"inputs": {
"samples": {
"name": "samples"
},
"upscale_method": {
"name": "upscale_method"
},
"scale_by": {
"name": "scale_by"
}
}
},
"Load3D": {
"display_name": "Load 3D",
"inputs": {
"model_file": {
"name": "model_file"
},
"image": {
"name": "image"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"clear": {},
"upload 3d model": {}
},
"outputs": {
"0": {
"name": "image"
},
"1": {
"name": "mask"
},
"2": {
"name": "mesh_path"
},
"3": {
"name": "normal"
},
"4": {
"name": "lineart"
},
"5": {
"name": "camera_info"
}
}
},
"Load3DAnimation": {
"display_name": "Load 3D - Animation",
"inputs": {
"model_file": {
"name": "model_file"
},
"image": {
"name": "image"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"clear": {},
"upload 3d model": {}
},
"outputs": {
"0": {
"name": "image"
},
"1": {
"name": "mask"
},
"2": {
"name": "mesh_path"
},
"3": {
"name": "normal"
},
"4": {
"name": "camera_info"
}
}
},
"LoadAudio": {
"display_name": "LoadAudio",
"inputs": {
"audio": {
"name": "audio"
},
"audioUI": {
"name": "audioUI"
},
"upload": {
"name": "choose file to upload"
}
}
},
"LoadImage": {
"display_name": "Load Image",
"inputs": {
"image": {
"name": "image"
},
"upload": {
"name": "choose file to upload"
}
}
},
"LoadImageMask": {
"display_name": "Load Image (as Mask)",
"inputs": {
"image": {
"name": "image"
},
"channel": {
"name": "channel"
},
"upload": {
"name": "choose file to upload"
}
}
},
"LoadImageOutput": {
"display_name": "Load Image (from Outputs)",
"description": "Load an image from the output folder. When the refresh button is clicked, the node will update the image list and automatically select the first image, allowing for easy iteration.",
"inputs": {
"image": {
"name": "image"
},
"refresh": {},
"upload": {
"name": "choose file to upload"
}
}
},
"LoadLatent": {
"display_name": "LoadLatent",
"inputs": {
"latent": {
"name": "latent"
}
}
},
"LoadVideo": {
"display_name": "Load Video",
"inputs": {
"file": {
"name": "file"
},
"upload": {
"name": "choose file to upload"
}
}
},
"LoraLoader": {
"display_name": "Load LoRA",
"description": "LoRAs are used to modify diffusion and CLIP models, altering the way in which latents are denoised such as applying styles. Multiple LoRA nodes can be linked together.",
"inputs": {
"model": {
"name": "model",
"tooltip": "The diffusion model the LoRA will be applied to."
},
"clip": {
"name": "clip",
"tooltip": "The CLIP model the LoRA will be applied to."
},
"lora_name": {
"name": "lora_name",
"tooltip": "The name of the LoRA."
},
"strength_model": {
"name": "strength_model",
"tooltip": "How strongly to modify the diffusion model. This value can be negative."
},
"strength_clip": {
"name": "strength_clip",
"tooltip": "How strongly to modify the CLIP model. This value can be negative."
}
},
"outputs": {
"0": {
"tooltip": "The modified diffusion model."
},
"1": {
"tooltip": "The modified CLIP model."
}
}
},
"LoraLoaderModelOnly": {
"display_name": "LoraLoaderModelOnly",
"description": "LoRAs are used to modify diffusion and CLIP models, altering the way in which latents are denoised such as applying styles. Multiple LoRA nodes can be linked together.",
"inputs": {
"model": {
"name": "model"
},
"lora_name": {
"name": "lora_name"
},
"strength_model": {
"name": "strength_model"
}
},
"outputs": {
"0": {
"tooltip": "The modified diffusion model."
}
}
},
"LoraSave": {
"display_name": "Extract and Save Lora",
"inputs": {
"filename_prefix": {
"name": "filename_prefix"
},
"rank": {
"name": "rank"
},
"lora_type": {
"name": "lora_type"
},
"bias_diff": {
"name": "bias_diff"
},
"model_diff": {
"name": "model_diff",
"tooltip": "The ModelSubtract output to be converted to a lora."
},
"text_encoder_diff": {
"name": "text_encoder_diff",
"tooltip": "The CLIPSubtract output to be converted to a lora."
}
}
},
"LotusConditioning": {
"display_name": "LotusConditioning",
"outputs": {
"0": {
"name": "conditioning"
}
}
},
"LTXVAddGuide": {
"display_name": "LTXVAddGuide",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"vae": {
"name": "vae"
},
"latent": {
"name": "latent"
},
"image": {
"name": "image",
"tooltip": "Image or video to condition the latent video on. Must be 8*n + 1 frames.If the video is not 8*n + 1 frames, it will be cropped to the nearest 8*n + 1 frames."
},
"frame_idx": {
"name": "frame_idx",
"tooltip": "Frame index to start the conditioning at. For single-frame images or videos with 1-8 frames, any frame_idx value is acceptable. For videos with 9+ frames, frame_idx must be divisible by 8, otherwise it will be rounded down to the nearest multiple of 8. Negative values are counted from the end of the video."
},
"strength": {
"name": "strength"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
},
"2": {
"name": "latent"
}
}
},
"LTXVConditioning": {
"display_name": "LTXVConditioning",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"frame_rate": {
"name": "frame_rate"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
}
}
},
"LTXVCropGuides": {
"display_name": "LTXVCropGuides",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"latent": {
"name": "latent"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
},
"2": {
"name": "latent"
}
}
},
"LTXVImgToVideo": {
"display_name": "LTXVImgToVideo",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"vae": {
"name": "vae"
},
"image": {
"name": "image"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
},
"strength": {
"name": "strength"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
},
"2": {
"name": "latent"
}
}
},
"LTXVPreprocess": {
"display_name": "LTXVPreprocess",
"inputs": {
"image": {
"name": "image"
},
"img_compression": {
"name": "img_compression",
"tooltip": "Amount of compression to apply on image."
}
},
"outputs": {
"0": {
"name": "output_image"
}
}
},
"LTXVScheduler": {
"display_name": "LTXVScheduler",
"inputs": {
"steps": {
"name": "steps"
},
"max_shift": {
"name": "max_shift"
},
"base_shift": {
"name": "base_shift"
},
"stretch": {
"name": "stretch",
"tooltip": "Stretch the sigmas to be in the range [terminal, 1]."
},
"terminal": {
"name": "terminal",
"tooltip": "The terminal value of the sigmas after stretching."
},
"latent": {
"name": "latent"
}
}
},
"LumaConceptsNode": {
"display_name": "Luma Concepts",
"description": "Holds one or more Camera Concepts for use with Luma Text to Video and Luma Image to Video nodes.",
"inputs": {
"concept1": {
"name": "concept1"
},
"concept2": {
"name": "concept2"
},
"concept3": {
"name": "concept3"
},
"concept4": {
"name": "concept4"
},
"luma_concepts": {
"name": "luma_concepts",
"tooltip": "Optional Camera Concepts to add to the ones chosen here."
}
},
"outputs": {
"0": {
"name": "luma_concepts"
}
}
},
"LumaImageModifyNode": {
"display_name": "Luma Image to Image",
"description": "Modifies images synchronously based on prompt and aspect ratio.",
"inputs": {
"image": {
"name": "image"
},
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation"
},
"image_weight": {
"name": "image_weight",
"tooltip": "Weight of the image; the closer to 1.0, the less the image will be modified."
},
"model": {
"name": "model"
},
"seed": {
"name": "seed",
"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"LumaImageNode": {
"display_name": "Luma Text to Image",
"description": "Generates images synchronously based on prompt and aspect ratio.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation"
},
"model": {
"name": "model"
},
"aspect_ratio": {
"name": "aspect_ratio"
},
"seed": {
"name": "seed",
"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed."
},
"style_image_weight": {
"name": "style_image_weight",
"tooltip": "Weight of style image. Ignored if no style_image provided."
},
"image_luma_ref": {
"name": "image_luma_ref",
"tooltip": "Luma Reference node connection to influence generation with input images; up to 4 images can be considered."
},
"style_image": {
"name": "style_image",
"tooltip": "Style reference image; only 1 image will be used."
},
"character_image": {
"name": "character_image",
"tooltip": "Character reference images; can be a batch of multiple, up to 4 images can be considered."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"LumaImageToVideoNode": {
"display_name": "Luma Image to Video",
"description": "Generates videos synchronously based on prompt, input images, and output_size.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the video generation"
},
"model": {
"name": "model"
},
"resolution": {
"name": "resolution"
},
"duration": {
"name": "duration"
},
"loop": {
"name": "loop"
},
"seed": {
"name": "seed",
"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed."
},
"first_image": {
"name": "first_image",
"tooltip": "First frame of generated video."
},
"last_image": {
"name": "last_image",
"tooltip": "Last frame of generated video."
},
"luma_concepts": {
"name": "luma_concepts",
"tooltip": "Optional Camera Concepts to dictate camera motion via the Luma Concepts node."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"LumaReferenceNode": {
"display_name": "Luma Reference",
"description": "Holds an image and weight for use with Luma Generate Image node.",
"inputs": {
"image": {
"name": "image",
"tooltip": "Image to use as reference."
},
"weight": {
"name": "weight",
"tooltip": "Weight of image reference."
},
"luma_ref": {
"name": "luma_ref"
}
},
"outputs": {
"0": {
"name": "luma_ref"
}
}
},
"LumaVideoNode": {
"display_name": "Luma Text to Video",
"description": "Generates videos synchronously based on prompt and output_size.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the video generation"
},
"model": {
"name": "model"
},
"aspect_ratio": {
"name": "aspect_ratio"
},
"resolution": {
"name": "resolution"
},
"duration": {
"name": "duration"
},
"loop": {
"name": "loop"
},
"seed": {
"name": "seed",
"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed."
},
"luma_concepts": {
"name": "luma_concepts",
"tooltip": "Optional Camera Concepts to dictate camera motion via the Luma Concepts node."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"Mahiro": {
"display_name": "Mahiro is so cute that she deserves a better guidance function!! (。・ω・。)",
"description": "Modify the guidance to scale more on the 'direction' of the positive prompt rather than the difference between the negative prompt.",
"inputs": {
"model": {
"name": "model"
}
},
"outputs": {
"0": {
"name": "patched_model"
}
}
},
"MaskComposite": {
"display_name": "MaskComposite",
"inputs": {
"destination": {
"name": "destination"
},
"source": {
"name": "source"
},
"x": {
"name": "x"
},
"y": {
"name": "y"
},
"operation": {
"name": "operation"
}
}
},
"MaskPreview": {
"display_name": "MaskPreview",
"description": "Saves the input images to your ComfyUI output directory.",
"inputs": {
"mask": {
"name": "mask"
}
}
},
"MaskToImage": {
"display_name": "Convert Mask to Image",
"inputs": {
"mask": {
"name": "mask"
}
}
},
"MinimaxImageToVideoNode": {
"display_name": "MiniMax Image to Video",
"description": "Generates videos from an image and prompts using MiniMax's API",
"inputs": {
"image": {
"name": "image",
"tooltip": "Image to use as first frame of video generation"
},
"prompt_text": {
"name": "prompt_text",
"tooltip": "Text prompt to guide the video generation"
},
"model": {
"name": "model",
"tooltip": "Model to use for video generation"
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"MinimaxTextToVideoNode": {
"display_name": "MiniMax Text to Video",
"description": "Generates videos from prompts using MiniMax's API",
"inputs": {
"prompt_text": {
"name": "prompt_text",
"tooltip": "Text prompt to guide the video generation"
},
"model": {
"name": "model",
"tooltip": "Model to use for video generation"
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"ModelComputeDtype": {
"display_name": "ModelComputeDtype",
"inputs": {
"model": {
"name": "model"
},
"dtype": {
"name": "dtype"
}
}
},
"ModelMergeAdd": {
"display_name": "ModelMergeAdd",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
}
}
},
"ModelMergeAuraflow": {
"display_name": "ModelMergeAuraflow",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"init_x_linear_": {
"name": "init_x_linear."
},
"positional_encoding": {
"name": "positional_encoding"
},
"cond_seq_linear_": {
"name": "cond_seq_linear."
},
"register_tokens": {
"name": "register_tokens"
},
"t_embedder_": {
"name": "t_embedder."
},
"double_layers_0_": {
"name": "double_layers.0."
},
"double_layers_1_": {
"name": "double_layers.1."
},
"double_layers_2_": {
"name": "double_layers.2."
},
"double_layers_3_": {
"name": "double_layers.3."
},
"single_layers_0_": {
"name": "single_layers.0."
},
"single_layers_1_": {
"name": "single_layers.1."
},
"single_layers_2_": {
"name": "single_layers.2."
},
"single_layers_3_": {
"name": "single_layers.3."
},
"single_layers_4_": {
"name": "single_layers.4."
},
"single_layers_5_": {
"name": "single_layers.5."
},
"single_layers_6_": {
"name": "single_layers.6."
},
"single_layers_7_": {
"name": "single_layers.7."
},
"single_layers_8_": {
"name": "single_layers.8."
},
"single_layers_9_": {
"name": "single_layers.9."
},
"single_layers_10_": {
"name": "single_layers.10."
},
"single_layers_11_": {
"name": "single_layers.11."
},
"single_layers_12_": {
"name": "single_layers.12."
},
"single_layers_13_": {
"name": "single_layers.13."
},
"single_layers_14_": {
"name": "single_layers.14."
},
"single_layers_15_": {
"name": "single_layers.15."
},
"single_layers_16_": {
"name": "single_layers.16."
},
"single_layers_17_": {
"name": "single_layers.17."
},
"single_layers_18_": {
"name": "single_layers.18."
},
"single_layers_19_": {
"name": "single_layers.19."
},
"single_layers_20_": {
"name": "single_layers.20."
},
"single_layers_21_": {
"name": "single_layers.21."
},
"single_layers_22_": {
"name": "single_layers.22."
},
"single_layers_23_": {
"name": "single_layers.23."
},
"single_layers_24_": {
"name": "single_layers.24."
},
"single_layers_25_": {
"name": "single_layers.25."
},
"single_layers_26_": {
"name": "single_layers.26."
},
"single_layers_27_": {
"name": "single_layers.27."
},
"single_layers_28_": {
"name": "single_layers.28."
},
"single_layers_29_": {
"name": "single_layers.29."
},
"single_layers_30_": {
"name": "single_layers.30."
},
"single_layers_31_": {
"name": "single_layers.31."
},
"modF_": {
"name": "modF."
},
"final_linear_": {
"name": "final_linear."
}
}
},
"ModelMergeBlocks": {
"display_name": "ModelMergeBlocks",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"input": {
"name": "input"
},
"middle": {
"name": "middle"
},
"out": {
"name": "out"
}
}
},
"ModelMergeCosmos14B": {
"display_name": "ModelMergeCosmos14B",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"pos_embedder_": {
"name": "pos_embedder."
},
"extra_pos_embedder_": {
"name": "extra_pos_embedder."
},
"x_embedder_": {
"name": "x_embedder."
},
"t_embedder_": {
"name": "t_embedder."
},
"affline_norm_": {
"name": "affline_norm."
},
"blocks_block0_": {
"name": "blocks.block0."
},
"blocks_block1_": {
"name": "blocks.block1."
},
"blocks_block2_": {
"name": "blocks.block2."
},
"blocks_block3_": {
"name": "blocks.block3."
},
"blocks_block4_": {
"name": "blocks.block4."
},
"blocks_block5_": {
"name": "blocks.block5."
},
"blocks_block6_": {
"name": "blocks.block6."
},
"blocks_block7_": {
"name": "blocks.block7."
},
"blocks_block8_": {
"name": "blocks.block8."
},
"blocks_block9_": {
"name": "blocks.block9."
},
"blocks_block10_": {
"name": "blocks.block10."
},
"blocks_block11_": {
"name": "blocks.block11."
},
"blocks_block12_": {
"name": "blocks.block12."
},
"blocks_block13_": {
"name": "blocks.block13."
},
"blocks_block14_": {
"name": "blocks.block14."
},
"blocks_block15_": {
"name": "blocks.block15."
},
"blocks_block16_": {
"name": "blocks.block16."
},
"blocks_block17_": {
"name": "blocks.block17."
},
"blocks_block18_": {
"name": "blocks.block18."
},
"blocks_block19_": {
"name": "blocks.block19."
},
"blocks_block20_": {
"name": "blocks.block20."
},
"blocks_block21_": {
"name": "blocks.block21."
},
"blocks_block22_": {
"name": "blocks.block22."
},
"blocks_block23_": {
"name": "blocks.block23."
},
"blocks_block24_": {
"name": "blocks.block24."
},
"blocks_block25_": {
"name": "blocks.block25."
},
"blocks_block26_": {
"name": "blocks.block26."
},
"blocks_block27_": {
"name": "blocks.block27."
},
"blocks_block28_": {
"name": "blocks.block28."
},
"blocks_block29_": {
"name": "blocks.block29."
},
"blocks_block30_": {
"name": "blocks.block30."
},
"blocks_block31_": {
"name": "blocks.block31."
},
"blocks_block32_": {
"name": "blocks.block32."
},
"blocks_block33_": {
"name": "blocks.block33."
},
"blocks_block34_": {
"name": "blocks.block34."
},
"blocks_block35_": {
"name": "blocks.block35."
},
"final_layer_": {
"name": "final_layer."
}
}
},
"ModelMergeCosmos7B": {
"display_name": "ModelMergeCosmos7B",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"pos_embedder_": {
"name": "pos_embedder."
},
"extra_pos_embedder_": {
"name": "extra_pos_embedder."
},
"x_embedder_": {
"name": "x_embedder."
},
"t_embedder_": {
"name": "t_embedder."
},
"affline_norm_": {
"name": "affline_norm."
},
"blocks_block0_": {
"name": "blocks.block0."
},
"blocks_block1_": {
"name": "blocks.block1."
},
"blocks_block2_": {
"name": "blocks.block2."
},
"blocks_block3_": {
"name": "blocks.block3."
},
"blocks_block4_": {
"name": "blocks.block4."
},
"blocks_block5_": {
"name": "blocks.block5."
},
"blocks_block6_": {
"name": "blocks.block6."
},
"blocks_block7_": {
"name": "blocks.block7."
},
"blocks_block8_": {
"name": "blocks.block8."
},
"blocks_block9_": {
"name": "blocks.block9."
},
"blocks_block10_": {
"name": "blocks.block10."
},
"blocks_block11_": {
"name": "blocks.block11."
},
"blocks_block12_": {
"name": "blocks.block12."
},
"blocks_block13_": {
"name": "blocks.block13."
},
"blocks_block14_": {
"name": "blocks.block14."
},
"blocks_block15_": {
"name": "blocks.block15."
},
"blocks_block16_": {
"name": "blocks.block16."
},
"blocks_block17_": {
"name": "blocks.block17."
},
"blocks_block18_": {
"name": "blocks.block18."
},
"blocks_block19_": {
"name": "blocks.block19."
},
"blocks_block20_": {
"name": "blocks.block20."
},
"blocks_block21_": {
"name": "blocks.block21."
},
"blocks_block22_": {
"name": "blocks.block22."
},
"blocks_block23_": {
"name": "blocks.block23."
},
"blocks_block24_": {
"name": "blocks.block24."
},
"blocks_block25_": {
"name": "blocks.block25."
},
"blocks_block26_": {
"name": "blocks.block26."
},
"blocks_block27_": {
"name": "blocks.block27."
},
"final_layer_": {
"name": "final_layer."
}
}
},
"ModelMergeFlux1": {
"display_name": "ModelMergeFlux1",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"img_in_": {
"name": "img_in."
},
"time_in_": {
"name": "time_in."
},
"guidance_in": {
"name": "guidance_in"
},
"vector_in_": {
"name": "vector_in."
},
"txt_in_": {
"name": "txt_in."
},
"double_blocks_0_": {
"name": "double_blocks.0."
},
"double_blocks_1_": {
"name": "double_blocks.1."
},
"double_blocks_2_": {
"name": "double_blocks.2."
},
"double_blocks_3_": {
"name": "double_blocks.3."
},
"double_blocks_4_": {
"name": "double_blocks.4."
},
"double_blocks_5_": {
"name": "double_blocks.5."
},
"double_blocks_6_": {
"name": "double_blocks.6."
},
"double_blocks_7_": {
"name": "double_blocks.7."
},
"double_blocks_8_": {
"name": "double_blocks.8."
},
"double_blocks_9_": {
"name": "double_blocks.9."
},
"double_blocks_10_": {
"name": "double_blocks.10."
},
"double_blocks_11_": {
"name": "double_blocks.11."
},
"double_blocks_12_": {
"name": "double_blocks.12."
},
"double_blocks_13_": {
"name": "double_blocks.13."
},
"double_blocks_14_": {
"name": "double_blocks.14."
},
"double_blocks_15_": {
"name": "double_blocks.15."
},
"double_blocks_16_": {
"name": "double_blocks.16."
},
"double_blocks_17_": {
"name": "double_blocks.17."
},
"double_blocks_18_": {
"name": "double_blocks.18."
},
"single_blocks_0_": {
"name": "single_blocks.0."
},
"single_blocks_1_": {
"name": "single_blocks.1."
},
"single_blocks_2_": {
"name": "single_blocks.2."
},
"single_blocks_3_": {
"name": "single_blocks.3."
},
"single_blocks_4_": {
"name": "single_blocks.4."
},
"single_blocks_5_": {
"name": "single_blocks.5."
},
"single_blocks_6_": {
"name": "single_blocks.6."
},
"single_blocks_7_": {
"name": "single_blocks.7."
},
"single_blocks_8_": {
"name": "single_blocks.8."
},
"single_blocks_9_": {
"name": "single_blocks.9."
},
"single_blocks_10_": {
"name": "single_blocks.10."
},
"single_blocks_11_": {
"name": "single_blocks.11."
},
"single_blocks_12_": {
"name": "single_blocks.12."
},
"single_blocks_13_": {
"name": "single_blocks.13."
},
"single_blocks_14_": {
"name": "single_blocks.14."
},
"single_blocks_15_": {
"name": "single_blocks.15."
},
"single_blocks_16_": {
"name": "single_blocks.16."
},
"single_blocks_17_": {
"name": "single_blocks.17."
},
"single_blocks_18_": {
"name": "single_blocks.18."
},
"single_blocks_19_": {
"name": "single_blocks.19."
},
"single_blocks_20_": {
"name": "single_blocks.20."
},
"single_blocks_21_": {
"name": "single_blocks.21."
},
"single_blocks_22_": {
"name": "single_blocks.22."
},
"single_blocks_23_": {
"name": "single_blocks.23."
},
"single_blocks_24_": {
"name": "single_blocks.24."
},
"single_blocks_25_": {
"name": "single_blocks.25."
},
"single_blocks_26_": {
"name": "single_blocks.26."
},
"single_blocks_27_": {
"name": "single_blocks.27."
},
"single_blocks_28_": {
"name": "single_blocks.28."
},
"single_blocks_29_": {
"name": "single_blocks.29."
},
"single_blocks_30_": {
"name": "single_blocks.30."
},
"single_blocks_31_": {
"name": "single_blocks.31."
},
"single_blocks_32_": {
"name": "single_blocks.32."
},
"single_blocks_33_": {
"name": "single_blocks.33."
},
"single_blocks_34_": {
"name": "single_blocks.34."
},
"single_blocks_35_": {
"name": "single_blocks.35."
},
"single_blocks_36_": {
"name": "single_blocks.36."
},
"single_blocks_37_": {
"name": "single_blocks.37."
},
"final_layer_": {
"name": "final_layer."
}
}
},
"ModelMergeLTXV": {
"display_name": "ModelMergeLTXV",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"patchify_proj_": {
"name": "patchify_proj."
},
"adaln_single_": {
"name": "adaln_single."
},
"caption_projection_": {
"name": "caption_projection."
},
"transformer_blocks_0_": {
"name": "transformer_blocks.0."
},
"transformer_blocks_1_": {
"name": "transformer_blocks.1."
},
"transformer_blocks_2_": {
"name": "transformer_blocks.2."
},
"transformer_blocks_3_": {
"name": "transformer_blocks.3."
},
"transformer_blocks_4_": {
"name": "transformer_blocks.4."
},
"transformer_blocks_5_": {
"name": "transformer_blocks.5."
},
"transformer_blocks_6_": {
"name": "transformer_blocks.6."
},
"transformer_blocks_7_": {
"name": "transformer_blocks.7."
},
"transformer_blocks_8_": {
"name": "transformer_blocks.8."
},
"transformer_blocks_9_": {
"name": "transformer_blocks.9."
},
"transformer_blocks_10_": {
"name": "transformer_blocks.10."
},
"transformer_blocks_11_": {
"name": "transformer_blocks.11."
},
"transformer_blocks_12_": {
"name": "transformer_blocks.12."
},
"transformer_blocks_13_": {
"name": "transformer_blocks.13."
},
"transformer_blocks_14_": {
"name": "transformer_blocks.14."
},
"transformer_blocks_15_": {
"name": "transformer_blocks.15."
},
"transformer_blocks_16_": {
"name": "transformer_blocks.16."
},
"transformer_blocks_17_": {
"name": "transformer_blocks.17."
},
"transformer_blocks_18_": {
"name": "transformer_blocks.18."
},
"transformer_blocks_19_": {
"name": "transformer_blocks.19."
},
"transformer_blocks_20_": {
"name": "transformer_blocks.20."
},
"transformer_blocks_21_": {
"name": "transformer_blocks.21."
},
"transformer_blocks_22_": {
"name": "transformer_blocks.22."
},
"transformer_blocks_23_": {
"name": "transformer_blocks.23."
},
"transformer_blocks_24_": {
"name": "transformer_blocks.24."
},
"transformer_blocks_25_": {
"name": "transformer_blocks.25."
},
"transformer_blocks_26_": {
"name": "transformer_blocks.26."
},
"transformer_blocks_27_": {
"name": "transformer_blocks.27."
},
"scale_shift_table": {
"name": "scale_shift_table"
},
"proj_out_": {
"name": "proj_out."
}
}
},
"ModelMergeMochiPreview": {
"display_name": "ModelMergeMochiPreview",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"pos_frequencies_": {
"name": "pos_frequencies."
},
"t_embedder_": {
"name": "t_embedder."
},
"t5_y_embedder_": {
"name": "t5_y_embedder."
},
"t5_yproj_": {
"name": "t5_yproj."
},
"blocks_0_": {
"name": "blocks.0."
},
"blocks_1_": {
"name": "blocks.1."
},
"blocks_2_": {
"name": "blocks.2."
},
"blocks_3_": {
"name": "blocks.3."
},
"blocks_4_": {
"name": "blocks.4."
},
"blocks_5_": {
"name": "blocks.5."
},
"blocks_6_": {
"name": "blocks.6."
},
"blocks_7_": {
"name": "blocks.7."
},
"blocks_8_": {
"name": "blocks.8."
},
"blocks_9_": {
"name": "blocks.9."
},
"blocks_10_": {
"name": "blocks.10."
},
"blocks_11_": {
"name": "blocks.11."
},
"blocks_12_": {
"name": "blocks.12."
},
"blocks_13_": {
"name": "blocks.13."
},
"blocks_14_": {
"name": "blocks.14."
},
"blocks_15_": {
"name": "blocks.15."
},
"blocks_16_": {
"name": "blocks.16."
},
"blocks_17_": {
"name": "blocks.17."
},
"blocks_18_": {
"name": "blocks.18."
},
"blocks_19_": {
"name": "blocks.19."
},
"blocks_20_": {
"name": "blocks.20."
},
"blocks_21_": {
"name": "blocks.21."
},
"blocks_22_": {
"name": "blocks.22."
},
"blocks_23_": {
"name": "blocks.23."
},
"blocks_24_": {
"name": "blocks.24."
},
"blocks_25_": {
"name": "blocks.25."
},
"blocks_26_": {
"name": "blocks.26."
},
"blocks_27_": {
"name": "blocks.27."
},
"blocks_28_": {
"name": "blocks.28."
},
"blocks_29_": {
"name": "blocks.29."
},
"blocks_30_": {
"name": "blocks.30."
},
"blocks_31_": {
"name": "blocks.31."
},
"blocks_32_": {
"name": "blocks.32."
},
"blocks_33_": {
"name": "blocks.33."
},
"blocks_34_": {
"name": "blocks.34."
},
"blocks_35_": {
"name": "blocks.35."
},
"blocks_36_": {
"name": "blocks.36."
},
"blocks_37_": {
"name": "blocks.37."
},
"blocks_38_": {
"name": "blocks.38."
},
"blocks_39_": {
"name": "blocks.39."
},
"blocks_40_": {
"name": "blocks.40."
},
"blocks_41_": {
"name": "blocks.41."
},
"blocks_42_": {
"name": "blocks.42."
},
"blocks_43_": {
"name": "blocks.43."
},
"blocks_44_": {
"name": "blocks.44."
},
"blocks_45_": {
"name": "blocks.45."
},
"blocks_46_": {
"name": "blocks.46."
},
"blocks_47_": {
"name": "blocks.47."
},
"final_layer_": {
"name": "final_layer."
}
}
},
"ModelMergeSD1": {
"display_name": "ModelMergeSD1",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"time_embed_": {
"name": "time_embed."
},
"label_emb_": {
"name": "label_emb."
},
"input_blocks_0_": {
"name": "input_blocks.0."
},
"input_blocks_1_": {
"name": "input_blocks.1."
},
"input_blocks_2_": {
"name": "input_blocks.2."
},
"input_blocks_3_": {
"name": "input_blocks.3."
},
"input_blocks_4_": {
"name": "input_blocks.4."
},
"input_blocks_5_": {
"name": "input_blocks.5."
},
"input_blocks_6_": {
"name": "input_blocks.6."
},
"input_blocks_7_": {
"name": "input_blocks.7."
},
"input_blocks_8_": {
"name": "input_blocks.8."
},
"input_blocks_9_": {
"name": "input_blocks.9."
},
"input_blocks_10_": {
"name": "input_blocks.10."
},
"input_blocks_11_": {
"name": "input_blocks.11."
},
"middle_block_0_": {
"name": "middle_block.0."
},
"middle_block_1_": {
"name": "middle_block.1."
},
"middle_block_2_": {
"name": "middle_block.2."
},
"output_blocks_0_": {
"name": "output_blocks.0."
},
"output_blocks_1_": {
"name": "output_blocks.1."
},
"output_blocks_2_": {
"name": "output_blocks.2."
},
"output_blocks_3_": {
"name": "output_blocks.3."
},
"output_blocks_4_": {
"name": "output_blocks.4."
},
"output_blocks_5_": {
"name": "output_blocks.5."
},
"output_blocks_6_": {
"name": "output_blocks.6."
},
"output_blocks_7_": {
"name": "output_blocks.7."
},
"output_blocks_8_": {
"name": "output_blocks.8."
},
"output_blocks_9_": {
"name": "output_blocks.9."
},
"output_blocks_10_": {
"name": "output_blocks.10."
},
"output_blocks_11_": {
"name": "output_blocks.11."
},
"out_": {
"name": "out."
}
}
},
"ModelMergeSD2": {
"display_name": "ModelMergeSD2",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"time_embed_": {
"name": "time_embed."
},
"label_emb_": {
"name": "label_emb."
},
"input_blocks_0_": {
"name": "input_blocks.0."
},
"input_blocks_1_": {
"name": "input_blocks.1."
},
"input_blocks_2_": {
"name": "input_blocks.2."
},
"input_blocks_3_": {
"name": "input_blocks.3."
},
"input_blocks_4_": {
"name": "input_blocks.4."
},
"input_blocks_5_": {
"name": "input_blocks.5."
},
"input_blocks_6_": {
"name": "input_blocks.6."
},
"input_blocks_7_": {
"name": "input_blocks.7."
},
"input_blocks_8_": {
"name": "input_blocks.8."
},
"input_blocks_9_": {
"name": "input_blocks.9."
},
"input_blocks_10_": {
"name": "input_blocks.10."
},
"input_blocks_11_": {
"name": "input_blocks.11."
},
"middle_block_0_": {
"name": "middle_block.0."
},
"middle_block_1_": {
"name": "middle_block.1."
},
"middle_block_2_": {
"name": "middle_block.2."
},
"output_blocks_0_": {
"name": "output_blocks.0."
},
"output_blocks_1_": {
"name": "output_blocks.1."
},
"output_blocks_2_": {
"name": "output_blocks.2."
},
"output_blocks_3_": {
"name": "output_blocks.3."
},
"output_blocks_4_": {
"name": "output_blocks.4."
},
"output_blocks_5_": {
"name": "output_blocks.5."
},
"output_blocks_6_": {
"name": "output_blocks.6."
},
"output_blocks_7_": {
"name": "output_blocks.7."
},
"output_blocks_8_": {
"name": "output_blocks.8."
},
"output_blocks_9_": {
"name": "output_blocks.9."
},
"output_blocks_10_": {
"name": "output_blocks.10."
},
"output_blocks_11_": {
"name": "output_blocks.11."
},
"out_": {
"name": "out."
}
}
},
"ModelMergeSD3_2B": {
"display_name": "ModelMergeSD3_2B",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"pos_embed_": {
"name": "pos_embed."
},
"x_embedder_": {
"name": "x_embedder."
},
"context_embedder_": {
"name": "context_embedder."
},
"y_embedder_": {
"name": "y_embedder."
},
"t_embedder_": {
"name": "t_embedder."
},
"joint_blocks_0_": {
"name": "joint_blocks.0."
},
"joint_blocks_1_": {
"name": "joint_blocks.1."
},
"joint_blocks_2_": {
"name": "joint_blocks.2."
},
"joint_blocks_3_": {
"name": "joint_blocks.3."
},
"joint_blocks_4_": {
"name": "joint_blocks.4."
},
"joint_blocks_5_": {
"name": "joint_blocks.5."
},
"joint_blocks_6_": {
"name": "joint_blocks.6."
},
"joint_blocks_7_": {
"name": "joint_blocks.7."
},
"joint_blocks_8_": {
"name": "joint_blocks.8."
},
"joint_blocks_9_": {
"name": "joint_blocks.9."
},
"joint_blocks_10_": {
"name": "joint_blocks.10."
},
"joint_blocks_11_": {
"name": "joint_blocks.11."
},
"joint_blocks_12_": {
"name": "joint_blocks.12."
},
"joint_blocks_13_": {
"name": "joint_blocks.13."
},
"joint_blocks_14_": {
"name": "joint_blocks.14."
},
"joint_blocks_15_": {
"name": "joint_blocks.15."
},
"joint_blocks_16_": {
"name": "joint_blocks.16."
},
"joint_blocks_17_": {
"name": "joint_blocks.17."
},
"joint_blocks_18_": {
"name": "joint_blocks.18."
},
"joint_blocks_19_": {
"name": "joint_blocks.19."
},
"joint_blocks_20_": {
"name": "joint_blocks.20."
},
"joint_blocks_21_": {
"name": "joint_blocks.21."
},
"joint_blocks_22_": {
"name": "joint_blocks.22."
},
"joint_blocks_23_": {
"name": "joint_blocks.23."
},
"final_layer_": {
"name": "final_layer."
}
}
},
"ModelMergeSD35_Large": {
"display_name": "ModelMergeSD35_Large",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"pos_embed_": {
"name": "pos_embed."
},
"x_embedder_": {
"name": "x_embedder."
},
"context_embedder_": {
"name": "context_embedder."
},
"y_embedder_": {
"name": "y_embedder."
},
"t_embedder_": {
"name": "t_embedder."
},
"joint_blocks_0_": {
"name": "joint_blocks.0."
},
"joint_blocks_1_": {
"name": "joint_blocks.1."
},
"joint_blocks_2_": {
"name": "joint_blocks.2."
},
"joint_blocks_3_": {
"name": "joint_blocks.3."
},
"joint_blocks_4_": {
"name": "joint_blocks.4."
},
"joint_blocks_5_": {
"name": "joint_blocks.5."
},
"joint_blocks_6_": {
"name": "joint_blocks.6."
},
"joint_blocks_7_": {
"name": "joint_blocks.7."
},
"joint_blocks_8_": {
"name": "joint_blocks.8."
},
"joint_blocks_9_": {
"name": "joint_blocks.9."
},
"joint_blocks_10_": {
"name": "joint_blocks.10."
},
"joint_blocks_11_": {
"name": "joint_blocks.11."
},
"joint_blocks_12_": {
"name": "joint_blocks.12."
},
"joint_blocks_13_": {
"name": "joint_blocks.13."
},
"joint_blocks_14_": {
"name": "joint_blocks.14."
},
"joint_blocks_15_": {
"name": "joint_blocks.15."
},
"joint_blocks_16_": {
"name": "joint_blocks.16."
},
"joint_blocks_17_": {
"name": "joint_blocks.17."
},
"joint_blocks_18_": {
"name": "joint_blocks.18."
},
"joint_blocks_19_": {
"name": "joint_blocks.19."
},
"joint_blocks_20_": {
"name": "joint_blocks.20."
},
"joint_blocks_21_": {
"name": "joint_blocks.21."
},
"joint_blocks_22_": {
"name": "joint_blocks.22."
},
"joint_blocks_23_": {
"name": "joint_blocks.23."
},
"joint_blocks_24_": {
"name": "joint_blocks.24."
},
"joint_blocks_25_": {
"name": "joint_blocks.25."
},
"joint_blocks_26_": {
"name": "joint_blocks.26."
},
"joint_blocks_27_": {
"name": "joint_blocks.27."
},
"joint_blocks_28_": {
"name": "joint_blocks.28."
},
"joint_blocks_29_": {
"name": "joint_blocks.29."
},
"joint_blocks_30_": {
"name": "joint_blocks.30."
},
"joint_blocks_31_": {
"name": "joint_blocks.31."
},
"joint_blocks_32_": {
"name": "joint_blocks.32."
},
"joint_blocks_33_": {
"name": "joint_blocks.33."
},
"joint_blocks_34_": {
"name": "joint_blocks.34."
},
"joint_blocks_35_": {
"name": "joint_blocks.35."
},
"joint_blocks_36_": {
"name": "joint_blocks.36."
},
"joint_blocks_37_": {
"name": "joint_blocks.37."
},
"final_layer_": {
"name": "final_layer."
}
}
},
"ModelMergeSDXL": {
"display_name": "ModelMergeSDXL",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"time_embed_": {
"name": "time_embed."
},
"label_emb_": {
"name": "label_emb."
},
"input_blocks_0": {
"name": "input_blocks.0"
},
"input_blocks_1": {
"name": "input_blocks.1"
},
"input_blocks_2": {
"name": "input_blocks.2"
},
"input_blocks_3": {
"name": "input_blocks.3"
},
"input_blocks_4": {
"name": "input_blocks.4"
},
"input_blocks_5": {
"name": "input_blocks.5"
},
"input_blocks_6": {
"name": "input_blocks.6"
},
"input_blocks_7": {
"name": "input_blocks.7"
},
"input_blocks_8": {
"name": "input_blocks.8"
},
"middle_block_0": {
"name": "middle_block.0"
},
"middle_block_1": {
"name": "middle_block.1"
},
"middle_block_2": {
"name": "middle_block.2"
},
"output_blocks_0": {
"name": "output_blocks.0"
},
"output_blocks_1": {
"name": "output_blocks.1"
},
"output_blocks_2": {
"name": "output_blocks.2"
},
"output_blocks_3": {
"name": "output_blocks.3"
},
"output_blocks_4": {
"name": "output_blocks.4"
},
"output_blocks_5": {
"name": "output_blocks.5"
},
"output_blocks_6": {
"name": "output_blocks.6"
},
"output_blocks_7": {
"name": "output_blocks.7"
},
"output_blocks_8": {
"name": "output_blocks.8"
},
"out_": {
"name": "out."
}
}
},
"ModelMergeSimple": {
"display_name": "ModelMergeSimple",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"ratio": {
"name": "ratio"
}
}
},
"ModelMergeSubtract": {
"display_name": "ModelMergeSubtract",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"multiplier": {
"name": "multiplier"
}
}
},
"ModelMergeWAN2_1": {
"display_name": "ModelMergeWAN2_1",
"description": "1.3B model has 30 blocks, 14B model has 40 blocks. Image to video model has the extra img_emb.",
"inputs": {
"model1": {
"name": "model1"
},
"model2": {
"name": "model2"
},
"patch_embedding_": {
"name": "patch_embedding."
},
"time_embedding_": {
"name": "time_embedding."
},
"time_projection_": {
"name": "time_projection."
},
"text_embedding_": {
"name": "text_embedding."
},
"img_emb_": {
"name": "img_emb."
},
"blocks_0_": {
"name": "blocks.0."
},
"blocks_1_": {
"name": "blocks.1."
},
"blocks_2_": {
"name": "blocks.2."
},
"blocks_3_": {
"name": "blocks.3."
},
"blocks_4_": {
"name": "blocks.4."
},
"blocks_5_": {
"name": "blocks.5."
},
"blocks_6_": {
"name": "blocks.6."
},
"blocks_7_": {
"name": "blocks.7."
},
"blocks_8_": {
"name": "blocks.8."
},
"blocks_9_": {
"name": "blocks.9."
},
"blocks_10_": {
"name": "blocks.10."
},
"blocks_11_": {
"name": "blocks.11."
},
"blocks_12_": {
"name": "blocks.12."
},
"blocks_13_": {
"name": "blocks.13."
},
"blocks_14_": {
"name": "blocks.14."
},
"blocks_15_": {
"name": "blocks.15."
},
"blocks_16_": {
"name": "blocks.16."
},
"blocks_17_": {
"name": "blocks.17."
},
"blocks_18_": {
"name": "blocks.18."
},
"blocks_19_": {
"name": "blocks.19."
},
"blocks_20_": {
"name": "blocks.20."
},
"blocks_21_": {
"name": "blocks.21."
},
"blocks_22_": {
"name": "blocks.22."
},
"blocks_23_": {
"name": "blocks.23."
},
"blocks_24_": {
"name": "blocks.24."
},
"blocks_25_": {
"name": "blocks.25."
},
"blocks_26_": {
"name": "blocks.26."
},
"blocks_27_": {
"name": "blocks.27."
},
"blocks_28_": {
"name": "blocks.28."
},
"blocks_29_": {
"name": "blocks.29."
},
"blocks_30_": {
"name": "blocks.30."
},
"blocks_31_": {
"name": "blocks.31."
},
"blocks_32_": {
"name": "blocks.32."
},
"blocks_33_": {
"name": "blocks.33."
},
"blocks_34_": {
"name": "blocks.34."
},
"blocks_35_": {
"name": "blocks.35."
},
"blocks_36_": {
"name": "blocks.36."
},
"blocks_37_": {
"name": "blocks.37."
},
"blocks_38_": {
"name": "blocks.38."
},
"blocks_39_": {
"name": "blocks.39."
},
"head_": {
"name": "head."
}
}
},
"ModelSamplingAuraFlow": {
"display_name": "ModelSamplingAuraFlow",
"inputs": {
"model": {
"name": "model"
},
"shift": {
"name": "shift"
}
}
},
"ModelSamplingContinuousEDM": {
"display_name": "ModelSamplingContinuousEDM",
"inputs": {
"model": {
"name": "model"
},
"sampling": {
"name": "sampling"
},
"sigma_max": {
"name": "sigma_max"
},
"sigma_min": {
"name": "sigma_min"
}
}
},
"ModelSamplingContinuousV": {
"display_name": "ModelSamplingContinuousV",
"inputs": {
"model": {
"name": "model"
},
"sampling": {
"name": "sampling"
},
"sigma_max": {
"name": "sigma_max"
},
"sigma_min": {
"name": "sigma_min"
}
}
},
"ModelSamplingDiscrete": {
"display_name": "ModelSamplingDiscrete",
"inputs": {
"model": {
"name": "model"
},
"sampling": {
"name": "sampling"
},
"zsnr": {
"name": "zsnr"
}
}
},
"ModelSamplingFlux": {
"display_name": "ModelSamplingFlux",
"inputs": {
"model": {
"name": "model"
},
"max_shift": {
"name": "max_shift"
},
"base_shift": {
"name": "base_shift"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
}
}
},
"ModelSamplingLTXV": {
"display_name": "ModelSamplingLTXV",
"inputs": {
"model": {
"name": "model"
},
"max_shift": {
"name": "max_shift"
},
"base_shift": {
"name": "base_shift"
},
"latent": {
"name": "latent"
}
}
},
"ModelSamplingSD3": {
"display_name": "ModelSamplingSD3",
"inputs": {
"model": {
"name": "model"
},
"shift": {
"name": "shift"
}
}
},
"ModelSamplingStableCascade": {
"display_name": "ModelSamplingStableCascade",
"inputs": {
"model": {
"name": "model"
},
"shift": {
"name": "shift"
}
}
},
"ModelSave": {
"display_name": "ModelSave",
"inputs": {
"model": {
"name": "model"
},
"filename_prefix": {
"name": "filename_prefix"
}
}
},
"Morphology": {
"display_name": "ImageMorphology",
"inputs": {
"image": {
"name": "image"
},
"operation": {
"name": "operation"
},
"kernel_size": {
"name": "kernel_size"
}
}
},
"OpenAIDalle2": {
"display_name": "OpenAI DALL·E 2",
"description": "Generates images synchronously via OpenAI's DALL·E 2 endpoint.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Text prompt for DALL·E"
},
"seed": {
"name": "seed",
"tooltip": "not implemented yet in backend"
},
"size": {
"name": "size",
"tooltip": "Image size"
},
"n": {
"name": "n",
"tooltip": "How many images to generate"
},
"image": {
"name": "image",
"tooltip": "Optional reference image for image editing."
},
"mask": {
"name": "mask",
"tooltip": "Optional mask for inpainting (white areas will be replaced)"
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"OpenAIDalle3": {
"display_name": "OpenAI DALL·E 3",
"description": "Generates images synchronously via OpenAI's DALL·E 3 endpoint.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Text prompt for DALL·E"
},
"seed": {
"name": "seed",
"tooltip": "not implemented yet in backend"
},
"quality": {
"name": "quality",
"tooltip": "Image quality"
},
"style": {
"name": "style",
"tooltip": "Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images."
},
"size": {
"name": "size",
"tooltip": "Image size"
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"OpenAIGPTImage1": {
"display_name": "OpenAI GPT Image 1",
"description": "Generates images synchronously via OpenAI's GPT Image 1 endpoint.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Text prompt for GPT Image 1"
},
"seed": {
"name": "seed",
"tooltip": "not implemented yet in backend"
},
"quality": {
"name": "quality",
"tooltip": "Image quality, affects cost and generation time."
},
"background": {
"name": "background",
"tooltip": "Return image with or without background"
},
"size": {
"name": "size",
"tooltip": "Image size"
},
"n": {
"name": "n",
"tooltip": "How many images to generate"
},
"image": {
"name": "image",
"tooltip": "Optional reference image for image editing."
},
"mask": {
"name": "mask",
"tooltip": "Optional mask for inpainting (white areas will be replaced)"
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"OptimalStepsScheduler": {
"display_name": "OptimalStepsScheduler",
"inputs": {
"model_type": {
"name": "model_type"
},
"steps": {
"name": "steps"
},
"denoise": {
"name": "denoise"
}
}
},
"PairConditioningCombine": {
"display_name": "Cond Pair Combine",
"inputs": {
"positive_A": {
"name": "positive_A"
},
"negative_A": {
"name": "negative_A"
},
"positive_B": {
"name": "positive_B"
},
"negative_B": {
"name": "negative_B"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
}
}
},
"PairConditioningSetDefaultCombine": {
"display_name": "Cond Pair Set Default Combine",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"positive_DEFAULT": {
"name": "positive_DEFAULT"
},
"negative_DEFAULT": {
"name": "negative_DEFAULT"
},
"hooks": {
"name": "hooks"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
}
}
},
"PairConditioningSetProperties": {
"display_name": "Cond Pair Set Props",
"inputs": {
"positive_NEW": {
"name": "positive_NEW"
},
"negative_NEW": {
"name": "negative_NEW"
},
"strength": {
"name": "strength"
},
"set_cond_area": {
"name": "set_cond_area"
},
"mask": {
"name": "mask"
},
"hooks": {
"name": "hooks"
},
"timesteps": {
"name": "timesteps"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
}
}
},
"PairConditioningSetPropertiesAndCombine": {
"display_name": "Cond Pair Set Props Combine",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"positive_NEW": {
"name": "positive_NEW"
},
"negative_NEW": {
"name": "negative_NEW"
},
"strength": {
"name": "strength"
},
"set_cond_area": {
"name": "set_cond_area"
},
"mask": {
"name": "mask"
},
"hooks": {
"name": "hooks"
},
"timesteps": {
"name": "timesteps"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
}
}
},
"PatchModelAddDownscale": {
"display_name": "PatchModelAddDownscale (Kohya Deep Shrink)",
"inputs": {
"model": {
"name": "model"
},
"block_number": {
"name": "block_number"
},
"downscale_factor": {
"name": "downscale_factor"
},
"start_percent": {
"name": "start_percent"
},
"end_percent": {
"name": "end_percent"
},
"downscale_after_skip": {
"name": "downscale_after_skip"
},
"downscale_method": {
"name": "downscale_method"
},
"upscale_method": {
"name": "upscale_method"
}
}
},
"PerpNeg": {
"display_name": "Perp-Neg (DEPRECATED by PerpNegGuider)",
"inputs": {
"model": {
"name": "model"
},
"empty_conditioning": {
"name": "empty_conditioning"
},
"neg_scale": {
"name": "neg_scale"
}
}
},
"PerpNegGuider": {
"display_name": "PerpNegGuider",
"inputs": {
"model": {
"name": "model"
},
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"empty_conditioning": {
"name": "empty_conditioning"
},
"cfg": {
"name": "cfg"
},
"neg_scale": {
"name": "neg_scale"
}
}
},
"PerturbedAttentionGuidance": {
"display_name": "PerturbedAttentionGuidance",
"inputs": {
"model": {
"name": "model"
},
"scale": {
"name": "scale"
}
}
},
"PhotoMakerEncode": {
"display_name": "PhotoMakerEncode",
"inputs": {
"photomaker": {
"name": "photomaker"
},
"image": {
"name": "image"
},
"clip": {
"name": "clip"
},
"text": {
"name": "text"
}
}
},
"PhotoMakerLoader": {
"display_name": "PhotoMakerLoader",
"inputs": {
"photomaker_model_name": {
"name": "photomaker_model_name"
}
}
},
"Pikadditions": {
"display_name": "Pikadditions (Video Object Insertion)",
"description": "Add any object or image into your video. Upload a video and specify what youd like to add to create a seamlessly integrated result.",
"inputs": {
"video": {
"name": "video",
"tooltip": "The video to add an image to."
},
"image": {
"name": "image",
"tooltip": "The image to add to the video."
},
"prompt_text": {
"name": "prompt_text"
},
"negative_prompt": {
"name": "negative_prompt"
},
"seed": {
"name": "seed"
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"Pikaffects": {
"display_name": "Pikaffects (Video Effects)",
"description": "Generate a video with a specific Pikaffect. Supported Pikaffects: Cake-ify, Crumble, Crush, Decapitate, Deflate, Dissolve, Explode, Eye-pop, Inflate, Levitate, Melt, Peel, Poke, Squish, Ta-da, Tear",
"inputs": {
"image": {
"name": "image",
"tooltip": "The reference image to apply the Pikaffect to."
},
"pikaffect": {
"name": "pikaffect"
},
"prompt_text": {
"name": "prompt_text"
},
"negative_prompt": {
"name": "negative_prompt"
},
"seed": {
"name": "seed"
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"PikaImageToVideoNode2_2": {
"display_name": "Pika Image to Video",
"description": "Sends an image and prompt to the Pika API v2.2 to generate a video.",
"inputs": {
"image": {
"name": "image",
"tooltip": "The image to convert to video"
},
"prompt_text": {
"name": "prompt_text"
},
"negative_prompt": {
"name": "negative_prompt"
},
"seed": {
"name": "seed"
},
"resolution": {
"name": "resolution"
},
"duration": {
"name": "duration"
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"PikaScenesV2_2": {
"display_name": "Pika Scenes (Video Image Composition)",
"description": "Combine your images to create a video with the objects in them. Upload multiple images as ingredients and generate a high-quality video that incorporates all of them.",
"inputs": {
"prompt_text": {
"name": "prompt_text"
},
"negative_prompt": {
"name": "negative_prompt"
},
"seed": {
"name": "seed"
},
"resolution": {
"name": "resolution"
},
"duration": {
"name": "duration"
},
"ingredients_mode": {
"name": "ingredients_mode"
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "Aspect ratio (width / height)"
},
"image_ingredient_1": {
"name": "image_ingredient_1",
"tooltip": "Image that will be used as ingredient to create a video."
},
"image_ingredient_2": {
"name": "image_ingredient_2",
"tooltip": "Image that will be used as ingredient to create a video."
},
"image_ingredient_3": {
"name": "image_ingredient_3",
"tooltip": "Image that will be used as ingredient to create a video."
},
"image_ingredient_4": {
"name": "image_ingredient_4",
"tooltip": "Image that will be used as ingredient to create a video."
},
"image_ingredient_5": {
"name": "image_ingredient_5",
"tooltip": "Image that will be used as ingredient to create a video."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"PikaStartEndFrameNode2_2": {
"display_name": "Pika Start and End Frame to Video",
"description": "Generate a video by combining your first and last frame. Upload two images to define the start and end points, and let the AI create a smooth transition between them.",
"inputs": {
"image_start": {
"name": "image_start",
"tooltip": "The first image to combine."
},
"image_end": {
"name": "image_end",
"tooltip": "The last image to combine."
},
"prompt_text": {
"name": "prompt_text"
},
"negative_prompt": {
"name": "negative_prompt"
},
"seed": {
"name": "seed"
},
"resolution": {
"name": "resolution"
},
"duration": {
"name": "duration"
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"Pikaswaps": {
"display_name": "Pika Swaps (Video Object Replacement)",
"description": "Swap out any object or region of your video with a new image or object. Define areas to replace either with a mask or coordinates.",
"inputs": {
"video": {
"name": "video",
"tooltip": "The video to swap an object in."
},
"image": {
"name": "image",
"tooltip": "The image used to replace the masked object in the video."
},
"mask": {
"name": "mask",
"tooltip": "Use the mask to define areas in the video to replace"
},
"prompt_text": {
"name": "prompt_text"
},
"negative_prompt": {
"name": "negative_prompt"
},
"seed": {
"name": "seed"
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"PikaTextToVideoNode2_2": {
"display_name": "Pika Text to Video",
"description": "Sends a text prompt to the Pika API v2.2 to generate a video.",
"inputs": {
"prompt_text": {
"name": "prompt_text"
},
"negative_prompt": {
"name": "negative_prompt"
},
"seed": {
"name": "seed"
},
"resolution": {
"name": "resolution"
},
"duration": {
"name": "duration"
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "Aspect ratio (width / height)"
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"PixverseImageToVideoNode": {
"display_name": "PixVerse Image to Video",
"description": "Generates videos synchronously based on prompt and output_size.",
"inputs": {
"image": {
"name": "image"
},
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the video generation"
},
"quality": {
"name": "quality"
},
"duration_seconds": {
"name": "duration_seconds"
},
"motion_mode": {
"name": "motion_mode"
},
"seed": {
"name": "seed",
"tooltip": "Seed for video generation."
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "An optional text description of undesired elements on an image."
},
"pixverse_template": {
"name": "pixverse_template",
"tooltip": "An optional template to influence style of generation, created by the PixVerse Template node."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"PixverseTemplateNode": {
"display_name": "PixVerse Template",
"inputs": {
"template": {
"name": "template"
}
},
"outputs": {
"0": {
"name": "pixverse_template"
}
}
},
"PixverseTextToVideoNode": {
"display_name": "PixVerse Text to Video",
"description": "Generates videos synchronously based on prompt and output_size.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the video generation"
},
"aspect_ratio": {
"name": "aspect_ratio"
},
"quality": {
"name": "quality"
},
"duration_seconds": {
"name": "duration_seconds"
},
"motion_mode": {
"name": "motion_mode"
},
"seed": {
"name": "seed",
"tooltip": "Seed for video generation."
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "An optional text description of undesired elements on an image."
},
"pixverse_template": {
"name": "pixverse_template",
"tooltip": "An optional template to influence style of generation, created by the PixVerse Template node."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"PixverseTransitionVideoNode": {
"display_name": "PixVerse Transition Video",
"description": "Generates videos synchronously based on prompt and output_size.",
"inputs": {
"first_frame": {
"name": "first_frame"
},
"last_frame": {
"name": "last_frame"
},
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the video generation"
},
"quality": {
"name": "quality"
},
"duration_seconds": {
"name": "duration_seconds"
},
"motion_mode": {
"name": "motion_mode"
},
"seed": {
"name": "seed",
"tooltip": "Seed for video generation."
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "An optional text description of undesired elements on an image."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"PolyexponentialScheduler": {
"display_name": "PolyexponentialScheduler",
"inputs": {
"steps": {
"name": "steps"
},
"sigma_max": {
"name": "sigma_max"
},
"sigma_min": {
"name": "sigma_min"
},
"rho": {
"name": "rho"
}
}
},
"PorterDuffImageComposite": {
"display_name": "Porter-Duff Image Composite",
"inputs": {
"source": {
"name": "source"
},
"source_alpha": {
"name": "source_alpha"
},
"destination": {
"name": "destination"
},
"destination_alpha": {
"name": "destination_alpha"
},
"mode": {
"name": "mode"
}
}
},
"Preview3D": {
"display_name": "Preview 3D",
"inputs": {
"model_file": {
"name": "model_file"
},
"camera_info": {
"name": "camera_info"
},
"image": {
"name": "image"
}
}
},
"Preview3DAnimation": {
"display_name": "Preview 3D - Animation",
"inputs": {
"model_file": {
"name": "model_file"
},
"camera_info": {
"name": "camera_info"
},
"image": {
"name": "image"
}
}
},
"PreviewAny": {
"display_name": "Preview Any",
"inputs": {
"source": {
"name": "source"
},
"preview": {}
}
},
"PreviewAudio": {
"display_name": "PreviewAudio",
"inputs": {
"audio": {
"name": "audio"
},
"audioUI": {
"name": "audioUI"
}
}
},
"PreviewImage": {
"display_name": "Preview Image",
"description": "Saves the input images to your ComfyUI output directory.",
"inputs": {
"images": {
"name": "images"
}
}
},
"PrimitiveBoolean": {
"display_name": "Boolean",
"inputs": {
"value": {
"name": "value"
}
}
},
"PrimitiveFloat": {
"display_name": "Float",
"inputs": {
"value": {
"name": "value"
}
}
},
"PrimitiveInt": {
"display_name": "Int",
"inputs": {
"value": {
"name": "value"
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"PrimitiveString": {
"display_name": "String",
"inputs": {
"value": {
"name": "value"
}
}
},
"PrimitiveStringMultiline": {
"display_name": "String (Multiline)",
"inputs": {
"value": {
"name": "value"
}
}
},
"QuadrupleCLIPLoader": {
"display_name": "QuadrupleCLIPLoader",
"description": "[Recipes]\n\nhidream: long clip-l, long clip-g, t5xxl, llama_8b_3.1_instruct",
"inputs": {
"clip_name1": {
"name": "clip_name1"
},
"clip_name2": {
"name": "clip_name2"
},
"clip_name3": {
"name": "clip_name3"
},
"clip_name4": {
"name": "clip_name4"
}
}
},
"RandomNoise": {
"display_name": "RandomNoise",
"inputs": {
"noise_seed": {
"name": "noise_seed"
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"RebatchImages": {
"display_name": "Rebatch Images",
"inputs": {
"images": {
"name": "images"
},
"batch_size": {
"name": "batch_size"
}
}
},
"RebatchLatents": {
"display_name": "Rebatch Latents",
"inputs": {
"latents": {
"name": "latents"
},
"batch_size": {
"name": "batch_size"
}
}
},
"RecraftColorRGB": {
"display_name": "Recraft Color RGB",
"description": "Create Recraft Color by choosing specific RGB values.",
"inputs": {
"r": {
"name": "r",
"tooltip": "Red value of color."
},
"g": {
"name": "g",
"tooltip": "Green value of color."
},
"b": {
"name": "b",
"tooltip": "Blue value of color."
},
"recraft_color": {
"name": "recraft_color"
}
},
"outputs": {
"0": {
"name": "recraft_color"
}
}
},
"RecraftControls": {
"display_name": "Recraft Controls",
"description": "Create Recraft Controls for customizing Recraft generation.",
"inputs": {
"colors": {
"name": "colors"
},
"background_color": {
"name": "background_color"
}
},
"outputs": {
"0": {
"name": "recraft_controls"
}
}
},
"RecraftCreativeUpscaleNode": {
"display_name": "Recraft Creative Upscale Image",
"description": "Upscale image synchronously.\nEnhances a given raster image using creative upscale tool, boosting resolution with a focus on refining small details and faces.",
"inputs": {
"image": {
"name": "image"
}
}
},
"RecraftCrispUpscaleNode": {
"display_name": "Recraft Crisp Upscale Image",
"description": "Upscale image synchronously.\nEnhances a given raster image using crisp upscale tool, increasing image resolution, making the image sharper and cleaner.",
"inputs": {
"image": {
"name": "image"
}
}
},
"RecraftImageInpaintingNode": {
"display_name": "Recraft Image Inpainting",
"description": "Modify image based on prompt and mask.",
"inputs": {
"image": {
"name": "image"
},
"mask": {
"name": "mask"
},
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation."
},
"n": {
"name": "n",
"tooltip": "The number of images to generate."
},
"seed": {
"name": "seed",
"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed."
},
"recraft_style": {
"name": "recraft_style"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "An optional text description of undesired elements on an image."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"RecraftImageToImageNode": {
"display_name": "Recraft Image to Image",
"description": "Modify image based on prompt and strength.",
"inputs": {
"image": {
"name": "image"
},
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation."
},
"n": {
"name": "n",
"tooltip": "The number of images to generate."
},
"strength": {
"name": "strength",
"tooltip": "Defines the difference with the original image, should lie in [0, 1], where 0 means almost identical, and 1 means miserable similarity."
},
"seed": {
"name": "seed",
"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed."
},
"recraft_style": {
"name": "recraft_style"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "An optional text description of undesired elements on an image."
},
"recraft_controls": {
"name": "recraft_controls",
"tooltip": "Optional additional controls over the generation via the Recraft Controls node."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"RecraftRemoveBackgroundNode": {
"display_name": "Recraft Remove Background",
"description": "Remove background from image, and return processed image and mask.",
"inputs": {
"image": {
"name": "image"
}
}
},
"RecraftReplaceBackgroundNode": {
"display_name": "Recraft Replace Background",
"description": "Replace background on image, based on provided prompt.",
"inputs": {
"image": {
"name": "image"
},
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation."
},
"n": {
"name": "n",
"tooltip": "The number of images to generate."
},
"seed": {
"name": "seed",
"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed."
},
"recraft_style": {
"name": "recraft_style"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "An optional text description of undesired elements on an image."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"RecraftStyleV3DigitalIllustration": {
"display_name": "Recraft Style - Digital Illustration",
"description": "Select realistic_image style and optional substyle.",
"inputs": {
"substyle": {
"name": "substyle"
}
},
"outputs": {
"0": {
"name": "recraft_style"
}
}
},
"RecraftStyleV3InfiniteStyleLibrary": {
"display_name": "Recraft Style - Infinite Style Library",
"description": "Select style based on preexisting UUID from Recraft's Infinite Style Library.",
"inputs": {
"style_id": {
"name": "style_id",
"tooltip": "UUID of style from Infinite Style Library."
}
},
"outputs": {
"0": {
"name": "recraft_style"
}
}
},
"RecraftStyleV3LogoRaster": {
"display_name": "Recraft Style - Logo Raster",
"description": "Select realistic_image style and optional substyle.",
"inputs": {
"substyle": {
"name": "substyle"
}
},
"outputs": {
"0": {
"name": "recraft_style"
}
}
},
"RecraftStyleV3RealisticImage": {
"display_name": "Recraft Style - Realistic Image",
"description": "Select realistic_image style and optional substyle.",
"inputs": {
"substyle": {
"name": "substyle"
}
},
"outputs": {
"0": {
"name": "recraft_style"
}
}
},
"RecraftTextToImageNode": {
"display_name": "Recraft Text to Image",
"description": "Generates images synchronously based on prompt and resolution.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation."
},
"size": {
"name": "size",
"tooltip": "The size of the generated image."
},
"n": {
"name": "n",
"tooltip": "The number of images to generate."
},
"seed": {
"name": "seed",
"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed."
},
"recraft_style": {
"name": "recraft_style"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "An optional text description of undesired elements on an image."
},
"recraft_controls": {
"name": "recraft_controls",
"tooltip": "Optional additional controls over the generation via the Recraft Controls node."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"RecraftTextToVectorNode": {
"display_name": "Recraft Text to Vector",
"description": "Generates SVG synchronously based on prompt and resolution.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Prompt for the image generation."
},
"substyle": {
"name": "substyle"
},
"size": {
"name": "size",
"tooltip": "The size of the generated image."
},
"n": {
"name": "n",
"tooltip": "The number of images to generate."
},
"seed": {
"name": "seed",
"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed."
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "An optional text description of undesired elements on an image."
},
"recraft_controls": {
"name": "recraft_controls",
"tooltip": "Optional additional controls over the generation via the Recraft Controls node."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"RecraftVectorizeImageNode": {
"display_name": "Recraft Vectorize Image",
"description": "Generates SVG synchronously from an input image.",
"inputs": {
"image": {
"name": "image"
}
}
},
"RenormCFG": {
"display_name": "RenormCFG",
"inputs": {
"model": {
"name": "model"
},
"cfg_trunc": {
"name": "cfg_trunc"
},
"renorm_cfg": {
"name": "renorm_cfg"
}
}
},
"RepeatImageBatch": {
"display_name": "RepeatImageBatch",
"inputs": {
"image": {
"name": "image"
},
"amount": {
"name": "amount"
}
}
},
"RepeatLatentBatch": {
"display_name": "Repeat Latent Batch",
"inputs": {
"samples": {
"name": "samples"
},
"amount": {
"name": "amount"
}
}
},
"RescaleCFG": {
"display_name": "RescaleCFG",
"inputs": {
"model": {
"name": "model"
},
"multiplier": {
"name": "multiplier"
}
}
},
"SamplerCustom": {
"display_name": "SamplerCustom",
"inputs": {
"model": {
"name": "model"
},
"add_noise": {
"name": "add_noise"
},
"noise_seed": {
"name": "noise_seed"
},
"cfg": {
"name": "cfg"
},
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"sampler": {
"name": "sampler"
},
"sigmas": {
"name": "sigmas"
},
"latent_image": {
"name": "latent_image"
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"name": "output"
},
"1": {
"name": "denoised_output"
}
}
},
"SamplerCustomAdvanced": {
"display_name": "SamplerCustomAdvanced",
"inputs": {
"noise": {
"name": "noise"
},
"guider": {
"name": "guider"
},
"sampler": {
"name": "sampler"
},
"sigmas": {
"name": "sigmas"
},
"latent_image": {
"name": "latent_image"
}
},
"outputs": {
"0": {
"name": "output"
},
"1": {
"name": "denoised_output"
}
}
},
"SamplerDPMAdaptative": {
"display_name": "SamplerDPMAdaptative",
"inputs": {
"order": {
"name": "order"
},
"rtol": {
"name": "rtol"
},
"atol": {
"name": "atol"
},
"h_init": {
"name": "h_init"
},
"pcoeff": {
"name": "pcoeff"
},
"icoeff": {
"name": "icoeff"
},
"dcoeff": {
"name": "dcoeff"
},
"accept_safety": {
"name": "accept_safety"
},
"eta": {
"name": "eta"
},
"s_noise": {
"name": "s_noise"
}
}
},
"SamplerDPMPP_2M_SDE": {
"display_name": "SamplerDPMPP_2M_SDE",
"inputs": {
"solver_type": {
"name": "solver_type"
},
"eta": {
"name": "eta"
},
"s_noise": {
"name": "s_noise"
},
"noise_device": {
"name": "noise_device"
}
}
},
"SamplerDPMPP_2S_Ancestral": {
"display_name": "SamplerDPMPP_2S_Ancestral",
"inputs": {
"eta": {
"name": "eta"
},
"s_noise": {
"name": "s_noise"
}
}
},
"SamplerDPMPP_3M_SDE": {
"display_name": "SamplerDPMPP_3M_SDE",
"inputs": {
"eta": {
"name": "eta"
},
"s_noise": {
"name": "s_noise"
},
"noise_device": {
"name": "noise_device"
}
}
},
"SamplerDPMPP_SDE": {
"display_name": "SamplerDPMPP_SDE",
"inputs": {
"eta": {
"name": "eta"
},
"s_noise": {
"name": "s_noise"
},
"r": {
"name": "r"
},
"noise_device": {
"name": "noise_device"
}
}
},
"SamplerEulerAncestral": {
"display_name": "SamplerEulerAncestral",
"inputs": {
"eta": {
"name": "eta"
},
"s_noise": {
"name": "s_noise"
}
}
},
"SamplerEulerAncestralCFGPP": {
"display_name": "SamplerEulerAncestralCFG++",
"inputs": {
"eta": {
"name": "eta"
},
"s_noise": {
"name": "s_noise"
}
}
},
"SamplerEulerCFGpp": {
"display_name": "SamplerEulerCFG++",
"inputs": {
"version": {
"name": "version"
}
}
},
"SamplerLCMUpscale": {
"display_name": "SamplerLCMUpscale",
"inputs": {
"scale_ratio": {
"name": "scale_ratio"
},
"scale_steps": {
"name": "scale_steps"
},
"upscale_method": {
"name": "upscale_method"
}
}
},
"SamplerLMS": {
"display_name": "SamplerLMS",
"inputs": {
"order": {
"name": "order"
}
}
},
"SaveAnimatedPNG": {
"display_name": "SaveAnimatedPNG",
"inputs": {
"images": {
"name": "images"
},
"filename_prefix": {
"name": "filename_prefix"
},
"fps": {
"name": "fps"
},
"compress_level": {
"name": "compress_level"
}
}
},
"SaveAnimatedWEBP": {
"display_name": "SaveAnimatedWEBP",
"inputs": {
"images": {
"name": "images"
},
"filename_prefix": {
"name": "filename_prefix"
},
"fps": {
"name": "fps"
},
"lossless": {
"name": "lossless"
},
"quality": {
"name": "quality"
},
"method": {
"name": "method"
}
}
},
"SaveAudio": {
"display_name": "SaveAudio",
"inputs": {
"audio": {
"name": "audio"
},
"filename_prefix": {
"name": "filename_prefix"
},
"audioUI": {
"name": "audioUI"
}
}
},
"SaveGLB": {
"display_name": "SaveGLB",
"inputs": {
"mesh": {
"name": "mesh"
},
"filename_prefix": {
"name": "filename_prefix"
},
"image": {
"name": "image"
}
}
},
"SaveImage": {
"display_name": "Save Image",
"description": "Saves the input images to your ComfyUI output directory.",
"inputs": {
"images": {
"name": "images",
"tooltip": "The images to save."
},
"filename_prefix": {
"name": "filename_prefix",
"tooltip": "The prefix for the file to save. This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes."
}
}
},
"SaveImageWebsocket": {
"display_name": "SaveImageWebsocket",
"inputs": {
"images": {
"name": "images"
}
}
},
"SaveLatent": {
"display_name": "SaveLatent",
"inputs": {
"samples": {
"name": "samples"
},
"filename_prefix": {
"name": "filename_prefix"
}
}
},
"SaveSVG": {
"display_name": "Save SVG",
"description": "Save SVG files on disk.",
"inputs": {
"svg": {
"name": "svg"
},
"filename_prefix": {
"name": "filename_prefix",
"tooltip": "The prefix for the file to save. This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes."
}
}
},
"SaveVideo": {
"display_name": "Save Video",
"description": "Saves the input images to your ComfyUI output directory.",
"inputs": {
"video": {
"name": "video",
"tooltip": "The video to save."
},
"filename_prefix": {
"name": "filename_prefix",
"tooltip": "The prefix for the file to save. This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes."
},
"format": {
"name": "format",
"tooltip": "The format to save the video as."
},
"codec": {
"name": "codec",
"tooltip": "The codec to use for the video."
}
}
},
"SaveWEBM": {
"display_name": "SaveWEBM",
"inputs": {
"images": {
"name": "images"
},
"filename_prefix": {
"name": "filename_prefix"
},
"codec": {
"name": "codec"
},
"fps": {
"name": "fps"
},
"crf": {
"name": "crf",
"tooltip": "Higher crf means lower quality with a smaller file size, lower crf means higher quality higher filesize."
}
}
},
"SD_4XUpscale_Conditioning": {
"display_name": "SD_4XUpscale_Conditioning",
"inputs": {
"images": {
"name": "images"
},
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"scale_ratio": {
"name": "scale_ratio"
},
"noise_augmentation": {
"name": "noise_augmentation"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
},
"2": {
"name": "latent"
}
}
},
"SDTurboScheduler": {
"display_name": "SDTurboScheduler",
"inputs": {
"model": {
"name": "model"
},
"steps": {
"name": "steps"
},
"denoise": {
"name": "denoise"
}
}
},
"SelfAttentionGuidance": {
"display_name": "Self-Attention Guidance",
"inputs": {
"model": {
"name": "model"
},
"scale": {
"name": "scale"
},
"blur_sigma": {
"name": "blur_sigma"
}
}
},
"SetClipHooks": {
"display_name": "Set CLIP Hooks",
"inputs": {
"clip": {
"name": "clip"
},
"apply_to_conds": {
"name": "apply_to_conds"
},
"schedule_clip": {
"name": "schedule_clip"
},
"hooks": {
"name": "hooks"
}
}
},
"SetFirstSigma": {
"display_name": "SetFirstSigma",
"inputs": {
"sigmas": {
"name": "sigmas"
},
"sigma": {
"name": "sigma"
}
}
},
"SetHookKeyframes": {
"display_name": "Set Hook Keyframes",
"inputs": {
"hooks": {
"name": "hooks"
},
"hook_kf": {
"name": "hook_kf"
}
}
},
"SetLatentNoiseMask": {
"display_name": "Set Latent Noise Mask",
"inputs": {
"samples": {
"name": "samples"
},
"mask": {
"name": "mask"
}
}
},
"SetUnionControlNetType": {
"display_name": "SetUnionControlNetType",
"inputs": {
"control_net": {
"name": "control_net"
},
"type": {
"name": "type"
}
}
},
"SkipLayerGuidanceDiT": {
"display_name": "SkipLayerGuidanceDiT",
"description": "Generic version of SkipLayerGuidance node that can be used on every DiT model.",
"inputs": {
"model": {
"name": "model"
},
"double_layers": {
"name": "double_layers"
},
"single_layers": {
"name": "single_layers"
},
"scale": {
"name": "scale"
},
"start_percent": {
"name": "start_percent"
},
"end_percent": {
"name": "end_percent"
},
"rescaling_scale": {
"name": "rescaling_scale"
}
}
},
"SkipLayerGuidanceSD3": {
"display_name": "SkipLayerGuidanceSD3",
"description": "Generic version of SkipLayerGuidance node that can be used on every DiT model.",
"inputs": {
"model": {
"name": "model"
},
"layers": {
"name": "layers"
},
"scale": {
"name": "scale"
},
"start_percent": {
"name": "start_percent"
},
"end_percent": {
"name": "end_percent"
}
}
},
"SolidMask": {
"display_name": "SolidMask",
"inputs": {
"value": {
"name": "value"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
}
}
},
"SplitImageWithAlpha": {
"display_name": "Split Image with Alpha",
"inputs": {
"image": {
"name": "image"
}
}
},
"SplitSigmas": {
"display_name": "SplitSigmas",
"inputs": {
"sigmas": {
"name": "sigmas"
},
"step": {
"name": "step"
}
},
"outputs": {
"0": {
"name": "high_sigmas"
},
"1": {
"name": "low_sigmas"
}
}
},
"SplitSigmasDenoise": {
"display_name": "SplitSigmasDenoise",
"inputs": {
"sigmas": {
"name": "sigmas"
},
"denoise": {
"name": "denoise"
}
},
"outputs": {
"0": {
"name": "high_sigmas"
},
"1": {
"name": "low_sigmas"
}
}
},
"StabilityStableImageSD_3_5Node": {
"display_name": "Stability AI Stable Diffusion 3.5 Image",
"description": "Generates images synchronously based on prompt and resolution.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "What you wish to see in the output image. A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results."
},
"model": {
"name": "model"
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "Aspect ratio of generated image."
},
"style_preset": {
"name": "style_preset",
"tooltip": "Optional desired style of generated image."
},
"cfg_scale": {
"name": "cfg_scale",
"tooltip": "How strictly the diffusion process adheres to the prompt text (higher values keep your image closer to your prompt)"
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"image": {
"name": "image"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Keywords of what you do not wish to see in the output image. This is an advanced feature."
},
"image_denoise": {
"name": "image_denoise",
"tooltip": "Denoise of input image; 0.0 yields image identical to input, 1.0 is as if no image was provided at all."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"StabilityStableImageUltraNode": {
"display_name": "Stability AI Stable Image Ultra",
"description": "Generates images synchronously based on prompt and resolution.",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "What you wish to see in the output image. A strong, descriptive prompt that clearly definesWhat you wish to see in the output image. A strong, descriptive prompt that clearly defineselements, colors, and subjects will lead to better results. To control the weight of a given word use the format `(word:weight)`,where `word` is the word you'd like to control the weight of and `weight`is a value between 0 and 1. For example: `The sky was a crisp (blue:0.3) and (green:0.8)`would convey a sky that was blue and green, but more green than blue."
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "Aspect ratio of generated image."
},
"style_preset": {
"name": "style_preset",
"tooltip": "Optional desired style of generated image."
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"image": {
"name": "image"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "A blurb of text describing what you do not wish to see in the output image. This is an advanced feature."
},
"image_denoise": {
"name": "image_denoise",
"tooltip": "Denoise of input image; 0.0 yields image identical to input, 1.0 is as if no image was provided at all."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"StabilityUpscaleConservativeNode": {
"display_name": "Stability AI Upscale Conservative",
"description": "Upscale image with minimal alterations to 4K resolution.",
"inputs": {
"image": {
"name": "image"
},
"prompt": {
"name": "prompt",
"tooltip": "What you wish to see in the output image. A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results."
},
"creativity": {
"name": "creativity",
"tooltip": "Controls the likelihood of creating additional details not heavily conditioned by the init image."
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Keywords of what you do not wish to see in the output image. This is an advanced feature."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"StabilityUpscaleCreativeNode": {
"display_name": "Stability AI Upscale Creative",
"description": "Upscale image with minimal alterations to 4K resolution.",
"inputs": {
"image": {
"name": "image"
},
"prompt": {
"name": "prompt",
"tooltip": "What you wish to see in the output image. A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results."
},
"creativity": {
"name": "creativity",
"tooltip": "Controls the likelihood of creating additional details not heavily conditioned by the init image."
},
"style_preset": {
"name": "style_preset",
"tooltip": "Optional desired style of generated image."
},
"seed": {
"name": "seed",
"tooltip": "The random seed used for creating the noise."
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Keywords of what you do not wish to see in the output image. This is an advanced feature."
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"StabilityUpscaleFastNode": {
"display_name": "Stability AI Upscale Fast",
"description": "Quickly upscales an image via Stability API call to 4x its original size; intended for upscaling low-quality/compressed images.",
"inputs": {
"image": {
"name": "image"
}
}
},
"StableCascade_EmptyLatentImage": {
"display_name": "StableCascade_EmptyLatentImage",
"inputs": {
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"compression": {
"name": "compression"
},
"batch_size": {
"name": "batch_size"
}
},
"outputs": {
"0": {
"name": "stage_c"
},
"1": {
"name": "stage_b"
}
}
},
"StableCascade_StageB_Conditioning": {
"display_name": "StableCascade_StageB_Conditioning",
"inputs": {
"conditioning": {
"name": "conditioning"
},
"stage_c": {
"name": "stage_c"
}
}
},
"StableCascade_StageC_VAEEncode": {
"display_name": "StableCascade_StageC_VAEEncode",
"inputs": {
"image": {
"name": "image"
},
"vae": {
"name": "vae"
},
"compression": {
"name": "compression"
}
},
"outputs": {
"0": {
"name": "stage_c"
},
"1": {
"name": "stage_b"
}
}
},
"StableCascade_SuperResolutionControlnet": {
"display_name": "StableCascade_SuperResolutionControlnet",
"inputs": {
"image": {
"name": "image"
},
"vae": {
"name": "vae"
}
},
"outputs": {
"0": {
"name": "controlnet_input"
},
"1": {
"name": "stage_c"
},
"2": {
"name": "stage_b"
}
}
},
"StableZero123_Conditioning": {
"display_name": "StableZero123_Conditioning",
"inputs": {
"clip_vision": {
"name": "clip_vision"
},
"init_image": {
"name": "init_image"
},
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"batch_size": {
"name": "batch_size"
},
"elevation": {
"name": "elevation"
},
"azimuth": {
"name": "azimuth"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
},
"2": {
"name": "latent"
}
}
},
"StableZero123_Conditioning_Batched": {
"display_name": "StableZero123_Conditioning_Batched",
"inputs": {
"clip_vision": {
"name": "clip_vision"
},
"init_image": {
"name": "init_image"
},
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"batch_size": {
"name": "batch_size"
},
"elevation": {
"name": "elevation"
},
"azimuth": {
"name": "azimuth"
},
"elevation_batch_increment": {
"name": "elevation_batch_increment"
},
"azimuth_batch_increment": {
"name": "azimuth_batch_increment"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
},
"2": {
"name": "latent"
}
}
},
"StyleModelApply": {
"display_name": "Apply Style Model",
"inputs": {
"conditioning": {
"name": "conditioning"
},
"style_model": {
"name": "style_model"
},
"clip_vision_output": {
"name": "clip_vision_output"
},
"strength": {
"name": "strength"
},
"strength_type": {
"name": "strength_type"
}
}
},
"StyleModelLoader": {
"display_name": "Load Style Model",
"inputs": {
"style_model_name": {
"name": "style_model_name"
}
}
},
"SV3D_Conditioning": {
"display_name": "SV3D_Conditioning",
"inputs": {
"clip_vision": {
"name": "clip_vision"
},
"init_image": {
"name": "init_image"
},
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"video_frames": {
"name": "video_frames"
},
"elevation": {
"name": "elevation"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
},
"2": {
"name": "latent"
}
}
},
"SVD_img2vid_Conditioning": {
"display_name": "SVD_img2vid_Conditioning",
"inputs": {
"clip_vision": {
"name": "clip_vision"
},
"init_image": {
"name": "init_image"
},
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"video_frames": {
"name": "video_frames"
},
"motion_bucket_id": {
"name": "motion_bucket_id"
},
"fps": {
"name": "fps"
},
"augmentation_level": {
"name": "augmentation_level"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
},
"2": {
"name": "latent"
}
}
},
"T5TokenizerOptions": {
"display_name": "T5TokenizerOptions",
"inputs": {
"clip": {
"name": "clip"
},
"min_padding": {
"name": "min_padding"
},
"min_length": {
"name": "min_length"
}
}
},
"TextEncodeHunyuanVideo_ImageToVideo": {
"display_name": "TextEncodeHunyuanVideo_ImageToVideo",
"inputs": {
"clip": {
"name": "clip"
},
"clip_vision_output": {
"name": "clip_vision_output"
},
"prompt": {
"name": "prompt"
},
"image_interleave": {
"name": "image_interleave",
"tooltip": "How much the image influences things vs the text prompt. Higher number means more influence from the text prompt."
}
}
},
"ThresholdMask": {
"display_name": "ThresholdMask",
"inputs": {
"mask": {
"name": "mask"
},
"value": {
"name": "value"
}
}
},
"TomePatchModel": {
"display_name": "TomePatchModel",
"inputs": {
"model": {
"name": "model"
},
"ratio": {
"name": "ratio"
}
}
},
"TorchCompileModel": {
"display_name": "TorchCompileModel",
"inputs": {
"model": {
"name": "model"
},
"backend": {
"name": "backend"
}
}
},
"TrimVideoLatent": {
"display_name": "TrimVideoLatent",
"inputs": {
"samples": {
"name": "samples"
},
"trim_amount": {
"name": "trim_amount"
}
}
},
"TripleCLIPLoader": {
"display_name": "TripleCLIPLoader",
"description": "[Recipes]\n\nsd3: clip-l, clip-g, t5",
"inputs": {
"clip_name1": {
"name": "clip_name1"
},
"clip_name2": {
"name": "clip_name2"
},
"clip_name3": {
"name": "clip_name3"
}
}
},
"unCLIPCheckpointLoader": {
"display_name": "unCLIPCheckpointLoader",
"inputs": {
"ckpt_name": {
"name": "ckpt_name"
}
}
},
"unCLIPConditioning": {
"display_name": "unCLIPConditioning",
"inputs": {
"conditioning": {
"name": "conditioning"
},
"clip_vision_output": {
"name": "clip_vision_output"
},
"strength": {
"name": "strength"
},
"noise_augmentation": {
"name": "noise_augmentation"
}
}
},
"UNetCrossAttentionMultiply": {
"display_name": "UNetCrossAttentionMultiply",
"inputs": {
"model": {
"name": "model"
},
"q": {
"name": "q"
},
"k": {
"name": "k"
},
"v": {
"name": "v"
},
"out": {
"name": "out"
}
}
},
"UNETLoader": {
"display_name": "Load Diffusion Model",
"inputs": {
"unet_name": {
"name": "unet_name"
},
"weight_dtype": {
"name": "weight_dtype"
}
}
},
"UNetSelfAttentionMultiply": {
"display_name": "UNetSelfAttentionMultiply",
"inputs": {
"model": {
"name": "model"
},
"q": {
"name": "q"
},
"k": {
"name": "k"
},
"v": {
"name": "v"
},
"out": {
"name": "out"
}
}
},
"UNetTemporalAttentionMultiply": {
"display_name": "UNetTemporalAttentionMultiply",
"inputs": {
"model": {
"name": "model"
},
"self_structural": {
"name": "self_structural"
},
"self_temporal": {
"name": "self_temporal"
},
"cross_structural": {
"name": "cross_structural"
},
"cross_temporal": {
"name": "cross_temporal"
}
}
},
"UpscaleModelLoader": {
"display_name": "Load Upscale Model",
"inputs": {
"model_name": {
"name": "model_name"
}
}
},
"VAEDecode": {
"display_name": "VAE Decode",
"description": "Decodes latent images back into pixel space images.",
"inputs": {
"samples": {
"name": "samples",
"tooltip": "The latent to be decoded."
},
"vae": {
"name": "vae",
"tooltip": "The VAE model used for decoding the latent."
}
},
"outputs": {
"0": {
"tooltip": "The decoded image."
}
}
},
"VAEDecodeAudio": {
"display_name": "VAEDecodeAudio",
"inputs": {
"samples": {
"name": "samples"
},
"vae": {
"name": "vae"
}
}
},
"VAEDecodeHunyuan3D": {
"display_name": "VAEDecodeHunyuan3D",
"inputs": {
"samples": {
"name": "samples"
},
"vae": {
"name": "vae"
},
"num_chunks": {
"name": "num_chunks"
},
"octree_resolution": {
"name": "octree_resolution"
}
}
},
"VAEDecodeTiled": {
"display_name": "VAE Decode (Tiled)",
"inputs": {
"samples": {
"name": "samples"
},
"vae": {
"name": "vae"
},
"tile_size": {
"name": "tile_size"
},
"overlap": {
"name": "overlap"
},
"temporal_size": {
"name": "temporal_size",
"tooltip": "Only used for video VAEs: Amount of frames to decode at a time."
},
"temporal_overlap": {
"name": "temporal_overlap",
"tooltip": "Only used for video VAEs: Amount of frames to overlap."
}
}
},
"VAEEncode": {
"display_name": "VAE Encode",
"inputs": {
"pixels": {
"name": "pixels"
},
"vae": {
"name": "vae"
}
}
},
"VAEEncodeAudio": {
"display_name": "VAEEncodeAudio",
"inputs": {
"audio": {
"name": "audio"
},
"vae": {
"name": "vae"
}
}
},
"VAEEncodeForInpaint": {
"display_name": "VAE Encode (for Inpainting)",
"inputs": {
"pixels": {
"name": "pixels"
},
"vae": {
"name": "vae"
},
"mask": {
"name": "mask"
},
"grow_mask_by": {
"name": "grow_mask_by"
}
}
},
"VAEEncodeTiled": {
"display_name": "VAE Encode (Tiled)",
"inputs": {
"pixels": {
"name": "pixels"
},
"vae": {
"name": "vae"
},
"tile_size": {
"name": "tile_size"
},
"overlap": {
"name": "overlap"
},
"temporal_size": {
"name": "temporal_size",
"tooltip": "Only used for video VAEs: Amount of frames to encode at a time."
},
"temporal_overlap": {
"name": "temporal_overlap",
"tooltip": "Only used for video VAEs: Amount of frames to overlap."
}
}
},
"VAELoader": {
"display_name": "Load VAE",
"inputs": {
"vae_name": {
"name": "vae_name"
}
}
},
"VAESave": {
"display_name": "VAESave",
"inputs": {
"vae": {
"name": "vae"
},
"filename_prefix": {
"name": "filename_prefix"
}
}
},
"VeoVideoGenerationNode": {
"display_name": "Google Veo2 Video Generation",
"description": "Generates videos from text prompts using Google's Veo API",
"inputs": {
"prompt": {
"name": "prompt",
"tooltip": "Text description of the video"
},
"aspect_ratio": {
"name": "aspect_ratio",
"tooltip": "Aspect ratio of the output video"
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Negative text prompt to guide what to avoid in the video"
},
"duration_seconds": {
"name": "duration_seconds",
"tooltip": "Duration of the output video in seconds"
},
"enhance_prompt": {
"name": "enhance_prompt",
"tooltip": "Whether to enhance the prompt with AI assistance"
},
"person_generation": {
"name": "person_generation",
"tooltip": "Whether to allow generating people in the video"
},
"seed": {
"name": "seed",
"tooltip": "Seed for video generation (0 for random)"
},
"image": {
"name": "image",
"tooltip": "Optional reference image to guide video generation"
},
"control_after_generate": {
"name": "control after generate"
}
}
},
"VideoLinearCFGGuidance": {
"display_name": "VideoLinearCFGGuidance",
"inputs": {
"model": {
"name": "model"
},
"min_cfg": {
"name": "min_cfg"
}
}
},
"VideoTriangleCFGGuidance": {
"display_name": "VideoTriangleCFGGuidance",
"inputs": {
"model": {
"name": "model"
},
"min_cfg": {
"name": "min_cfg"
}
}
},
"VoxelToMesh": {
"display_name": "VoxelToMesh",
"inputs": {
"voxel": {
"name": "voxel"
},
"algorithm": {
"name": "algorithm"
},
"threshold": {
"name": "threshold"
}
}
},
"VoxelToMeshBasic": {
"display_name": "VoxelToMeshBasic",
"inputs": {
"voxel": {
"name": "voxel"
},
"threshold": {
"name": "threshold"
}
}
},
"VPScheduler": {
"display_name": "VPScheduler",
"inputs": {
"steps": {
"name": "steps"
},
"beta_d": {
"name": "beta_d"
},
"beta_min": {
"name": "beta_min"
},
"eps_s": {
"name": "eps_s"
}
}
},
"WanFirstLastFrameToVideo": {
"display_name": "WanFirstLastFrameToVideo",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
},
"clip_vision_start_image": {
"name": "clip_vision_start_image"
},
"clip_vision_end_image": {
"name": "clip_vision_end_image"
},
"start_image": {
"name": "start_image"
},
"end_image": {
"name": "end_image"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
},
"2": {
"name": "latent"
}
}
},
"WanFunControlToVideo": {
"display_name": "WanFunControlToVideo",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
},
"clip_vision_output": {
"name": "clip_vision_output"
},
"start_image": {
"name": "start_image"
},
"control_video": {
"name": "control_video"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
},
"2": {
"name": "latent"
}
}
},
"WanFunInpaintToVideo": {
"display_name": "WanFunInpaintToVideo",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
},
"clip_vision_output": {
"name": "clip_vision_output"
},
"start_image": {
"name": "start_image"
},
"end_image": {
"name": "end_image"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
},
"2": {
"name": "latent"
}
}
},
"WanImageToVideo": {
"display_name": "WanImageToVideo",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
},
"clip_vision_output": {
"name": "clip_vision_output"
},
"start_image": {
"name": "start_image"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
},
"2": {
"name": "latent"
}
}
},
"WanVaceToVideo": {
"display_name": "WanVaceToVideo",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
},
"strength": {
"name": "strength"
},
"control_video": {
"name": "control_video"
},
"control_masks": {
"name": "control_masks"
},
"reference_image": {
"name": "reference_image"
}
},
"outputs": {
"0": {
"name": "positive"
},
"1": {
"name": "negative"
},
"2": {
"name": "latent"
},
"3": {
"name": "trim_latent"
}
}
},
"WebcamCapture": {
"display_name": "Webcam Capture",
"inputs": {
"image": {
"name": "image"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"capture_on_queue": {
"name": "capture_on_queue"
},
"waiting for camera___": {}
}
}
}