Compare commits

...

3 Commits

Author SHA1 Message Date
bymyself
1202709996 feat: add SEARCH_ALIASES for model and misc nodes
Add search aliases to model-related and miscellaneous nodes:
- Model nodes: nodes_model_merging.py, nodes_model_advanced.py, nodes_lora_extract.py
- Sampler nodes: nodes_custom_sampler.py, nodes_align_your_steps.py
- Control nodes: nodes_controlnet.py, nodes_attention_multiply.py, nodes_hooks.py
- Training nodes: nodes_train.py, nodes_dataset.py
- Utility nodes: nodes_logic.py, nodes_canny.py, nodes_differential_diffusion.py
- Architecture-specific: nodes_sd3.py, nodes_pixart.py, nodes_lumina2.py, nodes_kandinsky5.py, nodes_hidream.py, nodes_fresca.py, nodes_hunyuan3d.py
- Media nodes: nodes_load_3d.py, nodes_webcam.py, nodes_preview_any.py, nodes_wanmove.py

Uses the search_aliases parameter in io.Schema() for v3 nodes, and the SEARCH_ALIASES class attribute for legacy nodes.
2026-01-21 19:26:51 -08:00
bymyself
dcde86463c Propagate search_aliases through V3 Schema.get_v1_info to NodeInfoV1 2026-01-21 15:26:49 -08:00
bymyself
f02abedcd9 feat: Add search_aliases field to node schema
Adds a `search_aliases` field to improve node discoverability. Users can define alternative search terms for nodes (e.g., "text concat" → StringConcatenate).

Changes:
- Add `search_aliases: list[str]` to V3 Schema
- Add `SEARCH_ALIASES` support for V1 nodes
- Include field in `/object_info` response
- Add aliases to high-priority core nodes

V1 usage:
```python
class MyNode:
    SEARCH_ALIASES = ["alt name", "synonym"]
```

V3 usage:
```python
io.Schema(
    node_id="MyNode",
    search_aliases=["alt name", "synonym"],
    ...
)
```

## Related PRs
- Frontend: Comfy-Org/ComfyUI_frontend#XXXX (draft - merge after this)
- Docs: Comfy-Org/docs#XXXX (draft - merge after stable)
2026-01-21 14:25:45 -08:00
30 changed files with 62 additions and 3 deletions

View File

@@ -1249,6 +1249,7 @@ class NodeInfoV1:
experimental: bool=None experimental: bool=None
api_node: bool=None api_node: bool=None
price_badge: dict | None = None price_badge: dict | None = None
search_aliases: list[str]=None
@dataclass @dataclass
class NodeInfoV3: class NodeInfoV3:
@@ -1346,6 +1347,8 @@ class Schema:
hidden: list[Hidden] = field(default_factory=list) hidden: list[Hidden] = field(default_factory=list)
description: str="" description: str=""
"""Node description, shown as a tooltip when hovering over the node.""" """Node description, shown as a tooltip when hovering over the node."""
search_aliases: list[str] = field(default_factory=list)
"""Alternative names for search. Useful for synonyms, abbreviations, or old names after renaming."""
is_input_list: bool = False is_input_list: bool = False
"""A flag indicating if this node implements the additional code necessary to deal with OUTPUT_IS_LIST nodes. """A flag indicating if this node implements the additional code necessary to deal with OUTPUT_IS_LIST nodes.
@@ -1483,6 +1486,7 @@ class Schema:
api_node=self.is_api_node, api_node=self.is_api_node,
python_module=getattr(cls, "RELATIVE_PYTHON_MODULE", "nodes"), python_module=getattr(cls, "RELATIVE_PYTHON_MODULE", "nodes"),
price_badge=self.price_badge.as_dict(self.inputs) if self.price_badge is not None else None, price_badge=self.price_badge.as_dict(self.inputs) if self.price_badge is not None else None,
search_aliases=self.search_aliases if self.search_aliases else None,
) )
return info return info

View File

@@ -28,6 +28,7 @@ class AlignYourStepsScheduler(io.ComfyNode):
def define_schema(cls) -> io.Schema: def define_schema(cls) -> io.Schema:
return io.Schema( return io.Schema(
node_id="AlignYourStepsScheduler", node_id="AlignYourStepsScheduler",
search_aliases=["AYS scheduler"],
category="sampling/custom_sampling/schedulers", category="sampling/custom_sampling/schedulers",
inputs=[ inputs=[
io.Combo.Input("model_type", options=["SD1", "SDXL", "SVD"]), io.Combo.Input("model_type", options=["SD1", "SDXL", "SVD"]),

View File

@@ -71,6 +71,7 @@ class CLIPAttentionMultiply(io.ComfyNode):
def define_schema(cls) -> io.Schema: def define_schema(cls) -> io.Schema:
return io.Schema( return io.Schema(
node_id="CLIPAttentionMultiply", node_id="CLIPAttentionMultiply",
search_aliases=["clip attention scale", "text encoder attention"],
category="_for_testing/attention_experiments", category="_for_testing/attention_experiments",
inputs=[ inputs=[
io.Clip.Input("clip"), io.Clip.Input("clip"),

View File

@@ -10,6 +10,7 @@ class Canny(io.ComfyNode):
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="Canny", node_id="Canny",
search_aliases=["edge detection", "outline", "contour detection", "line art"],
category="image/preprocessors", category="image/preprocessors",
inputs=[ inputs=[
io.Image.Input("image"), io.Image.Input("image"),

View File

@@ -38,6 +38,7 @@ class ControlNetInpaintingAliMamaApply(io.ComfyNode):
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="ControlNetInpaintingAliMamaApply", node_id="ControlNetInpaintingAliMamaApply",
search_aliases=["masked controlnet"],
category="conditioning/controlnet", category="conditioning/controlnet",
inputs=[ inputs=[
io.Conditioning.Input("positive"), io.Conditioning.Input("positive"),

View File

@@ -297,6 +297,7 @@ class ExtendIntermediateSigmas(io.ComfyNode):
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="ExtendIntermediateSigmas", node_id="ExtendIntermediateSigmas",
search_aliases=["interpolate sigmas"],
category="sampling/custom_sampling/sigmas", category="sampling/custom_sampling/sigmas",
inputs=[ inputs=[
io.Sigmas.Input("sigmas"), io.Sigmas.Input("sigmas"),
@@ -856,6 +857,7 @@ class DualCFGGuider(io.ComfyNode):
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="DualCFGGuider", node_id="DualCFGGuider",
search_aliases=["dual prompt guidance"],
category="sampling/custom_sampling/guiders", category="sampling/custom_sampling/guiders",
inputs=[ inputs=[
io.Model.Input("model"), io.Model.Input("model"),
@@ -883,6 +885,7 @@ class DisableNoise(io.ComfyNode):
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="DisableNoise", node_id="DisableNoise",
search_aliases=["zero noise"],
category="sampling/custom_sampling/noise", category="sampling/custom_sampling/noise",
inputs=[], inputs=[],
outputs=[io.Noise.Output()] outputs=[io.Noise.Output()]
@@ -1019,6 +1022,7 @@ class ManualSigmas(io.ComfyNode):
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="ManualSigmas", node_id="ManualSigmas",
search_aliases=["custom noise schedule", "define sigmas"],
category="_for_testing/custom_sampling", category="_for_testing/custom_sampling",
is_experimental=True, is_experimental=True,
inputs=[ inputs=[

View File

@@ -1223,11 +1223,11 @@ class ResolutionBucket(io.ComfyNode):
class MakeTrainingDataset(io.ComfyNode): class MakeTrainingDataset(io.ComfyNode):
"""Encode images with VAE and texts with CLIP to create a training dataset.""" """Encode images with VAE and texts with CLIP to create a training dataset."""
@classmethod @classmethod
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="MakeTrainingDataset", node_id="MakeTrainingDataset",
search_aliases=["encode dataset"],
display_name="Make Training Dataset", display_name="Make Training Dataset",
category="dataset", category="dataset",
is_experimental=True, is_experimental=True,
@@ -1309,11 +1309,11 @@ class MakeTrainingDataset(io.ComfyNode):
class SaveTrainingDataset(io.ComfyNode): class SaveTrainingDataset(io.ComfyNode):
"""Save encoded training dataset (latents + conditioning) to disk.""" """Save encoded training dataset (latents + conditioning) to disk."""
@classmethod @classmethod
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="SaveTrainingDataset", node_id="SaveTrainingDataset",
search_aliases=["export training data"],
display_name="Save Training Dataset", display_name="Save Training Dataset",
category="dataset", category="dataset",
is_experimental=True, is_experimental=True,
@@ -1410,11 +1410,11 @@ class SaveTrainingDataset(io.ComfyNode):
class LoadTrainingDataset(io.ComfyNode): class LoadTrainingDataset(io.ComfyNode):
"""Load encoded training dataset from disk.""" """Load encoded training dataset from disk."""
@classmethod @classmethod
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="LoadTrainingDataset", node_id="LoadTrainingDataset",
search_aliases=["import dataset", "training data"],
display_name="Load Training Dataset", display_name="Load Training Dataset",
category="dataset", category="dataset",
is_experimental=True, is_experimental=True,

View File

@@ -11,6 +11,7 @@ class DifferentialDiffusion(io.ComfyNode):
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="DifferentialDiffusion", node_id="DifferentialDiffusion",
search_aliases=["inpaint gradient", "variable denoise strength"],
display_name="Differential Diffusion", display_name="Differential Diffusion",
category="_for_testing", category="_for_testing",
inputs=[ inputs=[

View File

@@ -58,6 +58,7 @@ class FreSca(io.ComfyNode):
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="FreSca", node_id="FreSca",
search_aliases=["frequency guidance"],
display_name="FreSca", display_name="FreSca",
category="_for_testing", category="_for_testing",
description="Applies frequency-dependent scaling to the guidance", description="Applies frequency-dependent scaling to the guidance",

View File

@@ -38,6 +38,7 @@ class CLIPTextEncodeHiDream(io.ComfyNode):
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="CLIPTextEncodeHiDream", node_id="CLIPTextEncodeHiDream",
search_aliases=["hidream prompt"],
category="advanced/conditioning", category="advanced/conditioning",
inputs=[ inputs=[
io.Clip.Input("clip"), io.Clip.Input("clip"),

View File

@@ -259,6 +259,7 @@ class SetClipHooks:
return (clip,) return (clip,)
class ConditioningTimestepsRange: class ConditioningTimestepsRange:
SEARCH_ALIASES = ["prompt scheduling", "timestep segments", "conditioning phases"]
NodeId = 'ConditioningTimestepsRange' NodeId = 'ConditioningTimestepsRange'
NodeName = 'Timesteps Range' NodeName = 'Timesteps Range'
@classmethod @classmethod
@@ -468,6 +469,7 @@ class SetHookKeyframes:
return (hooks,) return (hooks,)
class CreateHookKeyframe: class CreateHookKeyframe:
SEARCH_ALIASES = ["hook scheduling", "strength animation", "timed hook"]
NodeId = 'CreateHookKeyframe' NodeId = 'CreateHookKeyframe'
NodeName = 'Create Hook Keyframe' NodeName = 'Create Hook Keyframe'
@classmethod @classmethod
@@ -497,6 +499,7 @@ class CreateHookKeyframe:
return (prev_hook_kf,) return (prev_hook_kf,)
class CreateHookKeyframesInterpolated: class CreateHookKeyframesInterpolated:
SEARCH_ALIASES = ["ease hook strength", "smooth hook transition", "interpolate keyframes"]
NodeId = 'CreateHookKeyframesInterpolated' NodeId = 'CreateHookKeyframesInterpolated'
NodeName = 'Create Hook Keyframes Interp.' NodeName = 'Create Hook Keyframes Interp.'
@classmethod @classmethod
@@ -544,6 +547,7 @@ class CreateHookKeyframesInterpolated:
return (prev_hook_kf,) return (prev_hook_kf,)
class CreateHookKeyframesFromFloats: class CreateHookKeyframesFromFloats:
SEARCH_ALIASES = ["batch keyframes", "strength list to keyframes"]
NodeId = 'CreateHookKeyframesFromFloats' NodeId = 'CreateHookKeyframesFromFloats'
NodeName = 'Create Hook Keyframes From Floats' NodeName = 'Create Hook Keyframes From Floats'
@classmethod @classmethod
@@ -618,6 +622,7 @@ class SetModelHooksOnCond:
# Combine Hooks # Combine Hooks
#------------------------------------------ #------------------------------------------
class CombineHooks: class CombineHooks:
SEARCH_ALIASES = ["merge hooks"]
NodeId = 'CombineHooks2' NodeId = 'CombineHooks2'
NodeName = 'Combine Hooks [2]' NodeName = 'Combine Hooks [2]'
@classmethod @classmethod

View File

@@ -618,6 +618,7 @@ class SaveGLB(IO.ComfyNode):
def define_schema(cls): def define_schema(cls):
return IO.Schema( return IO.Schema(
node_id="SaveGLB", node_id="SaveGLB",
search_aliases=["export 3d model", "save mesh"],
category="3d", category="3d",
is_output_node=True, is_output_node=True,
inputs=[ inputs=[

View File

@@ -104,6 +104,7 @@ class CLIPTextEncodeKandinsky5(io.ComfyNode):
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="CLIPTextEncodeKandinsky5", node_id="CLIPTextEncodeKandinsky5",
search_aliases=["kandinsky prompt"],
category="advanced/conditioning/kandinsky5", category="advanced/conditioning/kandinsky5",
inputs=[ inputs=[
io.Clip.Input("clip"), io.Clip.Input("clip"),

View File

@@ -75,6 +75,7 @@ class Preview3D(IO.ComfyNode):
def define_schema(cls): def define_schema(cls):
return IO.Schema( return IO.Schema(
node_id="Preview3D", node_id="Preview3D",
search_aliases=["view mesh", "3d viewer"],
display_name="Preview 3D & Animation", display_name="Preview 3D & Animation",
category="3d", category="3d",
is_experimental=True, is_experimental=True,

View File

@@ -224,6 +224,7 @@ class ConvertStringToComboNode(io.ComfyNode):
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="ConvertStringToComboNode", node_id="ConvertStringToComboNode",
search_aliases=["string to dropdown", "text to combo"],
display_name="Convert String to Combo", display_name="Convert String to Combo",
category="logic", category="logic",
inputs=[io.String.Input("string")], inputs=[io.String.Input("string")],
@@ -239,6 +240,7 @@ class InvertBooleanNode(io.ComfyNode):
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="InvertBooleanNode", node_id="InvertBooleanNode",
search_aliases=["not", "toggle", "negate", "flip boolean"],
display_name="Invert Boolean", display_name="Invert Boolean",
category="logic", category="logic",
inputs=[io.Boolean.Input("boolean")], inputs=[io.Boolean.Input("boolean")],

View File

@@ -78,6 +78,7 @@ class LoraSave(io.ComfyNode):
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="LoraSave", node_id="LoraSave",
search_aliases=["export lora"],
display_name="Extract and Save Lora", display_name="Extract and Save Lora",
category="_for_testing", category="_for_testing",
inputs=[ inputs=[

View File

@@ -79,6 +79,7 @@ class CLIPTextEncodeLumina2(io.ComfyNode):
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="CLIPTextEncodeLumina2", node_id="CLIPTextEncodeLumina2",
search_aliases=["lumina prompt"],
display_name="CLIP Text Encode for Lumina2", display_name="CLIP Text Encode for Lumina2",
category="conditioning", category="conditioning",
description="Encodes a system prompt and a user prompt using a CLIP model into an embedding " description="Encodes a system prompt and a user prompt using a CLIP model into an embedding "

View File

@@ -299,6 +299,7 @@ class RescaleCFG:
return (m, ) return (m, )
class ModelComputeDtype: class ModelComputeDtype:
SEARCH_ALIASES = ["model precision", "change dtype"]
@classmethod @classmethod
def INPUT_TYPES(s): def INPUT_TYPES(s):
return {"required": { "model": ("MODEL",), return {"required": { "model": ("MODEL",),

View File

@@ -91,6 +91,7 @@ class CLIPMergeSimple:
class CLIPSubtract: class CLIPSubtract:
SEARCH_ALIASES = ["clip difference", "text encoder subtract"]
@classmethod @classmethod
def INPUT_TYPES(s): def INPUT_TYPES(s):
return {"required": { "clip1": ("CLIP",), return {"required": { "clip1": ("CLIP",),
@@ -113,6 +114,7 @@ class CLIPSubtract:
class CLIPAdd: class CLIPAdd:
SEARCH_ALIASES = ["combine clip"]
@classmethod @classmethod
def INPUT_TYPES(s): def INPUT_TYPES(s):
return {"required": { "clip1": ("CLIP",), return {"required": { "clip1": ("CLIP",),
@@ -225,6 +227,7 @@ def save_checkpoint(model, clip=None, vae=None, clip_vision=None, filename_prefi
comfy.sd.save_checkpoint(output_checkpoint, model, clip, vae, clip_vision, metadata=metadata, extra_keys=extra_keys) comfy.sd.save_checkpoint(output_checkpoint, model, clip, vae, clip_vision, metadata=metadata, extra_keys=extra_keys)
class CheckpointSave: class CheckpointSave:
SEARCH_ALIASES = ["save model", "export checkpoint", "merge save"]
def __init__(self): def __init__(self):
self.output_dir = folder_paths.get_output_directory() self.output_dir = folder_paths.get_output_directory()
@@ -337,6 +340,7 @@ class VAESave:
return {} return {}
class ModelSave: class ModelSave:
SEARCH_ALIASES = ["export model", "checkpoint save"]
def __init__(self): def __init__(self):
self.output_dir = folder_paths.get_output_directory() self.output_dir = folder_paths.get_output_directory()

View File

@@ -7,6 +7,7 @@ class CLIPTextEncodePixArtAlpha(io.ComfyNode):
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="CLIPTextEncodePixArtAlpha", node_id="CLIPTextEncodePixArtAlpha",
search_aliases=["pixart prompt"],
category="advanced/conditioning", category="advanced/conditioning",
description="Encodes text and sets the resolution conditioning for PixArt Alpha. Does not apply to PixArt Sigma.", description="Encodes text and sets the resolution conditioning for PixArt Alpha. Does not apply to PixArt Sigma.",
inputs=[ inputs=[

View File

@@ -550,6 +550,7 @@ class BatchImagesNode(io.ComfyNode):
node_id="BatchImagesNode", node_id="BatchImagesNode",
display_name="Batch Images", display_name="Batch Images",
category="image", category="image",
search_aliases=["batch", "image batch", "batch images", "combine images", "merge images", "stack images"],
inputs=[ inputs=[
io.Autogrow.Input("images", template=autogrow_template) io.Autogrow.Input("images", template=autogrow_template)
], ],

View File

@@ -16,6 +16,7 @@ class PreviewAny():
OUTPUT_NODE = True OUTPUT_NODE = True
CATEGORY = "utils" CATEGORY = "utils"
SEARCH_ALIASES = ["show output", "inspect", "debug", "print value", "show text"]
def main(self, source=None): def main(self, source=None):
value = 'None' value = 'None'

View File

@@ -65,6 +65,7 @@ class CLIPTextEncodeSD3(io.ComfyNode):
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="CLIPTextEncodeSD3", node_id="CLIPTextEncodeSD3",
search_aliases=["sd3 prompt"],
category="advanced/conditioning", category="advanced/conditioning",
inputs=[ inputs=[
io.Clip.Input("clip"), io.Clip.Input("clip"),

View File

@@ -11,6 +11,7 @@ class StringConcatenate(io.ComfyNode):
node_id="StringConcatenate", node_id="StringConcatenate",
display_name="Concatenate", display_name="Concatenate",
category="utils/string", category="utils/string",
search_aliases=["text concat", "join text", "merge text", "combine strings", "concat", "concatenate", "append text", "combine text", "string"],
inputs=[ inputs=[
io.String.Input("string_a", multiline=True), io.String.Input("string_a", multiline=True),
io.String.Input("string_b", multiline=True), io.String.Input("string_b", multiline=True),

View File

@@ -1101,6 +1101,7 @@ class SaveLoRA(io.ComfyNode):
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="SaveLoRA", node_id="SaveLoRA",
search_aliases=["export lora"],
display_name="Save LoRA Weights", display_name="Save LoRA Weights",
category="loaders", category="loaders",
is_experimental=True, is_experimental=True,
@@ -1144,6 +1145,7 @@ class LossGraphNode(io.ComfyNode):
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="LossGraphNode", node_id="LossGraphNode",
search_aliases=["training chart", "training visualization", "plot loss"],
display_name="Plot Loss Graph", display_name="Plot Loss Graph",
category="training", category="training",
is_experimental=True, is_experimental=True,

View File

@@ -53,6 +53,7 @@ class ImageUpscaleWithModel(io.ComfyNode):
node_id="ImageUpscaleWithModel", node_id="ImageUpscaleWithModel",
display_name="Upscale Image (using Model)", display_name="Upscale Image (using Model)",
category="image/upscaling", category="image/upscaling",
search_aliases=["upscale", "upscaler", "upsc", "enlarge image", "super resolution", "hires", "superres", "increase resolution"],
inputs=[ inputs=[
io.UpscaleModel.Input("upscale_model"), io.UpscaleModel.Input("upscale_model"),
io.Image.Input("image"), io.Image.Input("image"),

View File

@@ -324,6 +324,7 @@ class GenerateTracks(io.ComfyNode):
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="GenerateTracks", node_id="GenerateTracks",
search_aliases=["motion paths", "camera movement", "trajectory"],
category="conditioning/video_models", category="conditioning/video_models",
inputs=[ inputs=[
io.Int.Input("width", default=832, min=16, max=4096, step=16), io.Int.Input("width", default=832, min=16, max=4096, step=16),

View File

@@ -5,6 +5,7 @@ MAX_RESOLUTION = nodes.MAX_RESOLUTION
class WebcamCapture(nodes.LoadImage): class WebcamCapture(nodes.LoadImage):
SEARCH_ALIASES = ["camera input", "live capture", "camera feed", "snapshot"]
@classmethod @classmethod
def INPUT_TYPES(s): def INPUT_TYPES(s):
return { return {

View File

@@ -70,6 +70,7 @@ class CLIPTextEncode(ComfyNodeABC):
CATEGORY = "conditioning" CATEGORY = "conditioning"
DESCRIPTION = "Encodes a text prompt using a CLIP model into an embedding that can be used to guide the diffusion model towards generating specific images." DESCRIPTION = "Encodes a text prompt using a CLIP model into an embedding that can be used to guide the diffusion model towards generating specific images."
SEARCH_ALIASES = ["text", "prompt", "text prompt", "positive prompt", "negative prompt", "encode text", "text encoder", "encode prompt"]
def encode(self, clip, text): def encode(self, clip, text):
if clip is None: if clip is None:
@@ -86,6 +87,7 @@ class ConditioningCombine:
FUNCTION = "combine" FUNCTION = "combine"
CATEGORY = "conditioning" CATEGORY = "conditioning"
SEARCH_ALIASES = ["combine", "merge conditioning", "combine prompts", "merge prompts", "mix prompts", "add prompt"]
def combine(self, conditioning_1, conditioning_2): def combine(self, conditioning_1, conditioning_2):
return (conditioning_1 + conditioning_2, ) return (conditioning_1 + conditioning_2, )
@@ -294,6 +296,7 @@ class VAEDecode:
CATEGORY = "latent" CATEGORY = "latent"
DESCRIPTION = "Decodes latent images back into pixel space images." DESCRIPTION = "Decodes latent images back into pixel space images."
SEARCH_ALIASES = ["decode", "decode latent", "latent to image", "render latent"]
def decode(self, vae, samples): def decode(self, vae, samples):
latent = samples["samples"] latent = samples["samples"]
@@ -346,6 +349,7 @@ class VAEEncode:
FUNCTION = "encode" FUNCTION = "encode"
CATEGORY = "latent" CATEGORY = "latent"
SEARCH_ALIASES = ["encode", "encode image", "image to latent"]
def encode(self, vae, pixels): def encode(self, vae, pixels):
t = vae.encode(pixels) t = vae.encode(pixels)
@@ -581,6 +585,7 @@ class CheckpointLoaderSimple:
CATEGORY = "loaders" CATEGORY = "loaders"
DESCRIPTION = "Loads a diffusion model checkpoint, diffusion models are used to denoise latents." DESCRIPTION = "Loads a diffusion model checkpoint, diffusion models are used to denoise latents."
SEARCH_ALIASES = ["load model", "checkpoint", "model loader", "load checkpoint", "ckpt", "model"]
def load_checkpoint(self, ckpt_name): def load_checkpoint(self, ckpt_name):
ckpt_path = folder_paths.get_full_path_or_raise("checkpoints", ckpt_name) ckpt_path = folder_paths.get_full_path_or_raise("checkpoints", ckpt_name)
@@ -667,6 +672,7 @@ class LoraLoader:
CATEGORY = "loaders" CATEGORY = "loaders"
DESCRIPTION = "LoRAs are used to modify diffusion and CLIP models, altering the way in which latents are denoised such as applying styles. Multiple LoRA nodes can be linked together." DESCRIPTION = "LoRAs are used to modify diffusion and CLIP models, altering the way in which latents are denoised such as applying styles. Multiple LoRA nodes can be linked together."
SEARCH_ALIASES = ["lora", "load lora", "apply lora", "lora loader", "lora model"]
def load_lora(self, model, clip, lora_name, strength_model, strength_clip): def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
if strength_model == 0 and strength_clip == 0: if strength_model == 0 and strength_clip == 0:
@@ -814,6 +820,7 @@ class ControlNetLoader:
FUNCTION = "load_controlnet" FUNCTION = "load_controlnet"
CATEGORY = "loaders" CATEGORY = "loaders"
SEARCH_ALIASES = ["controlnet", "control net", "cn", "load controlnet", "controlnet loader"]
def load_controlnet(self, control_net_name): def load_controlnet(self, control_net_name):
controlnet_path = folder_paths.get_full_path_or_raise("controlnet", control_net_name) controlnet_path = folder_paths.get_full_path_or_raise("controlnet", control_net_name)
@@ -890,6 +897,7 @@ class ControlNetApplyAdvanced:
FUNCTION = "apply_controlnet" FUNCTION = "apply_controlnet"
CATEGORY = "conditioning/controlnet" CATEGORY = "conditioning/controlnet"
SEARCH_ALIASES = ["controlnet", "apply controlnet", "use controlnet", "control net"]
def apply_controlnet(self, positive, negative, control_net, image, strength, start_percent, end_percent, vae=None, extra_concat=[]): def apply_controlnet(self, positive, negative, control_net, image, strength, start_percent, end_percent, vae=None, extra_concat=[]):
if strength == 0: if strength == 0:
@@ -1200,6 +1208,7 @@ class EmptyLatentImage:
CATEGORY = "latent" CATEGORY = "latent"
DESCRIPTION = "Create a new batch of empty latent images to be denoised via sampling." DESCRIPTION = "Create a new batch of empty latent images to be denoised via sampling."
SEARCH_ALIASES = ["empty", "empty latent", "new latent", "create latent", "blank latent", "blank"]
def generate(self, width, height, batch_size=1): def generate(self, width, height, batch_size=1):
latent = torch.zeros([batch_size, 4, height // 8, width // 8], device=self.device) latent = torch.zeros([batch_size, 4, height // 8, width // 8], device=self.device)
@@ -1540,6 +1549,7 @@ class KSampler:
CATEGORY = "sampling" CATEGORY = "sampling"
DESCRIPTION = "Uses the provided model, positive and negative conditioning to denoise the latent image." DESCRIPTION = "Uses the provided model, positive and negative conditioning to denoise the latent image."
SEARCH_ALIASES = ["sampler", "sample", "generate", "denoise", "diffuse", "txt2img", "img2img"]
def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0): def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise) return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)
@@ -1604,6 +1614,7 @@ class SaveImage:
CATEGORY = "image" CATEGORY = "image"
DESCRIPTION = "Saves the input images to your ComfyUI output directory." DESCRIPTION = "Saves the input images to your ComfyUI output directory."
SEARCH_ALIASES = ["save", "save image", "export image", "output image", "write image", "download"]
def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None): def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
filename_prefix += self.prefix_append filename_prefix += self.prefix_append
@@ -1640,6 +1651,8 @@ class PreviewImage(SaveImage):
self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5)) self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5))
self.compress_level = 1 self.compress_level = 1
SEARCH_ALIASES = ["preview", "preview image", "show image", "view image", "display image", "image viewer"]
@classmethod @classmethod
def INPUT_TYPES(s): def INPUT_TYPES(s):
return {"required": return {"required":
@@ -1658,6 +1671,7 @@ class LoadImage:
} }
CATEGORY = "image" CATEGORY = "image"
SEARCH_ALIASES = ["load image", "open image", "import image", "image input", "upload image", "read image", "image loader"]
RETURN_TYPES = ("IMAGE", "MASK") RETURN_TYPES = ("IMAGE", "MASK")
FUNCTION = "load_image" FUNCTION = "load_image"
@@ -1810,6 +1824,7 @@ class ImageScale:
FUNCTION = "upscale" FUNCTION = "upscale"
CATEGORY = "image/upscaling" CATEGORY = "image/upscaling"
SEARCH_ALIASES = ["resize", "resize image", "scale image", "image resize", "zoom", "zoom in", "change size"]
def upscale(self, image, upscale_method, width, height, crop): def upscale(self, image, upscale_method, width, height, crop):
if width == 0 and height == 0: if width == 0 and height == 0:

View File

@@ -682,6 +682,8 @@ class PromptServer():
if hasattr(obj_class, 'API_NODE'): if hasattr(obj_class, 'API_NODE'):
info['api_node'] = obj_class.API_NODE info['api_node'] = obj_class.API_NODE
info['search_aliases'] = getattr(obj_class, 'SEARCH_ALIASES', [])
return info return info
@routes.get("/object_info") @routes.get("/object_info")