Compare commits

2 Commits

bymyself · 9c53442a85 · 2026-02-04 23:14:29 -08:00
fix: address code review feedback

- Fix StyleModelLoader and GLIGENLoader missing placeholders
- Fix function values called without node context
- Fix ellipsis formatting and Vue props destructuring

Amp-Thread-ID: https://ampcode.com/threads/T-019c2c7e-2ac1-7114-9147-b41e6334faa9
bymyself · d50d81613a · 2026-02-04 19:49:21 -08:00
feat: add placeholder support for empty model dropdowns

Add get_model_placeholder() helper function to folder_paths.py that
generates user-friendly placeholder text for empty model dropdowns.

Update model loader nodes to include placeholder in INPUT_TYPES:

- CheckpointLoaderSimple
- unCLIPCheckpointLoader
- LoraLoader
- LoraLoaderModelOnly
- VAELoader
- ControlNetLoader
- DiffControlNetLoader
- UNETLoader
- CLIPLoader
- DualCLIPLoader
- CLIPVisionLoader

The placeholder text shows 'No models found in ComfyUI/models/{folder} folder . . .'
when the options list is empty, helping users understand where to place models.

Amp-Thread-ID: https://ampcode.com/threads/T-019c2bd5-472a-73a1-842b-4e05cba5d12c
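
For context, below is a minimal sketch of the pattern this commit applies to each loader node. The node class name is hypothetical and the loader body is omitted; the helper call and the "placeholder" option mirror the CheckpointLoaderSimple hunk in the diff further down.

import folder_paths

class ExampleModelLoader:  # hypothetical node, for illustration only
    @classmethod
    def INPUT_TYPES(cls):
        # The Combo options come from scanning the models folder; the "placeholder"
        # string is what the dropdown shows when that options list is empty.
        return {
            "required": {
                "ckpt_name": (folder_paths.get_filename_list("checkpoints"), {
                    "tooltip": "The name of the checkpoint (model) to load.",
                    "placeholder": folder_paths.get_model_placeholder("checkpoints"),
                }),
            }
        }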
24 changed files with 76 additions and 84 deletions

View File

@@ -1724,9 +1724,11 @@ def soft_empty_cache(force=False):
elif is_mlu():
torch.mlu.empty_cache()
elif torch.cuda.is_available():
torch.cuda.synchronize()
torch.cuda.empty_cache()
torch.cuda.ipc_collect()
if comfy.memory_management.aimdo_allocator is None:
#Pytorch 2.7 and earlier crashes if you try and empty_cache when mempools exist
torch.cuda.synchronize()
torch.cuda.empty_cache()
torch.cuda.ipc_collect()
def unload_all_models():
free_memory(1e30, get_torch_device())

View File

@@ -1400,7 +1400,7 @@ class ModelPatcher:
continue
key = "diffusion_model." + k
unet_state_dict[k] = LazyCastingParam(self, key, comfy.utils.get_attr(self.model, key))
return self.model.state_dict_for_saving(unet_state_dict, clip_state_dict=clip_state_dict, vae_state_dict=vae_state_dict, clip_vision_state_dict=clip_vision_state_dict)
return self.model.state_dict_for_saving(unet_state_dict)
def __del__(self):
self.unpin_all_weights()

View File

@@ -19,7 +19,6 @@ def sample_manual_loop_no_classes(
min_tokens: int = 1,
max_new_tokens: int = 2048,
audio_start_id: int = 151669, # The cutoff ID for audio codes
audio_end_id: int = 215669,
eos_token_id: int = 151645,
):
device = model.execution_device
@@ -61,7 +60,6 @@ def sample_manual_loop_no_classes(
remove_logit_value = torch.finfo(cfg_logits.dtype).min
# Only generate audio tokens
cfg_logits[:, :audio_start_id] = remove_logit_value
cfg_logits[:, audio_end_id:] = remove_logit_value
if eos_token_id is not None and eos_token_id < audio_start_id and min_tokens < step:
cfg_logits[:, eos_token_id] = eos_score

View File

@@ -651,10 +651,10 @@ class Llama2_(nn.Module):
mask = None
if attention_mask is not None:
mask = 1.0 - attention_mask.to(x.dtype).reshape((attention_mask.shape[0], 1, -1, attention_mask.shape[-1])).expand(attention_mask.shape[0], 1, seq_len, attention_mask.shape[-1])
mask = mask.masked_fill(mask.to(torch.bool), torch.finfo(x.dtype).min / 4)
mask = mask.masked_fill(mask.to(torch.bool), torch.finfo(x.dtype).min)
if seq_len > 1:
causal_mask = torch.empty(past_len + seq_len, past_len + seq_len, dtype=x.dtype, device=x.device).fill_(torch.finfo(x.dtype).min / 4).triu_(1)
causal_mask = torch.empty(past_len + seq_len, past_len + seq_len, dtype=x.dtype, device=x.device).fill_(torch.finfo(x.dtype).min).triu_(1)
if mask is not None:
mask += causal_mask
else:

View File

@@ -82,12 +82,14 @@ _TYPES = {
def load_safetensors(ckpt):
f = open(ckpt, "rb")
mapping = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
mv = memoryview(mapping)
header_size = struct.unpack("<Q", mapping[:8])[0]
header = json.loads(mapping[8:8+header_size].decode("utf-8"))
mv = mv[8 + header_size:]
with warnings.catch_warnings():
#We are working with read-only RAM by design
warnings.filterwarnings("ignore", message="The given buffer is not writable")
data_area = torch.frombuffer(mapping, dtype=torch.uint8)[8 + header_size:]
sd = {}
for name, info in header.items():
@@ -95,13 +97,7 @@ def load_safetensors(ckpt):
continue
start, end = info["data_offsets"]
if start == end:
sd[name] = torch.empty(info["shape"], dtype =_TYPES[info["dtype"]])
else:
with warnings.catch_warnings():
#We are working with read-only RAM by design
warnings.filterwarnings("ignore", message="The given buffer is not writable")
sd[name] = torch.frombuffer(mv[start:end], dtype=_TYPES[info["dtype"]]).view(info["shape"])
sd[name] = data_area[start:end].view(_TYPES[info["dtype"]]).view(info["shape"])
return sd, header.get("__metadata__", {}),

View File

@@ -1309,7 +1309,6 @@ class NodeInfoV1:
api_node: bool=None
price_badge: dict | None = None
search_aliases: list[str]=None
main_category: str=None
@dataclass
@@ -1431,8 +1430,6 @@ class Schema:
"""Flags a node as expandable, allowing NodeOutput to include 'expand' property."""
accept_all_inputs: bool=False
"""When True, all inputs from the prompt will be passed to the node as kwargs, even if not defined in the schema."""
main_category: str | None = None
"""Optional main category for top-level tabs in the node library (e.g., 'Basic', 'Image Tools', 'Partner Nodes')."""
def validate(self):
'''Validate the schema:
@@ -1539,7 +1536,6 @@ class Schema:
python_module=getattr(cls, "RELATIVE_PYTHON_MODULE", "nodes"),
price_badge=self.price_badge.as_dict(self.inputs) if self.price_badge is not None else None,
search_aliases=self.search_aliases if self.search_aliases else None,
main_category=self.main_category,
)
return info

View File

@@ -37,7 +37,6 @@ class TencentTextToModelNode(IO.ComfyNode):
node_id="TencentTextToModelNode",
display_name="Hunyuan3D: Text to Model (Pro)",
category="api node/3d/Tencent",
main_category="3D",
inputs=[
IO.Combo.Input(
"model",
@@ -148,7 +147,6 @@ class TencentImageToModelNode(IO.ComfyNode):
node_id="TencentImageToModelNode",
display_name="Hunyuan3D: Image(s) to Model (Pro)",
category="api node/3d/Tencent",
main_category="3D",
inputs=[
IO.Combo.Input(
"model",

View File

@@ -1936,7 +1936,6 @@ class KlingLipSyncAudioToVideoNode(IO.ComfyNode):
node_id="KlingLipSyncAudioToVideoNode",
display_name="Kling Lip Sync Video with Audio",
category="api node/video/Kling",
main_category="Video Generation",
description="Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file. When using, ensure that the audio contains clearly distinguishable vocals and that the video contains a distinct face. The audio file should not be larger than 5MB. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.",
inputs=[
IO.Video.Input("video"),

View File

@@ -576,7 +576,6 @@ class OpenAIChatNode(IO.ComfyNode):
node_id="OpenAIChatNode",
display_name="OpenAI ChatGPT",
category="api node/text/OpenAI",
main_category="Text Generation",
description="Generate text responses from an OpenAI model.",
inputs=[
IO.String.Input(

View File

@@ -963,7 +963,6 @@ class RecraftRemoveBackgroundNode(IO.ComfyNode):
node_id="RecraftRemoveBackgroundNode",
display_name="Recraft Remove Background",
category="api node/image/Recraft",
main_category="Image Tools",
description="Remove background from image, and return processed image and mask.",
inputs=[
IO.Image.Input("image"),

View File

@@ -624,7 +624,6 @@ class StabilityTextToAudio(IO.ComfyNode):
node_id="StabilityTextToAudio",
display_name="Stability AI Text To Audio",
category="api node/audio/Stability AI",
main_category="Audio",
description=cleandoc(cls.__doc__ or ""),
inputs=[
IO.Combo.Input(

View File

@@ -129,7 +129,6 @@ class SaveAudio(IO.ComfyNode):
search_aliases=["export flac"],
display_name="Save Audio (FLAC)",
category="audio",
main_category="Audio",
inputs=[
IO.Audio.Input("audio"),
IO.String.Input("filename_prefix", default="audio/ComfyUI"),
@@ -271,7 +270,6 @@ class LoadAudio(IO.ComfyNode):
search_aliases=["import audio", "open audio", "audio file"],
display_name="Load Audio",
category="audio",
main_category="Audio",
inputs=[
IO.Combo.Input("audio", upload=IO.UploadType.audio, options=sorted(files)),
],

View File

@@ -12,7 +12,6 @@ class Canny(io.ComfyNode):
node_id="Canny",
search_aliases=["edge detection", "outline", "contour detection", "line art"],
category="image/preprocessors",
main_category="Image Tools/Preprocessing",
inputs=[
io.Image.Input("image"),
io.Float.Input("low_threshold", default=0.4, min=0.01, max=0.99, step=0.01),

View File

@@ -618,24 +618,16 @@ class SaveGLB(IO.ComfyNode):
def define_schema(cls):
return IO.Schema(
node_id="SaveGLB",
display_name="Save 3D Model",
search_aliases=["export 3d model", "save mesh"],
category="3d",
main_category="Basic",
is_output_node=True,
inputs=[
IO.MultiType.Input(
IO.Mesh.Input("mesh"),
types=[
IO.File3DGLB,
IO.File3DGLTF,
IO.File3DOBJ,
IO.File3DFBX,
IO.File3DSTL,
IO.File3DUSDZ,
IO.File3DAny,
],
tooltip="Mesh or 3D file to save",
tooltip="Mesh or GLB file to save",
),
IO.String.Input("filename_prefix", default="mesh/ComfyUI"),
],
@@ -657,8 +649,7 @@ class SaveGLB(IO.ComfyNode):
if isinstance(mesh, Types.File3D):
# Handle File3D input - save BytesIO data to output folder
ext = mesh.format or "glb"
f = f"{filename}_{counter:05}_.{ext}"
f = f"{filename}_{counter:05}_.glb"
mesh.save_to(os.path.join(full_output_folder, f))
results.append({
"filename": f,

View File

@@ -25,7 +25,6 @@ class ImageCrop(IO.ComfyNode):
search_aliases=["trim"],
display_name="Image Crop",
category="image/transform",
main_category="Image Tools",
inputs=[
IO.Image.Input("image"),
IO.Int.Input("width", default=512, min=1, max=nodes.MAX_RESOLUTION, step=1),
@@ -538,7 +537,6 @@ class ImageRotate(IO.ComfyNode):
node_id="ImageRotate",
search_aliases=["turn", "flip orientation"],
category="image/transform",
main_category="Image Tools",
inputs=[
IO.Image.Input("image"),
IO.Combo.Input("rotation", options=["none", "90 degrees", "180 degrees", "270 degrees"]),

View File

@@ -31,7 +31,6 @@ class Load3D(IO.ComfyNode):
node_id="Load3D",
display_name="Load 3D & Animation",
category="3d",
main_category="Basic",
is_experimental=True,
inputs=[
IO.Combo.Input("model_file", options=sorted(files), upload=IO.UploadType.model),
@@ -46,7 +45,6 @@ class Load3D(IO.ComfyNode):
IO.Image.Output(display_name="normal"),
IO.Load3DCamera.Output(display_name="camera_info"),
IO.Video.Output(display_name="recording_video"),
IO.File3DAny.Output(display_name="model_3d"),
],
)
@@ -68,8 +66,7 @@ class Load3D(IO.ComfyNode):
video = InputImpl.VideoFromFile(recording_video_path)
file_3d = Types.File3D(folder_paths.get_annotated_filepath(model_file))
return IO.NodeOutput(output_image, output_mask, model_file, normal_image, image['camera_info'], video, file_3d)
return IO.NodeOutput(output_image, output_mask, model_file, normal_image, image['camera_info'], video)
process = execute # TODO: remove

View File

@@ -77,7 +77,6 @@ class Blur(io.ComfyNode):
return io.Schema(
node_id="ImageBlur",
category="image/postprocessing",
main_category="Image Tools",
inputs=[
io.Image.Input("image"),
io.Int.Input("blur_radius", default=1, min=1, max=31, step=1),

View File

@@ -73,7 +73,6 @@ class SaveVideo(io.ComfyNode):
search_aliases=["export video"],
display_name="Save Video",
category="image/video",
main_category="Basic",
description="Saves the input images to your ComfyUI output directory.",
inputs=[
io.Video.Input("video", tooltip="The video to save."),
@@ -147,7 +146,6 @@ class GetVideoComponents(io.ComfyNode):
search_aliases=["extract frames", "split video", "video to images", "demux"],
display_name="Get Video Components",
category="image/video",
main_category="Video Tools",
description="Extracts all components from a video: frames, audio, and framerate.",
inputs=[
io.Video.Input("video", tooltip="The video to extract components from."),
@@ -176,7 +174,6 @@ class LoadVideo(io.ComfyNode):
search_aliases=["import video", "open video", "video file"],
display_name="Load Video",
category="image/video",
main_category="Basic",
inputs=[
io.Combo.Input("file", options=sorted(files), upload=io.UploadType.video),
],

View File

@@ -1,3 +1,3 @@
# This file is automatically generated by the build process when version is
# updated in pyproject.toml.
__version__ = "0.12.2"
__version__ = "0.12.1"

View File

@@ -472,6 +472,18 @@ def get_save_image_path(filename_prefix: str, output_dir: str, image_width=0, im
counter = 1
return full_output_folder, filename, counter, subfolder, filename_prefix
def get_model_placeholder(folder_name: str) -> str:
"""Generate placeholder text for empty model dropdowns.
Args:
folder_name: The name of the model folder (e.g., "checkpoints", "loras").
Returns:
A user-friendly placeholder string indicating where models should be placed.
"""
return f"No models found in ComfyUI/models/{folder_name} folder..."
def get_input_subfolders() -> list[str]:
"""Returns a list of all subfolder paths in the input directory, recursively.

View File

@@ -192,10 +192,7 @@ import comfy_aimdo.control
import comfy_aimdo.torch
if enables_dynamic_vram():
if comfy.model_management.torch_version_numeric < (2, 8):
logging.warning("Unsupported Pytorch detected. DynamicVRAM support requires Pytorch version 2.8 or later. Falling back to legacy ModelPatcher. VRAM estimates may be unreliable especially on Windows")
comfy.memory_management.aimdo_allocator = None
elif comfy_aimdo.control.init_device(comfy.model_management.get_torch_device().index):
if comfy_aimdo.control.init_device(comfy.model_management.get_torch_device().index):
if args.verbose == 'DEBUG':
comfy_aimdo.control.set_log_debug()
elif args.verbose == 'CRITICAL':
@@ -211,7 +208,7 @@ if enables_dynamic_vram():
comfy.memory_management.aimdo_allocator = comfy_aimdo.torch.get_torch_allocator()
logging.info("DynamicVRAM support detected and enabled")
else:
logging.warning("No working comfy-aimdo install detected. DynamicVRAM support disabled. Falling back to legacy ModelPatcher. VRAM estimates may be unreliable especially on Windows")
logging.info("No working comfy-aimdo install detected. DynamicVRAM support disabled. Falling back to legacy ModelPatcher. VRAM estimates may be unreliable especially on Windows")
comfy.memory_management.aimdo_allocator = None

View File

@@ -69,7 +69,6 @@ class CLIPTextEncode(ComfyNodeABC):
FUNCTION = "encode"
CATEGORY = "conditioning"
MAIN_CATEGORY = "Basic"
DESCRIPTION = "Encodes a text prompt using a CLIP model into an embedding that can be used to guide the diffusion model towards generating specific images."
SEARCH_ALIASES = ["text", "prompt", "text prompt", "positive prompt", "negative prompt", "encode text", "text encoder", "encode prompt"]
@@ -589,7 +588,10 @@ class CheckpointLoaderSimple:
def INPUT_TYPES(s):
return {
"required": {
"ckpt_name": (folder_paths.get_filename_list("checkpoints"), {"tooltip": "The name of the checkpoint (model) to load."}),
"ckpt_name": (folder_paths.get_filename_list("checkpoints"), {
"tooltip": "The name of the checkpoint (model) to load.",
"placeholder": folder_paths.get_model_placeholder("checkpoints")
}),
}
}
RETURN_TYPES = ("MODEL", "CLIP", "VAE")
@@ -639,7 +641,9 @@ class DiffusersLoader:
class unCLIPCheckpointLoader:
@classmethod
def INPUT_TYPES(s):
return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), {
"placeholder": folder_paths.get_model_placeholder("checkpoints")
}),
}}
RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CLIP_VISION")
FUNCTION = "load_checkpoint"
@@ -668,8 +672,6 @@ class CLIPSetLastLayer:
return (clip,)
class LoraLoader:
MAIN_CATEGORY = "Image Generation"
def __init__(self):
self.loaded_lora = None
@@ -679,7 +681,10 @@ class LoraLoader:
"required": {
"model": ("MODEL", {"tooltip": "The diffusion model the LoRA will be applied to."}),
"clip": ("CLIP", {"tooltip": "The CLIP model the LoRA will be applied to."}),
"lora_name": (folder_paths.get_filename_list("loras"), {"tooltip": "The name of the LoRA."}),
"lora_name": (folder_paths.get_filename_list("loras"), {
"tooltip": "The name of the LoRA.",
"placeholder": folder_paths.get_model_placeholder("loras")
}),
"strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01, "tooltip": "How strongly to modify the diffusion model. This value can be negative."}),
"strength_clip": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01, "tooltip": "How strongly to modify the CLIP model. This value can be negative."}),
}
@@ -716,7 +721,9 @@ class LoraLoaderModelOnly(LoraLoader):
@classmethod
def INPUT_TYPES(s):
return {"required": { "model": ("MODEL",),
"lora_name": (folder_paths.get_filename_list("loras"), ),
"lora_name": (folder_paths.get_filename_list("loras"), {
"placeholder": folder_paths.get_model_placeholder("loras")
}),
"strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}),
}}
RETURN_TYPES = ("MODEL",)
@@ -806,7 +813,9 @@ class VAELoader:
@classmethod
def INPUT_TYPES(s):
return {"required": { "vae_name": (s.vae_list(s), )}}
return {"required": { "vae_name": (s.vae_list(s), {
"placeholder": folder_paths.get_model_placeholder("vae")
})}}
RETURN_TYPES = ("VAE",)
FUNCTION = "load_vae"
@@ -833,7 +842,9 @@ class VAELoader:
class ControlNetLoader:
@classmethod
def INPUT_TYPES(s):
return {"required": { "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}
return {"required": { "control_net_name": (folder_paths.get_filename_list("controlnet"), {
"placeholder": folder_paths.get_model_placeholder("controlnet")
})}}
RETURN_TYPES = ("CONTROL_NET",)
FUNCTION = "load_controlnet"
@@ -852,7 +863,9 @@ class DiffControlNetLoader:
@classmethod
def INPUT_TYPES(s):
return {"required": { "model": ("MODEL",),
"control_net_name": (folder_paths.get_filename_list("controlnet"), )}}
"control_net_name": (folder_paths.get_filename_list("controlnet"), {
"placeholder": folder_paths.get_model_placeholder("controlnet")
})}}
RETURN_TYPES = ("CONTROL_NET",)
FUNCTION = "load_controlnet"
@@ -950,7 +963,9 @@ class ControlNetApplyAdvanced:
class UNETLoader:
@classmethod
def INPUT_TYPES(s):
return {"required": { "unet_name": (folder_paths.get_filename_list("diffusion_models"), ),
return {"required": { "unet_name": (folder_paths.get_filename_list("diffusion_models"), {
"placeholder": folder_paths.get_model_placeholder("diffusion_models")
}),
"weight_dtype": (["default", "fp8_e4m3fn", "fp8_e4m3fn_fast", "fp8_e5m2"],)
}}
RETURN_TYPES = ("MODEL",)
@@ -975,7 +990,9 @@ class UNETLoader:
class CLIPLoader:
@classmethod
def INPUT_TYPES(s):
return {"required": { "clip_name": (folder_paths.get_filename_list("text_encoders"), ),
return {"required": { "clip_name": (folder_paths.get_filename_list("text_encoders"), {
"placeholder": folder_paths.get_model_placeholder("text_encoders")
}),
"type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2", "wan", "hidream", "chroma", "ace", "omnigen2", "qwen_image", "hunyuan_image", "flux2", "ovis"], ),
},
"optional": {
@@ -1002,8 +1019,12 @@ class CLIPLoader:
class DualCLIPLoader:
@classmethod
def INPUT_TYPES(s):
return {"required": { "clip_name1": (folder_paths.get_filename_list("text_encoders"), ),
"clip_name2": (folder_paths.get_filename_list("text_encoders"), ),
return {"required": { "clip_name1": (folder_paths.get_filename_list("text_encoders"), {
"placeholder": folder_paths.get_model_placeholder("text_encoders")
}),
"clip_name2": (folder_paths.get_filename_list("text_encoders"), {
"placeholder": folder_paths.get_model_placeholder("text_encoders")
}),
"type": (["sdxl", "sd3", "flux", "hunyuan_video", "hidream", "hunyuan_image", "hunyuan_video_15", "kandinsky5", "kandinsky5_image", "ltxv", "newbie", "ace"], ),
},
"optional": {
@@ -1032,7 +1053,9 @@ class DualCLIPLoader:
class CLIPVisionLoader:
@classmethod
def INPUT_TYPES(s):
return {"required": { "clip_name": (folder_paths.get_filename_list("clip_vision"), ),
return {"required": { "clip_name": (folder_paths.get_filename_list("clip_vision"), {
"placeholder": folder_paths.get_model_placeholder("clip_vision")
}),
}}
RETURN_TYPES = ("CLIP_VISION",)
FUNCTION = "load_clip"
@@ -1068,7 +1091,9 @@ class CLIPVisionEncode:
class StyleModelLoader:
@classmethod
def INPUT_TYPES(s):
return {"required": { "style_model_name": (folder_paths.get_filename_list("style_models"), )}}
return {"required": { "style_model_name": (folder_paths.get_filename_list("style_models"), {
"placeholder": folder_paths.get_model_placeholder("style_models")
})}}
RETURN_TYPES = ("STYLE_MODEL",)
FUNCTION = "load_style_model"
@@ -1167,7 +1192,9 @@ class unCLIPConditioning:
class GLIGENLoader:
@classmethod
def INPUT_TYPES(s):
return {"required": { "gligen_name": (folder_paths.get_filename_list("gligen"), )}}
return {"required": { "gligen_name": (folder_paths.get_filename_list("gligen"), {
"placeholder": folder_paths.get_model_placeholder("gligen")
})}}
RETURN_TYPES = ("GLIGEN",)
FUNCTION = "load_gligen"
@@ -1651,7 +1678,6 @@ class SaveImage:
OUTPUT_NODE = True
CATEGORY = "image"
MAIN_CATEGORY = "Basic"
DESCRIPTION = "Saves the input images to your ComfyUI output directory."
SEARCH_ALIASES = ["save", "save image", "export image", "output image", "write image", "download"]
@@ -1710,7 +1736,6 @@ class LoadImage:
}
CATEGORY = "image"
MAIN_CATEGORY = "Basic"
SEARCH_ALIASES = ["load image", "open image", "import image", "image input", "upload image", "read image", "image loader"]
RETURN_TYPES = ("IMAGE", "MASK")
@@ -1868,7 +1893,6 @@ class ImageScale:
FUNCTION = "upscale"
CATEGORY = "image/upscaling"
MAIN_CATEGORY = "Image Tools"
SEARCH_ALIASES = ["resize", "resize image", "scale image", "image resize", "zoom", "zoom in", "change size"]
def upscale(self, image, upscale_method, width, height, crop):
@@ -1908,7 +1932,6 @@ class ImageScaleBy:
class ImageInvert:
SEARCH_ALIASES = ["reverse colors"]
MAIN_CATEGORY = "Image Tools"
@classmethod
def INPUT_TYPES(s):
@@ -1925,7 +1948,6 @@ class ImageInvert:
class ImageBatch:
SEARCH_ALIASES = ["combine images", "merge images", "stack images"]
MAIN_CATEGORY = "Image Tools"
@classmethod
def INPUT_TYPES(s):

View File

@@ -1,6 +1,6 @@
[project]
name = "ComfyUI"
version = "0.12.2"
version = "0.12.1"
readme = "README.md"
license = { file = "LICENSE" }
requires-python = ">=3.10"

View File

@@ -687,10 +687,6 @@ class PromptServer():
info['api_node'] = obj_class.API_NODE
info['search_aliases'] = getattr(obj_class, 'SEARCH_ALIASES', [])
if hasattr(obj_class, 'MAIN_CATEGORY'):
info['main_category'] = obj_class.MAIN_CATEGORY
return info
@routes.get("/object_info")