feat: add model type mappings for cloud custom nodes (#9392)

## Summary

Adds model-to-node backlinks in `modelToNodeStore.ts` for all
cloud-deployed custom node models that were missing mappings. Without
these, clicking "Use" on a model in the model browser throws a
`No node provider registered for category` error.
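
Each mapping is registered through the store's `quickRegister` helper. For orientation, the call shape (both calls below appear verbatim in this diff; the parameter semantics are inferred from the surrounding comments):

```typescript
// quickRegister(modelDirectory, nodeType, widgetName): models stored under
// modelDirectory are offered nodeType as their loader node, with the model
// filled into the widget named widgetName.
quickRegister('SEEDVR2', 'SeedVR2LoadDiTModel', 'model')
// An empty widget name means the node auto-loads its models without a selector.
quickRegister('liveportrait', 'DownloadAndLoadLivePortraitModels', '')
```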

**17 new backlinks added** covering ~340 models across deployed node
packs:

| Category | Directories | Node | Models |
|----------|-------------|------|--------|
| Vision-Language | LLM/Qwen-VL/* (12 specific paths) | AILab_QwenVL / AILab_QwenVL_PromptEnhancer | ~186 |
| TTS | qwen-tts/* | FB_Qwen3TTSVoiceClone | ~68 |
| Video | SEEDVR2, liveportrait/*, mimicmotion, rife | various | ~33 |
| Depth | depthanything3 | DownloadAndLoadDepthAnythingV3Model | 7 |
| Segmentation | face_parsing, sam3 | various | 4 |
| Diffusers | diffusers/* (Kolors) | DownloadAndLoadKolorsModel | 16 |
| Other | clip/*, dwpose, onnx, detection, UltraShape, sharp | various | ~26 |

**Key fix:** Replaced the top-level `LLM` fallback with specific
`LLM/Qwen-VL/*` paths. The old fallback incorrectly mapped `LLM/llava-*`
models to `AILab_QwenVL`.
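
To make the lookup concrete, here is a minimal sketch of a hierarchical path fallback (illustrative only; `resolveProvider` and the plain `Map` registry are assumptions, not the store's actual implementation):

```typescript
// Walk a model's directory path from most to least specific and return the
// first registered provider. With the old top-level 'LLM' entry, a
// hypothetical 'LLM/llava-example' model fell through to 'LLM' and was
// wrongly mapped to AILab_QwenVL; with only 'LLM/Qwen-VL/*' keys registered,
// it now resolves to nothing instead of the wrong node.
function resolveProvider(
  registry: Map<string, string>,
  modelDir: string
): string | undefined {
  const segments = modelDir.split('/')
  for (let i = segments.length; i > 0; i--) {
    const provider = registry.get(segments.slice(0, i).join('/'))
    if (provider) return provider
  }
  return undefined
}
```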

Models without deployed node packs (llava/HyVideo, latentsync, sam3d,
sam3dbody, inpaint, vae_approx) are excluded — those are being removed
from `supported_models.json` in Comfy-Org/cloud#2652.

## Test plan
- [ ] Verify "Use" button works for QwenVL models in model browser
- [ ] Verify "Use" button works for TTS, video, depth, segmentation
models
- [ ] Verify no `No node provider registered for category` errors for
deployed models

🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
Co-authored-by: GitHub Action <action@github.com>
Author: Deep Mehta
Date: 2026-03-04 16:51:42 -08:00 (committed by GitHub)
Commit: 0f8473db35 (parent 120524faa1)
3 changed files with 118 additions and 5 deletions


@@ -228,9 +228,9 @@ describe('assetMetadataUtils', () => {
       expected: 'checkpoints'
     },
     {
-      name: 'extracts last segment from path-style tags',
-      tags: ['models', 'models/loras'],
-      expected: 'loras'
+      name: 'returns full path for path-style tags',
+      tags: ['models', 'diffusers/Kolors/text_encoder'],
+      expected: 'diffusers/Kolors/text_encoder'
     },
     {
       name: 'returns null when only models tag',


@@ -138,8 +138,7 @@ export function getSourceName(url: string): string {
  */
 export function getAssetModelType(asset: AssetItem): string | null {
   const typeTag = asset.tags?.find((tag) => tag && tag !== 'models')
-  if (!typeTag) return null
-  return typeTag.includes('/') ? (typeTag.split('/').pop() ?? null) : typeTag
+  return typeTag ?? null
 }
 /**

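The behavioral change above, illustrated with the tag from the updated test (a usage sketch; the object literal is trimmed to the only field the function reads):

```typescript
// Before this change, path-style tags were cut to their last segment
// ('text_encoder'); now the full path survives, so nested directories can
// match the path-specific registrations added in modelToNodeStore.ts below.
const asset = { tags: ['models', 'diffusers/Kolors/text_encoder'] } as AssetItem
getAssetModelType(asset) // => 'diffusers/Kolors/text_encoder'
```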

@@ -232,6 +232,120 @@ export const useModelToNodeStore = defineStore('modelToNode', () => {
     // Empty key means the node auto-loads models without a widget selector
     quickRegister('FlashVSR', 'FlashVSRNode', '')
     quickRegister('FlashVSR-v1.1', 'FlashVSRNode', '')
+    // SEEDVR2 video upscaling (comfyui-seedvr2)
+    quickRegister('SEEDVR2', 'SeedVR2LoadDiTModel', 'model')
+    // Qwen VL vision-language models (comfyui-qwen-vl)
+    // Register each specific path to avoid LLM fallback catching unrelated models
+    // (e.g., LLM/llava-* should NOT map to AILab_QwenVL)
+    quickRegister(
+      'LLM/Qwen-VL/Qwen2.5-VL-3B-Instruct',
+      'AILab_QwenVL',
+      'model_name'
+    )
+    quickRegister(
+      'LLM/Qwen-VL/Qwen2.5-VL-7B-Instruct',
+      'AILab_QwenVL',
+      'model_name'
+    )
+    quickRegister(
+      'LLM/Qwen-VL/Qwen3-VL-2B-Instruct',
+      'AILab_QwenVL',
+      'model_name'
+    )
+    quickRegister(
+      'LLM/Qwen-VL/Qwen3-VL-2B-Thinking',
+      'AILab_QwenVL',
+      'model_name'
+    )
+    quickRegister(
+      'LLM/Qwen-VL/Qwen3-VL-4B-Instruct',
+      'AILab_QwenVL',
+      'model_name'
+    )
+    quickRegister(
+      'LLM/Qwen-VL/Qwen3-VL-4B-Thinking',
+      'AILab_QwenVL',
+      'model_name'
+    )
+    quickRegister(
+      'LLM/Qwen-VL/Qwen3-VL-8B-Instruct',
+      'AILab_QwenVL',
+      'model_name'
+    )
+    quickRegister(
+      'LLM/Qwen-VL/Qwen3-VL-8B-Thinking',
+      'AILab_QwenVL',
+      'model_name'
+    )
+    quickRegister(
+      'LLM/Qwen-VL/Qwen3-VL-32B-Instruct',
+      'AILab_QwenVL',
+      'model_name'
+    )
+    quickRegister(
+      'LLM/Qwen-VL/Qwen3-VL-32B-Thinking',
+      'AILab_QwenVL',
+      'model_name'
+    )
+    quickRegister(
+      'LLM/Qwen-VL/Qwen3-0.6B',
+      'AILab_QwenVL_PromptEnhancer',
+      'model_name'
+    )
+    quickRegister(
+      'LLM/Qwen-VL/Qwen3-4B-Instruct-2507',
+      'AILab_QwenVL_PromptEnhancer',
+      'model_name'
+    )
+    quickRegister('LLM/checkpoints', 'LoadChatGLM3', 'chatglm3_checkpoint')
+    // Qwen3 TTS speech models (ComfyUI-FunBox)
+    // Top-level 'qwen-tts' catches all qwen-tts/* subdirs via hierarchical fallback
+    quickRegister('qwen-tts', 'FB_Qwen3TTSVoiceClone', 'model_choice')
+    // DepthAnything V3 models (comfyui-depthanythingv2)
+    quickRegister(
+      'depthanything3',
+      'DownloadAndLoadDepthAnythingV3Model',
+      'model'
+    )
+    // LivePortrait face animation models (comfyui-liveportrait)
+    quickRegister('liveportrait', 'DownloadAndLoadLivePortraitModels', '')
+    // MimicMotion video generation models (ComfyUI-MimicMotionWrapper)
+    quickRegister('mimicmotion', 'DownloadAndLoadMimicMotionModel', 'model')
+    quickRegister('dwpose', 'MimicMotionGetPoses', '')
+    // Face parsing segmentation models (comfyui_face_parsing)
+    quickRegister('face_parsing', 'FaceParsingModelLoader(FaceParsing)', '')
+    // Kolors image generation models (ComfyUI-KolorsWrapper)
+    // Top-level 'diffusers' catches diffusers/Kolors/* subdirs
+    quickRegister('diffusers', 'DownloadAndLoadKolorsModel', 'model')
+    // CLIP models for HunyuanVideo (clip/clip-vit-large-patch14 subdir)
+    quickRegister('clip', 'CLIPVisionLoader', 'clip_name')
+    // RIFE video frame interpolation (ComfyUI-RIFE)
+    quickRegister('rife', 'RIFE VFI', 'ckpt_name')
+    // SAM3 3D segmentation models (comfyui-sam3)
+    quickRegister('sam3', 'LoadSAM3Model', 'model_path')
+    // UltraShape 3D model generation
+    quickRegister('UltraShape', 'UltraShapeLoadModel', 'checkpoint')
+    // SHaRP depth estimation
+    quickRegister('sharp', 'LoadSharpModel', 'checkpoint_path')
+    // ONNX upscale models (used by OnnxDetectionModelLoader and upscale nodes)
+    quickRegister('onnx', 'UpscaleModelLoader', 'model_name')
+    // Detection models (vitpose, yolo)
+    quickRegister('detection', 'OnnxDetectionModelLoader', 'yolo_model')
   }
   return {