Update template translation (#4396)

Commit: 80e5cf1b9d (parent: 7cf5d1e86b)
Author: ComfyUI Wiki
Date: 2025-07-11 14:10:42 +08:00
Committed by: GitHub
Co-authored-by: github-actions <github-actions@github.com>

21 changed files with 1707 additions and 875 deletions


@@ -547,182 +547,194 @@
"Audio": "Audio",
"Image API": "Image API",
"Video API": "Video API",
"LLM API": "LLM API",
"All": "All Templates"
},
"templateDescription": {
"Basics": {
"default": "Generate images from text descriptions.",
"default": "Generate images from text prompts.",
"image2image": "Transform existing images using text prompts.",
"lora": "Apply LoRA models for specialized styles or subjects.",
"lora": "Generate images with LoRA models for specialized styles or subjects.",
"lora_multiple": "Generate images by combining multiple LoRA models.",
"inpaint_example": "Edit specific parts of images seamlessly.",
"inpain_model_outpainting": "Extend images beyond their original boundaries.",
"embedding_example": "Use textual inversion for consistent styles.",
"gligen_textbox_example": "Specify the location and size of objects.",
"lora_multiple": "Combine multiple LoRA models for unique results."
"inpaint_model_outpainting": "Extend images beyond their original boundaries.",
"embedding_example": "Generate images using textual inversion for consistent styles.",
"gligen_textbox_example": "Generate images with precise object placement using text boxes."
},
"Flux": {
"flux_dev_checkpoint_example": "Create images using Flux development models.",
"flux_schnell": "Generate images quickly with Flux Schnell.",
"flux_fill_inpaint_example": "Fill in missing parts of images.",
"flux_fill_outpaint_example": "Extend images using Flux outpainting.",
"flux_canny_model_example": "Generate images from edge detection.",
"flux_depth_lora_example": "Create images with depth-aware LoRA.",
"flux_redux_model_example": "Transfer style from a reference image to guide image generation with Flux."
"flux_kontext_dev_basic": "Edit image using Flux Kontext with full node visibility, perfect for learning the workflow.",
"flux_kontext_dev_grouped": "Streamlined version of Flux Kontext with grouped nodes for cleaner workspace.",
"flux_dev_checkpoint_example": "Generate images using Flux Dev fp8 quantized version. Suitable for devices with limited VRAM, requires only one model file, but image quality is slightly lower than the full version.",
"flux_schnell": "Quickly generate images with Flux Schnell fp8 quantized version. Ideal for low-end hardware, requires only 4 steps to generate images.",
"flux_dev_full_text_to_image": "Generate high-quality images with Flux Dev full version. Requires larger VRAM and multiple model files, but provides the best prompt following capability and image quality.",
"flux_schnell_full_text_to_image": "Generate images quickly with Flux Schnell full version. Uses Apache2.0 license, requires only 4 steps to generate images while maintaining good image quality.",
"flux_fill_inpaint_example": "Fill missing parts of images using Flux inpainting.",
"flux_fill_outpaint_example": "Extend images beyond boundaries using Flux outpainting.",
"flux_canny_model_example": "Generate images guided by edge detection using Flux Canny.",
"flux_depth_lora_example": "Generate images guided by depth information using Flux LoRA.",
"flux_redux_model_example": "Generate images by transferring style from reference images using Flux Redux."
},
"Image": {
"hidream_i1_dev": "Generate images with HiDream I1 Dev.",
"hidream_i1_fast": "Generate images quickly with HiDream I1.",
"hidream_i1_full": "Generate images with HiDream I1.",
"hidream_e1_full": "Edit images with HiDream E1.",
"sd3_5_simple_example": "Generate images with SD 3.5.",
"sd3_5_large_canny_controlnet_example": "Use edge detection to guide image generation with SD 3.5.",
"sd3_5_large_depth": "Create depth-aware images with SD 3.5.",
"sd3_5_large_blur": "Generate images from blurred reference images with SD 3.5.",
"sdxl_simple_example": "Create high-quality images with SDXL.",
"sdxl_refiner_prompt_example": "Enhance SDXL outputs with refiners.",
"sdxl_revision_text_prompts": "Transfer concepts from reference images to guide image generation with SDXL.",
"sdxl_revision_zero_positive": "Add text prompts alongside reference images to guide image generation with SDXL.",
"sdxlturbo_example": "Generate images in a single step with SDXL Turbo."
"image_omnigen2_t2i": "Generate high-quality images from text prompts using OmniGen2's unified 7B multimodal model with dual-path architecture.",
"image_omnigen2_image_edit": "Edit images with natural language instructions using OmniGen2's advanced image editing capabilities and text rendering support.",
"image_cosmos_predict2_2B_t2i": "Generate images with Cosmos-Predict2 2B T2I, delivering physically accurate, high-fidelity, and detail-rich image generation.",
"image_chroma_text_to_image": "Chroma is modified from flux and has some changes in the architecture.",
"hidream_i1_dev": "Generate images with HiDream I1 Dev - Balanced version with 28 inference steps, suitable for medium-range hardware.",
"hidream_i1_fast": "Generate images quickly with HiDream I1 Fast - Lightweight version with 16 inference steps, ideal for rapid previews on lower-end hardware.",
"hidream_i1_full": "Generate images with HiDream I1 Full - Complete version with 50 inference steps for highest quality output.",
"hidream_e1_full": "Edit images with HiDream E1 - Professional natural language image editing model.",
"sd3_5_simple_example": "Generate images using SD 3.5.",
"sd3_5_large_canny_controlnet_example": "Generate images guided by edge detection using SD 3.5 Canny ControlNet.",
"sd3_5_large_depth": "Generate images guided by depth information using SD 3.5.",
"sd3_5_large_blur": "Generate images guided by blurred reference images using SD 3.5.",
"sdxl_simple_example": "Generate high-quality images using SDXL.",
"sdxl_refiner_prompt_example": "Enhance SDXL images using refiner models.",
"sdxl_revision_text_prompts": "Generate images by transferring concepts from reference images using SDXL Revision.",
"sdxl_revision_zero_positive": "Generate images using both text prompts and reference images with SDXL Revision.",
"sdxlturbo_example": "Generate images in a single step using SDXL Turbo.",
"image_lotus_depth_v1_1": "Run Lotus Depth in ComfyUI for zero-shot, efficient monocular depth estimation with high detail retention."
},
"Video": {
"text_to_video_wan": "Quickly Generate videos from text descriptions.",
"image_to_video_wan": "Quickly Generate videos from images.",
"wan2_1_fun_inp": "Create videos from start and end frames.",
"wan2_1_fun_control": "Guide video generation with pose, depth, edge controls and more.",
"wan2_1_flf2v_720_f16": "Generate video through controlling the first and last frames.",
"ltxv_text_to_video": "Generate videos from text descriptions.",
"ltxv_image_to_video": "Convert still images into videos.",
"mochi_text_to_video_example": "Create videos with Mochi model.",
"hunyuan_video_text_to_video": "Generate videos using Hunyuan model.",
"image_to_video": "Transform images into animated videos.",
"txt_to_image_to_video": "Generate images from text and then convert them into videos."
"video_cosmos_predict2_2B_video2world_480p_16fps": "Generate videos with Cosmos-Predict2 2B Video2World, generating physically accurate, high-fidelity, and consistent video simulations.",
"video_wan_vace_14B_t2v": "Transform text descriptions into high-quality videos. Supports both 480p and 720p with VACE-14B model.",
"video_wan_vace_14B_ref2v": "Create videos that match the style and content of a reference image. Perfect for style-consistent video generation.",
"video_wan_vace_14B_v2v": "Generate videos by controlling input videos and reference images using Wan VACE.",
"video_wan_vace_outpainting": "Generate extended videos by expanding video size using Wan VACE outpainting.",
"video_wan_vace_flf2v": "Generate smooth video transitions by defining start and end frames. Supports custom keyframe sequences.",
"video_wan_vace_inpainting": "Edit specific regions in videos while preserving surrounding content. Great for object removal or replacement.",
"video_wan2_1_fun_camera_v1_1_1_3B": "Generate dynamic videos with cinematic camera movements using Wan 2.1 Fun Camera 1.3B model.",
"video_wan2_1_fun_camera_v1_1_14B": "Generate high-quality videos with advanced camera control using the full 14B model",
"text_to_video_wan": "Generate videos from text prompts using Wan 2.1.",
"image_to_video_wan": "Generate videos from images using Wan 2.1.",
"wan2_1_fun_inp": "Generate videos from start and end frames using Wan 2.1 inpainting.",
"wan2_1_fun_control": "Generate videos guided by pose, depth, and edge controls using Wan 2.1 ControlNet.",
"wan2_1_flf2v_720_f16": "Generate videos by controlling first and last frames using Wan 2.1 FLF2V.",
"ltxv_text_to_video": "Generate videos from text prompts.",
"ltxv_image_to_video": "Generate videos from still images.",
"mochi_text_to_video_example": "Generate videos from text prompts using Mochi model.",
"hunyuan_video_text_to_video": "Generate videos from text prompts using Hunyuan model.",
"image_to_video": "Generate videos from still images.",
"txt_to_image_to_video": "Generate videos by first creating images from text prompts."
},
"Image API": {
"api_openai_image_1_t2i": "Use GPT Image 1 API to generate images from text descriptions.",
"api_openai_image_1_i2i": "Use GPT Image 1 API to generate images from images.",
"api_openai_image_1_inpaint": "Use GPT Image 1 API to inpaint images.",
"api_openai_image_1_multi_inputs": "Use GPT Image 1 API with multiple inputs to generate images.",
"api-openai-dall-e-2-t2i": "Use Dall-E 2 API to generate images from text descriptions.",
"api-openai-dall-e-2-inpaint": "Use Dall-E 2 API to inpaint images.",
"api-openai-dall-e-3-t2i": "Use Dall-E 3 API to generate images from text descriptions.",
"api_bfl_flux_pro_t2i": "Create images with FLUX.1 [pro]'s excellent prompt following, visual quality, image detail and output diversity.",
"api_stability_sd3_t2i": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.",
"api_ideogram_v3_t2i": "Generate images with high-quality image-prompt alignment, photorealism, and text rendering. Create professional-quality logos, promotional posters, landing page concepts, product photography, and more. Effortlessly craft sophisticated spatial compositions with intricate backgrounds, precise and nuanced lighting and colors, and lifelike environmental detail.",
"api_bfl_flux_1_kontext_multiple_images_input": "Input multiple images and edit them with Flux.1 Kontext.",
"api_bfl_flux_1_kontext_pro_image": "Edit images with Flux.1 Kontext pro image.",
"api_bfl_flux_1_kontext_max_image": "Edit images with Flux.1 Kontext max image.",
"api_bfl_flux_pro_t2i": "Generate images with excellent prompt following and visual quality using FLUX.1 Pro.",
"api_luma_photon_i2i": "Guide image generation using a combination of images and prompt.",
"api_luma_photon_style_ref": "Apply and blend style references with exact control. Luma Photon captures the essence of each reference image, letting you combine distinct visual elements while maintaining professional quality.",
"api_recraft_image_gen_with_color_control": "Create a custom palette to reuse for multiple images or hand-pick colors for each photo. Match your brand's color palette and craft visuals that are distinctly yours.",
"api_luma_photon_style_ref": "Generate images by blending style references with precise control using Luma Photon.",
"api_recraft_image_gen_with_color_control": "Generate images with custom color palettes and brand-specific visuals using Recraft.",
"api_recraft_image_gen_with_style_control": "Control style with visual examples, align positioning, and fine-tune objects. Store and share styles for perfect brand consistency.",
"api_recraft_vector_gen": "Go from a text prompt to vector image with Recraft's AI vector generator. Produce the best-quality vector art for logos, posters, icon sets, ads, banners and mockups. Perfect your designs with sharp, high-quality SVG files. Create branded vector illustrations for your app or website in seconds."
"api_recraft_vector_gen": "Generate high-quality vector images from text prompts using Recraft's AI vector generator.",
"api_runway_text_to_image": "Generate high-quality images from text prompts using Runway's AI model.",
"api_runway_reference_to_image": "Generate new images based on reference styles and compositions with Runway's AI.",
"api_stability_ai_stable_image_ultra_t2i": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.",
"api_stability_ai_i2i": "Transform images with high-quality generation using Stability AI, perfect for professional editing and style transfer.",
"api_stability_ai_sd3_5_t2i": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.",
"api_stability_ai_sd3_5_i2i": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.",
"api_ideogram_v3_t2i": "Generate professional-quality images with excellent prompt alignment, photorealism, and text rendering using Ideogram V3.",
"api_openai_image_1_t2i": "Generate images from text prompts using OpenAI GPT Image 1 API.",
"api_openai_image_1_i2i": "Generate images from input images using OpenAI GPT Image 1 API.",
"api_openai_image_1_inpaint": "Edit images using inpainting with OpenAI GPT Image 1 API.",
"api_openai_image_1_multi_inputs": "Generate images from multiple inputs using OpenAI GPT Image 1 API.",
"api_openai_dall_e_2_t2i": "Generate images from text prompts using OpenAI Dall-E 2 API.",
"api_openai_dall_e_2_inpaint": "Edit images using inpainting with OpenAI Dall-E 2 API.",
"api_openai_dall_e_3_t2i": "Generate images from text prompts using OpenAI Dall-E 3 API."
},
"Video API": {
"api_moonvalley_text_to_video": "Generate cinematic, 1080p videos from text prompts through a model trained exclusively on licensed data.",
"api_moonvalley_image_to_video": "Generate cinematic, 1080p videos with an image through a model trained exclusively on licensed data.",
"api_kling_i2v": "Generate videos with excellent prompt adherence for actions, expressions, and camera movements using Kling.",
"api_kling_effects": "Generate dynamic videos by applying visual effects to images using Kling.",
"api_kling_flf": "Generate videos through controlling the first and last frames.",
"api_luma_i2v": "Take static images and instantly create magical high quality animations.",
"api_kling_i2v": "Create videos with great prompt adherence for actions, expressions, and camera movements. Now supporting complex prompts with sequential actions for you to be the director of your scene.",
"api_veo2_i2v": "Use Google Veo2 API to generate videos from images.",
"api_hailuo_minimax_i2v": "Create refined videos from images and text, including CGI integration and trendy photo effects like viral AI hugging. Choose from a variety of video styles and themes to match your creative vision.",
"api_pika_scene": "Use multiple images as ingredients and generate videos that incorporate all of them.",
"api_pixverse_template_i2v": "Transforms static images into dynamic videos with motion and effects.",
"api_pixverse_t2v": "Generate videos with accurate prompt interpretation and stunning video dynamics."
"api_luma_t2v": "High-quality videos can be generated using simple prompts.",
"api_hailuo_minimax_t2v": "Generate high-quality videos directly from text prompts. Explore MiniMax's advanced AI capabilities to create diverse visual narratives with professional CGI effects and stylistic elements to bring your descriptions to life.",
"api_hailuo_minimax_i2v": "Generate refined videos from images and text with CGI integration using MiniMax.",
"api_pixverse_i2v": "Generate dynamic videos from static images with motion and effects using PixVerse.",
"api_pixverse_template_i2v": "Generate dynamic videos from static images with motion and effects using PixVerse.",
"api_pixverse_t2v": "Generate videos with accurate prompt interpretation and stunning video dynamics.",
"api_runway_gen3a_turbo_image_to_video": "Generate cinematic videos from static images using Runway Gen3a Turbo.",
"api_runway_gen4_turo_image_to_video": "Generate dynamic videos from images using Runway Gen4 Turbo.",
"api_runway_first_last_frame": "Generate smooth video transitions between two keyframes with Runway's precision.",
"api_pika_i2v": "Generate smooth animated videos from single static images using Pika AI.",
"api_pika_scene": "Generate videos that incorporate multiple input images using Pika Scenes.",
"api_veo2_i2v": "Generate videos from images using Google Veo2 API."
},
"3D API": {
"api_rodin_image_to_model": "Generate detailed 3D models from single photos using Rodin AI.",
"api_rodin_multiview_to_model": "Sculpt comprehensive 3D models using Rodin's multi-angle reconstruction.",
"api_tripo_text_to_model": "Craft 3D objects from descriptions with Tripo's text-driven modeling.",
"api_tripo_image_to_model": "Generate professional 3D assets from 2D images using Tripo engine.",
"api_tripo_multiview_to_model": "Build 3D models from multiple angles with Tripo's advanced scanner."
},
"LLM API": {
"api_openai_chat": "Engage with OpenAI's advanced language models for intelligent conversations.",
"api_google_gemini": "Experience Google's multimodal AI with Gemini's reasoning capabilities."
},
"Upscaling": {
"hiresfix_latent_workflow": "Enhance image quality in latent space.",
"esrgan_example": "Use upscale models to enhance image quality.",
"hiresfix_esrgan_workflow": "Use upscale models during intermediate steps.",
"latent_upscale_different_prompt_model": "Upscale and change prompt across passes."
"hiresfix_latent_workflow": "Upscale images by enhancing quality in latent space.",
"esrgan_example": "Upscale images using ESRGAN models to enhance quality.",
"hiresfix_esrgan_workflow": "Upscale images using ESRGAN models during intermediate generation steps.",
"latent_upscale_different_prompt_model": "Upscale images while changing prompts across generation passes."
},
"ControlNet": {
"controlnet_example": "Control image generation with reference images.",
"2_pass_pose_worship": "Generate images from pose references.",
"depth_controlnet": "Create images with depth-aware generation.",
"depth_t2i_adapter": "Quickly generate depth-aware images with a T2I adapter.",
"mixing_controlnets": "Combine multiple ControlNet models together."
"controlnet_example": "Generate images guided by scribble reference images using ControlNet.",
"2_pass_pose_worship": "Generate images guided by pose references using ControlNet.",
"depth_controlnet": "Generate images guided by depth information using ControlNet.",
"depth_t2i_adapter": "Generate images guided by depth information using T2I adapter.",
"mixing_controlnets": "Generate images by combining multiple ControlNet models."
},
"Area Composition": {
"area_composition": "Control image composition with areas.",
"area_composition_reversed": "Reverse area composition workflow.",
"area_composition_square_area_for_subject": "Create consistent subject placement."
"area_composition": "Generate images by controlling composition with defined areas.",
"area_composition_square_area_for_subject": "Generate images with consistent subject placement using area composition."
},
"3D": {
"hunyuan3d-non-multiview-train": "Use Hunyuan3D 2.0 to generate models from a single view.",
"hunyuan-3d-multiview-elf": " Use Hunyuan3D 2mv to generate models from multiple views.",
"hunyuan-3d-turbo": "Use Hunyuan3D 2mv turbo to generate models from multiple views.",
"stable_zero123_example": "Generate 3D views from single images."
"3d_hunyuan3d_image_to_model": "Generate 3D models from single images using Hunyuan3D 2.0.",
"3d_hunyuan3d_multiview_to_model": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV.",
"3d_hunyuan3d_multiview_to_model_turbo": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV Turbo.",
"stable_zero123_example": "Generate 3D views from single images using Stable Zero123."
},
"Audio": {
"stable_audio_example": "Generate audio from text descriptions."
"audio_stable_audio_example": "Generate audio from text prompts using Stable Audio.",
"audio_ace_step_1_t2a_instrumentals": "Generate instrumental music from text prompts using ACE-Step v1.",
"audio_ace_step_1_t2a_song": "Generate songs with vocals from text prompts using ACE-Step v1, supporting multilingual and style customization.",
"audio_ace_step_1_m2m_editing": "Edit existing songs to change style and lyrics using ACE-Step v1 M2M."
}
},
"template": {
"Flux": {
"flux_dev_checkpoint_example": "Flux Dev",
"flux_schnell": "Flux Schnell",
"flux_fill_inpaint_example": "Flux Inpaint",
"flux_fill_outpaint_example": "Flux Outpaint",
"flux_canny_model_example": "Flux Canny Model",
"flux_redux_model_example": "Flux Redux Model",
"flux_depth_lora_example": "Flux Depth Lora"
},
"Basics": {
"default": "Image Generation",
"image2image": "Image to Image",
"embedding_example": "Embedding",
"gligen_textbox_example": "Gligen Textbox",
"lora": "Lora",
"lora_multiple": "Lora Multiple",
"lora": "LoRA",
"lora_multiple": "LoRA Multiple",
"inpaint_example": "Inpaint",
"inpain_model_outpainting": "Outpaint"
"inpaint_model_outpainting": "Outpaint",
"embedding_example": "Embedding",
"gligen_textbox_example": "Gligen Textbox"
},
"ControlNet": {
"controlnet_example": "Scribble ControlNet",
"2_pass_pose_worship": "Pose ControlNet 2 Pass",
"depth_controlnet": "Depth ControlNet",
"depth_t2i_adapter": "Depth T2I Adapter",
"mixing_controlnets": "Mixing ControlNets"
},
"Upscaling": {
"esrgan_example": "ESRGAN",
"hiresfix_latent_workflow": "Upscale",
"hiresfix_esrgan_workflow": "HiresFix ESRGAN Workflow",
"latent_upscale_different_prompt_model": "Latent Upscale Different Prompt Model"
},
"Video": {
"text_to_video_wan": "Wan 2.1 Text to Video",
"image_to_video_wan": "Wan 2.1 Image to Video",
"image_to_video": "SVD Image to Video",
"txt_to_image_to_video": "SVD Text to Image to Video",
"ltxv_image_to_video": "LTXV Image to Video",
"ltxv_text_to_video": "LTXV Text to Video",
"mochi_text_to_video_example": "Mochi Text to Video",
"hunyuan_video_text_to_video": "Hunyuan Video Text to Video",
"wan2_1_fun_inp": "Wan 2.1 Inpainting",
"wan2_1_fun_control": "Wan 2.1 ControlNet"
},
"Image API": {
"api_openai_image_1_t2i": "OpenAI Image-1 Text to Image",
"api_openai_image_1_i2i": "OpenAI Image-1 Image to Image",
"api_openai_image_1_inpaint": "OpenAI Image-1 Inpaint",
"api_openai_image_1_multi_inputs": "OpenAI Image-1 Multi Inputs",
"api-openai-dall-e-2-t2i": "Dall-E 2 Text to Image",
"api-openai-dall-e-2-inpaint": "Dall-E 2 Inpaint",
"api-openai-dall-e-3-t2i": "Dall-E 3 Text to Image",
"api_bfl_flux_pro_t2i": "BFL Flux 1.1[pro] Ultra Text to Image",
"api_stability_sd3_t2i": "Stability AI Stable Image Ultra Text to Image",
"api_ideogram_v3_t2i": "Ideogram V3 Text to Image",
"api_luma_photon_i2i": "Luma Photon Image to Image",
"api_luma_photon_style_ref": "Luma Photon Style Reference",
"api_recraft_image_gen_with_color_control": "Recraft Color Control Image Generation",
"api_recraft_image_gen_with_style_control": "Recraft Style Control Image Generation",
"api_recraft_vector_gen": "Recraft Vector Generation"
},
"Video API": {
"api_luma_i2v": "Luma Image to Video",
"api_kling_i2v": "Kling Image to Video",
"api_veo2_i2v": "Veo2 Image to Video",
"api_hailuo_minimax_i2v": "MiniMax Image to Video",
"api_pika_scene": "Pika Scenes: Images to Video",
"api_pixverse_template_i2v": "PixVerse Template Effects: Image to Video",
"api_pixverse_t2v": "PixVerse Text to Video"
"Flux": {
"flux_kontext_dev_basic": "Flux Kontext Dev(Basic)",
"flux_kontext_dev_grouped": "Flux Kontext Dev(Grouped)",
"flux_dev_checkpoint_example": "Flux Dev fp8",
"flux_schnell": "Flux Schnell fp8",
"flux_dev_full_text_to_image": "Flux Dev full text to image",
"flux_schnell_full_text_to_image": "Flux Schnell full text to image",
"flux_fill_inpaint_example": "Flux Inpaint",
"flux_fill_outpaint_example": "Flux Outpaint",
"flux_canny_model_example": "Flux Canny Model",
"flux_depth_lora_example": "Flux Depth LoRA",
"flux_redux_model_example": "Flux Redux Model"
},
"Image": {
"image_omnigen2_t2i": "OmniGen2 Text to Image",
"image_omnigen2_image_edit": "OmniGen2 Image Edit",
"image_cosmos_predict2_2B_t2i": "Cosmos Predict2 2B T2I",
"image_chroma_text_to_image": "Chroma text to image",
"hidream_i1_dev": "HiDream I1 Dev",
"hidream_i1_fast": "HiDream I1 Fast",
"hidream_i1_full": "HiDream I1 Full",
"hidream_e1_full": "HiDream E1 Full",
"sd3_5_simple_example": "SD3.5 Simple",
"sd3_5_large_canny_controlnet_example": "SD3.5 Large Canny ControlNet",
"sd3_5_large_depth": "SD3.5 Large Depth",
@@ -732,23 +744,114 @@
"sdxl_revision_text_prompts": "SDXL Revision Text Prompts",
"sdxl_revision_zero_positive": "SDXL Revision Zero Positive",
"sdxlturbo_example": "SDXL Turbo",
"hidream_i1_dev": "HiDream I1 Dev",
"hidream_i1_fast": "HiDream I1 Fast",
"hidream_i1_full": "HiDream I1 Full"
"image_lotus_depth_v1_1": "Lotus Depth"
},
"Video": {
"video_cosmos_predict2_2B_video2world_480p_16fps": "Cosmos Predict2 2B Video2World 480p 16fps",
"video_wan_vace_14B_t2v": "Wan VACE Text to Video",
"video_wan_vace_14B_ref2v": "Wan VACE Reference to Video",
"video_wan_vace_14B_v2v": "Wan VACE Control Video",
"video_wan_vace_outpainting": "Wan VACE Outpainting",
"video_wan_vace_flf2v": "Wan VACE First-Last Frame",
"video_wan_vace_inpainting": "Wan VACE Inpainting",
"video_wan2_1_fun_camera_v1_1_1_3B": "Wan 2.1 Fun Camera 1.3B",
"video_wan2_1_fun_camera_v1_1_14B": "Wan 2.1 Fun Camera 14B",
"text_to_video_wan": "Wan 2.1 Text to Video",
"image_to_video_wan": "Wan 2.1 Image to Video",
"wan2_1_fun_inp": "Wan 2.1 Inpainting",
"wan2_1_fun_control": "Wan 2.1 ControlNet",
"wan2_1_flf2v_720_f16": "Wan 2.1 FLF2V 720p F16",
"ltxv_text_to_video": "LTXV Text to Video",
"ltxv_image_to_video": "LTXV Image to Video",
"mochi_text_to_video_example": "Mochi Text to Video",
"hunyuan_video_text_to_video": "Hunyuan Video Text to Video",
"image_to_video": "SVD Image to Video",
"txt_to_image_to_video": "SVD Text to Image to Video"
},
"Image API": {
"api_bfl_flux_1_kontext_multiple_images_input": "BFL Flux.1 Kontext Multiple Image Input",
"api_bfl_flux_1_kontext_pro_image": "BFL Flux.1 Kontext Pro",
"api_bfl_flux_1_kontext_max_image": "BFL Flux.1 Kontext Max",
"api_bfl_flux_pro_t2i": "BFL Flux[Pro]: Text to Image",
"api_luma_photon_i2i": "Luma Photon: Image to Image",
"api_luma_photon_style_ref": "Luma Photon: Style Reference",
"api_recraft_image_gen_with_color_control": "Recraft: Color Control Image Generation",
"api_recraft_image_gen_with_style_control": "Recraft: Style Control Image Generation",
"api_recraft_vector_gen": "Recraft: Vector Generation",
"api_runway_text_to_image": "Runway: Text to Image",
"api_runway_reference_to_image": "Runway: Reference to Image",
"api_stability_ai_stable_image_ultra_t2i": "Stability AI: Stable Image Ultra Text to Image",
"api_stability_ai_i2i": "Stability AI: Image to Image",
"api_stability_ai_sd3_5_t2i": "Stability AI: SD3.5 Text to Image",
"api_stability_ai_sd3_5_i2i": "Stability AI: SD3.5 Image to Image",
"api_ideogram_v3_t2i": "Ideogram V3: Text to Image",
"api_openai_image_1_t2i": "OpenAI: GPT-Image-1 Text to Image",
"api_openai_image_1_i2i": "OpenAI: GPT-Image-1 Image to Image",
"api_openai_image_1_inpaint": "OpenAI: GPT-Image-1 Inpaint",
"api_openai_image_1_multi_inputs": "OpenAI: GPT-Image-1 Multi Inputs",
"api_openai_dall_e_2_t2i": "OpenAI: Dall-E 2 Text to Image",
"api_openai_dall_e_2_inpaint": "OpenAI: Dall-E 2 Inpaint",
"api_openai_dall_e_3_t2i": "OpenAI: Dall-E 3 Text to Image"
},
"Video API": {
"api_moonvalley_text_to_video": "Moonvalley: Text to Video",
"api_moonvalley_image_to_video": "Moonvalley: Image to Video",
"api_kling_i2v": "Kling: Image to Video",
"api_kling_effects": "Kling: Video Effects",
"api_kling_flf": "Kling: FLF2V",
"api_luma_i2v": "Luma: Image to Video",
"api_luma_t2v": "Luma: Text to Video",
"api_hailuo_minimax_t2v": "MiniMax: Text to Video",
"api_hailuo_minimax_i2v": "MiniMax: Image to Video",
"api_pixverse_i2v": "PixVerse: Image to Video",
"api_pixverse_template_i2v": "PixVerse Templates: Image to Video",
"api_pixverse_t2v": "PixVerse: Text to Video",
"api_runway_gen3a_turbo_image_to_video": "Runway: Gen3a Turbo Image to Video",
"api_runway_gen4_turo_image_to_video": "Runway: Gen4 Turbo Image to Video",
"api_runway_first_last_frame": "Runway: First Last Frame to Video",
"api_pika_i2v": "Pika: Image to Video",
"api_pika_scene": "Pika Scenes: Images to Video",
"api_veo2_i2v": "Veo2: Image to Video"
},
"3D API": {
"api_rodin_image_to_model": "Rodin: Image to Model",
"api_rodin_multiview_to_model": "Rodin: Multiview to Model",
"api_tripo_text_to_model": "Tripo: Text to Model",
"api_tripo_image_to_model": "Tripo: Image to Model",
"api_tripo_multiview_to_model": "Tripo: Multiview to Model"
},
"LLM API": {
"api_openai_chat": "OpenAI: Chat",
"api_google_gemini": "Google Gemini: Chat"
},
"Upscaling": {
"hiresfix_latent_workflow": "Upscale",
"esrgan_example": "ESRGAN",
"hiresfix_esrgan_workflow": "HiresFix ESRGAN Workflow",
"latent_upscale_different_prompt_model": "Latent Upscale Different Prompt Model"
},
"ControlNet": {
"controlnet_example": "Scribble ControlNet",
"2_pass_pose_worship": "Pose ControlNet 2 Pass",
"depth_controlnet": "Depth ControlNet",
"depth_t2i_adapter": "Depth T2I Adapter",
"mixing_controlnets": "Mixing ControlNets"
},
"Area Composition": {
"area_composition": "Area Composition",
"area_composition_reversed": "Area Composition Reversed",
"area_composition_square_area_for_subject": "Area Composition Square Area for Subject"
},
"3D": {
"stable_zero123_example": "Stable Zero123",
"hunyuan3d-non-multiview-train": "Hunyuan3D 2.0",
"hunyuan-3d-multiview-elf": "Hunyuan3D 2.0 MV",
"hunyuan-3d-turbo": "Hunyuan3D 2.0 MV Turbo"
"3d_hunyuan3d_image_to_model": "Hunyuan3D 2.0",
"3d_hunyuan3d_multiview_to_model": "Hunyuan3D 2.0 MV",
"3d_hunyuan3d_multiview_to_model_turbo": "Hunyuan3D 2.0 MV Turbo",
"stable_zero123_example": "Stable Zero123"
},
"Audio": {
"stable_audio_example": "Stable Audio"
"audio_stable_audio_example": "Stable Audio",
"audio_ace_step_1_t2a_instrumentals": "ACE-Step v1 Text to Instrumentals Music",
"audio_ace_step_1_t2a_song": "ACE Step v1 Text to Song",
"audio_ace_step_1_m2m_editing": "ACE Step v1 M2M Editing"
}
}
},


@@ -322,6 +322,7 @@
 },
 "Comfy_UseNewMenu": {
 "name": "Use new menu",
+"tooltip": "Menu bar position. On mobile devices, the menu is always shown at the top.",
 "options": {
 "Disabled": "Disabled",
 "Top": "Top",