diff --git a/browser_tests/tests/vueNodes/groups/groups.spec.ts-snapshots/vue-groups-create-group-chromium-linux.png b/browser_tests/tests/vueNodes/groups/groups.spec.ts-snapshots/vue-groups-create-group-chromium-linux.png index 36dddbb835..cd35644892 100644 Binary files a/browser_tests/tests/vueNodes/groups/groups.spec.ts-snapshots/vue-groups-create-group-chromium-linux.png and b/browser_tests/tests/vueNodes/groups/groups.spec.ts-snapshots/vue-groups-create-group-chromium-linux.png differ diff --git a/browser_tests/tests/vueNodes/groups/groups.spec.ts-snapshots/vue-groups-fit-to-contents-chromium-linux.png b/browser_tests/tests/vueNodes/groups/groups.spec.ts-snapshots/vue-groups-fit-to-contents-chromium-linux.png index a27406c39f..3cbe7d24ce 100644 Binary files a/browser_tests/tests/vueNodes/groups/groups.spec.ts-snapshots/vue-groups-fit-to-contents-chromium-linux.png and b/browser_tests/tests/vueNodes/groups/groups.spec.ts-snapshots/vue-groups-fit-to-contents-chromium-linux.png differ diff --git a/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-dragging-link-chromium-linux.png b/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-dragging-link-chromium-linux.png index ae210142df..37c3b3af7d 100644 Binary files a/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-dragging-link-chromium-linux.png and b/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-dragging-link-chromium-linux.png differ diff --git a/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-input-drag-ctrl-alt-chromium-linux.png b/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-input-drag-ctrl-alt-chromium-linux.png index f7727feb94..f7625584b3 100644 Binary files a/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-input-drag-ctrl-alt-chromium-linux.png and b/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-input-drag-ctrl-alt-chromium-linux.png differ diff --git a/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-input-drag-reuses-origin-chromium-linux.png b/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-input-drag-reuses-origin-chromium-linux.png index 30ffa4e0b9..dbfa157839 100644 Binary files a/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-input-drag-reuses-origin-chromium-linux.png and b/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-input-drag-reuses-origin-chromium-linux.png differ diff --git a/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-reroute-input-drag-chromium-linux.png b/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-reroute-input-drag-chromium-linux.png index ff5b860c43..25627f8773 100644 Binary files a/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-reroute-input-drag-chromium-linux.png and b/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-reroute-input-drag-chromium-linux.png differ diff --git a/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-reroute-output-shift-drag-chromium-linux.png 
b/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-reroute-output-shift-drag-chromium-linux.png index 610b15f1ad..c05af6991b 100644 Binary files a/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-reroute-output-shift-drag-chromium-linux.png and b/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-reroute-output-shift-drag-chromium-linux.png differ diff --git a/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-shift-output-multi-link-chromium-linux.png b/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-shift-output-multi-link-chromium-linux.png index 9b42088965..5feb89c723 100644 Binary files a/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-shift-output-multi-link-chromium-linux.png and b/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-shift-output-multi-link-chromium-linux.png differ diff --git a/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-snap-to-node-chromium-linux.png b/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-snap-to-node-chromium-linux.png index c7ea9a72ee..22e6a3b833 100644 Binary files a/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-snap-to-node-chromium-linux.png and b/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-snap-to-node-chromium-linux.png differ diff --git a/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-snap-to-slot-chromium-linux.png b/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-snap-to-slot-chromium-linux.png index 6eaf27c951..2a23c2e2d1 100644 Binary files a/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-snap-to-slot-chromium-linux.png and b/browser_tests/tests/vueNodes/interactions/links/linkInteraction.spec.ts-snapshots/vue-node-snap-to-slot-chromium-linux.png differ diff --git a/browser_tests/tests/vueNodes/interactions/node/move.spec.ts-snapshots/vue-node-moved-node-chromium-linux.png b/browser_tests/tests/vueNodes/interactions/node/move.spec.ts-snapshots/vue-node-moved-node-chromium-linux.png index 49b4a78d0c..1f7bc28f40 100644 Binary files a/browser_tests/tests/vueNodes/interactions/node/move.spec.ts-snapshots/vue-node-moved-node-chromium-linux.png and b/browser_tests/tests/vueNodes/interactions/node/move.spec.ts-snapshots/vue-node-moved-node-chromium-linux.png differ diff --git a/browser_tests/tests/vueNodes/nodeStates/bypass.spec.ts-snapshots/vue-node-bypassed-state-chromium-linux.png b/browser_tests/tests/vueNodes/nodeStates/bypass.spec.ts-snapshots/vue-node-bypassed-state-chromium-linux.png index fa66f6cd1c..542f270ba6 100644 Binary files a/browser_tests/tests/vueNodes/nodeStates/bypass.spec.ts-snapshots/vue-node-bypassed-state-chromium-linux.png and b/browser_tests/tests/vueNodes/nodeStates/bypass.spec.ts-snapshots/vue-node-bypassed-state-chromium-linux.png differ diff --git a/browser_tests/tests/vueNodes/nodeStates/colors.spec.ts-snapshots/vue-node-custom-color-blue-chromium-linux.png b/browser_tests/tests/vueNodes/nodeStates/colors.spec.ts-snapshots/vue-node-custom-color-blue-chromium-linux.png index eb8c0b8cba..22281aa654 100644 Binary files 
a/browser_tests/tests/vueNodes/nodeStates/colors.spec.ts-snapshots/vue-node-custom-color-blue-chromium-linux.png and b/browser_tests/tests/vueNodes/nodeStates/colors.spec.ts-snapshots/vue-node-custom-color-blue-chromium-linux.png differ diff --git a/browser_tests/tests/vueNodes/nodeStates/colors.spec.ts-snapshots/vue-node-custom-colors-dark-all-colors-chromium-linux.png b/browser_tests/tests/vueNodes/nodeStates/colors.spec.ts-snapshots/vue-node-custom-colors-dark-all-colors-chromium-linux.png index f7ff7c7a78..1769f095ba 100644 Binary files a/browser_tests/tests/vueNodes/nodeStates/colors.spec.ts-snapshots/vue-node-custom-colors-dark-all-colors-chromium-linux.png and b/browser_tests/tests/vueNodes/nodeStates/colors.spec.ts-snapshots/vue-node-custom-colors-dark-all-colors-chromium-linux.png differ diff --git a/browser_tests/tests/vueNodes/nodeStates/colors.spec.ts-snapshots/vue-node-custom-colors-light-all-colors-chromium-linux.png b/browser_tests/tests/vueNodes/nodeStates/colors.spec.ts-snapshots/vue-node-custom-colors-light-all-colors-chromium-linux.png index 25ffc18d35..48cdf29d8b 100644 Binary files a/browser_tests/tests/vueNodes/nodeStates/colors.spec.ts-snapshots/vue-node-custom-colors-light-all-colors-chromium-linux.png and b/browser_tests/tests/vueNodes/nodeStates/colors.spec.ts-snapshots/vue-node-custom-colors-light-all-colors-chromium-linux.png differ diff --git a/browser_tests/tests/vueNodes/nodeStates/lod.spec.ts-snapshots/vue-nodes-default-chromium-linux.png b/browser_tests/tests/vueNodes/nodeStates/lod.spec.ts-snapshots/vue-nodes-default-chromium-linux.png index a1c8b6d5c2..b74ef804d1 100644 Binary files a/browser_tests/tests/vueNodes/nodeStates/lod.spec.ts-snapshots/vue-nodes-default-chromium-linux.png and b/browser_tests/tests/vueNodes/nodeStates/lod.spec.ts-snapshots/vue-nodes-default-chromium-linux.png differ diff --git a/browser_tests/tests/vueNodes/nodeStates/lod.spec.ts-snapshots/vue-nodes-lod-inactive-chromium-linux.png b/browser_tests/tests/vueNodes/nodeStates/lod.spec.ts-snapshots/vue-nodes-lod-inactive-chromium-linux.png index df9ee2c930..7065b299f0 100644 Binary files a/browser_tests/tests/vueNodes/nodeStates/lod.spec.ts-snapshots/vue-nodes-lod-inactive-chromium-linux.png and b/browser_tests/tests/vueNodes/nodeStates/lod.spec.ts-snapshots/vue-nodes-lod-inactive-chromium-linux.png differ diff --git a/browser_tests/tests/vueNodes/nodeStates/mute.spec.ts-snapshots/vue-node-muted-state-chromium-linux.png b/browser_tests/tests/vueNodes/nodeStates/mute.spec.ts-snapshots/vue-node-muted-state-chromium-linux.png index c47f1b2fe3..6973c889af 100644 Binary files a/browser_tests/tests/vueNodes/nodeStates/mute.spec.ts-snapshots/vue-node-muted-state-chromium-linux.png and b/browser_tests/tests/vueNodes/nodeStates/mute.spec.ts-snapshots/vue-node-muted-state-chromium-linux.png differ diff --git a/browser_tests/tests/vueNodes/widgets/load/uploadWidgets.spec.ts-snapshots/vue-nodes-upload-widgets-chromium-linux.png b/browser_tests/tests/vueNodes/widgets/load/uploadWidgets.spec.ts-snapshots/vue-nodes-upload-widgets-chromium-linux.png index 4526b27080..7882700839 100644 Binary files a/browser_tests/tests/vueNodes/widgets/load/uploadWidgets.spec.ts-snapshots/vue-nodes-upload-widgets-chromium-linux.png and b/browser_tests/tests/vueNodes/widgets/load/uploadWidgets.spec.ts-snapshots/vue-nodes-upload-widgets-chromium-linux.png differ diff --git a/packages/design-system/src/css/style.css b/packages/design-system/src/css/style.css index 95084d28fa..3e7ec98913 100644 --- 
a/packages/design-system/src/css/style.css +++ b/packages/design-system/src/css/style.css @@ -73,6 +73,7 @@ --color-jade-400: #47e469; --color-jade-600: #00cd72; + --color-graphite-400: #9C9EAB; --color-gold-400: #fcbf64; --color-gold-500: #fdab34; @@ -227,7 +228,7 @@ --brand-yellow: var(--color-electric-400); --brand-blue: var(--color-sapphire-700); --secondary-background: var(--color-smoke-200); - --secondary-background-hover: var(--color-smoke-400); + --secondary-background-hover: var(--color-smoke-200); --secondary-background-selected: var(--color-smoke-600); --base-background: var(--color-white); --primary-background: var(--color-azure-400); @@ -242,6 +243,17 @@ --muted-background: var(--color-smoke-700); --accent-background: var(--color-smoke-800); + /* Component/Node tokens from design system light */ + --component-node-background: var(--color-white); + --component-node-border: var(--color-border-default); + --component-node-foreground: var(--base-foreground); + --component-node-foreground-secondary: var(--color-muted-foreground); + --component-node-widget-background: var(--secondary-background); + --component-node-widget-background-hovered: var(--secondary-background-hover); + --component-node-widget-background-selected: var(--secondary-background-selected); + --component-node-widget-background-disabled: var(--color-alpha-ash-500-20); + --component-node-widget-background-highlighted: var(--color-ash-500); + /* Default UI element color palette variables */ --palette-contrast-mix-color: #fff; --palette-interface-panel-surface: var(--comfy-menu-bg); @@ -301,7 +313,7 @@ --node-component-surface-highlight: var(--color-slate-100); --node-component-surface-hovered: var(--color-charcoal-600); --node-component-surface-selected: var(--color-charcoal-200); - --node-component-surface: var(--color-charcoal-800); + --node-component-surface: var(--color-charcoal-600); --node-component-tooltip: var(--color-white); --node-component-tooltip-border: var(--color-slate-300); --node-component-tooltip-surface: var(--color-charcoal-800); @@ -339,6 +351,17 @@ --border-subtle: var(--color-charcoal-300); --muted-background: var(--color-charcoal-100); --accent-background: var(--color-charcoal-100); + + /* Component/Node tokens from design dark system */ + --component-node-background: var(--color-charcoal-600); + --component-node-border: var(--color-charcoal-100); + --component-node-foreground: var(--base-foreground); + --component-node-foreground-secondary: var(--color-muted-foreground); + --component-node-widget-background: var(--secondary-background-hover); + --component-node-widget-background-hovered: var(--secondary-background-selected); + --component-node-widget-background-selected: var(--color-charcoal-100); + --component-node-widget-background-disabled: var(--color-alpha-charcoal-600-30); + --component-node-widget-background-highlighted: var(--color-graphite-400); } @theme inline { @@ -406,6 +429,17 @@ --color-text-primary: var(--text-primary); --color-input-surface: var(--input-surface); + /* Component/Node design tokens */ + --color-component-node-background: var(--component-node-background); + --color-component-node-border: var(--component-node-border); + --color-component-node-foreground: var(--component-node-foreground); + --color-component-node-foreground-secondary: var(--component-node-foreground-secondary); + --color-component-node-widget-background: var(--component-node-widget-background); + --color-component-node-widget-background-hovered: var(--component-node-widget-background-hovered); + 
--color-component-node-widget-background-selected: var(--component-node-widget-background-selected); + --color-component-node-widget-background-disabled: var(--component-node-widget-background-disabled); + --color-component-node-widget-background-highlighted: var(--component-node-widget-background-highlighted); + /* Semantic tokens */ --color-base-foreground: var(--base-foreground); --color-muted-foreground: var(--muted-foreground); diff --git a/public/assets/images/comfy-cloud-logo.svg b/public/assets/images/comfy-cloud-logo.svg new file mode 100644 index 0000000000..4a701878c5 --- /dev/null +++ b/public/assets/images/comfy-cloud-logo.svg @@ -0,0 +1,4 @@ + + + + diff --git a/src/composables/auth/useFirebaseAuthActions.ts b/src/composables/auth/useFirebaseAuthActions.ts index a02532f564..eed7eb021f 100644 --- a/src/composables/auth/useFirebaseAuthActions.ts +++ b/src/composables/auth/useFirebaseAuthActions.ts @@ -1,7 +1,6 @@ import { FirebaseError } from 'firebase/app' import { AuthErrorCodes } from 'firebase/auth' import { ref } from 'vue' -import { useRouter } from 'vue-router' import { useErrorHandling } from '@/composables/useErrorHandling' import type { ErrorRecoveryStrategy } from '@/composables/useErrorHandling' @@ -61,8 +60,7 @@ export const useFirebaseAuthActions = () => { if (isCloud) { try { - const router = useRouter() - await router.push({ name: 'cloud-login' }) + window.location.href = '/cloud/login' } catch (error) { // needed for local development until we bring in cloud login pages. window.location.reload() diff --git a/src/composables/graph/useGraphNodeManager.ts b/src/composables/graph/useGraphNodeManager.ts index 2e9c54a2cf..24b2b1a64f 100644 --- a/src/composables/graph/useGraphNodeManager.ts +++ b/src/composables/graph/useGraphNodeManager.ts @@ -269,10 +269,13 @@ export function useGraphNodeManager(graph: LGraph): GraphNodeManager { const updatedWidgets = currentData.widgets.map((w) => w.name === widgetName ? 
{ ...w, value: validateWidgetValue(value) } : w ) - vueNodeData.set(nodeId, { + // Create a completely new object to ensure Vue reactivity triggers + const updatedData = { ...currentData, widgets: updatedWidgets - }) + } + + vueNodeData.set(nodeId, updatedData) } catch (error) { // Ignore widget update errors to prevent cascade failures } diff --git a/src/composables/graph/useWidgetValue.ts b/src/composables/graph/useWidgetValue.ts index 87658f83f7..20e018881a 100644 --- a/src/composables/graph/useWidgetValue.ts +++ b/src/composables/graph/useWidgetValue.ts @@ -2,16 +2,17 @@ * Composable for managing widget value synchronization between Vue and LiteGraph * Provides consistent pattern for immediate UI updates and LiteGraph callbacks */ -import { ref, watch } from 'vue' +import { computed, toValue, ref, watch } from 'vue' import type { Ref } from 'vue' import type { SimplifiedWidget, WidgetValue } from '@/types/simplifiedWidget' +import type { MaybeRefOrGetter } from '@vueuse/core' interface UseWidgetValueOptions { /** The widget configuration from LiteGraph */ widget: SimplifiedWidget - /** The current value from parent component */ - modelValue: T + /** The current value from parent component (can be a value or a getter function) */ + modelValue: MaybeRefOrGetter /** Default value if modelValue is null/undefined */ defaultValue: T /** Emit function from component setup */ @@ -46,8 +47,21 @@ export function useWidgetValue({ emit, transform }: UseWidgetValueOptions): UseWidgetValueReturn { - // Local value for immediate UI updates - const localValue = ref(modelValue ?? defaultValue) + // Ref for immediate UI feedback before value flows back through modelValue + const newProcessedValue = ref(null) + + // Computed that prefers the immediately processed value, then falls back to modelValue + const localValue = computed( + () => newProcessedValue.value ?? toValue(modelValue) ?? defaultValue + ) + + // Clear newProcessedValue when modelValue updates (allowing external changes to flow through) + watch( + () => toValue(modelValue), + () => { + newProcessedValue.value = null + } + ) // Handle user changes const onChange = (newValue: U) => { @@ -71,21 +85,13 @@ export function useWidgetValue({ } } - // 1. Update local state for immediate UI feedback - localValue.value = processedValue + // Set for immediate UI feedback + newProcessedValue.value = processedValue - // 2. Emit to parent component + // Emit to parent component emit('update:modelValue', processedValue) } - // Watch for external updates from LiteGraph - watch( - () => modelValue, - (newValue) => { - localValue.value = newValue ?? 
defaultValue - } - ) - return { localValue: localValue as Ref, onChange @@ -97,7 +103,7 @@ export function useWidgetValue({ */ export function useStringWidgetValue( widget: SimplifiedWidget, - modelValue: string, + modelValue: string | (() => string), emit: (event: 'update:modelValue', value: string) => void ) { return useWidgetValue({ @@ -114,7 +120,7 @@ export function useStringWidgetValue( */ export function useNumberWidgetValue( widget: SimplifiedWidget, - modelValue: number, + modelValue: number | (() => number), emit: (event: 'update:modelValue', value: number) => void ) { return useWidgetValue({ @@ -137,7 +143,7 @@ export function useNumberWidgetValue( */ export function useBooleanWidgetValue( widget: SimplifiedWidget, - modelValue: boolean, + modelValue: boolean | (() => boolean), emit: (event: 'update:modelValue', value: boolean) => void ) { return useWidgetValue({ diff --git a/src/extensions/core/uploadAudio.ts b/src/extensions/core/uploadAudio.ts index cd2ff97e1e..dfefc88a0f 100644 --- a/src/extensions/core/uploadAudio.ts +++ b/src/extensions/core/uploadAudio.ts @@ -58,6 +58,9 @@ async function uploadFile( getResourceURL(...splitFilePath(path)) ) audioWidget.value = path + + // Manually trigger the callback to update VueNodes + audioWidget.callback?.(path) } } else { useToastStore().addAlert(resp.status + ' - ' + resp.statusText) diff --git a/src/locales/en/main.json b/src/locales/en/main.json index 38f9dc2448..1492d3b808 100644 --- a/src/locales/en/main.json +++ b/src/locales/en/main.json @@ -180,6 +180,10 @@ "title": "Title", "edit": "Edit", "copy": "Copy", + "copyJobId": "Copy Job ID", + "copied": "Copied", + "jobIdCopied": "Job ID copied to clipboard", + "failedToCopyJobId": "Failed to copy job ID", "imageUrl": "Image URL", "clear": "Clear", "clearAll": "Clear all", @@ -607,17 +611,8 @@ "nodes": "Nodes", "models": "Models", "workflows": "Workflows", - "templates": "Templates", - "console": "Console", - "menu": "Menu", - "assets": "Assets", - "imported": "Imported", - "generated": "Generated" + "templates": "Templates" }, - "noFilesFound": "No files found", - "noImportedFiles": "No imported files found", - "noGeneratedFiles": "No generated files found", - "noFilesFoundMessage": "Upload files or generate content to see them here", "browseTemplates": "Browse example templates", "openWorkflow": "Open workflow in local file system", "newBlankWorkflow": "Create a new blank workflow", @@ -759,6 +754,310 @@ "Partner Nodes": "Partner Nodes", "Generation Type": "Generation Type" }, + "templateDescription": { + "Basics": { + "default": "Generate images from text prompts.", + "image2image": "Transform existing images using text prompts.", + "lora": "Generate images with LoRA models for specialized styles or subjects.", + "lora_multiple": "Generate images by combining multiple LoRA models.", + "inpaint_example": "Edit specific parts of images seamlessly.", + "inpaint_model_outpainting": "Extend images beyond their original boundaries.", + "embedding_example": "Generate images using textual inversion for consistent styles.", + "gligen_textbox_example": "Generate images with precise object placement using text boxes." + }, + "Flux": { + "flux_kontext_dev_basic": "Edit image using Flux Kontext with full node visibility, perfect for learning the workflow.", + "flux_kontext_dev_grouped": "Streamlined version of Flux Kontext with grouped nodes for cleaner workspace.", + "flux_dev_checkpoint_example": "Generate images using Flux Dev fp8 quantized version. 
Suitable for devices with limited VRAM, requires only one model file, but image quality is slightly lower than the full version.", + "flux_schnell": "Quickly generate images with Flux Schnell fp8 quantized version. Ideal for low-end hardware, requires only 4 steps to generate images.", + "flux_dev_full_text_to_image": "Generate high-quality images with Flux Dev full version. Requires larger VRAM and multiple model files, but provides the best prompt following capability and image quality.", + "flux_schnell_full_text_to_image": "Generate images quickly with Flux Schnell full version. Uses Apache2.0 license, requires only 4 steps to generate images while maintaining good image quality.", + "flux_fill_inpaint_example": "Fill missing parts of images using Flux inpainting.", + "flux_fill_outpaint_example": "Extend images beyond boundaries using Flux outpainting.", + "flux_canny_model_example": "Generate images guided by edge detection using Flux Canny.", + "flux_depth_lora_example": "Generate images guided by depth information using Flux LoRA.", + "flux_redux_model_example": "Generate images by transferring style from reference images using Flux Redux." + }, + "Image": { + "image_omnigen2_t2i": "Generate high-quality images from text prompts using OmniGen2's unified 7B multimodal model with dual-path architecture.", + "image_omnigen2_image_edit": "Edit images with natural language instructions using OmniGen2's advanced image editing capabilities and text rendering support.", + "image_cosmos_predict2_2B_t2i": "Generate images with Cosmos-Predict2 2B T2I, delivering physically accurate, high-fidelity, and detail-rich image generation.", + "image_chroma_text_to_image": "Chroma is modified from flux and has some changes in the architecture.", + "hidream_i1_dev": "Generate images with HiDream I1 Dev - Balanced version with 28 inference steps, suitable for medium-range hardware.", + "hidream_i1_fast": "Generate images quickly with HiDream I1 Fast - Lightweight version with 16 inference steps, ideal for rapid previews on lower-end hardware.", + "hidream_i1_full": "Generate images with HiDream I1 Full - Complete version with 50 inference steps for highest quality output.", + "hidream_e1_full": "Edit images with HiDream E1 - Professional natural language image editing model.", + "sd3_5_simple_example": "Generate images using SD 3.5.", + "sd3_5_large_canny_controlnet_example": "Generate images guided by edge detection using SD 3.5 Canny ControlNet.", + "sd3_5_large_depth": "Generate images guided by depth information using SD 3.5.", + "sd3_5_large_blur": "Generate images guided by blurred reference images using SD 3.5.", + "sdxl_simple_example": "Generate high-quality images using SDXL.", + "sdxl_refiner_prompt_example": "Enhance SDXL images using refiner models.", + "sdxl_revision_text_prompts": "Generate images by transferring concepts from reference images using SDXL Revision.", + "sdxl_revision_zero_positive": "Generate images using both text prompts and reference images with SDXL Revision.", + "sdxlturbo_example": "Generate images in a single step using SDXL Turbo.", + "image_lotus_depth_v1_1": "Run Lotus Depth in ComfyUI for zero-shot, efficient monocular depth estimation with high detail retention." + }, + "Video": { + "video_cosmos_predict2_2B_video2world_480p_16fps": "Generate videos with Cosmos-Predict2 2B Video2World, generating physically accurate, high-fidelity, and consistent video simulations.", + "video_wan_vace_14B_t2v": "Transform text descriptions into high-quality videos. 
Supports both 480p and 720p with VACE-14B model.", + "video_wan_vace_14B_ref2v": "Create videos that match the style and content of a reference image. Perfect for style-consistent video generation.", + "video_wan_vace_14B_v2v": "Generate videos by controlling input videos and reference images using Wan VACE.", + "video_wan_vace_outpainting": "Generate extended videos by expanding video size using Wan VACE outpainting.", + "video_wan_vace_flf2v": "Generate smooth video transitions by defining start and end frames. Supports custom keyframe sequences.", + "video_wan_vace_inpainting": "Edit specific regions in videos while preserving surrounding content. Great for object removal or replacement.", + "video_wan2_1_fun_camera_v1_1_1_3B": "Generate dynamic videos with cinematic camera movements using Wan 2.1 Fun Camera 1.3B model.", + "video_wan2_1_fun_camera_v1_1_14B": "Generate high-quality videos with advanced camera control using the full 14B model", + "text_to_video_wan": "Generate videos from text prompts using Wan 2.1.", + "image_to_video_wan": "Generate videos from images using Wan 2.1.", + "wan2_1_fun_inp": "Generate videos from start and end frames using Wan 2.1 inpainting.", + "wan2_1_fun_control": "Generate videos guided by pose, depth, and edge controls using Wan 2.1 ControlNet.", + "wan2_1_flf2v_720_f16": "Generate videos by controlling first and last frames using Wan 2.1 FLF2V.", + "ltxv_text_to_video": "Generate videos from text prompts.", + "ltxv_image_to_video": "Generate videos from still images.", + "mochi_text_to_video_example": "Generate videos from text prompts using Mochi model.", + "hunyuan_video_text_to_video": "Generate videos from text prompts using Hunyuan model.", + "image_to_video": "Generate videos from still images.", + "txt_to_image_to_video": "Generate videos by first creating images from text prompts." + }, + "Image API": { + "api_bfl_flux_1_kontext_multiple_images_input": "Input multiple images and edit them with Flux.1 Kontext.", + "api_bfl_flux_1_kontext_pro_image": "Edit images with Flux.1 Kontext pro image.", + "api_bfl_flux_1_kontext_max_image": "Edit images with Flux.1 Kontext max image.", + "api_bfl_flux_pro_t2i": "Generate images with excellent prompt following and visual quality using FLUX.1 Pro.", + "api_luma_photon_i2i": "Guide image generation using a combination of images and prompt.", + "api_luma_photon_style_ref": "Generate images by blending style references with precise control using Luma Photon.", + "api_recraft_image_gen_with_color_control": "Generate images with custom color palettes and brand-specific visuals using Recraft.", + "api_recraft_image_gen_with_style_control": "Control style with visual examples, align positioning, and fine-tune objects. Store and share styles for perfect brand consistency.", + "api_recraft_vector_gen": "Generate high-quality vector images from text prompts using Recraft's AI vector generator.", + "api_runway_text_to_image": "Generate high-quality images from text prompts using Runway's AI model.", + "api_runway_reference_to_image": "Generate new images based on reference styles and compositions with Runway's AI.", + "api_stability_ai_stable_image_ultra_t2i": "Generate high quality images with excellent prompt adherence. 
Perfect for professional use cases at 1 megapixel resolution.", + "api_stability_ai_i2i": "Transform images with high-quality generation using Stability AI, perfect for professional editing and style transfer.", + "api_stability_ai_sd3_5_t2i": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.", + "api_stability_ai_sd3_5_i2i": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.", + "api_ideogram_v3_t2i": "Generate professional-quality images with excellent prompt alignment, photorealism, and text rendering using Ideogram V3.", + "api_openai_image_1_t2i": "Generate images from text prompts using OpenAI GPT Image 1 API.", + "api_openai_image_1_i2i": "Generate images from input images using OpenAI GPT Image 1 API.", + "api_openai_image_1_inpaint": "Edit images using inpainting with OpenAI GPT Image 1 API.", + "api_openai_image_1_multi_inputs": "Generate images from multiple inputs using OpenAI GPT Image 1 API.", + "api_openai_dall_e_2_t2i": "Generate images from text prompts using OpenAI Dall-E 2 API.", + "api_openai_dall_e_2_inpaint": "Edit images using inpainting with OpenAI Dall-E 2 API.", + "api_openai_dall_e_3_t2i": "Generate images from text prompts using OpenAI Dall-E 3 API." + }, + "Video API": { + "api_moonvalley_text_to_video": "Generate cinematic, 1080p videos from text prompts through a model trained exclusively on licensed data.", + "api_moonvalley_image_to_video": "Generate cinematic, 1080p videos with an image through a model trained exclusively on licensed data.", + "api_kling_i2v": "Generate videos with excellent prompt adherence for actions, expressions, and camera movements using Kling.", + "api_kling_effects": "Generate dynamic videos by applying visual effects to images using Kling.", + "api_kling_flf": "Generate videos through controlling the first and last frames.", + "api_luma_i2v": "Take static images and instantly create magical high quality animations.", + "api_luma_t2v": "High-quality videos can be generated using simple prompts.", + "api_hailuo_minimax_t2v": "Generate high-quality videos directly from text prompts. Explore MiniMax's advanced AI capabilities to create diverse visual narratives with professional CGI effects and stylistic elements to bring your descriptions to life.", + "api_hailuo_minimax_i2v": "Generate refined videos from images and text with CGI integration using MiniMax.", + "api_pixverse_i2v": "Generate dynamic videos from static images with motion and effects using PixVerse.", + "api_pixverse_template_i2v": "Generate dynamic videos from static images with motion and effects using PixVerse.", + "api_pixverse_t2v": "Generate videos with accurate prompt interpretation and stunning video dynamics.", + "api_runway_gen3a_turbo_image_to_video": "Generate cinematic videos from static images using Runway Gen3a Turbo.", + "api_runway_gen4_turo_image_to_video": "Generate dynamic videos from images using Runway Gen4 Turbo.", + "api_runway_first_last_frame": "Generate smooth video transitions between two keyframes with Runway's precision.", + "api_pika_i2v": "Generate smooth animated videos from single static images using Pika AI.", + "api_pika_scene": "Generate videos that incorporate multiple input images using Pika Scenes.", + "api_veo2_i2v": "Generate videos from images using Google Veo2 API." 
+ }, + "3D API": { + "api_rodin_image_to_model": "Generate detailed 3D models from single photos using Rodin AI.", + "api_rodin_multiview_to_model": "Sculpt comprehensive 3D models using Rodin's multi-angle reconstruction.", + "api_tripo_text_to_model": "Craft 3D objects from descriptions with Tripo's text-driven modeling.", + "api_tripo_image_to_model": "Generate professional 3D assets from 2D images using Tripo engine.", + "api_tripo_multiview_to_model": "Build 3D models from multiple angles with Tripo's advanced scanner." + }, + "LLM API": { + "api_openai_chat": "Engage with OpenAI's advanced language models for intelligent conversations.", + "api_google_gemini": "Experience Google's multimodal AI with Gemini's reasoning capabilities." + }, + "Upscaling": { + "hiresfix_latent_workflow": "Upscale images by enhancing quality in latent space.", + "esrgan_example": "Upscale images using ESRGAN models to enhance quality.", + "hiresfix_esrgan_workflow": "Upscale images using ESRGAN models during intermediate generation steps.", + "latent_upscale_different_prompt_model": "Upscale images while changing prompts across generation passes." + }, + "ControlNet": { + "controlnet_example": "Generate images guided by scribble reference images using ControlNet.", + "2_pass_pose_worship": "Generate images guided by pose references using ControlNet.", + "depth_controlnet": "Generate images guided by depth information using ControlNet.", + "depth_t2i_adapter": "Generate images guided by depth information using T2I adapter.", + "mixing_controlnets": "Generate images by combining multiple ControlNet models." + }, + "Area Composition": { + "area_composition": "Generate images by controlling composition with defined areas.", + "area_composition_square_area_for_subject": "Generate images with consistent subject placement using area composition." + }, + "3D": { + "3d_hunyuan3d_image_to_model": "Generate 3D models from single images using Hunyuan3D 2.0.", + "3d_hunyuan3d_multiview_to_model": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV.", + "3d_hunyuan3d_multiview_to_model_turbo": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV Turbo.", + "stable_zero123_example": "Generate 3D views from single images using Stable Zero123." + }, + "Audio": { + "audio_stable_audio_example": "Generate audio from text prompts using Stable Audio.", + "audio_ace_step_1_t2a_instrumentals": "Generate instrumental music from text prompts using ACE-Step v1.", + "audio_ace_step_1_t2a_song": "Generate songs with vocals from text prompts using ACE-Step v1, supporting multilingual and style customization.", + "audio_ace_step_1_m2m_editing": "Edit existing songs to change style and lyrics using ACE-Step v1 M2M." 
+ } + }, + "template": { + "Basics": { + "default": "Image Generation", + "image2image": "Image to Image", + "lora": "LoRA", + "lora_multiple": "LoRA Multiple", + "inpaint_example": "Inpaint", + "inpaint_model_outpainting": "Outpaint", + "embedding_example": "Embedding", + "gligen_textbox_example": "Gligen Textbox" + }, + "Flux": { + "flux_kontext_dev_basic": "Flux Kontext Dev(Basic)", + "flux_kontext_dev_grouped": "Flux Kontext Dev(Grouped)", + "flux_dev_checkpoint_example": "Flux Dev fp8", + "flux_schnell": "Flux Schnell fp8", + "flux_dev_full_text_to_image": "Flux Dev full text to image", + "flux_schnell_full_text_to_image": "Flux Schnell full text to image", + "flux_fill_inpaint_example": "Flux Inpaint", + "flux_fill_outpaint_example": "Flux Outpaint", + "flux_canny_model_example": "Flux Canny Model", + "flux_depth_lora_example": "Flux Depth LoRA", + "flux_redux_model_example": "Flux Redux Model" + }, + "Image": { + "image_omnigen2_t2i": "OmniGen2 Text to Image", + "image_omnigen2_image_edit": "OmniGen2 Image Edit", + "image_cosmos_predict2_2B_t2i": "Cosmos Predict2 2B T2I", + "image_chroma_text_to_image": "Chroma text to image", + "hidream_i1_dev": "HiDream I1 Dev", + "hidream_i1_fast": "HiDream I1 Fast", + "hidream_i1_full": "HiDream I1 Full", + "hidream_e1_full": "HiDream E1 Full", + "sd3_5_simple_example": "SD3.5 Simple", + "sd3_5_large_canny_controlnet_example": "SD3.5 Large Canny ControlNet", + "sd3_5_large_depth": "SD3.5 Large Depth", + "sd3_5_large_blur": "SD3.5 Large Blur", + "sdxl_simple_example": "SDXL Simple", + "sdxl_refiner_prompt_example": "SDXL Refiner Prompt", + "sdxl_revision_text_prompts": "SDXL Revision Text Prompts", + "sdxl_revision_zero_positive": "SDXL Revision Zero Positive", + "sdxlturbo_example": "SDXL Turbo", + "image_lotus_depth_v1_1": "Lotus Depth" + }, + "Video": { + "video_cosmos_predict2_2B_video2world_480p_16fps": "Cosmos Predict2 2B Video2World 480p 16fps", + "video_wan_vace_14B_t2v": "Wan VACE Text to Video", + "video_wan_vace_14B_ref2v": "Wan VACE Reference to Video", + "video_wan_vace_14B_v2v": "Wan VACE Control Video", + "video_wan_vace_outpainting": "Wan VACE Outpainting", + "video_wan_vace_flf2v": "Wan VACE First-Last Frame", + "video_wan_vace_inpainting": "Wan VACE Inpainting", + "video_wan2_1_fun_camera_v1_1_1_3B": "Wan 2.1 Fun Camera 1.3B", + "video_wan2_1_fun_camera_v1_1_14B": "Wan 2.1 Fun Camera 14B", + "text_to_video_wan": "Wan 2.1 Text to Video", + "image_to_video_wan": "Wan 2.1 Image to Video", + "wan2_1_fun_inp": "Wan 2.1 Inpainting", + "wan2_1_fun_control": "Wan 2.1 ControlNet", + "wan2_1_flf2v_720_f16": "Wan 2.1 FLF2V 720p F16", + "ltxv_text_to_video": "LTXV Text to Video", + "ltxv_image_to_video": "LTXV Image to Video", + "mochi_text_to_video_example": "Mochi Text to Video", + "hunyuan_video_text_to_video": "Hunyuan Video Text to Video", + "image_to_video": "SVD Image to Video", + "txt_to_image_to_video": "SVD Text to Image to Video" + }, + "Image API": { + "api_bfl_flux_1_kontext_multiple_images_input": "BFL Flux.1 Kontext Multiple Image Input", + "api_bfl_flux_1_kontext_pro_image": "BFL Flux.1 Kontext Pro", + "api_bfl_flux_1_kontext_max_image": "BFL Flux.1 Kontext Max", + "api_bfl_flux_pro_t2i": "BFL Flux[Pro]: Text to Image", + "api_luma_photon_i2i": "Luma Photon: Image to Image", + "api_luma_photon_style_ref": "Luma Photon: Style Reference", + "api_recraft_image_gen_with_color_control": "Recraft: Color Control Image Generation", + "api_recraft_image_gen_with_style_control": "Recraft: Style Control Image Generation", + 
"api_recraft_vector_gen": "Recraft: Vector Generation", + "api_runway_text_to_image": "Runway: Text to Image", + "api_runway_reference_to_image": "Runway: Reference to Image", + "api_stability_ai_stable_image_ultra_t2i": "Stability AI: Stable Image Ultra Text to Image", + "api_stability_ai_i2i": "Stability AI: Image to Image", + "api_stability_ai_sd3_5_t2i": "Stability AI: SD3.5 Text to Image", + "api_stability_ai_sd3_5_i2i": "Stability AI: SD3.5 Image to Image", + "api_ideogram_v3_t2i": "Ideogram V3: Text to Image", + "api_openai_image_1_t2i": "OpenAI: GPT-Image-1 Text to Image", + "api_openai_image_1_i2i": "OpenAI: GPT-Image-1 Image to Image", + "api_openai_image_1_inpaint": "OpenAI: GPT-Image-1 Inpaint", + "api_openai_image_1_multi_inputs": "OpenAI: GPT-Image-1 Multi Inputs", + "api_openai_dall_e_2_t2i": "OpenAI: Dall-E 2 Text to Image", + "api_openai_dall_e_2_inpaint": "OpenAI: Dall-E 2 Inpaint", + "api_openai_dall_e_3_t2i": "OpenAI: Dall-E 3 Text to Image" + }, + "Video API": { + "api_moonvalley_text_to_video": "Moonvalley: Text to Video", + "api_moonvalley_image_to_video": "Moonvalley: Image to Video", + "api_kling_i2v": "Kling: Image to Video", + "api_kling_effects": "Kling: Video Effects", + "api_kling_flf": "Kling: FLF2V", + "api_luma_i2v": "Luma: Image to Video", + "api_luma_t2v": "Luma: Text to Video", + "api_hailuo_minimax_t2v": "MiniMax: Text to Video", + "api_hailuo_minimax_i2v": "MiniMax: Image to Video", + "api_pixverse_i2v": "PixVerse: Image to Video", + "api_pixverse_template_i2v": "PixVerse Templates: Image to Video", + "api_pixverse_t2v": "PixVerse: Text to Video", + "api_runway_gen3a_turbo_image_to_video": "Runway: Gen3a Turbo Image to Video", + "api_runway_gen4_turo_image_to_video": "Runway: Gen4 Turbo Image to Video", + "api_runway_first_last_frame": "Runway: First Last Frame to Video", + "api_pika_i2v": "Pika: Image to Video", + "api_pika_scene": "Pika Scenes: Images to Video", + "api_veo2_i2v": "Veo2: Image to Video" + }, + "3D API": { + "api_rodin_image_to_model": "Rodin: Image to Model", + "api_rodin_multiview_to_model": "Rodin: Multiview to Model", + "api_tripo_text_to_model": "Tripo: Text to Model", + "api_tripo_image_to_model": "Tripo: Image to Model", + "api_tripo_multiview_to_model": "Tripo: Multiview to Model" + }, + "LLM API": { + "api_openai_chat": "OpenAI: Chat", + "api_google_gemini": "Google Gemini: Chat" + }, + "Upscaling": { + "hiresfix_latent_workflow": "Upscale", + "esrgan_example": "ESRGAN", + "hiresfix_esrgan_workflow": "HiresFix ESRGAN Workflow", + "latent_upscale_different_prompt_model": "Latent Upscale Different Prompt Model" + }, + "ControlNet": { + "controlnet_example": "Scribble ControlNet", + "2_pass_pose_worship": "Pose ControlNet 2 Pass", + "depth_controlnet": "Depth ControlNet", + "depth_t2i_adapter": "Depth T2I Adapter", + "mixing_controlnets": "Mixing ControlNets" + }, + "Area Composition": { + "area_composition": "Area Composition", + "area_composition_square_area_for_subject": "Area Composition Square Area for Subject" + }, + "3D": { + "3d_hunyuan3d_image_to_model": "Hunyuan3D 2.0", + "3d_hunyuan3d_multiview_to_model": "Hunyuan3D 2.0 MV", + "3d_hunyuan3d_multiview_to_model_turbo": "Hunyuan3D 2.0 MV Turbo", + "stable_zero123_example": "Stable Zero123" + }, + "Audio": { + "audio_stable_audio_example": "Stable Audio", + "audio_ace_step_1_t2a_instrumentals": "ACE-Step v1 Text to Instrumentals Music", + "audio_ace_step_1_t2a_song": "ACE Step v1 Text to Song", + "audio_ace_step_1_m2m_editing": "ACE Step v1 M2M Editing" + } + }, 
"categories": "Categories", "resetFilters": "Clear Filters", "sorting": "Sort by", @@ -1435,6 +1734,7 @@ "camera": "Camera", "light": "Light", "switchingMaterialMode": "Switching Material Mode...", + "edgeThreshold": "Edge Threshold", "export": "Export", "exportModel": "Export Model", "exportingModel": "Exporting model...", @@ -1445,7 +1745,8 @@ "normal": "Normal", "wireframe": "Wireframe", "original": "Original", - "depth": "Depth" + "depth": "Depth", + "lineart": "Lineart" }, "upDirections": { "original": "Original" @@ -1471,10 +1772,7 @@ "exportSettings": "Export Settings", "modelSettings": "Model Settings" }, - "openIn3DViewer": "Open in 3D Viewer", - "dropToLoad": "Drop 3D model to load", - "unsupportedFileType": "Unsupported file type (supports .gltf, .glb, .obj, .fbx, .stl)", - "uploadingModel": "Uploading 3D model..." + "openIn3DViewer": "Open in 3D Viewer" }, "toastMessages": { "nothingToQueue": "Nothing to queue", @@ -1559,7 +1857,12 @@ "confirmPasswordLabel": "Confirm Password", "confirmPasswordPlaceholder": "Enter the same password again", "forgotPassword": "Forgot password?", - "loginButton": "Log in", + "passwordResetInstructions": "Enter your email address and we'll send you a link to reset your password.", + "sendResetLink": "Send reset link", + "backToLogin": "Back to login", + "didntReceiveEmail": "Didn't receive an email? Contact us at", + "passwordResetError": "Failed to send password reset email. Please try again.", + "loginButton": "Sign in", "orContinueWith": "Or continue with", "loginWithGoogle": "Log in with Google", "loginWithGithub": "Log in with Github", @@ -1596,6 +1899,20 @@ "success": "Password Updated", "successDetail": "Your password has been updated successfully" }, + "errors": { + "auth/invalid-email": "Please enter a valid email address.", + "auth/user-disabled": "This account has been disabled. Please contact support.", + "auth/user-not-found": "No account found with this email. Would you like to create a new account?", + "auth/wrong-password": "The password you entered is incorrect. Please try again.", + "auth/email-already-in-use": "An account with this email already exists. Try signing in instead.", + "auth/weak-password": "Password is too weak. Please use a stronger password with at least 6 characters.", + "auth/too-many-requests": "Too many login attempts. Please wait a moment and try again.", + "auth/operation-not-allowed": "This sign-in method is not currently supported.", + "auth/invalid-credential": "Invalid login credentials. Please check your email and password.", + "auth/network-request-failed": "Network error. Please check your connection and try again.", + "auth/popup-closed-by-user": "Sign-in was cancelled. Please try again.", + "auth/cancelled-popup-request": "Sign-in was cancelled. Please try again." + }, "deleteAccount": { "deleteAccount": "Delete Account", "confirmTitle": "Delete Account", @@ -1778,6 +2095,128 @@ "renderBypassState": "Render Bypass State", "renderErrorState": "Render Error State" }, + "cloudOnboarding": { + "survey": { + "title": "Cloud Survey", + "placeholder": "Survey questions placeholder", + "steps": { + "familiarity": "How familiar are you with ComfyUI?", + "purpose": "What will you primarily use ComfyUI for?", + "industry": "What's your primary industry?", + "making": "What do you plan on making?" 
+ }, + "questions": { + "familiarity": "How familiar are you with ComfyUI?", + "purpose": "What will you primarily use ComfyUI for?", + "industry": "What's your primary industry?", + "making": "What do you plan on making?" + }, + "options": { + "familiarity": { + "new": "New to ComfyUI (never used it before)", + "starting": "Just getting started (following tutorials)", + "basics": "Comfortable with basics", + "advanced": "Advanced user (custom workflows)", + "expert": "Expert (help others)" + }, + "purpose": { + "personal": "Personal projects / hobby", + "community": "Community contributions (nodes, workflows, etc.)", + "client": "Client work (freelance)", + "inhouse": "My own workplace (in-house)", + "research": "Academic research" + }, + "industry": { + "film_tv_animation": "Film, TV, & animation", + "gaming": "Gaming", + "marketing": "Marketing & advertising", + "architecture": "Architecture", + "product_design": "Product & graphic design", + "fine_art": "Fine art & illustration", + "software": "Software & technology", + "education": "Education", + "other": "Other", + "otherPlaceholder": "Please specify" + }, + "making": { + "images": "Images", + "video": "Video & animation", + "3d": "3D assets", + "audio": "Audio / music", + "custom_nodes": "Custom nodes & workflows" + } + } + }, + "forgotPassword": { + "title": "Forgot Password", + "instructions": "Enter your email address and we'll send you a link to reset your password.", + "emailLabel": "Email", + "emailPlaceholder": "Enter your email", + "sendResetLink": "Send reset link", + "backToLogin": "Back to login", + "didntReceiveEmail": "Didn't receive an email? Contact us at", + "passwordResetSent": "Password reset email sent", + "passwordResetError": "Failed to send password reset email. Please try again.", + "emailRequired": "Email is required" + }, + "privateBeta": { + "title": "Cloud is currently in private beta", + "desc": "Sign in to join the waitlist. We’ll notify you when it’s your turn. Already been notified? Sign in to start using Cloud." + }, + "start": { + "title": "start creating in seconds", + "desc": "Zero setup required. Works on any device.", + "explain": "Generate multiple outputs at once. Share workflows with ease.", + "learnAboutButton": "Learn about Cloud", + "wantToRun": "Want to run ComfyUI locally instead?", + "download": "Download ComfyUI" + }, + "checkingStatus": "Checking your account status...", + "retrying": "Retrying...", + "retry": "Try Again", + "authTimeout": { + "title": "Connection Taking Too Long", + "message": "We're having trouble connecting to ComfyUI Cloud. This could be due to a slow connection or temporary service issue.", + "restart": "Sign Out & Try Again", + "troubleshooting": "Common causes:", + "causes": [ + "Corporate firewall or proxy blocking authentication services", + "VPN or network restrictions", + "Browser extensions interfering with requests", + "Regional network limitations", + "Try a different browser or network" + ], + "technicalDetails": "Technical Details", + "helpText": "Need help? Contact", + "supportLink": "support" + } + }, + "cloudFooter_needHelp": "Need Help?", + "cloudStart_title": "start creating in seconds", + "cloudStart_desc": "Zero setup required. Works on any device.", + "cloudStart_explain": "Generate multiple outputs at once. Share workflows with ease.", + "cloudStart_learnAboutButton": "Learn about Cloud", + "cloudStart_wantToRun": "Want to run ComfyUI locally instead?", + "cloudStart_download": "Download ComfyUI", + "cloudWaitlist_questionsText": "Questions? Contact us",
+ "cloudWaitlist_contactLink": "here", + "cloudSorryContactSupport_title": "Sorry, contact support", + "cloudPrivateBeta_title": "Cloud is currently in private beta", + "cloudPrivateBeta_desc": "Sign in to join the waitlist. We'll notify you when it's your turn. Already been notified? Sign in to start using Cloud.", + "cloudForgotPassword_title": "Forgot Password", + "cloudForgotPassword_instructions": "Enter your email address and we'll send you a link to reset your password.", + "cloudForgotPassword_emailLabel": "Email", + "cloudForgotPassword_emailPlaceholder": "Enter your email", + "cloudForgotPassword_sendResetLink": "Send reset link", + "cloudForgotPassword_backToLogin": "Back to login", + "cloudForgotPassword_didntReceiveEmail": "Didn't receive an email?", + "cloudForgotPassword_emailRequired": "Email is required", + "cloudForgotPassword_passwordResetSent": "Password reset sent", + "cloudForgotPassword_passwordResetError": "Failed to send password reset email", + "cloudSurvey_steps_familiarity": "How familiar are you with ComfyUI?", + "cloudSurvey_steps_purpose": "What will you primarily use ComfyUI for?", + "cloudSurvey_steps_industry": "What's your primary industry?", + "cloudSurvey_steps_making": "What do you plan on making?", "assetBrowser": { "assets": "Assets", "browseAssets": "Browse Assets", diff --git a/src/locales/en/nodeDefs.json b/src/locales/en/nodeDefs.json index 37500a7652..81930c80f8 100644 --- a/src/locales/en/nodeDefs.json +++ b/src/locales/en/nodeDefs.json @@ -4640,7 +4640,10 @@ }, "height": { "name": "height" - } + }, + "clear": {}, + "upload 3d model": {}, + "upload extra resources": {} }, "outputs": { "0": { @@ -8802,6 +8805,9 @@ }, "camera_info": { "name": "camera_info" + }, + "image": { + "name": "image" + } } }, diff --git a/src/platform/cloud/onboarding/CloudAuthTimeoutView.vue b/src/platform/cloud/onboarding/CloudAuthTimeoutView.vue new file mode 100644 index 0000000000..dc317064b0 --- /dev/null +++ b/src/platform/cloud/onboarding/CloudAuthTimeoutView.vue @@ -0,0 +1,97 @@ + + + diff --git a/src/platform/cloud/onboarding/CloudForgotPasswordView.vue b/src/platform/cloud/onboarding/CloudForgotPasswordView.vue new file mode 100644 index 0000000000..411d086fb8 --- /dev/null +++ b/src/platform/cloud/onboarding/CloudForgotPasswordView.vue @@ -0,0 +1,126 @@ + + + + diff --git a/src/platform/cloud/onboarding/CloudLoginView.vue b/src/platform/cloud/onboarding/CloudLoginView.vue new file mode 100644 index 0000000000..64e50a3c94 --- /dev/null +++ b/src/platform/cloud/onboarding/CloudLoginView.vue @@ -0,0 +1,131 @@ + + + diff --git a/src/platform/cloud/onboarding/CloudSignupView.vue b/src/platform/cloud/onboarding/CloudSignupView.vue new file mode 100644 index 0000000000..bcd49a3884 --- /dev/null +++ b/src/platform/cloud/onboarding/CloudSignupView.vue @@ -0,0 +1,177 @@ + + + + diff --git a/src/platform/cloud/onboarding/CloudSorryContactSupportView.vue b/src/platform/cloud/onboarding/CloudSorryContactSupportView.vue new file mode 100644 index 0000000000..b3ceba97d8 --- /dev/null +++ b/src/platform/cloud/onboarding/CloudSorryContactSupportView.vue @@ -0,0 +1,22 @@ + + + + + diff --git a/src/platform/cloud/onboarding/CloudSurveyView.vue b/src/platform/cloud/onboarding/CloudSurveyView.vue new file mode 100644 index 0000000000..45fc0680fa --- /dev/null +++ b/src/platform/cloud/onboarding/CloudSurveyView.vue @@ -0,0 +1,387 @@ + + + + + diff --git a/src/platform/cloud/onboarding/UserCheckView.vue b/src/platform/cloud/onboarding/UserCheckView.vue new file mode 100644
index 0000000000..1260327280 --- /dev/null +++ b/src/platform/cloud/onboarding/UserCheckView.vue @@ -0,0 +1,102 @@ + + + diff --git a/src/platform/cloud/onboarding/assets/css/fonts.css b/src/platform/cloud/onboarding/assets/css/fonts.css new file mode 100644 index 0000000000..a3464f5f2d --- /dev/null +++ b/src/platform/cloud/onboarding/assets/css/fonts.css @@ -0,0 +1,33 @@ +/* ABC ROM Extended — full face mapping */ +@font-face { + font-family: 'ABC ROM Extended'; + src: + local('ABC ROM Extended Black Italic'), + local('ABCRom BlackItalic'), + url('../fonts/ABCROMExtended-BlackItalic.woff2') format('woff2'), + url('../fonts/ABCROMExtended-BlackItalic.woff') format('woff'); + font-weight: 900; + font-style: italic; + font-display: swap; +} + +/* Prevent browser from synthesizing fake bold/italic which can cause mismatches */ +.hero-title, +.font-abcrom { + font-family: 'ABC ROM Extended', sans-serif; + font-synthesis: none; /* no faux bold/italic */ + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; + text-rendering: optimizeLegibility; +} + +/* Figma-like hero style */ +.hero-title { + font-size: 32px; + font-weight: 900; + font-style: italic; + text-transform: uppercase; + text-shadow: 0 4px 4px rgb(0 0 0 / 0.25); + /* Figma has leading-trim/text-edge which CSS doesn't support; emulate with tight line-height */ + line-height: 1.1; +} \ No newline at end of file diff --git a/src/platform/cloud/onboarding/assets/fonts/ABCROMExtended-BlackItalic.woff b/src/platform/cloud/onboarding/assets/fonts/ABCROMExtended-BlackItalic.woff new file mode 100644 index 0000000000..5d14e46a7e Binary files /dev/null and b/src/platform/cloud/onboarding/assets/fonts/ABCROMExtended-BlackItalic.woff differ diff --git a/src/platform/cloud/onboarding/assets/fonts/ABCROMExtended-BlackItalic.woff2 b/src/platform/cloud/onboarding/assets/fonts/ABCROMExtended-BlackItalic.woff2 new file mode 100644 index 0000000000..1e8bb595f8 Binary files /dev/null and b/src/platform/cloud/onboarding/assets/fonts/ABCROMExtended-BlackItalic.woff2 differ diff --git a/src/platform/cloud/onboarding/assets/videos/thumbnail.png b/src/platform/cloud/onboarding/assets/videos/thumbnail.png new file mode 100644 index 0000000000..e5266b6a6c Binary files /dev/null and b/src/platform/cloud/onboarding/assets/videos/thumbnail.png differ diff --git a/src/platform/cloud/onboarding/assets/videos/video.mp4 b/src/platform/cloud/onboarding/assets/videos/video.mp4 new file mode 100644 index 0000000000..b179abb18d Binary files /dev/null and b/src/platform/cloud/onboarding/assets/videos/video.mp4 differ diff --git a/src/platform/cloud/onboarding/auth.ts b/src/platform/cloud/onboarding/auth.ts new file mode 100644 index 0000000000..75d183dc66 --- /dev/null +++ b/src/platform/cloud/onboarding/auth.ts @@ -0,0 +1,235 @@ +import * as Sentry from '@sentry/vue' +import { isEmpty } from 'es-toolkit/compat' + +import { api } from '@/scripts/api' + +interface UserCloudStatus { + status: 'active' +} + +const ONBOARDING_SURVEY_KEY = 'onboarding_survey' + +/** + * Helper function to capture API errors with Sentry + */ +function captureApiError( + error: Error, + endpoint: string, + errorType: 'http_error' | 'network_error', + httpStatus?: number, + operation?: string, + extraContext?: Record +) { + const tags: Record = { + api_endpoint: endpoint, + error_type: errorType + } + + if (httpStatus !== undefined) { + tags.http_status = httpStatus + } + + if (operation) { + tags.operation = operation + } + + const sentryOptions: any = { + tags, + extra: 
extraContext ? { ...extraContext } : undefined + } + + Sentry.captureException(error, sentryOptions) +} + +/** + * Helper function to check if error is already handled HTTP error + */ +function isHttpError(error: unknown, errorMessagePrefix: string): boolean { + return error instanceof Error && error.message.startsWith(errorMessagePrefix) +} + +export async function getUserCloudStatus(): Promise { + try { + const response = await api.fetchApi('/user', { + method: 'GET', + headers: { + 'Content-Type': 'application/json' + } + }) + if (!response.ok) { + const error = new Error(`Failed to get user: ${response.statusText}`) + captureApiError( + error, + '/user', + 'http_error', + response.status, + undefined, + { + api: { + method: 'GET', + endpoint: '/user', + status_code: response.status, + status_text: response.statusText + } + } + ) + throw error + } + + return response.json() + } catch (error) { + // Only capture network errors (not HTTP errors we already captured) + if (!isHttpError(error, 'Failed to get user:')) { + captureApiError(error as Error, '/user', 'network_error') + } + throw error + } +} + +export async function getSurveyCompletedStatus(): Promise { + try { + const response = await api.fetchApi(`/settings/${ONBOARDING_SURVEY_KEY}`, { + method: 'GET', + headers: { + 'Content-Type': 'application/json' + } + }) + if (!response.ok) { + // Not an error case - survey not completed is a valid state + Sentry.addBreadcrumb({ + category: 'auth', + message: 'Survey status check returned non-ok response', + level: 'info', + data: { + status: response.status, + endpoint: `/settings/${ONBOARDING_SURVEY_KEY}` + } + }) + return false + } + const data = await response.json() + // Check if data exists and is not empty + return !isEmpty(data.value) + } catch (error) { + // Network error - still capture it as it's not thrown from above + Sentry.captureException(error, { + tags: { + api_endpoint: '/settings/{key}', + error_type: 'network_error' + }, + extra: { + route_template: '/settings/{key}', + route_actual: `/settings/${ONBOARDING_SURVEY_KEY}` + }, + level: 'warning' + }) + return false + } +} + +// @ts-expect-error - Unused function kept for future use +async function postSurveyStatus(): Promise { + try { + const response = await api.fetchApi(`/settings/${ONBOARDING_SURVEY_KEY}`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ [ONBOARDING_SURVEY_KEY]: undefined }) + }) + + if (!response.ok) { + const error = new Error( + `Failed to post survey status: ${response.statusText}` + ) + captureApiError( + error, + '/settings/{key}', + 'http_error', + response.status, + 'post_survey_status', + { + route_template: '/settings/{key}', + route_actual: `/settings/${ONBOARDING_SURVEY_KEY}` + } + ) + throw error + } + } catch (error) { + // Only capture network errors (not HTTP errors we already captured) + if (!isHttpError(error, 'Failed to post survey status:')) { + captureApiError( + error as Error, + '/settings/{key}', + 'network_error', + undefined, + 'post_survey_status', + { + route_template: '/settings/{key}', + route_actual: `/settings/${ONBOARDING_SURVEY_KEY}` + } + ) + } + throw error + } +} + +export async function submitSurvey( + survey: Record +): Promise { + try { + Sentry.addBreadcrumb({ + category: 'auth', + message: 'Submitting survey', + level: 'info', + data: { + survey_fields: Object.keys(survey) + } + }) + + const response = await api.fetchApi('/settings', { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + 
+      body: JSON.stringify({ [ONBOARDING_SURVEY_KEY]: survey })
+    })
+
+    if (!response.ok) {
+      const error = new Error(`Failed to submit survey: ${response.statusText}`)
+      captureApiError(
+        error,
+        '/settings',
+        'http_error',
+        response.status,
+        'submit_survey',
+        {
+          survey: {
+            field_count: Object.keys(survey).length,
+            field_names: Object.keys(survey)
+          }
+        }
+      )
+      throw error
+    }
+
+    // Log successful survey submission
+    Sentry.addBreadcrumb({
+      category: 'auth',
+      message: 'Survey submitted successfully',
+      level: 'info'
+    })
+  } catch (error) {
+    // Only capture network errors (not HTTP errors we already captured)
+    if (!isHttpError(error, 'Failed to submit survey:')) {
+      captureApiError(
+        error as Error,
+        '/settings',
+        'network_error',
+        undefined,
+        'submit_survey'
+      )
+    }
+    throw error
+  }
+}
diff --git a/src/platform/cloud/onboarding/components/CloudLayoutView.vue b/src/platform/cloud/onboarding/components/CloudLayoutView.vue
new file mode 100644
index 0000000000..fde68f7da2
--- /dev/null
+++ b/src/platform/cloud/onboarding/components/CloudLayoutView.vue
@@ -0,0 +1,16 @@
+
+
+
diff --git a/src/platform/cloud/onboarding/components/CloudLogo.vue b/src/platform/cloud/onboarding/components/CloudLogo.vue
new file mode 100644
index 0000000000..c3cf3fbe79
--- /dev/null
+++ b/src/platform/cloud/onboarding/components/CloudLogo.vue
@@ -0,0 +1,9 @@
+
diff --git a/src/platform/cloud/onboarding/components/CloudSignInForm.vue b/src/platform/cloud/onboarding/components/CloudSignInForm.vue
new file mode 100644
index 0000000000..a7c85971de
--- /dev/null
+++ b/src/platform/cloud/onboarding/components/CloudSignInForm.vue
@@ -0,0 +1,128 @@
+
+
+
diff --git a/src/platform/cloud/onboarding/components/CloudTemplate.vue b/src/platform/cloud/onboarding/components/CloudTemplate.vue
new file mode 100644
index 0000000000..9185725f07
--- /dev/null
+++ b/src/platform/cloud/onboarding/components/CloudTemplate.vue
@@ -0,0 +1,80 @@
+
+
+
diff --git a/src/platform/cloud/onboarding/components/CloudTemplateFooter.vue b/src/platform/cloud/onboarding/components/CloudTemplateFooter.vue
new file mode 100644
index 0000000000..38c3b77225
--- /dev/null
+++ b/src/platform/cloud/onboarding/components/CloudTemplateFooter.vue
@@ -0,0 +1,32 @@
+
+
diff --git a/src/platform/cloud/onboarding/onboardingCloudRoutes.ts b/src/platform/cloud/onboarding/onboardingCloudRoutes.ts
new file mode 100644
index 0000000000..83fed65e8a
--- /dev/null
+++ b/src/platform/cloud/onboarding/onboardingCloudRoutes.ts
@@ -0,0 +1,72 @@
+import type { RouteRecordRaw } from 'vue-router'
+
+export const cloudOnboardingRoutes: RouteRecordRaw[] = [
+  {
+    path: '/cloud',
+    component: () =>
+      import('@/platform/cloud/onboarding/components/CloudLayoutView.vue'),
+    children: [
+      {
+        path: 'login',
+        name: 'cloud-login',
+        component: () =>
+          import('@/platform/cloud/onboarding/CloudLoginView.vue'),
+        beforeEnter: async (to, _from, next) => {
+          // Only redirect if not explicitly switching accounts
+          if (!to.query.switchAccount) {
+            const { useCurrentUser } = await import(
+              '@/composables/auth/useCurrentUser'
+            )
+            const { isLoggedIn } = useCurrentUser()
+
+            if (isLoggedIn.value) {
+              // User is already logged in, redirect to user-check;
+              // user-check will handle survey or main page routing
+              return next({ name: 'cloud-user-check' })
+            }
+          }
+          next()
+        }
+      },
+      {
+        path: 'signup',
+        name: 'cloud-signup',
+        component: () =>
+          import('@/platform/cloud/onboarding/CloudSignupView.vue')
+      },
+      {
+        path: 'forgot-password',
+        name: 'cloud-forgot-password',
+        component: () =>
+          import('@/platform/cloud/onboarding/CloudForgotPasswordView.vue')
+      },
+      {
+        path: 'survey',
+        name: 'cloud-survey',
+        component: () =>
+          import('@/platform/cloud/onboarding/CloudSurveyView.vue'),
+        meta: { requiresAuth: true }
+      },
+      {
+        path: 'user-check',
+        name: 'cloud-user-check',
+        component: () =>
+          import('@/platform/cloud/onboarding/UserCheckView.vue'),
+        meta: { requiresAuth: true }
+      },
+      {
+        path: 'sorry-contact-support',
+        name: 'cloud-sorry-contact-support',
+        component: () =>
+          import('@/platform/cloud/onboarding/CloudSorryContactSupportView.vue')
+      },
+      {
+        path: 'auth-timeout',
+        name: 'cloud-auth-timeout',
+        component: () =>
+          import('@/platform/cloud/onboarding/CloudAuthTimeoutView.vue'),
+        props: true
+      }
+    ]
+  }
+]
diff --git a/src/platform/cloud/onboarding/skeletons/CloudLoginViewSkeleton.vue b/src/platform/cloud/onboarding/skeletons/CloudLoginViewSkeleton.vue
new file mode 100644
index 0000000000..9727cb6bbf
--- /dev/null
+++ b/src/platform/cloud/onboarding/skeletons/CloudLoginViewSkeleton.vue
@@ -0,0 +1,47 @@
+
+
diff --git a/src/platform/cloud/onboarding/skeletons/CloudSurveyViewSkeleton.vue b/src/platform/cloud/onboarding/skeletons/CloudSurveyViewSkeleton.vue
new file mode 100644
index 0000000000..1632ecfbc5
--- /dev/null
+++ b/src/platform/cloud/onboarding/skeletons/CloudSurveyViewSkeleton.vue
@@ -0,0 +1,30 @@
+
+
diff --git a/src/platform/remoteConfig/types.ts b/src/platform/remoteConfig/types.ts
index 010c00ad26..939c6bbf18 100644
--- a/src/platform/remoteConfig/types.ts
+++ b/src/platform/remoteConfig/types.ts
@@ -14,7 +14,6 @@ type ServerHealthAlert = {
  */
 export type RemoteConfig = {
   mixpanel_token?: string
-  require_whitelist?: boolean
   subscription_required?: boolean
   server_health_alert?: ServerHealthAlert
 }
diff --git a/src/platform/telemetry/providers/cloud/MixpanelTelemetryProvider.ts b/src/platform/telemetry/providers/cloud/MixpanelTelemetryProvider.ts
index 419ab6329d..d11c884544 100644
--- a/src/platform/telemetry/providers/cloud/MixpanelTelemetryProvider.ts
+++ b/src/platform/telemetry/providers/cloud/MixpanelTelemetryProvider.ts
@@ -146,6 +146,10 @@ export class MixpanelTelemetryProvider implements TelemetryProvider {
     }
   }
 
+  trackSignupOpened(): void {
+    this.trackEvent(TelemetryEvents.USER_SIGN_UP_OPENED)
+  }
+
   trackAuth(metadata: AuthMetadata): void {
     this.trackEvent(TelemetryEvents.USER_AUTH_COMPLETED, metadata)
   }
diff --git a/src/platform/telemetry/types.ts b/src/platform/telemetry/types.ts
index f16304e7c8..d089c886a9 100644
--- a/src/platform/telemetry/types.ts
+++ b/src/platform/telemetry/types.ts
@@ -258,6 +258,7 @@ export interface WorkflowCreatedMetadata {
  */
 export interface TelemetryProvider {
   // Authentication flow events
+  trackSignupOpened(): void
   trackAuth(metadata: AuthMetadata): void
   trackUserLoggedIn(): void
 
@@ -334,6 +335,7 @@
  */
 export const TelemetryEvents = {
   // Authentication Flow
+  USER_SIGN_UP_OPENED: 'app:user_sign_up_opened',
   USER_AUTH_COMPLETED: 'app:user_auth_completed',
   USER_LOGGED_IN: 'app:user_logged_in',
 
diff --git a/src/renderer/extensions/vueNodes/components/LGraphNode.vue b/src/renderer/extensions/vueNodes/components/LGraphNode.vue
index ead65d2ccd..13979f6651 100644
--- a/src/renderer/extensions/vueNodes/components/LGraphNode.vue
+++ b/src/renderer/extensions/vueNodes/components/LGraphNode.vue
@@ -8,10 +8,10 @@
     :data-node-id="nodeData.id"
     :class="
       cn(
-        'bg-node-component-surface lg-node absolute',
+        'bg-component-node-background lg-node absolute',
         'h-min w-min contain-style contain-layout min-h-(--node-height) min-w-(--node-width)',
         'rounded-2xl touch-none flex flex-col',
-        'border-1 border-solid border-node-component-border',
+        'border-1 border-solid border-component-node-border',
         // hover (only when node should handle events)
         shouldHandleNodePointerEvents &&
           'hover:ring-7 ring-node-component-ring',
@@ -23,7 +23,8 @@
            bypassed,
          'before:rounded-2xl before:pointer-events-none before:absolute before:inset-0':
            muted,
-          'will-change-transform': isDragging
+          'will-change-transform': isDragging,
+          'ring-4 ring-primary-500 bg-primary-500/10': isDraggingOver
         },
         shouldHandleNodePointerEvents
@@ -36,13 +37,16 @@
        transform: `translate(${position.x ?? 0}px, ${(position.y ?? 0) - LiteGraph.NODE_TITLE_HEIGHT}px)`,
        zIndex: zIndex,
        opacity: nodeOpacity,
-       '--node-component-surface': nodeBodyBackgroundColor
+       '--component-node-background': nodeBodyBackgroundColor
      },
      dragStyle
    ]"
    v-bind="pointerHandlers"
    @wheel="handleWheel"
    @contextmenu="handleContextMenu"
+   @dragover.prevent="handleDragOver"
+   @dragleave="handleDragLeave"
+   @drop="handleDrop"
  >
diff --git a/src/renderer/extensions/vueNodes/components/NodeHeader.vue b/src/renderer/extensions/vueNodes/components/NodeHeader.vue
index 5758fed622..da3b5a9f0e 100644
--- a/src/renderer/extensions/vueNodes/components/NodeHeader.vue
+++ b/src/renderer/extensions/vueNodes/components/NodeHeader.vue
@@ -7,7 +7,7 @@
     :class="
       cn(
         'lg-node-header p-4 rounded-t-2xl w-full min-w-50',
-        'bg-node-component-header-surface text-node-component-header',
+        'text-node-component-header',
         collapsed && 'rounded-2xl'
       )
     "
diff --git a/src/renderer/extensions/vueNodes/widgets/components/WidgetFileUpload.vue b/src/renderer/extensions/vueNodes/widgets/components/WidgetFileUpload.vue
index 60b9aeb288..912bfb2f93 100644
--- a/src/renderer/extensions/vueNodes/widgets/components/WidgetFileUpload.vue
+++ b/src/renderer/extensions/vueNodes/widgets/components/WidgetFileUpload.vue
@@ -26,7 +26,7 @@
         size="small"
         :pt="{
           option: 'text-xs',
-          dropdownIcon: 'text-button-icon'
+          dropdownIcon: 'text-component-node-foreground-secondary'
         }"
       />
@@ -116,7 +120,7 @@ const inputNumberPt = useNumberWidgetButtonPt({