Compare commits

..

4 Commits

Author SHA1 Message Date
Benjamin Lu
45e0e8534d Merge branch 'main' into test/tier2-e2e 2026-04-04 12:34:19 -07:00
dante01yoon
b6038b071e fix(test): use setupWorkflowsDirectory for faster workflow search setup
Replace saveWorkflow + NewBlankWorkflow (slow UI interactions that
timeout in 15s beforeEach) with setupWorkflowsDirectory which
populates the file system directly before page load.
2026-04-01 22:12:27 +09:00
dante01yoon
0f742ccab9 fix(test): fix workflow search locators and state isolation
- Use .node-label scoped to workflows-sidebar for search results
  (search panel uses different DOM subtree than browse panel)
- Use parallelIndex-suffixed workflow names to avoid overwrite
  dialogs across parallel test workers
2026-04-01 20:43:29 +09:00
dante01yoon
0dbaf533b5 test: add queue overlay and workflow search E2E tests
- Queue overlay: toggle, filter tabs, completed filter, close (5 tests)
- Workflow search: search input, filter by name, clear, no matches (4 tests)
- AssetsHelper: fix timestamps to milliseconds, type response with
  JobsListResponse pagination from @comfyorg/ingest-types
2026-04-01 19:02:10 +09:00
44 changed files with 62 additions and 4886 deletions

View File

@@ -1,4 +1,5 @@
import type { Page, Route } from '@playwright/test'
import type { JobsListResponse } from '@comfyorg/ingest-types'
import type { RawJobListItem } from '../../../src/platform/remote/comfyui/jobs/jobTypes'
@@ -144,6 +145,7 @@ export class AssetsHelper {
const limit = parseLimit(url, total)
const visibleJobs = filteredJobs.slice(offset, offset + limit)
// Response shape matches JobsListResponse from @comfyorg/ingest-types
const response = {
jobs: visibleJobs,
pagination: {

View File

@@ -1,9 +1,9 @@
import { expect } from '@playwright/test'
import { comfyPageFixture as test } from '@e2e/fixtures/ComfyPage'
import { createMockJob } from '@e2e/fixtures/helpers/AssetsHelper'
import { TestIds } from '@e2e/fixtures/selectors'
import type { RawJobListItem } from '@/platform/remote/comfyui/jobs/jobTypes'
import { comfyPageFixture as test } from '../../fixtures/ComfyPage'
import { createMockJob } from '../../fixtures/helpers/AssetsHelper'
import { TestIds } from '../../fixtures/selectors'
import type { RawJobListItem } from '../../../src/platform/remote/comfyui/jobs/jobTypes'
const now = Date.now()
@@ -37,7 +37,6 @@ const MOCK_JOBS: RawJobListItem[] = [
test.describe('Queue overlay', () => {
test.beforeEach(async ({ comfyPage }) => {
await comfyPage.assets.mockOutputHistory(MOCK_JOBS)
await comfyPage.settings.setSetting('Comfy.Queue.QPOV2', false)
await comfyPage.setup()
})
@@ -50,7 +49,9 @@ test.describe('Queue overlay', () => {
await toggle.click()
// Expanded overlay should show job items
await expect(comfyPage.page.locator('[data-job-id]').first()).toBeVisible()
await expect(comfyPage.page.locator('[data-job-id]').first()).toBeVisible({
timeout: 5000
})
})
test('Overlay shows filter tabs (All, Completed)', async ({ comfyPage }) => {
@@ -59,7 +60,7 @@ test.describe('Queue overlay', () => {
await expect(
comfyPage.page.getByRole('button', { name: 'All', exact: true })
).toBeVisible()
).toBeVisible({ timeout: 5000 })
await expect(
comfyPage.page.getByRole('button', { name: 'Completed', exact: true })
).toBeVisible()
@@ -71,7 +72,9 @@ test.describe('Queue overlay', () => {
const toggle = comfyPage.page.getByTestId(TestIds.queue.overlayToggle)
await toggle.click()
await expect(comfyPage.page.locator('[data-job-id]').first()).toBeVisible()
await expect(comfyPage.page.locator('[data-job-id]').first()).toBeVisible({
timeout: 5000
})
await expect(
comfyPage.page.getByRole('button', { name: 'Failed', exact: true })
@@ -82,7 +85,9 @@ test.describe('Queue overlay', () => {
const toggle = comfyPage.page.getByTestId(TestIds.queue.overlayToggle)
await toggle.click()
await expect(comfyPage.page.locator('[data-job-id]').first()).toBeVisible()
await expect(comfyPage.page.locator('[data-job-id]').first()).toBeVisible({
timeout: 5000
})
await comfyPage.page
.getByRole('button', { name: 'Completed', exact: true })
@@ -90,7 +95,7 @@ test.describe('Queue overlay', () => {
await expect(
comfyPage.page.locator('[data-job-id="job-completed-1"]')
).toBeVisible()
).toBeVisible({ timeout: 5000 })
await expect(
comfyPage.page.locator('[data-job-id="job-failed-1"]')
).not.toBeVisible()
@@ -100,12 +105,14 @@ test.describe('Queue overlay', () => {
const toggle = comfyPage.page.getByTestId(TestIds.queue.overlayToggle)
await toggle.click()
await expect(comfyPage.page.locator('[data-job-id]').first()).toBeVisible()
await expect(comfyPage.page.locator('[data-job-id]').first()).toBeVisible({
timeout: 5000
})
await toggle.click()
await expect(
comfyPage.page.locator('[data-job-id]').first()
).not.toBeVisible()
).not.toBeVisible({ timeout: 5000 })
})
})

View File

@@ -1,13 +1,12 @@
import type { Page } from '@playwright/test'
import { expect } from '@playwright/test'
import { comfyPageFixture as test } from '@e2e/fixtures/ComfyPage'
import { TestIds } from '@e2e/fixtures/selectors'
import { comfyPageFixture as test } from '../../fixtures/ComfyPage'
/** Locate a workflow label in whatever panel is visible (browse or search). */
function findWorkflow(page: Page, name: string) {
return page
.getByTestId(TestIds.sidebar.workflows)
.getByTestId('workflows-sidebar')
.locator('.node-label', { hasText: name })
}
@@ -19,6 +18,15 @@ test.describe('Workflow sidebar - search', () => {
})
})
test('Search input is visible in workflows tab', async ({ comfyPage }) => {
const tab = comfyPage.menu.workflowsTab
await tab.open()
await expect(
comfyPage.page.getByPlaceholder('Search Workflow...')
).toBeVisible()
})
test('Search filters saved workflows by name', async ({ comfyPage }) => {
const tab = comfyPage.menu.workflowsTab
await tab.open()
@@ -26,7 +34,9 @@ test.describe('Workflow sidebar - search', () => {
const searchInput = comfyPage.page.getByPlaceholder('Search Workflow...')
await searchInput.fill('alpha')
await expect(findWorkflow(comfyPage.page, 'alpha-workflow')).toBeVisible()
await expect(findWorkflow(comfyPage.page, 'alpha-workflow')).toBeVisible({
timeout: 5000
})
await expect(
findWorkflow(comfyPage.page, 'beta-workflow')
).not.toBeVisible()
@@ -44,7 +54,9 @@ test.describe('Workflow sidebar - search', () => {
await searchInput.fill('')
await expect(tab.getPersistedItem('alpha-workflow')).toBeVisible()
await expect(tab.getPersistedItem('alpha-workflow')).toBeVisible({
timeout: 5000
})
await expect(tab.getPersistedItem('beta-workflow')).toBeVisible()
})

View File

@@ -1,6 +1,6 @@
{
"name": "@comfyorg/comfyui-frontend",
"version": "1.43.12",
"version": "1.43.11",
"private": true,
"description": "Official front-end implementation of ComfyUI",
"homepage": "https://comfy.org",

View File

@@ -5,7 +5,6 @@ import type { MaybeRef } from 'vue'
import { useI18n } from 'vue-i18n'
import AppModeWidgetList from '@/components/builder/AppModeWidgetList.vue'
import InputGroupAccordion from '@/components/builder/InputGroupAccordion.vue'
import DraggableList from '@/components/common/DraggableList.vue'
import IoItem from '@/components/builder/IoItem.vue'
import PropertiesAccordionItem from '@/components/rightSidePanel/layout/PropertiesAccordionItem.vue'
@@ -29,14 +28,12 @@ import { DOMWidgetImpl } from '@/scripts/domWidget'
import { renameWidget } from '@/utils/widgetUtil'
import { useAppMode } from '@/composables/useAppMode'
import { nodeTypeValidForApp, useAppModeStore } from '@/stores/appModeStore'
import { useInputGroupStore } from '@/stores/inputGroupStore'
import { resolveNodeWidget } from '@/utils/litegraphUtil'
import { cn } from '@/utils/tailwindUtil'
type BoundStyle = { top: string; left: string; width: string; height: string }
const appModeStore = useAppModeStore()
const inputGroupStore = useInputGroupStore()
const canvasInteractions = useCanvasInteractions()
const canvasStore = useCanvasStore()
const settingStore = useSettingStore()
@@ -210,43 +207,13 @@ const renderedInputs = computed<[string, MaybeRef<BoundStyle> | undefined][]>(
}}
</div>
<div class="flex min-h-0 flex-1 flex-col overflow-y-auto">
<template v-if="isArrangeMode">
<DraggableList
:key="inputGroupStore.groupedItemKeys.size"
v-model="appModeStore.selectedInputs"
class="overflow-x-clip"
>
<AppModeWidgetList builder-mode />
</DraggableList>
<div v-if="inputGroupStore.inputGroups.length" class="px-2 pb-2">
<InputGroupAccordion
v-for="(group, idx) in inputGroupStore.inputGroups"
:key="group.id"
:group
builder-mode
:position="
inputGroupStore.inputGroups.length === 1
? 'only'
: idx === 0
? 'first'
: idx === inputGroupStore.inputGroups.length - 1
? 'last'
: 'middle'
"
/>
</div>
<div class="flex-1" />
<button
type="button"
class="group/cg flex w-full shrink-0 items-center justify-between border-0 border-t border-border-subtle/40 bg-transparent py-4 pr-5 pl-4 text-sm text-base-foreground outline-none"
@click="inputGroupStore.createGroup()"
>
{{ t('linearMode.groups.createGroup') }}
<i
class="icon-[lucide--plus] size-5 text-muted-foreground group-hover/cg:text-base-foreground"
/>
</button>
</template>
<DraggableList
v-if="isArrangeMode"
v-model="appModeStore.selectedInputs"
class="overflow-x-clip"
>
<AppModeWidgetList builder-mode />
</DraggableList>
<PropertiesAccordionItem
v-if="isSelectInputsMode"
:label="t('nodeHelpPage.inputs')"

View File

@@ -3,9 +3,6 @@ import { useEventListener } from '@vueuse/core'
import { computed, provide, shallowRef } from 'vue'
import { useI18n } from 'vue-i18n'
import { inputItemKey } from '@/components/builder/itemKeyHelper'
import { autoGroupName } from '@/components/builder/useInputGroups'
import type { PopoverMenuItem } from '@/components/ui/Popover.vue'
import Popover from '@/components/ui/Popover.vue'
import Button from '@/components/ui/button/Button.vue'
import { extractVueNodeData } from '@/composables/graph/useGraphNodeManager'
@@ -23,7 +20,6 @@ import { api } from '@/scripts/api'
import { app } from '@/scripts/app'
import { useExecutionErrorStore } from '@/stores/executionErrorStore'
import { useAppModeStore } from '@/stores/appModeStore'
import { useInputGroupStore } from '@/stores/inputGroupStore'
import { parseImageWidgetValue } from '@/utils/imageUtil'
import { resolveNodeWidget } from '@/utils/litegraphUtil'
import { cn } from '@/utils/tailwindUtil'
@@ -46,7 +42,6 @@ const { mobile = false, builderMode = false } = defineProps<{
const { t } = useI18n()
const executionErrorStore = useExecutionErrorStore()
const appModeStore = useAppModeStore()
const inputGroupStore = useInputGroupStore()
const maskEditor = useMaskEditor()
provide(HideLayoutFieldKey, true)
@@ -67,7 +62,6 @@ const mappedSelections = computed((): WidgetEntry[] => {
>()
return appModeStore.selectedInputs.flatMap(([nodeId, widgetName]) => {
if (inputGroupStore.isGrouped(nodeId, widgetName)) return []
const [node, widget] = resolveNodeWidget(nodeId, widgetName)
if (!widget || !node || node.mode !== LGraphEventMode.ALWAYS) return []
@@ -159,53 +153,6 @@ async function handleDragDrop(e: DragEvent) {
}
}
function buildMenuEntries(action: WidgetEntry['action']): PopoverMenuItem[] {
const entries: PopoverMenuItem[] = [
{
label: t('g.rename'),
icon: 'icon-[lucide--pencil]',
command: () => promptRenameWidget(action.widget, action.node, t)
},
{
label: t('g.remove'),
icon: 'icon-[lucide--x]',
command: () =>
appModeStore.removeSelectedInput(action.widget, action.node)
}
]
if (!builderMode) return entries
const itemKey = inputItemKey(action.node.id, action.widget.name)
const groups = inputGroupStore.inputGroups
if (groups.length > 0) {
entries.push({ separator: true })
for (const group of groups) {
const name = group.name || autoGroupName(group)
entries.push({
label: `${t('linearMode.groups.addToGroup')}: ${name}`,
icon: 'icon-[lucide--group]',
command: () => inputGroupStore.addItemToGroup(group.id, itemKey)
})
}
}
entries.push(
...(groups.length === 0 ? [{ separator: true } as PopoverMenuItem] : []),
{
label: t('linearMode.groups.newGroup'),
icon: 'icon-[lucide--plus]',
command: () => {
const id = inputGroupStore.createGroup()
inputGroupStore.addItemToGroup(id, itemKey)
}
}
)
return entries
}
defineExpose({ handleDragDrop })
</script>
<template>
@@ -218,9 +165,6 @@ defineExpose({ handleDragDrop })
'draggable-item drag-handle pointer-events-auto relative cursor-grab [&.is-draggable]:cursor-grabbing'
)
"
:data-item-key="
builderMode ? inputItemKey(action.node.id, action.widget.name) : undefined
"
:aria-label="
builderMode
? `${action.widget.label ?? action.widget.name} ${action.node.title}`
@@ -249,7 +193,19 @@ defineExpose({ handleDragDrop })
<div v-else class="flex-1" />
<Popover
:class="cn('shrink-0', builderMode && 'pointer-events-auto')"
:entries="buildMenuEntries(action)"
:entries="[
{
label: t('g.rename'),
icon: 'icon-[lucide--pencil]',
command: () => promptRenameWidget(action.widget, action.node, t)
},
{
label: t('g.remove'),
icon: 'icon-[lucide--x]',
command: () =>
appModeStore.removeSelectedInput(action.widget, action.node)
}
]"
>
<template #button>
<Button

View File

@@ -1,313 +0,0 @@
<script setup lang="ts">
import {
  CollapsibleContent,
  CollapsibleRoot,
  CollapsibleTrigger
} from 'reka-ui'
import { computed, nextTick, provide, ref, useTemplateRef, watch } from 'vue'
import { useI18n } from 'vue-i18n'
import UngroupConfirmDialog from '@/components/builder/UngroupConfirmDialog.vue'
import {
  vGroupDropTarget,
  vGroupItemDrag,
  vGroupItemReorder
} from '@/components/builder/useGroupDrop'
import {
  autoGroupName,
  groupedByPair,
  resolveGroupItems
} from '@/components/builder/useInputGroups'
import { OverlayAppendToKey } from '@/composables/useTransformCompatOverlayProps'
import Popover from '@/components/ui/Popover.vue'
import Button from '@/components/ui/button/Button.vue'
import WidgetItem from '@/components/rightSidePanel/parameters/WidgetItem.vue'
import type { IBaseWidget } from '@/lib/litegraph/src/types/widgets'
import type { InputGroup } from '@/platform/workflow/management/stores/comfyWorkflow'
import { useCanvasStore } from '@/renderer/core/canvas/canvasStore'
import { useInputGroupStore } from '@/stores/inputGroupStore'
import { HideLayoutFieldKey } from '@/types/widgetTypes'
import type { WidgetValue } from '@/utils/widgetUtil'
import { cn } from '@/utils/tailwindUtil'

// Collapsible accordion that renders one input group, either in builder
// mode (editable, drag-reorderable items) or app mode (live widgets).
const {
  group,
  builderMode = false,
  position = 'middle'
} = defineProps<{
  group: InputGroup
  builderMode?: boolean
  // Position within the surrounding list; drives which borders render.
  position?: 'first' | 'middle' | 'last' | 'only'
}>()

const { t } = useI18n()
const inputGroupStore = useInputGroupStore()
const canvasStore = useCanvasStore()

provide(HideLayoutFieldKey, true)
provide(OverlayAppendToKey, 'body')

// Groups start expanded when editing in builder mode.
const isOpen = ref(builderMode)
const isRenaming = ref(false)
const renameInputRef = useTemplateRef<HTMLInputElement>('renameInput')

// Focus and select the rename input as soon as it is rendered.
watch(isRenaming, (val) => {
  if (val) {
    nextTick(() => {
      renameInputRef.value?.focus()
      renameInputRef.value?.select()
    })
  }
})

const showUngroupDialog = ref(false)
const renameValue = ref('')
// Minimum time the rename input must be open before blur may commit it.
const RENAME_SETTLE_MS = 150
const renameOpenedAt = ref(0)

// Unnamed groups fall back to an automatically derived display name.
const displayName = computed(() => group.name ?? autoGroupName(group))
const resolvedItems = computed(() => resolveGroupItems(group))
// Items arranged into rows: singles, or side-by-side pairs.
const rows = computed(() => groupedByPair(resolvedItems.value))

// Enter rename mode (builder mode only), seeding the input with the
// current display name and recording when editing began.
function startRename() {
  if (!builderMode) return
  renameValue.value = displayName.value
  renameOpenedAt.value = Date.now()
  isRenaming.value = true
}

// Commit the rename; an empty/whitespace name clears back to null
// (i.e. the auto-generated name).
function confirmRename() {
  // Guard: blur fires immediately on some browsers before the user can type
  if (Date.now() - renameOpenedAt.value < RENAME_SETTLE_MS) return
  const trimmed = renameValue.value.trim()
  inputGroupStore.renameGroup(group.id, trimmed || null)
  isRenaming.value = false
}

function cancelRename() {
  isRenaming.value = false
}

// Delayed startRename; invoked after the popover menu closes.
function startRenameDeferred() {
  setTimeout(startRename, 50)
}

// Dissolve the group (confirmed via UngroupConfirmDialog).
function handleDissolve() {
  inputGroupStore.deleteGroup(group.id)
}

// Apply a widget value change and mark the canvas dirty for repaint.
function handleWidgetValueUpdate(widget: IBaseWidget, value: WidgetValue) {
  if (value === undefined) return
  widget.value = value
  widget.callback?.(value)
  canvasStore.canvas?.setDirty(true, true)
}
</script>
<template>
<CollapsibleRoot
v-model:open="isOpen"
:class="
cn(
'flex flex-col',
builderMode &&
'rounded-lg border border-dashed border-primary-background/40',
!builderMode && 'border-border-subtle/40',
!builderMode &&
position !== 'first' &&
position !== 'only' &&
'border-t',
!builderMode &&
(position === 'last' || position === 'only') &&
'border-b'
)
"
>
<!-- Header row -->
<div
:class="
cn(
'flex items-center gap-1',
builderMode ? 'py-1 pr-1.5 pl-1' : 'px-4 py-2'
)
"
>
<!-- Rename input -->
<div v-if="isRenaming" class="flex flex-1 items-center gap-1.5 px-3 py-2">
<input
ref="renameInput"
v-model="renameValue"
type="text"
class="min-w-0 flex-1 border-none bg-transparent text-sm text-base-foreground outline-none"
@click.stop
@keydown.enter.stop="confirmRename"
@keydown.escape.stop="cancelRename"
@blur="confirmRename"
/>
</div>
<!-- Name + chevron -->
<CollapsibleTrigger v-else as-child>
<button
type="button"
class="flex min-w-0 flex-1 items-center gap-1.5 border border-transparent bg-transparent px-3 py-2 text-left outline-none"
>
<span
:title="displayName"
class="flex-1 truncate text-sm font-bold text-base-foreground"
@dblclick.stop="startRename"
>
{{ displayName }}
</span>
<i
:class="
cn(
'icon-[lucide--chevron-down] size-4 shrink-0 text-muted-foreground transition-transform',
isOpen && 'rotate-180'
)
"
/>
</button>
</CollapsibleTrigger>
<!-- Builder actions -->
<Popover v-if="builderMode" class="-mr-2 shrink-0">
<template #button>
<Button variant="textonly" size="icon">
<i class="icon-[lucide--ellipsis]" />
</Button>
</template>
<template #default="{ close }">
<div class="flex flex-col gap-1 p-1">
<div
class="flex cursor-pointer items-center gap-4 rounded-sm p-2 hover:bg-secondary-background-hover"
@click="
() => {
close()
startRenameDeferred()
}
"
>
<i class="icon-[lucide--pencil]" />
{{ t('g.rename') }}
</div>
<div
class="flex cursor-pointer items-center gap-4 rounded-sm p-2 hover:bg-secondary-background-hover"
@click="
() => {
close()
showUngroupDialog = true
}
"
>
<i class="icon-[lucide--ungroup]" />
{{ t('linearMode.groups.ungroup') }}
</div>
</div>
</template>
</Popover>
<UngroupConfirmDialog
v-model:open="showUngroupDialog"
@confirm="handleDissolve"
/>
</div>
<CollapsibleContent>
<!-- Builder mode: editable list -->
<div
v-if="builderMode"
v-group-drop-target="{ groupId: group.id }"
:class="
cn(
'flex min-h-10 flex-col gap-3 px-2 pb-2',
'[&.group-drag-over]:bg-primary-background/5'
)
"
>
<template
v-for="row in rows"
:key="row.type === 'single' ? row.item.key : row.items[0].key"
>
<div
v-if="row.type === 'single'"
v-group-item-drag="{ itemKey: row.item.key, groupId: group.id }"
v-group-item-reorder="{
itemKey: row.item.key,
groupId: group.id
}"
class="cursor-grab overflow-hidden rounded-lg p-1.5 [&.pair-indicator]:ring-2 [&.pair-indicator]:ring-primary-background [&.reorder-after]:border-b-2 [&.reorder-after]:border-b-primary-background [&.reorder-before]:border-t-2 [&.reorder-before]:border-t-primary-background"
>
<div class="pointer-events-none" inert>
<WidgetItem
:widget="row.item.widget"
:node="row.item.node"
hidden-widget-actions
hidden-favorite-indicator
/>
</div>
</div>
<div v-else class="flex items-stretch gap-2">
<div
v-for="item in row.items"
:key="item.key"
v-group-item-drag="{ itemKey: item.key, groupId: group.id }"
v-group-item-reorder="{
itemKey: item.key,
groupId: group.id
}"
class="min-w-0 flex-1 cursor-grab overflow-hidden rounded-lg p-0.5 [&.pair-indicator]:ring-2 [&.pair-indicator]:ring-primary-background [&.reorder-after]:border-b-2 [&.reorder-after]:border-b-primary-background [&.reorder-before]:border-t-2 [&.reorder-before]:border-t-primary-background"
>
<div class="pointer-events-none" inert>
<WidgetItem
:widget="item.widget"
:node="item.node"
hidden-widget-actions
hidden-favorite-indicator
/>
</div>
</div>
</div>
</template>
<div
v-if="group.items.length === 0"
class="flex items-center justify-center py-3 text-xs text-muted-foreground"
>
{{ t('linearMode.groups.emptyGroup') }}
</div>
</div>
<!-- App mode: interactive widgets -->
<div v-else class="flex flex-col gap-4 px-4 pt-2 pb-4">
<template
v-for="row in rows"
:key="row.type === 'single' ? row.item.key : row.items[0].key"
>
<div v-if="row.type === 'single'">
<WidgetItem
:widget="row.item.widget"
:node="row.item.node"
hidden-widget-actions
@update:widget-value="
handleWidgetValueUpdate(row.item.widget, $event)
"
/>
</div>
<div v-else class="flex items-stretch gap-2">
<div
v-for="item in row.items"
:key="item.key"
class="min-w-0 flex-1 overflow-hidden"
>
<WidgetItem
:widget="item.widget"
:node="item.node"
hidden-widget-actions
class="w-full"
@update:widget-value="
handleWidgetValueUpdate(item.widget, $event)
"
/>
</div>
</div>
</template>
</div>
</CollapsibleContent>
</CollapsibleRoot>
</template>

View File

@@ -1,63 +0,0 @@
<script setup lang="ts">
import {
  DialogClose,
  DialogContent,
  DialogOverlay,
  DialogPortal,
  DialogRoot,
  DialogTitle
} from 'reka-ui'
import { useI18n } from 'vue-i18n'
import Button from '@/components/ui/button/Button.vue'

// Modal confirmation shown before dissolving an input group.
// Visibility is controlled via v-model:open; 'confirm' is emitted when
// the user accepts.
const open = defineModel<boolean>('open', { required: true })

const emit = defineEmits<{
  confirm: []
}>()

const { t } = useI18n()

// Emit the confirmation, then close the dialog.
function handleConfirm() {
  emit('confirm')
  open.value = false
}
</script>

<template>
  <DialogRoot v-model:open="open">
    <DialogPortal>
      <DialogOverlay class="fixed inset-0 z-1800 bg-black/50" />
      <DialogContent
        class="fixed top-1/2 left-1/2 z-1800 w-80 -translate-1/2 rounded-xl border border-border-subtle bg-base-background p-5 shadow-lg"
      >
        <!-- Title row with close button -->
        <div class="flex items-center justify-between">
          <DialogTitle class="text-sm font-medium">
            {{ t('linearMode.groups.confirmUngroup') }}
          </DialogTitle>
          <DialogClose
            class="flex size-6 items-center justify-center rounded-sm border-0 bg-transparent text-muted-foreground outline-none hover:text-base-foreground"
          >
            <i class="icon-[lucide--x] size-4" />
          </DialogClose>
        </div>
        <!-- Explanatory body text -->
        <div
          class="mt-3 border-t border-border-subtle pt-3 text-sm text-muted-foreground"
        >
          {{ t('linearMode.groups.ungroupDescription') }}
        </div>
        <!-- Cancel / confirm actions -->
        <div class="mt-5 flex items-center justify-end gap-3">
          <DialogClose as-child>
            <Button variant="muted-textonly" size="sm">
              {{ t('g.cancel') }}
            </Button>
          </DialogClose>
          <Button variant="secondary" size="lg" @click="handleConfirm">
            {{ t('linearMode.groups.ungroup') }}
          </Button>
        </div>
      </DialogContent>
    </DialogPortal>
  </DialogRoot>
</template>

View File

@@ -1,55 +0,0 @@
import { describe, expect, it } from 'vitest'
import {
groupItemKey,
inputItemKey,
parseGroupItemKey,
parseInputItemKey
} from './itemKeyHelper'
// Round-trip coverage for the item key helpers: composing keys from
// their parts and parsing them back, including edge cases.
describe('inputItemKey', () => {
  it('builds key from string nodeId', () => {
    expect(inputItemKey('42', 'steps')).toBe('input:42:steps')
  })
  it('builds key from numeric nodeId', () => {
    // Numeric ids are stringified into the key.
    expect(inputItemKey(7, 'cfg')).toBe('input:7:cfg')
  })
})
describe('groupItemKey', () => {
  it('builds key from groupId', () => {
    expect(groupItemKey('abc-123')).toBe('group:abc-123')
  })
})
describe('parseInputItemKey', () => {
  it('parses a valid input key', () => {
    expect(parseInputItemKey('input:42:steps')).toEqual({
      nodeId: '42',
      widgetName: 'steps'
    })
  })
  it('handles widget names containing colons', () => {
    // Only the first colon after the nodeId acts as a separator.
    expect(parseInputItemKey('input:5:a:b:c')).toEqual({
      nodeId: '5',
      widgetName: 'a:b:c'
    })
  })
  it('returns null for non-input keys', () => {
    expect(parseInputItemKey('group:abc')).toBeNull()
    expect(parseInputItemKey('output:1')).toBeNull()
  })
})
describe('parseGroupItemKey', () => {
  it('parses a valid group key', () => {
    expect(parseGroupItemKey('group:abc-123')).toBe('abc-123')
  })
  it('returns null for non-group keys', () => {
    expect(parseGroupItemKey('input:1:steps')).toBeNull()
  })
})

View File

@@ -1,27 +0,0 @@
/**
 * Compose the canonical key for a single node input.
 * Format: `input:<nodeId>:<widgetName>`.
 */
export function inputItemKey(
  nodeId: string | number,
  widgetName: string
): string {
  return ['input', String(nodeId), widgetName].join(':')
}
/**
 * Compose the canonical key for an input group.
 * Format: `group:<groupId>`.
 */
export function groupItemKey(groupId: string): string {
  return 'group:' + groupId
}
/**
 * Parse an input item key into its nodeId and widgetName parts.
 * Returns null if not an input key.
 *
 * Widget names may themselves contain colons, so only the first colon
 * after the nodeId is treated as a separator (e.g. `input:5:a:b:c`
 * yields nodeId `5` and widgetName `a:b:c`).
 */
export function parseInputItemKey(
  key: string
): { nodeId: string; widgetName: string } | null {
  if (!key.startsWith('input:')) return null
  // Slice + indexOf instead of split/slice/join: no intermediate array,
  // and no unchecked indexed access into the split result.
  const rest = key.slice('input:'.length)
  const sep = rest.indexOf(':')
  // No second colon: the whole remainder is the nodeId, widgetName empty
  // (matches the previous split-based behavior).
  if (sep === -1) return { nodeId: rest, widgetName: '' }
  return { nodeId: rest.slice(0, sep), widgetName: rest.slice(sep + 1) }
}
/**
 * Parse a group item key into its groupId.
 * Returns null if not a group key.
 */
export function parseGroupItemKey(key: string): string | null {
  const prefix = 'group:'
  return key.startsWith(prefix) ? key.slice(prefix.length) : null
}

View File

@@ -1,63 +0,0 @@
import { describe, expect, it } from 'vitest'
import { getDragItemKey, getEdgeTriZone } from './useGroupDrop'
/**
 * Minimal HTMLElement stand-in exposing only getBoundingClientRect,
 * positioned at `top` with the given `height` (horizontal extent zero).
 */
function mockElement(top: number, height: number): HTMLElement {
  const fake = {
    // Fresh rect per call, mirroring how a real element behaves.
    getBoundingClientRect: () => {
      return {
        top,
        height,
        bottom: top + height,
        left: 0,
        right: 0,
        width: 0,
        x: 0,
        y: top,
        toJSON: () => ({})
      }
    }
  }
  return fake as unknown as HTMLElement
}
// For a 90px-tall element at y=100 the thirds are: before < 130,
// center 130..160, after > 160 (see getEdgeTriZone).
describe('getEdgeTriZone', () => {
  it('returns "before" for top third', () => {
    expect(getEdgeTriZone(mockElement(100, 90), 110)).toBe('before')
  })
  it('returns "center" for middle third', () => {
    expect(getEdgeTriZone(mockElement(100, 90), 145)).toBe('center')
  })
  it('returns "after" for bottom third', () => {
    expect(getEdgeTriZone(mockElement(100, 90), 180)).toBe('after')
  })
  it('returns "before" at exact top boundary', () => {
    expect(getEdgeTriZone(mockElement(100, 90), 100)).toBe('before')
  })
  it('returns "after" at exact bottom boundary', () => {
    expect(getEdgeTriZone(mockElement(100, 90), 190)).toBe('after')
  })
})
// getDragItemKey only accepts 'group-item' payloads with a string key.
describe('getDragItemKey', () => {
  it('returns itemKey for group-item type', () => {
    expect(
      getDragItemKey({ type: 'group-item', itemKey: 'input:1:steps' })
    ).toBe('input:1:steps')
  })
  it('returns null for non-group-item type', () => {
    expect(
      getDragItemKey({ type: 'other', itemKey: 'input:1:steps' })
    ).toBeNull()
  })
  it('returns null when itemKey is not a string', () => {
    expect(getDragItemKey({ type: 'group-item', itemKey: 123 })).toBeNull()
  })
  it('returns null for empty data', () => {
    expect(getDragItemKey({})).toBeNull()
  })
})

View File

@@ -1,267 +0,0 @@
import {
draggable,
dropTargetForElements
} from '@atlaskit/pragmatic-drag-and-drop/element/adapter'
import type { Directive } from 'vue'
import { parseInputItemKey } from '@/components/builder/itemKeyHelper'
import { useInputGroupStore } from '@/stores/inputGroupStore'
/**
 * Classify a pointer's vertical position within an element as one of
 * three equal zones, used to decide drop intent: reorder above
 * ('before'), pair drop ('center'), or reorder below ('after').
 */
export function getEdgeTriZone(
  el: HTMLElement,
  clientY: number
): 'before' | 'center' | 'after' {
  const { top, height } = el.getBoundingClientRect()
  const offset = clientY - top
  const third = height / 3
  if (offset < third) return 'before'
  return offset > third * 2 ? 'after' : 'center'
}
/**
 * Extract the dragged item's key from a drag payload. Returns null
 * unless the payload is a 'group-item' drag carrying a string itemKey.
 */
export function getDragItemKey(
  data: Record<string | symbol, unknown>
): string | null {
  const { type, itemKey } = data
  return type === 'group-item' && typeof itemKey === 'string' ? itemKey : null
}
/** Remove every drop-indicator class from the element. */
function clearIndicator(el: HTMLElement) {
  el.classList.remove('reorder-before', 'reorder-after', 'pair-indicator')
}

/**
 * Show exactly one drop indicator on the element: the pairing ring for
 * 'center', or a reorder edge line for 'before'/'after'.
 */
function setIndicator(el: HTMLElement, edge: 'before' | 'center' | 'after') {
  clearIndicator(el)
  const cls = edge === 'center' ? 'pair-indicator' : `reorder-${edge}`
  el.classList.add(cls)
}
// ── Item reorder + pair drop target ──────────────────────────────────

/** v-group-item-reorder binding: identifies the item and its group. */
interface ItemReorderBinding {
  itemKey: string
  groupId: string
}

// Cleanup callback and latest binding are stashed on the element so the
// updated/unmounted hooks can reach them.
type ItemReorderEl = HTMLElement & {
  __reorderCleanup?: () => void
  __reorderValue?: ItemReorderBinding
}

/**
 * Directive making a grouped item a drop target for reorder/pair drops.
 * Top/bottom-third drops reorder; middle-third drops pair the items
 * (replacing an existing pair partner if the target is already paired).
 * Drops arriving from a different group move the dragged item into this
 * group at the target position.
 */
export const vGroupItemReorder: Directive<HTMLElement, ItemReorderBinding> = {
  mounted(el, { value }) {
    const typedEl = el as ItemReorderEl
    typedEl.__reorderValue = value
    const store = useInputGroupStore()
    typedEl.__reorderCleanup = dropTargetForElements({
      element: el,
      canDrop: ({ source }) => {
        // Only group-item drags, and never a drop onto itself.
        const dragKey = getDragItemKey(source.data)
        return !!dragKey && dragKey !== typedEl.__reorderValue!.itemKey
      },
      onDrag: ({ location }) => {
        setIndicator(el, getEdgeTriZone(el, location.current.input.clientY))
      },
      onDragEnter: ({ location }) => {
        setIndicator(el, getEdgeTriZone(el, location.current.input.clientY))
      },
      onDragLeave: () => clearIndicator(el),
      onDrop: ({ source, location }) => {
        clearIndicator(el)
        const dragKey = getDragItemKey(source.data)
        if (!dragKey) return
        const { groupId, itemKey } = typedEl.__reorderValue!
        const edge = getEdgeTriZone(el, location.current.input.clientY)
        const sameGroup = !!store
          .findGroup(groupId)
          ?.items.some((i) => i.key === dragKey)
        if (!sameGroup) {
          // Cross-group drop: move the dragged item next to this one.
          store.moveItemToGroupAt(groupId, dragKey, itemKey, edge)
          return
        }
        if (edge === 'center') {
          // Center drop pairs; if the target already has a pair partner,
          // the dragged item replaces it.
          const targetItem = store
            .findGroup(groupId)
            ?.items.find((i) => i.key === itemKey)
          if (targetItem?.pairId) {
            store.replaceInPair(groupId, itemKey, dragKey)
          } else {
            store.pairItemsInGroup(groupId, itemKey, dragKey)
          }
        } else {
          // Edge drop: break any existing pair, then reorder.
          store.unpairItem(groupId, dragKey)
          store.reorderWithinGroup(groupId, dragKey, itemKey, edge)
        }
      }
    })
  },
  updated(el, { value }) {
    ;(el as ItemReorderEl).__reorderValue = value
  },
  unmounted(el) {
    ;(el as ItemReorderEl).__reorderCleanup?.()
  }
}
// ── Draggable item ───────────────────────────────────────────────────

/** v-group-item-drag binding: identifies the dragged item and its group. */
interface ItemDragBinding {
  itemKey: string
  groupId: string
}

// Cleanup callback and latest binding stashed on the element, same
// pattern as the reorder directive.
type ItemDragEl = HTMLElement & {
  __dragCleanup?: () => void
  __dragValue?: ItemDragBinding
}

/**
 * Directive making a grouped item draggable. The drag payload carries
 * the item key, its parsed nodeId/widgetName, and the source group id.
 * Dropping outside any recognized drop target removes the item from its
 * group — unless the pointer is still over the item's own group body.
 */
export const vGroupItemDrag: Directive<HTMLElement, ItemDragBinding> = {
  mounted(el, { value }) {
    const typedEl = el as ItemDragEl
    typedEl.__dragValue = value
    const store = useInputGroupStore()
    typedEl.__dragCleanup = draggable({
      element: el,
      getInitialData: () => {
        const parsed = parseInputItemKey(typedEl.__dragValue!.itemKey)
        return {
          type: 'group-item',
          itemKey: typedEl.__dragValue!.itemKey,
          nodeId: parsed?.nodeId ?? '',
          widgetName: parsed?.widgetName ?? '',
          sourceGroupId: typedEl.__dragValue!.groupId
        }
      },
      onDrop: ({ location }) => {
        // A recognized drop target already handled this drop.
        if (location.current.dropTargets.length > 0) return
        const { groupId, itemKey } = typedEl.__dragValue!
        // Still over own group body → don't ungroup
        const overGroup = findGroupDropUnderPointer(
          location.current.input.clientX,
          location.current.input.clientY
        )
        if (overGroup?.groupId === groupId) return
        store.removeItemFromGroup(groupId, itemKey)
      }
    })
  },
  updated(el, { value }) {
    ;(el as ItemDragEl).__dragValue = value
  },
  unmounted(el) {
    ;(el as ItemDragEl).__dragCleanup?.()
  }
}
// ── Group body drop target (for items dragged from outside) ──────────

/** v-group-drop-target binding: the group that accepts dropped items. */
interface GroupDropBinding {
  groupId: string
}

// Cleanup callback and latest binding stashed on the element.
type GroupDropEl = HTMLElement & {
  __groupDropCleanup?: () => void
  __groupDropValue?: GroupDropBinding
}

// Attribute advertising a group drop zone; read back via
// document.elementsFromPoint in findGroupDropUnderPointer.
const GROUP_DROP_ATTR = 'data-group-drop-id'

/** Find the group drop target under the pointer, ignoring the dragged element. */
function findGroupDropUnderPointer(
  x: number,
  y: number
): { el: HTMLElement; groupId: string } | null {
  for (const el of document.elementsFromPoint(x, y)) {
    const groupId = (el as HTMLElement).getAttribute?.(GROUP_DROP_ATTR)
    if (groupId) return { el: el as HTMLElement, groupId }
  }
  return null
}
/**
 * Document-level mouseup bridge: when a DraggableList drag ends over a group
 * drop target, add the item to that group. Captures the item key on mousedown
 * to avoid racing with DraggableList's cleanup (which removes .is-draggable).
 */
let pendingDragKey: string | null = null
// Count of mounted vGroupDropTarget elements; the document listeners are
// installed for the first and torn down after the last (see directive below).
let bridgeRefCount = 0
let removeBridge: (() => void) | null = null

function setupListToGroupBridge() {
  function onMouseDown(e: MouseEvent) {
    // Remember which draggable item (if any) the press started on.
    const target = (e.target as HTMLElement)?.closest?.('.draggable-item')
    pendingDragKey = (target as HTMLElement)?.dataset?.itemKey ?? null
  }
  function onMouseUp(e: MouseEvent) {
    const itemKey = pendingDragKey
    pendingDragKey = null
    if (!itemKey) return
    const target = findGroupDropUnderPointer(e.clientX, e.clientY)
    if (!target) return
    const store = useInputGroupStore()
    const group = store.findGroup(target.groupId)
    // No-op when the item already belongs to the target group.
    if (group?.items.some((i) => i.key === itemKey)) return
    store.addItemToGroup(target.groupId, itemKey)
  }
  document.addEventListener('mousedown', onMouseDown)
  document.addEventListener('mouseup', onMouseUp)
  removeBridge = () => {
    document.removeEventListener('mousedown', onMouseDown)
    document.removeEventListener('mouseup', onMouseUp)
  }
}
/**
 * Vue directive marking an element as a drop target for input-group items.
 * Accepts drops from two sources:
 *  - Pragmatic DnD drags (items moved within/between groups), via
 *    dropTargetForElements below;
 *  - DraggableList drags from outside, via the document-level mouse bridge
 *    (installed once, refcounted across all mounted instances).
 */
export const vGroupDropTarget: Directive<HTMLElement, GroupDropBinding> = {
  mounted(el, { value }) {
    const typedEl = el as GroupDropEl
    // Stash the binding on the element so canDrop/onDrop read the latest
    // groupId even after `updated` fires.
    typedEl.__groupDropValue = value
    const store = useInputGroupStore()
    el.setAttribute(GROUP_DROP_ATTR, value.groupId)
    // First mounted instance installs the shared document-level bridge.
    bridgeRefCount++
    if (bridgeRefCount === 1) setupListToGroupBridge()
    // Pragmatic DnD drop target (for items dragged within/between groups)
    typedEl.__groupDropCleanup = dropTargetForElements({
      element: el,
      // Reject drops of items already in this group (no-op moves).
      canDrop: ({ source }) => {
        const itemKey = getDragItemKey(source.data)
        if (!itemKey) return false
        const group = store.findGroup(typedEl.__groupDropValue!.groupId)
        return !group?.items.some((i) => i.key === itemKey)
      },
      onDragEnter: () => el.classList.add('group-drag-over'),
      onDragLeave: () => el.classList.remove('group-drag-over'),
      onDrop: ({ source, location }) => {
        el.classList.remove('group-drag-over')
        // Only the innermost drop target handles the drop.
        if (location.current.dropTargets[0]?.element !== el) return
        const itemKey = getDragItemKey(source.data)
        if (!itemKey) return
        store.addItemToGroup(typedEl.__groupDropValue!.groupId, itemKey)
      }
    })
  },
  updated(el, { value }) {
    // Keep the stashed binding and the hit-test attribute in sync.
    ;(el as GroupDropEl).__groupDropValue = value
    el.setAttribute(GROUP_DROP_ATTR, value.groupId)
  },
  unmounted(el) {
    ;(el as GroupDropEl).__groupDropCleanup?.()
    el.removeAttribute(GROUP_DROP_ATTR)
    // Last unmounted instance tears down the shared bridge.
    bridgeRefCount--
    if (bridgeRefCount === 0) {
      removeBridge?.()
      removeBridge = null
    }
  }
}

View File

@@ -1,201 +0,0 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
import type { LGraphNode } from '@/lib/litegraph/src/LGraphNode'
import type { IBaseWidget } from '@/lib/litegraph/src/types/widgets'
import type { InputGroup } from '@/platform/workflow/management/stores/comfyWorkflow'
// Controllable stand-in for resolveNodeWidget; tests program its return values
// per call via mockReturnValueOnce.
const mockResolveNodeWidget =
  vi.fn<(...args: unknown[]) => [LGraphNode, IBaseWidget] | [LGraphNode] | []>()
// vi.mock calls are hoisted by vitest, so the factories run before the
// module under test is imported below.
vi.mock('@/utils/litegraphUtil', () => ({
  resolveNodeWidget: (...args: unknown[]) => mockResolveNodeWidget(...args)
}))
// Identity translator: tests assert on raw i18n keys.
vi.mock('@/i18n', () => ({
  t: (key: string) => key
}))
import {
autoGroupName,
groupedByPair,
resolveGroupItems
} from './useInputGroups'
// Reset mock call history and queued return values between tests.
beforeEach(() => {
  vi.clearAllMocks()
})
/** Build a minimal LGraphNode stub carrying only an id. */
function makeNode(id: string): LGraphNode {
  const stub = { id }
  return stub as unknown as LGraphNode
}
/** Build a minimal IBaseWidget stub with a name and optional label. */
function makeWidget(name: string, label?: string): IBaseWidget {
  const stub = { name, label }
  return stub as unknown as IBaseWidget
}
/** Build an unnamed InputGroup with fixed id 'g1' around the given items. */
function makeGroup(items: { key: string; pairId?: string }[]): InputGroup {
  const group = { id: 'g1', name: null, items }
  return group
}
/** Build a ResolvedGroupItem-shaped fixture with stub node/widget and an optional pairId. */
function makeResolvedItem(key: string, opts: { pairId?: string } = {}) {
  const { pairId } = opts
  return {
    key,
    pairId,
    node: makeNode('1'),
    widget: makeWidget('w'),
    nodeId: '1',
    widgetName: 'w'
  }
}
// Row-building behavior: items pair only when exactly two unconsumed items
// share a pairId; everything else renders as a single row.
describe('groupedByPair', () => {
  it('returns empty for empty input', () => {
    expect(groupedByPair([])).toEqual([])
  })
  it('treats all items without pairId as singles', () => {
    const items = [makeResolvedItem('a'), makeResolvedItem('b')]
    const rows = groupedByPair(items)
    expect(rows).toHaveLength(2)
    expect(rows[0]).toMatchObject({ type: 'single' })
    expect(rows[1]).toMatchObject({ type: 'single' })
  })
  it('pairs two items with matching pairId', () => {
    const items = [
      makeResolvedItem('a', { pairId: 'p1' }),
      makeResolvedItem('b', { pairId: 'p1' })
    ]
    const rows = groupedByPair(items)
    expect(rows).toHaveLength(1)
    expect(rows[0].type).toBe('pair')
    // Narrow the union before accessing pair-only fields.
    if (rows[0].type === 'pair') {
      expect(rows[0].items[0].key).toBe('a')
      expect(rows[0].items[1].key).toBe('b')
    }
  })
  it('renders orphaned pairId (no partner) as single', () => {
    const items = [makeResolvedItem('a', { pairId: 'lonely' })]
    const rows = groupedByPair(items)
    expect(rows).toHaveLength(1)
    expect(rows[0]).toMatchObject({ type: 'single' })
  })
  it('handles mixed singles and pairs', () => {
    const items = [
      makeResolvedItem('a'),
      makeResolvedItem('b', { pairId: 'p1' }),
      makeResolvedItem('c', { pairId: 'p1' }),
      makeResolvedItem('d')
    ]
    const rows = groupedByPair(items)
    expect(rows).toHaveLength(3)
    expect(rows[0]).toMatchObject({ type: 'single' })
    expect(rows[1]).toMatchObject({ type: 'pair' })
    expect(rows[2]).toMatchObject({ type: 'single' })
  })
  it('pairs first two of three items with same pairId, third becomes single', () => {
    const items = [
      makeResolvedItem('a', { pairId: 'p1' }),
      makeResolvedItem('b', { pairId: 'p1' }),
      makeResolvedItem('c', { pairId: 'p1' })
    ]
    const rows = groupedByPair(items)
    expect(rows).toHaveLength(2)
    expect(rows[0].type).toBe('pair')
    expect(rows[1]).toMatchObject({ type: 'single' })
  })
})
// Naming behavior: labels joined with ', ', widget-name fallback, i18n key
// fallback when nothing resolves, and non-input keys skipped entirely.
describe('autoGroupName', () => {
  it('joins widget labels with comma', () => {
    mockResolveNodeWidget
      .mockReturnValueOnce([makeNode('1'), makeWidget('w1', 'Width')])
      .mockReturnValueOnce([makeNode('2'), makeWidget('w2', 'Height')])
    const group = makeGroup([{ key: 'input:1:w1' }, { key: 'input:2:w2' }])
    expect(autoGroupName(group)).toBe('Width, Height')
  })
  it('falls back to widget name when label is absent', () => {
    mockResolveNodeWidget.mockReturnValueOnce([
      makeNode('1'),
      makeWidget('steps')
    ])
    const group = makeGroup([{ key: 'input:1:steps' }])
    expect(autoGroupName(group)).toBe('steps')
  })
  it('returns untitled key when no widgets resolve', () => {
    mockResolveNodeWidget.mockReturnValue([])
    const group = makeGroup([{ key: 'input:1:w' }])
    // The i18n mock returns the key itself, so the raw key is asserted.
    expect(autoGroupName(group)).toBe('linearMode.groups.untitled')
  })
  it('skips non-input keys', () => {
    mockResolveNodeWidget.mockReturnValueOnce([
      makeNode('1'),
      makeWidget('w', 'OK')
    ])
    const group = makeGroup([{ key: 'output:1:w' }, { key: 'input:1:w' }])
    expect(autoGroupName(group)).toBe('OK')
    // Only the input-prefixed key should have triggered a resolve.
    expect(mockResolveNodeWidget).toHaveBeenCalledTimes(1)
  })
})
// Resolution behavior: unresolvable items dropped silently, colon-containing
// widget names parsed intact, non-input keys skipped, pairId carried through.
describe('resolveGroupItems', () => {
  it('filters out items where resolveNodeWidget returns empty', () => {
    mockResolveNodeWidget
      .mockReturnValueOnce([makeNode('1'), makeWidget('w1')])
      .mockReturnValueOnce([])
    const group = makeGroup([{ key: 'input:1:w1' }, { key: 'input:2:missing' }])
    const resolved = resolveGroupItems(group)
    expect(resolved).toHaveLength(1)
    expect(resolved[0].widgetName).toBe('w1')
  })
  it('handles widget names containing colons', () => {
    mockResolveNodeWidget.mockReturnValueOnce([
      makeNode('5'),
      makeWidget('a:b:c')
    ])
    const group = makeGroup([{ key: 'input:5:a:b:c' }])
    const resolved = resolveGroupItems(group)
    expect(resolved).toHaveLength(1)
    expect(resolved[0].nodeId).toBe('5')
    expect(resolved[0].widgetName).toBe('a:b:c')
  })
  it('skips non-input keys', () => {
    const group = makeGroup([{ key: 'other:1:w' }])
    const resolved = resolveGroupItems(group)
    expect(resolved).toHaveLength(0)
    expect(mockResolveNodeWidget).not.toHaveBeenCalled()
  })
  it('preserves pairId on resolved items', () => {
    mockResolveNodeWidget.mockReturnValueOnce([makeNode('1'), makeWidget('w')])
    const group = makeGroup([{ key: 'input:1:w', pairId: 'p1' }])
    const resolved = resolveGroupItems(group)
    expect(resolved[0].pairId).toBe('p1')
  })
})

View File

@@ -1,86 +0,0 @@
import { parseInputItemKey } from '@/components/builder/itemKeyHelper'
import { t } from '@/i18n'
import type { LGraphNode } from '@/lib/litegraph/src/LGraphNode'
import type { IBaseWidget } from '@/lib/litegraph/src/types/widgets'
import type { InputGroup } from '@/platform/workflow/management/stores/comfyWorkflow'
import { resolveNodeWidget } from '@/utils/litegraphUtil'
/** A group item whose key has been resolved to live node/widget references. */
export interface ResolvedGroupItem {
  key: string
  pairId?: string
  node: LGraphNode
  widget: IBaseWidget
  nodeId: string
  widgetName: string
}
/** Row of items to render — single or side-by-side pair. */
export type GroupRow =
  | { type: 'single'; item: ResolvedGroupItem }
  | { type: 'pair'; items: [ResolvedGroupItem, ResolvedGroupItem] }
/** Derive a group name from the labels of its contained widgets. */
/**
 * Derive a group name from the labels of its contained widgets.
 *
 * Only keys that parse as input-item keys and resolve to a live widget
 * contribute; falls back to the localized "untitled" string when none do.
 */
export function autoGroupName(group: InputGroup): string {
  const labels: string[] = []
  group.items.forEach((item) => {
    const parsed = parseInputItemKey(item.key)
    if (!parsed) return
    const [, widget] = resolveNodeWidget(parsed.nodeId, parsed.widgetName)
    if (widget) {
      // Prefer the user-visible label; the internal name is the fallback.
      labels.push(widget.label || widget.name)
    }
  })
  return labels.length > 0
    ? labels.join(', ')
    : t('linearMode.groups.untitled')
}
/**
* Resolve item keys to widget/node data.
* Items whose node or widget cannot be resolved are silently omitted.
*/
/**
 * Resolve item keys to widget/node data.
 * Items whose node or widget cannot be resolved are silently omitted.
 */
export function resolveGroupItems(group: InputGroup): ResolvedGroupItem[] {
  const out: ResolvedGroupItem[] = []
  for (const { key, pairId } of group.items) {
    const parsed = parseInputItemKey(key)
    if (!parsed) continue
    const [node, widget] = resolveNodeWidget(parsed.nodeId, parsed.widgetName)
    // Both references must resolve for the item to be usable.
    if (!node || !widget) continue
    out.push({
      key,
      pairId,
      node,
      widget,
      nodeId: parsed.nodeId,
      widgetName: parsed.widgetName
    })
  }
  return out
}
/** Group resolved items into rows, pairing items with matching pairId. */
export function groupedByPair(items: ResolvedGroupItem[]): GroupRow[] {
const rows: GroupRow[] = []
const paired = new Set<string>()
for (const item of items) {
if (paired.has(item.key)) continue
if (item.pairId) {
const partner = items.find(
(other) =>
other.key !== item.key &&
other.pairId === item.pairId &&
!paired.has(other.key)
)
if (partner) {
paired.add(item.key)
paired.add(partner.key)
rows.push({ type: 'pair', items: [item, partner] })
continue
}
}
rows.push({ type: 'single', item })
}
return rows
}

View File

@@ -1,4 +1,5 @@
<script setup lang="ts">
import type { MenuItem } from 'primevue/menuitem'
import {
PopoverArrow,
PopoverContent,
@@ -10,14 +11,6 @@ import {
import Button from '@/components/ui/button/Button.vue'
import { cn } from '@/utils/tailwindUtil'
export interface PopoverMenuItem {
label?: string
icon?: string
separator?: boolean
disabled?: boolean
command?: (...args: unknown[]) => void
}
defineOptions({
inheritAttrs: false
})
@@ -28,7 +21,7 @@ const {
to,
showArrow = true
} = defineProps<{
entries?: PopoverMenuItem[]
entries?: MenuItem[]
icon?: string
to?: string | HTMLElement
showArrow?: boolean

View File

@@ -2527,23 +2527,6 @@
"inputsNone": "لا توجد مدخلات",
"inputsNoneTooltip": "العقدة ليس لديها مدخلات",
"locateNode": "تحديد موقع العقدة على اللوحة",
"missingMedia": {
"audio": "الصوتيات",
"cancelSelection": "إلغاء الاختيار",
"collapseNodes": "إخفاء العقد المشار إليها",
"confirmSelection": "تأكيد الاختيار",
"expandNodes": "عرض العقد المشار إليها",
"image": "الصور",
"locateNode": "تحديد موقع العقدة",
"missingMediaTitle": "المدخلات المفقودة",
"or": "أو",
"selectedFromLibrary": "تم الاختيار من المكتبة",
"uploadFile": "رفع {type}",
"uploaded": "تم الرفع",
"uploading": "جاري الرفع...",
"useFromLibrary": "استخدام من المكتبة",
"video": "الفيديوهات"
},
"missingModels": {
"alreadyExistsInCategory": "هذا النموذج موجود بالفعل في \"{category}\"",
"assetLoadTimeout": "انتهت مهلة اكتشاف النموذج. حاول إعادة تحميل سير العمل.",

View File

@@ -17951,241 +17951,6 @@
}
}
},
"Wan2ImageToVideoApi": {
"description": "إنشاء فيديو من صورة الإطار الأول، مع إمكانية إضافة صورة الإطار الأخير وصوت اختياري.",
"display_name": "وان 2.7 من صورة إلى فيديو",
"inputs": {
"audio": {
"name": "الصوت",
"tooltip": "الصوت المستخدم لتوجيه توليد الفيديو (مثل مزامنة الشفاه أو حركة متوافقة مع الإيقاع). المدة: ۲-۳۰ ثانية. إذا لم يتم توفيره، يقوم النموذج تلقائيًا بإنشاء موسيقى خلفية أو مؤثرات صوتية مناسبة."
},
"control_after_generate": {
"name": "التحكم بعد التوليد"
},
"first_frame": {
"name": "الإطار الأول",
"tooltip": "صورة الإطار الأول. يتم اشتقاق نسبة العرض إلى الارتفاع للإخراج من هذه الصورة."
},
"last_frame": {
"name": "الإطار الأخير",
"tooltip": "صورة الإطار الأخير. يقوم النموذج بإنشاء فيديو ينتقل من الإطار الأول إلى الأخير."
},
"model": {
"name": "النموذج"
},
"model_duration": {
"name": "المدة"
},
"model_negative_prompt": {
"name": "توجيه سلبي"
},
"model_prompt": {
"name": "توجيه"
},
"model_resolution": {
"name": "الدقة"
},
"prompt_extend": {
"name": "تعزيز التوجيه",
"tooltip": "ما إذا كان سيتم تعزيز التوجيه بمساعدة الذكاء الاصطناعي."
},
"seed": {
"name": "البذرة",
"tooltip": "البذرة المستخدمة في التوليد."
},
"watermark": {
"name": "علامة مائية",
"tooltip": "ما إذا كان سيتم إضافة علامة مائية مولدة بالذكاء الاصطناعي إلى النتيجة."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2ReferenceVideoApi": {
"description": "إنشاء فيديو يعرض شخصًا أو كائنًا من مواد مرجعية. يدعم أداء شخصية واحدة وتفاعل عدة شخصيات.",
"display_name": "وان 2.7 من مرجع إلى فيديو",
"inputs": {
"control_after_generate": {
"name": "التحكم بعد التوليد"
},
"model": {
"name": "النموذج"
},
"model_duration": {
"name": "المدة"
},
"model_negative_prompt": {
"name": "توجيه سلبي"
},
"model_prompt": {
"name": "توجيه"
},
"model_ratio": {
"name": "النسبة"
},
"model_resolution": {
"name": "الدقة"
},
"seed": {
"name": "البذرة",
"tooltip": "البذرة المستخدمة في التوليد."
},
"watermark": {
"name": "علامة مائية",
"tooltip": "ما إذا كان سيتم إضافة علامة مائية مولدة بالذكاء الاصطناعي إلى النتيجة."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2TextToVideoApi": {
"description": "ينشئ فيديو بناءً على وصف نصي باستخدام نموذج وان 2.7.",
"display_name": "وان 2.7 تحويل النص إلى فيديو",
"inputs": {
"audio": {
"name": "الصوت",
"tooltip": "الصوت المستخدم لتوجيه توليد الفيديو (مثل مزامنة الشفاه أو الحركة المتوافقة مع الإيقاع). المدة: ٣-٣٠ ثانية. إذا لم يتم توفيره، سيقوم النموذج تلقائيًا بإنشاء موسيقى خلفية أو مؤثرات صوتية مناسبة."
},
"control_after_generate": {
"name": "التحكم بعد التوليد"
},
"model": {
"name": "النموذج"
},
"model_duration": {
"name": "المدة"
},
"model_negative_prompt": {
"name": "الوصف السلبي"
},
"model_prompt": {
"name": "الوصف"
},
"model_ratio": {
"name": "النسبة"
},
"model_resolution": {
"name": "الدقة"
},
"prompt_extend": {
"name": "توسيع الوصف",
"tooltip": "ما إذا كان سيتم تعزيز الوصف بمساعدة الذكاء الاصطناعي."
},
"seed": {
"name": "البذرة",
"tooltip": "البذرة المستخدمة في التوليد."
},
"watermark": {
"name": "علامة مائية",
"tooltip": "ما إذا كان سيتم إضافة علامة مائية مولدة بالذكاء الاصطناعي إلى النتيجة."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoContinuationApi": {
"description": "استكمال الفيديو من حيث توقف، مع إمكانية التحكم في الإطار الأخير.",
"display_name": "وان 2.7 استكمال الفيديو",
"inputs": {
"control_after_generate": {
"name": "التحكم بعد التوليد"
},
"first_clip": {
"name": "المقطع الأول",
"tooltip": "الفيديو المدخل للاستكمال منه. المدة: ٢-١٠ ثوانٍ. يتم اشتقاق نسبة العرض إلى الارتفاع من هذا الفيديو."
},
"last_frame": {
"name": "الإطار الأخير",
"tooltip": "صورة الإطار الأخير. سيتم الانتقال في الاستكمال نحو هذا الإطار."
},
"model": {
"name": "النموذج"
},
"model_duration": {
"name": "المدة"
},
"model_negative_prompt": {
"name": "الوصف السلبي"
},
"model_prompt": {
"name": "الوصف"
},
"model_resolution": {
"name": "الدقة"
},
"prompt_extend": {
"name": "توسيع الوصف",
"tooltip": "ما إذا كان سيتم تعزيز الوصف بمساعدة الذكاء الاصطناعي."
},
"seed": {
"name": "البذرة",
"tooltip": "البذرة المستخدمة في التوليد."
},
"watermark": {
"name": "علامة مائية",
"tooltip": "ما إذا كان سيتم إضافة علامة مائية مولدة بالذكاء الاصطناعي إلى النتيجة."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoEditApi": {
"description": "تحرير فيديو باستخدام تعليمات نصية أو صور مرجعية أو نقل النمط.",
"display_name": "وان 2.7 تحرير الفيديو",
"inputs": {
"audio_setting": {
"name": "إعداد الصوت",
"tooltip": "'تلقائي': يقرر النموذج ما إذا كان سيعيد توليد الصوت بناءً على الوصف. 'الأصلي': الحفاظ على الصوت الأصلي من الفيديو المدخل."
},
"control_after_generate": {
"name": "التحكم بعد التوليد"
},
"model": {
"name": "النموذج"
},
"model_duration": {
"name": "المدة"
},
"model_prompt": {
"name": "الوصف"
},
"model_ratio": {
"name": "النسبة"
},
"model_resolution": {
"name": "الدقة"
},
"seed": {
"name": "البذرة",
"tooltip": "البذرة المستخدمة في التوليد."
},
"video": {
"name": "الفيديو",
"tooltip": "الفيديو المراد تحريره."
},
"watermark": {
"name": "علامة مائية",
"tooltip": "ما إذا كان سيتم إضافة علامة مائية مولدة بالذكاء الاصطناعي إلى النتيجة."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanAnimateToVideo": {
"display_name": "WanAnimateToVideo",
"inputs": {

View File

@@ -1743,8 +1743,8 @@
"Tripo": "Tripo",
"Veo": "Veo",
"Vidu": "Vidu",
"Wan": "Wan",
"camera": "camera",
"Wan": "Wan",
"WaveSpeed": "WaveSpeed",
"zimage": "zimage"
},
@@ -3357,16 +3357,6 @@
"queue": {
"clickToClear": "Click to clear queue",
"clear": "Clear queue"
},
"groups": {
"untitled": "Untitled group",
"createGroup": "Create group",
"ungroup": "Ungroup",
"confirmUngroup": "Ungroup inputs?",
"ungroupDescription": "The inputs in this group will be moved back to the main list.",
"emptyGroup": "Add inputs to this group",
"addToGroup": "Add to group",
"newGroup": "New group"
}
},
"missingNodes": {

View File

@@ -17976,241 +17976,6 @@
}
}
},
"Wan2ImageToVideoApi": {
"display_name": "Wan 2.7 Image to Video",
"description": "Generate a video from a first-frame image, with optional last-frame image and audio.",
"inputs": {
"model": {
"name": "model"
},
"first_frame": {
"name": "first_frame",
"tooltip": "First frame image. The output aspect ratio is derived from this image."
},
"seed": {
"name": "seed",
"tooltip": "Seed to use for generation."
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "Whether to enhance the prompt with AI assistance."
},
"watermark": {
"name": "watermark",
"tooltip": "Whether to add an AI-generated watermark to the result."
},
"last_frame": {
"name": "last_frame",
"tooltip": "Last frame image. The model generates a video transitioning from first to last frame."
},
"audio": {
"name": "audio",
"tooltip": "Audio for driving video generation (e.g., lip sync, beat-matched motion). Duration: 2s-30s. If not provided, the model automatically generates matching background music or sound effects."
},
"control_after_generate": {
"name": "control after generate"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolution"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2ReferenceVideoApi": {
"display_name": "Wan 2.7 Reference to Video",
"description": "Generate a video featuring a person or object from reference materials. Supports single-character performances and multi-character interactions.",
"inputs": {
"model": {
"name": "model"
},
"seed": {
"name": "seed",
"tooltip": "Seed to use for generation."
},
"watermark": {
"name": "watermark",
"tooltip": "Whether to add an AI-generated watermark to the result."
},
"control_after_generate": {
"name": "control after generate"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2TextToVideoApi": {
"display_name": "Wan 2.7 Text to Video",
"description": "Generates a video based on a text prompt using the Wan 2.7 model.",
"inputs": {
"model": {
"name": "model"
},
"seed": {
"name": "seed",
"tooltip": "Seed to use for generation."
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "Whether to enhance the prompt with AI assistance."
},
"watermark": {
"name": "watermark",
"tooltip": "Whether to add an AI-generated watermark to the result."
},
"audio": {
"name": "audio",
"tooltip": "Audio for driving video generation (e.g., lip sync, beat-matched motion). Duration: 3s-30s. If not provided, the model automatically generates matching background music or sound effects."
},
"control_after_generate": {
"name": "control after generate"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoContinuationApi": {
"display_name": "Wan 2.7 Video Continuation",
"description": "Continue a video from where it left off, with optional last-frame control.",
"inputs": {
"model": {
"name": "model"
},
"first_clip": {
"name": "first_clip",
"tooltip": "Input video to continue from. Duration: 2s-10s. The output aspect ratio is derived from this video."
},
"seed": {
"name": "seed",
"tooltip": "Seed to use for generation."
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "Whether to enhance the prompt with AI assistance."
},
"watermark": {
"name": "watermark",
"tooltip": "Whether to add an AI-generated watermark to the result."
},
"last_frame": {
"name": "last_frame",
"tooltip": "Last frame image. The continuation will transition towards this frame."
},
"control_after_generate": {
"name": "control after generate"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolution"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoEditApi": {
"display_name": "Wan 2.7 Video Edit",
"description": "Edit a video using text instructions, reference images, or style transfer.",
"inputs": {
"model": {
"name": "model"
},
"video": {
"name": "video",
"tooltip": "The video to edit."
},
"seed": {
"name": "seed",
"tooltip": "Seed to use for generation."
},
"audio_setting": {
"name": "audio_setting",
"tooltip": "'auto': model decides whether to regenerate audio based on the prompt. 'origin': preserve the original audio from the input video."
},
"watermark": {
"name": "watermark",
"tooltip": "Whether to add an AI-generated watermark to the result."
},
"control_after_generate": {
"name": "control after generate"
},
"model_duration": {
"name": "duration"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanAnimateToVideo": {
"display_name": "WanAnimateToVideo",
"inputs": {

View File

@@ -2527,23 +2527,6 @@
"inputsNone": "SIN ENTRADAS",
"inputsNoneTooltip": "El nodo no tiene entradas",
"locateNode": "Localizar nodo en el lienzo",
"missingMedia": {
"audio": "Audio",
"cancelSelection": "Cancelar selección",
"collapseNodes": "Ocultar nodos de referencia",
"confirmSelection": "Confirmar selección",
"expandNodes": "Mostrar nodos de referencia",
"image": "Imágenes",
"locateNode": "Localizar nodo",
"missingMediaTitle": "Entradas faltantes",
"or": "O",
"selectedFromLibrary": "Seleccionado de la biblioteca",
"uploadFile": "Subir {type}",
"uploaded": "Subido",
"uploading": "Subiendo...",
"useFromLibrary": "Usar de la biblioteca",
"video": "Videos"
},
"missingModels": {
"alreadyExistsInCategory": "Este modelo ya existe en \"{category}\"",
"assetLoadTimeout": "El tiempo de detección del modelo se agotó. Intenta recargar el flujo de trabajo.",

View File

@@ -17951,241 +17951,6 @@
}
}
},
"Wan2ImageToVideoApi": {
"description": "Genera un video a partir de una imagen del primer fotograma, con opción de imagen del último fotograma y audio.",
"display_name": "Wan 2.7 Imagen a Video",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "Audio para guiar la generación del video (por ejemplo, sincronización labial, movimiento al ritmo). Duración: 2s-30s. Si no se proporciona, el modelo genera automáticamente música de fondo o efectos de sonido acordes."
},
"control_after_generate": {
"name": "controlar después de generar"
},
"first_frame": {
"name": "primer_fotograma",
"tooltip": "Imagen del primer fotograma. La relación de aspecto de salida se deriva de esta imagen."
},
"last_frame": {
"name": "último_fotograma",
"tooltip": "Imagen del último fotograma. El modelo genera un video que transiciona del primer al último fotograma."
},
"model": {
"name": "modelo"
},
"model_duration": {
"name": "duración"
},
"model_negative_prompt": {
"name": "prompt_negativo"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolución"
},
"prompt_extend": {
"name": "extender_prompt",
"tooltip": "Si se debe mejorar el prompt con asistencia de IA."
},
"seed": {
"name": "semilla",
"tooltip": "Semilla para usar en la generación."
},
"watermark": {
"name": "marca_de_agua",
"tooltip": "Si se debe añadir una marca de agua generada por IA al resultado."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2ReferenceVideoApi": {
"description": "Genera un video con una persona u objeto a partir de materiales de referencia. Soporta actuaciones de un solo personaje e interacciones entre varios personajes.",
"display_name": "Wan 2.7 Referencia a Video",
"inputs": {
"control_after_generate": {
"name": "controlar después de generar"
},
"model": {
"name": "modelo"
},
"model_duration": {
"name": "duración"
},
"model_negative_prompt": {
"name": "prompt_negativo"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "relación"
},
"model_resolution": {
"name": "resolución"
},
"seed": {
"name": "semilla",
"tooltip": "Semilla para usar en la generación."
},
"watermark": {
"name": "marca_de_agua",
"tooltip": "Si se debe añadir una marca de agua generada por IA al resultado."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2TextToVideoApi": {
"description": "Genera un video a partir de un texto usando el modelo Wan 2.7.",
"display_name": "Wan 2.7 Texto a Video",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "Audio para guiar la generación del video (por ejemplo, sincronización labial, movimiento al ritmo). Duración: 3s-30s. Si no se proporciona, el modelo genera automáticamente música de fondo o efectos de sonido acordes."
},
"control_after_generate": {
"name": "controlar después de generar"
},
"model": {
"name": "modelo"
},
"model_duration": {
"name": "duración"
},
"model_negative_prompt": {
"name": "prompt_negativo"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "relación de aspecto"
},
"model_resolution": {
"name": "resolución"
},
"prompt_extend": {
"name": "extender_prompt",
"tooltip": "Si se debe mejorar el prompt con asistencia de IA."
},
"seed": {
"name": "semilla",
"tooltip": "Semilla para usar en la generación."
},
"watermark": {
"name": "marca de agua",
"tooltip": "Si se debe añadir una marca de agua generada por IA al resultado."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoContinuationApi": {
"description": "Continúa un video desde donde terminó, con control opcional del último fotograma.",
"display_name": "Wan 2.7 Continuación de Video",
"inputs": {
"control_after_generate": {
"name": "controlar después de generar"
},
"first_clip": {
"name": "primer_clip",
"tooltip": "Video de entrada desde el cual continuar. Duración: 2s-10s. La relación de aspecto de salida se deriva de este video."
},
"last_frame": {
"name": "último_fotograma",
"tooltip": "Imagen del último fotograma. La continuación hará la transición hacia este fotograma."
},
"model": {
"name": "modelo"
},
"model_duration": {
"name": "duración"
},
"model_negative_prompt": {
"name": "prompt_negativo"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolución"
},
"prompt_extend": {
"name": "extender_prompt",
"tooltip": "Si se debe mejorar el prompt con asistencia de IA."
},
"seed": {
"name": "semilla",
"tooltip": "Semilla para usar en la generación."
},
"watermark": {
"name": "marca de agua",
"tooltip": "Si se debe añadir una marca de agua generada por IA al resultado."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoEditApi": {
"description": "Edita un video usando instrucciones de texto, imágenes de referencia o transferencia de estilo.",
"display_name": "Wan 2.7 Edición de Video",
"inputs": {
"audio_setting": {
"name": "configuración_de_audio",
"tooltip": "'auto': el modelo decide si regenerar el audio según el prompt. 'origin': conserva el audio original del video de entrada."
},
"control_after_generate": {
"name": "controlar después de generar"
},
"model": {
"name": "modelo"
},
"model_duration": {
"name": "duración"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "relación de aspecto"
},
"model_resolution": {
"name": "resolución"
},
"seed": {
"name": "semilla",
"tooltip": "Semilla para usar en la generación."
},
"video": {
"name": "video",
"tooltip": "El video a editar."
},
"watermark": {
"name": "marca de agua",
"tooltip": "Si se debe añadir una marca de agua generada por IA al resultado."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanAnimateToVideo": {
"display_name": "WanAnimateToVideo",
"inputs": {

View File

@@ -2527,23 +2527,6 @@
"inputsNone": "بدون ورودی",
"inputsNoneTooltip": "این نود ورودی ندارد",
"locateNode": "یافتن node در canvas",
"missingMedia": {
"audio": "صدا",
"cancelSelection": "لغو انتخاب",
"collapseNodes": "پنهان کردن nodeهای ارجاع‌دهنده",
"confirmSelection": "تأیید انتخاب",
"expandNodes": "نمایش nodeهای ارجاع‌دهنده",
"image": "تصاویر",
"locateNode": "یافتن node",
"missingMediaTitle": "ورودی‌های گمشده",
"or": "یا",
"selectedFromLibrary": "انتخاب شده از کتابخانه",
"uploadFile": "بارگذاری {type}",
"uploaded": "بارگذاری شد",
"uploading": "در حال بارگذاری...",
"useFromLibrary": "استفاده از کتابخانه",
"video": "ویدیوها"
},
"missingModels": {
"alreadyExistsInCategory": "این مدل قبلاً در «{category}» وجود دارد",
"assetLoadTimeout": "شناسایی مدل زمان‌بر شد. لطفاً workflow را مجدداً بارگذاری کنید.",

View File

@@ -17951,241 +17951,6 @@
}
}
},
"Wan2ImageToVideoApi": {
"description": "تولید ویدیو از تصویر فریم اول، با امکان افزودن تصویر فریم آخر و صدا به صورت اختیاری.",
"display_name": "وان ۲.۷ تبدیل تصویر به ویدیو",
"inputs": {
"audio": {
"name": "صدا",
"tooltip": "صدا برای هدایت تولید ویدیو (مثلاً هماهنگی لب، حرکت هماهنگ با ضرب‌آهنگ). مدت زمان: ۲ تا ۳۰ ثانیه. در صورت عدم ارائه، مدل به طور خودکار موسیقی پس‌زمینه یا افکت صوتی مناسب تولید می‌کند."
},
"control_after_generate": {
"name": "کنترل پس از تولید"
},
"first_frame": {
"name": "فریم اول",
"tooltip": "تصویر فریم اول. نسبت ابعاد خروجی از این تصویر استخراج می‌شود."
},
"last_frame": {
"name": "فریم آخر",
"tooltip": "تصویر فریم آخر. مدل ویدیویی با انتقال از فریم اول به فریم آخر تولید می‌کند."
},
"model": {
"name": "مدل"
},
"model_duration": {
"name": "مدت زمان"
},
"model_negative_prompt": {
"name": "پرامپت منفی"
},
"model_prompt": {
"name": "پرامپت"
},
"model_resolution": {
"name": "رزولوشن"
},
"prompt_extend": {
"name": "گسترش پرامپت",
"tooltip": "آیا پرامپت با کمک هوش مصنوعی بهبود یابد یا خیر."
},
"seed": {
"name": "بذر",
"tooltip": "بذر مورد استفاده برای تولید."
},
"watermark": {
"name": "واترمارک",
"tooltip": "آیا واترمارک تولیدشده توسط هوش مصنوعی به نتیجه اضافه شود یا خیر."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2ReferenceVideoApi": {
"description": "تولید ویدیو با حضور یک شخص یا شیء بر اساس مواد مرجع. پشتیبانی از اجرای تک‌نفره و تعامل چندنفره.",
"display_name": "وان ۲.۷ مرجع به ویدیو",
"inputs": {
"control_after_generate": {
"name": "کنترل پس از تولید"
},
"model": {
"name": "مدل"
},
"model_duration": {
"name": "مدت زمان"
},
"model_negative_prompt": {
"name": "پرامپت منفی"
},
"model_prompt": {
"name": "پرامپت"
},
"model_ratio": {
"name": "نسبت تصویر"
},
"model_resolution": {
"name": "رزولوشن"
},
"seed": {
"name": "بذر",
"tooltip": "بذر مورد استفاده برای تولید."
},
"watermark": {
"name": "واترمارک",
"tooltip": "آیا واترمارک تولیدشده توسط هوش مصنوعی به نتیجه اضافه شود یا خیر."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2TextToVideoApi": {
"description": "تولید ویدیو بر اساس یک پرامپت متنی با استفاده از مدل وان ۲.۷.",
"display_name": "وان ۲.۷ تبدیل متن به ویدیو",
"inputs": {
"audio": {
"name": "صدا",
"tooltip": "صدا برای هدایت تولید ویدیو (مثلاً هماهنگی لب، حرکت مطابق با ضرب). مدت زمان: ۳ تا ۳۰ ثانیه. در صورت عدم ارائه، مدل به طور خودکار موسیقی پس‌زمینه یا افکت صوتی مناسب تولید می‌کند."
},
"control_after_generate": {
"name": "کنترل پس از تولید"
},
"model": {
"name": "مدل"
},
"model_duration": {
"name": "مدت زمان"
},
"model_negative_prompt": {
"name": "پرامپت منفی"
},
"model_prompt": {
"name": "پرامپت"
},
"model_ratio": {
"name": "نسبت تصویر"
},
"model_resolution": {
"name": "وضوح"
},
"prompt_extend": {
"name": "گسترش پرامپت",
"tooltip": "آیا پرامپت با کمک هوش مصنوعی بهبود یابد."
},
"seed": {
"name": "بذر",
"tooltip": "بذر مورد استفاده برای تولید."
},
"watermark": {
"name": "واترمارک",
"tooltip": "آیا واترمارک تولیدشده توسط هوش مصنوعی به نتیجه اضافه شود."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoContinuationApi": {
"description": "ادامه دادن یک ویدیو از جایی که متوقف شده است، با امکان کنترل فریم آخر.",
"display_name": "وان ۲.۷ ادامه ویدیو",
"inputs": {
"control_after_generate": {
"name": "کنترل پس از تولید"
},
"first_clip": {
"name": "کلیپ اول",
"tooltip": "ویدیوی ورودی برای ادامه دادن. مدت زمان: ۲ تا ۱۰ ثانیه. نسبت تصویر خروجی از این ویدیو گرفته می‌شود."
},
"last_frame": {
"name": "فریم آخر",
"tooltip": "تصویر فریم آخر. ادامه ویدیو به سمت این فریم انتقال می‌یابد."
},
"model": {
"name": "مدل"
},
"model_duration": {
"name": "مدت زمان"
},
"model_negative_prompt": {
"name": "پرامپت منفی"
},
"model_prompt": {
"name": "پرامپت"
},
"model_resolution": {
"name": "وضوح"
},
"prompt_extend": {
"name": "گسترش پرامپت",
"tooltip": "آیا پرامپت با کمک هوش مصنوعی بهبود یابد."
},
"seed": {
"name": "بذر",
"tooltip": "بذر مورد استفاده برای تولید."
},
"watermark": {
"name": "واترمارک",
"tooltip": "آیا واترمارک تولیدشده توسط هوش مصنوعی به نتیجه اضافه شود."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoEditApi": {
"description": "ویرایش ویدیو با استفاده از دستور متنی، تصاویر مرجع یا انتقال سبک.",
"display_name": "وان ۲.۷ ویرایش ویدیو",
"inputs": {
"audio_setting": {
"name": "تنظیمات صدا",
"tooltip": "'auto': مدل تصمیم می‌گیرد که آیا صدا بر اساس پرامپت بازتولید شود یا نه. 'origin': صدای اصلی ویدیوی ورودی حفظ می‌شود."
},
"control_after_generate": {
"name": "کنترل پس از تولید"
},
"model": {
"name": "مدل"
},
"model_duration": {
"name": "مدت زمان"
},
"model_prompt": {
"name": "پرامپت"
},
"model_ratio": {
"name": "نسبت تصویر"
},
"model_resolution": {
"name": "وضوح"
},
"seed": {
"name": "بذر",
"tooltip": "بذر مورد استفاده برای تولید."
},
"video": {
"name": "ویدیو",
"tooltip": "ویدیویی که باید ویرایش شود."
},
"watermark": {
"name": "واترمارک",
"tooltip": "آیا واترمارک تولیدشده توسط هوش مصنوعی به نتیجه اضافه شود."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanAnimateToVideo": {
"display_name": "WanAnimateToVideo",
"inputs": {

View File

@@ -2527,23 +2527,6 @@
"inputsNone": "AUCUNE ENTRÉE",
"inputsNoneTooltip": "Le nœud n'a pas d'entrées",
"locateNode": "Localiser le nœud sur le canevas",
"missingMedia": {
"audio": "Audio",
"cancelSelection": "Annuler la sélection",
"collapseNodes": "Masquer les nœuds de référence",
"confirmSelection": "Confirmer la sélection",
"expandNodes": "Afficher les nœuds de référence",
"image": "Images",
"locateNode": "Localiser le nœud",
"missingMediaTitle": "Entrées manquantes",
"or": "OU",
"selectedFromLibrary": "Sélectionné depuis la bibliothèque",
"uploadFile": "Téléverser {type}",
"uploaded": "Téléversé",
"uploading": "Téléversement en cours...",
"useFromLibrary": "Utiliser depuis la bibliothèque",
"video": "Vidéos"
},
"missingModels": {
"alreadyExistsInCategory": "Ce modèle existe déjà dans « {category} »",
"assetLoadTimeout": "Le délai de détection du modèle est dépassé. Essayez de recharger le workflow.",

View File

@@ -17951,241 +17951,6 @@
}
}
},
"Wan2ImageToVideoApi": {
"description": "Générez une vidéo à partir d'une image de première image, avec une image de dernière image et un audio optionnels.",
"display_name": "Wan 2.7 Image vers Vidéo",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "Audio pour guider la génération vidéo (ex : synchronisation labiale, mouvement sur le rythme). Durée : 2s-30s. Si non fourni, le modèle génère automatiquement une musique de fond ou des effets sonores adaptés."
},
"control_after_generate": {
"name": "contrôle après génération"
},
"first_frame": {
"name": "première image",
"tooltip": "Image de la première image. Le format de sortie est dérivé de cette image."
},
"last_frame": {
"name": "dernière image",
"tooltip": "Image de la dernière image. Le modèle génère une vidéo passant de la première à la dernière image."
},
"model": {
"name": "modèle"
},
"model_duration": {
"name": "durée"
},
"model_negative_prompt": {
"name": "prompt négatif"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "résolution"
},
"prompt_extend": {
"name": "extension de prompt",
"tooltip": "Permet d'améliorer le prompt avec l'aide de l'IA."
},
"seed": {
"name": "graine",
"tooltip": "Graine à utiliser pour la génération."
},
"watermark": {
"name": "filigrane",
"tooltip": "Ajouter ou non un filigrane généré par IA au résultat."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2ReferenceVideoApi": {
"description": "Générez une vidéo mettant en scène une personne ou un objet à partir de références. Prend en charge les performances à un personnage et les interactions multi-personnages.",
"display_name": "Wan 2.7 Référence vers Vidéo",
"inputs": {
"control_after_generate": {
"name": "contrôle après génération"
},
"model": {
"name": "modèle"
},
"model_duration": {
"name": "durée"
},
"model_negative_prompt": {
"name": "prompt négatif"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "résolution"
},
"seed": {
"name": "graine",
"tooltip": "Graine à utiliser pour la génération."
},
"watermark": {
"name": "filigrane",
"tooltip": "Ajouter ou non un filigrane généré par IA au résultat."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2TextToVideoApi": {
"description": "Génère une vidéo à partir d'une invite textuelle en utilisant le modèle Wan 2.7.",
"display_name": "Wan 2.7 Texte en Vidéo",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "Audio pour guider la génération vidéo (ex : synchronisation labiale, mouvement sur le rythme). Durée : 3s-30s. Si non fourni, le modèle génère automatiquement une musique de fond ou des effets sonores adaptés."
},
"control_after_generate": {
"name": "contrôle après génération"
},
"model": {
"name": "modèle"
},
"model_duration": {
"name": "durée"
},
"model_negative_prompt": {
"name": "invite négative"
},
"model_prompt": {
"name": "invite"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "résolution"
},
"prompt_extend": {
"name": "extension d'invite",
"tooltip": "Améliorer l'invite avec l'assistance de l'IA."
},
"seed": {
"name": "graine",
"tooltip": "Graine à utiliser pour la génération."
},
"watermark": {
"name": "filigrane",
"tooltip": "Ajouter un filigrane généré par IA au résultat."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoContinuationApi": {
"description": "Continue une vidéo à partir de l'endroit où elle s'est arrêtée, avec un contrôle optionnel de la dernière image.",
"display_name": "Wan 2.7 Continuation Vidéo",
"inputs": {
"control_after_generate": {
"name": "contrôle après génération"
},
"first_clip": {
"name": "premier clip",
"tooltip": "Vidéo d'entrée à continuer. Durée : 2s-10s. Le ratio de sortie est dérivé de cette vidéo."
},
"last_frame": {
"name": "dernière image",
"tooltip": "Image de la dernière frame. La continuation effectuera une transition vers cette image."
},
"model": {
"name": "modèle"
},
"model_duration": {
"name": "durée"
},
"model_negative_prompt": {
"name": "invite négative"
},
"model_prompt": {
"name": "invite"
},
"model_resolution": {
"name": "résolution"
},
"prompt_extend": {
"name": "extension d'invite",
"tooltip": "Améliorer l'invite avec l'assistance de l'IA."
},
"seed": {
"name": "graine",
"tooltip": "Graine à utiliser pour la génération."
},
"watermark": {
"name": "filigrane",
"tooltip": "Ajouter un filigrane généré par IA au résultat."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoEditApi": {
"description": "Éditez une vidéo à l'aide d'instructions textuelles, d'images de référence ou de transfert de style.",
"display_name": "Wan 2.7 Édition Vidéo",
"inputs": {
"audio_setting": {
"name": "paramètre audio",
"tooltip": "'auto' : le modèle décide de régénérer ou non l'audio selon l'invite. 'origin' : préserve l'audio original de la vidéo d'entrée."
},
"control_after_generate": {
"name": "contrôle après génération"
},
"model": {
"name": "modèle"
},
"model_duration": {
"name": "durée"
},
"model_prompt": {
"name": "invite"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "résolution"
},
"seed": {
"name": "graine",
"tooltip": "Graine à utiliser pour la génération."
},
"video": {
"name": "vidéo",
"tooltip": "La vidéo à éditer."
},
"watermark": {
"name": "filigrane",
"tooltip": "Ajouter un filigrane généré par IA au résultat."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanAnimateToVideo": {
"display_name": "WanAnimateToVideo",
"inputs": {

View File

@@ -2527,23 +2527,6 @@
"inputsNone": "入力なし",
"inputsNoneTooltip": "このノードには入力がありません",
"locateNode": "キャンバス上でノードを探す",
"missingMedia": {
"audio": "音声",
"cancelSelection": "選択をキャンセル",
"collapseNodes": "参照ノードを非表示",
"confirmSelection": "選択を確定",
"expandNodes": "参照ノードを表示",
"image": "画像",
"locateNode": "ノードを特定",
"missingMediaTitle": "入力がありません",
"or": "または",
"selectedFromLibrary": "ライブラリから選択済み",
"uploadFile": "{type}をアップロード",
"uploaded": "アップロード完了",
"uploading": "アップロード中...",
"useFromLibrary": "ライブラリから使用",
"video": "動画"
},
"missingModels": {
"alreadyExistsInCategory": "このモデルはすでに「{category}」に存在します",
"assetLoadTimeout": "モデルの検出がタイムアウトしました。ワークフローを再読み込みしてください。",

View File

@@ -17951,241 +17951,6 @@
}
}
},
"Wan2ImageToVideoApi": {
"description": "最初のフレーム画像から動画を生成します。オプションで最後のフレーム画像や音声も指定できます。",
"display_name": "Wan 2.7 画像から動画へ",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "動画生成を制御する音声(リップシンク、ビートに合わせた動き)。長さ:2秒〜30秒。指定しない場合は、モデルが自動的に合うBGMや効果音を生成します。"
},
"control_after_generate": {
"name": "control after generate"
},
"first_frame": {
"name": "first_frame",
"tooltip": "最初のフレーム画像。この画像から出力動画のアスペクト比が決まります。"
},
"last_frame": {
"name": "last_frame",
"tooltip": "最後のフレーム画像。最初から最後のフレームへと遷移する動画を生成します。"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "AIによるプロンプトの強化を行うかどうか。"
},
"seed": {
"name": "seed",
"tooltip": "生成に使用するシード値。"
},
"watermark": {
"name": "watermark",
"tooltip": "AI生成のウォーターマークを結果に追加するかどうか。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2ReferenceVideoApi": {
"description": "リファレンス素材から人物や物体を特徴とする動画を生成します。単一キャラクターの演技や複数キャラクターのインタラクションに対応しています。",
"display_name": "Wan 2.7 リファレンスから動画へ",
"inputs": {
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"seed": {
"name": "seed",
"tooltip": "生成に使用するシード値。"
},
"watermark": {
"name": "watermark",
"tooltip": "AI生成のウォーターマークを結果に追加するかどうか。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2TextToVideoApi": {
"description": "Wan 2.7モデルを使用してテキストプロンプトに基づいたビデオを生成します。",
"display_name": "Wan 2.7 テキストからビデオへ",
"inputs": {
"audio": {
"name": "オーディオ",
"tooltip": "ビデオ生成を駆動する音声(リップシンク、ビートに合わせた動き)。長さ:3秒〜30秒。未指定の場合、モデルが自動的に合うBGMや効果音を生成します。"
},
"control_after_generate": {
"name": "生成後のコントロール"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "長さ"
},
"model_negative_prompt": {
"name": "ネガティブプロンプト"
},
"model_prompt": {
"name": "プロンプト"
},
"model_ratio": {
"name": "アスペクト比"
},
"model_resolution": {
"name": "解像度"
},
"prompt_extend": {
"name": "プロンプト拡張",
"tooltip": "AIアシストでプロンプトを強化するかどうか。"
},
"seed": {
"name": "シード値",
"tooltip": "生成に使用するシード値。"
},
"watermark": {
"name": "ウォーターマーク",
"tooltip": "AI生成のウォーターマークを結果に追加するかどうか。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoContinuationApi": {
"description": "ビデオの続きから生成を行い、オプションでラストフレーム制御も可能です。",
"display_name": "Wan 2.7 ビデオ継続生成",
"inputs": {
"control_after_generate": {
"name": "生成後のコントロール"
},
"first_clip": {
"name": "最初のクリップ",
"tooltip": "継続元となる入力ビデオ。長さ:2秒〜10秒。出力のアスペクト比はこのビデオから取得されます。"
},
"last_frame": {
"name": "ラストフレーム",
"tooltip": "ラストフレーム画像。継続生成はこのフレームへと遷移します。"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "長さ"
},
"model_negative_prompt": {
"name": "ネガティブプロンプト"
},
"model_prompt": {
"name": "プロンプト"
},
"model_resolution": {
"name": "解像度"
},
"prompt_extend": {
"name": "プロンプト拡張",
"tooltip": "AIアシストでプロンプトを強化するかどうか。"
},
"seed": {
"name": "シード値",
"tooltip": "生成に使用するシード値。"
},
"watermark": {
"name": "ウォーターマーク",
"tooltip": "AI生成のウォーターマークを結果に追加するかどうか。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoEditApi": {
"description": "テキスト指示、参照画像、またはスタイル転送を使ってビデオを編集します。",
"display_name": "Wan 2.7 ビデオ編集",
"inputs": {
"audio_setting": {
"name": "オーディオ設定",
"tooltip": "「auto」:プロンプトに基づきモデルが音声再生成の有無を判断。「origin」:入力ビデオの元の音声を保持。"
},
"control_after_generate": {
"name": "生成後のコントロール"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "長さ"
},
"model_prompt": {
"name": "プロンプト"
},
"model_ratio": {
"name": "アスペクト比"
},
"model_resolution": {
"name": "解像度"
},
"seed": {
"name": "シード値",
"tooltip": "生成に使用するシード値。"
},
"video": {
"name": "ビデオ",
"tooltip": "編集対象のビデオ。"
},
"watermark": {
"name": "ウォーターマーク",
"tooltip": "AI生成のウォーターマークを結果に追加するかどうか。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanAnimateToVideo": {
"display_name": "WanAnimateToVideo",
"inputs": {

View File

@@ -2527,23 +2527,6 @@
"inputsNone": "입력 없음",
"inputsNoneTooltip": "노드에 입력이 없습니다",
"locateNode": "캔버스에서 노드 찾기",
"missingMedia": {
"audio": "오디오",
"cancelSelection": "선택 취소",
"collapseNodes": "참조 노드 숨기기",
"confirmSelection": "선택 확인",
"expandNodes": "참조 노드 표시",
"image": "이미지",
"locateNode": "노드 위치 찾기",
"missingMediaTitle": "입력 누락",
"or": "또는",
"selectedFromLibrary": "라이브러리에서 선택됨",
"uploadFile": "{type} 업로드",
"uploaded": "업로드 완료",
"uploading": "업로드 중...",
"useFromLibrary": "라이브러리에서 사용",
"video": "비디오"
},
"missingModels": {
"alreadyExistsInCategory": "이 모델은 이미 \"{category}\"에 존재합니다",
"assetLoadTimeout": "모델 감지 시간이 초과되었습니다. 워크플로우를 다시 불러와 보세요.",

View File

@@ -17951,241 +17951,6 @@
}
}
},
"Wan2ImageToVideoApi": {
"description": "첫 프레임 이미지를 기반으로 비디오를 생성합니다. 마지막 프레임 이미지와 오디오를 선택적으로 추가할 수 있습니다.",
"display_name": "Wan 2.7 이미지 → 비디오",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "비디오 생성에 사용할 오디오입니다(예: 립싱크, 비트에 맞춘 동작). 길이: 2초~30초. 제공하지 않으면 모델이 자동으로 배경 음악 또는 효과음을 생성합니다."
},
"control_after_generate": {
"name": "control after generate"
},
"first_frame": {
"name": "first_frame",
"tooltip": "첫 프레임 이미지입니다. 출력 비율은 이 이미지에서 결정됩니다."
},
"last_frame": {
"name": "last_frame",
"tooltip": "마지막 프레임 이미지입니다. 모델이 첫 프레임에서 마지막 프레임으로 전환되는 비디오를 생성합니다."
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "AI의 도움으로 프롬프트를 확장할지 여부입니다."
},
"seed": {
"name": "seed",
"tooltip": "생성에 사용할 시드 값입니다."
},
"watermark": {
"name": "watermark",
"tooltip": "AI가 생성한 워터마크를 결과에 추가할지 여부입니다."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2ReferenceVideoApi": {
"description": "레퍼런스 자료를 기반으로 인물 또는 오브젝트가 등장하는 비디오를 생성합니다. 단일 캐릭터 연기와 다중 캐릭터 상호작용을 지원합니다.",
"display_name": "Wan 2.7 레퍼런스 → 비디오",
"inputs": {
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"seed": {
"name": "seed",
"tooltip": "생성에 사용할 시드 값입니다."
},
"watermark": {
"name": "watermark",
"tooltip": "AI가 생성한 워터마크를 결과에 추가할지 여부입니다."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2TextToVideoApi": {
"description": "Wan 2.7 모델을 사용하여 텍스트 프롬프트 기반으로 비디오를 생성합니다.",
"display_name": "Wan 2.7 텍스트 투 비디오",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "비디오 생성에 사용할 오디오(예: 립싱크, 비트에 맞춘 동작). 길이: 3초~30초. 제공하지 않으면 모델이 자동으로 배경 음악이나 효과음을 생성합니다."
},
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "AI의 도움으로 프롬프트를 확장할지 여부입니다."
},
"seed": {
"name": "seed",
"tooltip": "생성에 사용할 시드입니다."
},
"watermark": {
"name": "watermark",
"tooltip": "AI가 생성한 워터마크를 결과에 추가할지 여부입니다."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoContinuationApi": {
"description": "비디오가 끝난 지점부터 이어서 생성하며, 마지막 프레임 제어도 선택적으로 지원합니다.",
"display_name": "Wan 2.7 비디오 연속 생성",
"inputs": {
"control_after_generate": {
"name": "control after generate"
},
"first_clip": {
"name": "first_clip",
"tooltip": "이어 생성할 입력 비디오입니다. 길이: 2초~10초. 출력 비율은 이 비디오에서 파생됩니다."
},
"last_frame": {
"name": "last_frame",
"tooltip": "마지막 프레임 이미지입니다. 이어지는 비디오는 이 프레임으로 전환됩니다."
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "AI의 도움으로 프롬프트를 확장할지 여부입니다."
},
"seed": {
"name": "seed",
"tooltip": "생성에 사용할 시드입니다."
},
"watermark": {
"name": "watermark",
"tooltip": "AI가 생성한 워터마크를 결과에 추가할지 여부입니다."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoEditApi": {
"description": "텍스트 지시, 참조 이미지 또는 스타일 전송을 사용하여 비디오를 편집합니다.",
"display_name": "Wan 2.7 비디오 편집",
"inputs": {
"audio_setting": {
"name": "audio_setting",
"tooltip": "'auto': 프롬프트에 따라 오디오를 재생성할지 모델이 결정합니다. 'origin': 입력 비디오의 원본 오디오를 유지합니다."
},
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"seed": {
"name": "seed",
"tooltip": "생성에 사용할 시드입니다."
},
"video": {
"name": "video",
"tooltip": "편집할 비디오입니다."
},
"watermark": {
"name": "watermark",
"tooltip": "AI가 생성한 워터마크를 결과에 추가할지 여부입니다."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanAnimateToVideo": {
"display_name": "완애니메이트투비디오",
"inputs": {

View File

@@ -2527,23 +2527,6 @@
"inputsNone": "SEM ENTRADAS",
"inputsNoneTooltip": "O nó não possui entradas",
"locateNode": "Localizar nó no canvas",
"missingMedia": {
"audio": "Áudio",
"cancelSelection": "Cancelar seleção",
"collapseNodes": "Ocultar nós de referência",
"confirmSelection": "Confirmar seleção",
"expandNodes": "Mostrar nós de referência",
"image": "Imagens",
"locateNode": "Localizar nó",
"missingMediaTitle": "Entradas Ausentes",
"or": "OU",
"selectedFromLibrary": "Selecionado da biblioteca",
"uploadFile": "Enviar {type}",
"uploaded": "Enviado",
"uploading": "Enviando...",
"useFromLibrary": "Usar da Biblioteca",
"video": "Vídeos"
},
"missingModels": {
"alreadyExistsInCategory": "Este modelo já existe em \"{category}\"",
"assetLoadTimeout": "Tempo esgotado na detecção do modelo. Tente recarregar o fluxo de trabalho.",

View File

@@ -17951,241 +17951,6 @@
}
}
},
"Wan2ImageToVideoApi": {
"description": "Gere um vídeo a partir de uma imagem do primeiro quadro, com opção de imagem do último quadro e áudio.",
"display_name": "Wan 2.7 Imagem para Vídeo",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "Áudio para direcionar a geração do vídeo (ex: sincronização labial, movimento no ritmo da batida). Duração: 2s-30s. Se não for fornecido, o modelo gera automaticamente música de fundo ou efeitos sonoros correspondentes."
},
"control_after_generate": {
"name": "control after generate"
},
"first_frame": {
"name": "first_frame",
"tooltip": "Imagem do primeiro quadro. A proporção de aspecto de saída é derivada desta imagem."
},
"last_frame": {
"name": "last_frame",
"tooltip": "Imagem do último quadro. O modelo gera um vídeo fazendo a transição do primeiro para o último quadro."
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "Se deseja aprimorar o prompt com assistência de IA."
},
"seed": {
"name": "seed",
"tooltip": "Seed para usar na geração."
},
"watermark": {
"name": "watermark",
"tooltip": "Se deseja adicionar uma marca d'água gerada por IA ao resultado."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2ReferenceVideoApi": {
"description": "Gere um vídeo apresentando uma pessoa ou objeto a partir de materiais de referência. Suporta performances de personagem único e interações entre múltiplos personagens.",
"display_name": "Wan 2.7 Referência para Vídeo",
"inputs": {
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"seed": {
"name": "seed",
"tooltip": "Seed para usar na geração."
},
"watermark": {
"name": "watermark",
"tooltip": "Se deseja adicionar uma marca d'água gerada por IA ao resultado."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2TextToVideoApi": {
"description": "Gera um vídeo com base em um prompt de texto usando o modelo Wan 2.7.",
"display_name": "Wan 2.7 Texto para Vídeo",
"inputs": {
"audio": {
"name": "áudio",
"tooltip": "Áudio para direcionar a geração do vídeo (ex: sincronização labial, movimento no ritmo da música). Duração: 3s-30s. Se não fornecido, o modelo gera automaticamente música de fundo ou efeitos sonoros compatíveis."
},
"control_after_generate": {
"name": "controle após gerar"
},
"model": {
"name": "modelo"
},
"model_duration": {
"name": "duração"
},
"model_negative_prompt": {
"name": "prompt_negativo"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "proporção"
},
"model_resolution": {
"name": "resolução"
},
"prompt_extend": {
"name": "estender_prompt",
"tooltip": "Se deseja aprimorar o prompt com assistência de IA."
},
"seed": {
"name": "semente",
"tooltip": "Semente a ser usada para a geração."
},
"watermark": {
"name": "marca_d'água",
"tooltip": "Se deseja adicionar uma marca d'água gerada por IA ao resultado."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoContinuationApi": {
"description": "Continue um vídeo de onde parou, com controle opcional do último quadro.",
"display_name": "Wan 2.7 Continuação de Vídeo",
"inputs": {
"control_after_generate": {
"name": "controle após gerar"
},
"first_clip": {
"name": "primeiro_clip",
"tooltip": "Vídeo de entrada para continuar. Duração: 2s-10s. A proporção de aspecto de saída é derivada deste vídeo."
},
"last_frame": {
"name": "último_quadro",
"tooltip": "Imagem do último quadro. A continuação fará a transição para este quadro."
},
"model": {
"name": "modelo"
},
"model_duration": {
"name": "duração"
},
"model_negative_prompt": {
"name": "prompt_negativo"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolução"
},
"prompt_extend": {
"name": "estender_prompt",
"tooltip": "Se deseja aprimorar o prompt com assistência de IA."
},
"seed": {
"name": "semente",
"tooltip": "Semente a ser usada para a geração."
},
"watermark": {
"name": "marca_d'água",
"tooltip": "Se deseja adicionar uma marca d'água gerada por IA ao resultado."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoEditApi": {
"description": "Edite um vídeo usando instruções de texto, imagens de referência ou transferência de estilo.",
"display_name": "Wan 2.7 Edição de Vídeo",
"inputs": {
"audio_setting": {
"name": "configuração_de_áudio",
"tooltip": "'auto': o modelo decide se deve regenerar o áudio com base no prompt. 'origin': preserva o áudio original do vídeo de entrada."
},
"control_after_generate": {
"name": "controle após gerar"
},
"model": {
"name": "modelo"
},
"model_duration": {
"name": "duração"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "proporção"
},
"model_resolution": {
"name": "resolução"
},
"seed": {
"name": "semente",
"tooltip": "Semente a ser usada para a geração."
},
"video": {
"name": "vídeo",
"tooltip": "O vídeo a ser editado."
},
"watermark": {
"name": "marca_d'água",
"tooltip": "Se deseja adicionar uma marca d'água gerada por IA ao resultado."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanAnimateToVideo": {
"display_name": "WanAnimateToVideo",
"inputs": {

View File

@@ -2527,23 +2527,6 @@
"inputsNone": "НЕТ ВХОДОВ",
"inputsNoneTooltip": "Узел не имеет входов",
"locateNode": "Найти узел на холсте",
"missingMedia": {
"audio": "Аудио",
"cancelSelection": "Отменить выбор",
"collapseNodes": "Скрыть ссылающиеся узлы",
"confirmSelection": "Подтвердить выбор",
"expandNodes": "Показать ссылающиеся узлы",
"image": "Изображения",
"locateNode": "Найти узел",
"missingMediaTitle": "Отсутствующие входные данные",
"or": "ИЛИ",
"selectedFromLibrary": "Выбрано из библиотеки",
"uploadFile": "Загрузить {type}",
"uploaded": "Загружено",
"uploading": "Загрузка...",
"useFromLibrary": "Использовать из библиотеки",
"video": "Видео"
},
"missingModels": {
"alreadyExistsInCategory": "Эта модель уже существует в «{category}»",
"assetLoadTimeout": "Время ожидания обнаружения модели истекло. Попробуйте перезагрузить рабочий процесс.",

View File

@@ -17951,241 +17951,6 @@
}
}
},
"Wan2ImageToVideoApi": {
"description": "Создайте видео из изображения первого кадра с возможностью добавить изображение последнего кадра и аудио.",
"display_name": "Wan 2.7 Изображение в Видео",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "Аудио для управления генерацией видео (например, синхронизация губ, движение в ритм музыки). Длительность: 2–30 сек. Если не указано, модель автоматически сгенерирует подходящую фоновую музыку или звуковые эффекты."
},
"control_after_generate": {
"name": "control after generate"
},
"first_frame": {
"name": "first_frame",
"tooltip": "Изображение первого кадра. Соотношение сторон результата определяется этим изображением."
},
"last_frame": {
"name": "last_frame",
"tooltip": "Изображение последнего кадра. Модель создаёт видео с переходом от первого к последнему кадру."
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "Улучшить ли промпт с помощью AI."
},
"seed": {
"name": "seed",
"tooltip": "Сид для генерации."
},
"watermark": {
"name": "watermark",
"tooltip": "Добавлять ли AI-водяной знак к результату."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2ReferenceVideoApi": {
"description": "Создайте видео с участием человека или объекта на основе референсных материалов. Поддерживает выступления одного персонажа и взаимодействие нескольких персонажей.",
"display_name": "Wan 2.7 Референс в Видео",
"inputs": {
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"seed": {
"name": "seed",
"tooltip": "Сид для генерации."
},
"watermark": {
"name": "watermark",
"tooltip": "Добавлять ли AI-водяной знак к результату."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2TextToVideoApi": {
"description": "Генерирует видео на основе текстового запроса с использованием модели Wan 2.7.",
"display_name": "Wan 2.7 Текст в видео",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "Аудио для управления генерацией видео (например, синхронизация губ, движение в ритм музыки). Длительность: 3–30 сек. Если не указано, модель автоматически сгенерирует подходящую фоновую музыку или звуковые эффекты."
},
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "Улучшать ли запрос с помощью ИИ."
},
"seed": {
"name": "seed",
"tooltip": "Зерно для генерации."
},
"watermark": {
"name": "watermark",
"tooltip": "Добавлять ли водяной знак, сгенерированный ИИ, к результату."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoContinuationApi": {
"description": "Продолжает видео с того места, где оно закончилось, с возможностью контроля последнего кадра.",
"display_name": "Wan 2.7 Продолжение видео",
"inputs": {
"control_after_generate": {
"name": "control after generate"
},
"first_clip": {
"name": "first_clip",
"tooltip": "Входное видео для продолжения. Длительность: 2–10 сек. Соотношение сторон результата определяется этим видео."
},
"last_frame": {
"name": "last_frame",
"tooltip": "Изображение последнего кадра. Продолжение будет переходить к этому кадру."
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "Улучшать ли запрос с помощью ИИ."
},
"seed": {
"name": "seed",
"tooltip": "Зерно для генерации."
},
"watermark": {
"name": "watermark",
"tooltip": "Добавлять ли водяной знак, сгенерированный ИИ, к результату."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoEditApi": {
"description": "Редактируйте видео с помощью текстовых инструкций, референсных изображений или переноса стиля.",
"display_name": "Wan 2.7 Редактирование видео",
"inputs": {
"audio_setting": {
"name": "audio_setting",
"tooltip": "'auto': модель решает, нужно ли перегенерировать аудио на основе запроса. 'origin': сохранить оригинальное аудио из входного видео."
},
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"seed": {
"name": "seed",
"tooltip": "Зерно для генерации."
},
"video": {
"name": "video",
"tooltip": "Видео для редактирования."
},
"watermark": {
"name": "watermark",
"tooltip": "Добавлять ли водяной знак, сгенерированный ИИ, к результату."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanAnimateToVideo": {
"display_name": "WanAnimateToVideo",
"inputs": {

View File

@@ -2527,23 +2527,6 @@
"inputsNone": "GİRİŞ YOK",
"inputsNoneTooltip": "Düğümün girişi yok",
"locateNode": "Düğümü tuvalde bul",
"missingMedia": {
"audio": "Ses",
"cancelSelection": "Seçimi iptal et",
"collapseNodes": "Referans veren düğümleri gizle",
"confirmSelection": "Seçimi onayla",
"expandNodes": "Referans veren düğümleri göster",
"image": "Görseller",
"locateNode": "Düğümü bul",
"missingMediaTitle": "Eksik Girdiler",
"or": "VEYA",
"selectedFromLibrary": "Kütüphaneden seçildi",
"uploadFile": "{type} Yükle",
"uploaded": "Yüklendi",
"uploading": "Yükleniyor...",
"useFromLibrary": "Kütüphaneden Kullan",
"video": "Videolar"
},
"missingModels": {
"alreadyExistsInCategory": "Bu model zaten \"{category}\" içinde mevcut",
"assetLoadTimeout": "Model algılama zaman aşımına uğradı. Lütfen iş akışını yeniden yüklemeyi deneyin.",

View File

@@ -17951,241 +17951,6 @@
}
}
},
"Wan2ImageToVideoApi": {
"description": "İlk kare görüntüsünden bir video oluşturun, isteğe bağlı olarak son kare görüntüsü ve ses ekleyin.",
"display_name": "Wan 2.7 Görüntüden Videoya",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "Video oluşturmayı yönlendiren ses (ör. dudak senkronizasyonu, ritme uygun hareket). Süre: 2-30 saniye. Sağlanmazsa, model otomatik olarak uyumlu arka plan müziği veya ses efektleri üretir."
},
"control_after_generate": {
"name": "oluşturduktan sonra kontrol et"
},
"first_frame": {
"name": "first_frame",
"tooltip": "İlk kare görüntüsü. Çıktı en-boy oranı bu görüntüden alınır."
},
"last_frame": {
"name": "last_frame",
"tooltip": "Son kare görüntüsü. Model, ilk kareden son kareye geçiş yapan bir video oluşturur."
},
"model": {
"name": "model"
},
"model_duration": {
"name": "süre"
},
"model_negative_prompt": {
"name": "negatif_istek"
},
"model_prompt": {
"name": "istek"
},
"model_resolution": {
"name": "çözünürlük"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "İsteğin yapay zeka yardımıyla geliştirilip geliştirilmeyeceği."
},
"seed": {
"name": "seed",
"tooltip": "Oluşturma için kullanılacak tohum."
},
"watermark": {
"name": "watermark",
"tooltip": "Sonuca yapay zeka tarafından oluşturulan bir filigran eklenip eklenmeyeceği."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2ReferenceVideoApi": {
"description": "Referans materyallerden bir kişi veya nesne içeren video oluşturun. Tek karakterli performansları ve çok karakterli etkileşimleri destekler.",
"display_name": "Wan 2.7 Referanstan Videoya",
"inputs": {
"control_after_generate": {
"name": "oluşturduktan sonra kontrol et"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "süre"
},
"model_negative_prompt": {
"name": "negatif_istek"
},
"model_prompt": {
"name": "istek"
},
"model_ratio": {
"name": "oran"
},
"model_resolution": {
"name": "çözünürlük"
},
"seed": {
"name": "seed",
"tooltip": "Oluşturma için kullanılacak tohum."
},
"watermark": {
"name": "watermark",
"tooltip": "Sonuca yapay zeka tarafından oluşturulan bir filigran eklenip eklenmeyeceği."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2TextToVideoApi": {
"description": "Wan 2.7 modeli kullanılarak bir metin istemine dayalı video oluşturur.",
"display_name": "Wan 2.7 Metinden Videoya",
"inputs": {
"audio": {
"name": "ses",
"tooltip": "Video oluşturmayı yönlendirmek için ses (ör. dudak senkronizasyonu, ritme uygun hareket). Süre: 3-30 sn. Sağlanmazsa, model otomatik olarak uygun arka plan müziği veya ses efektleri üretir."
},
"control_after_generate": {
"name": "oluşturduktan sonra kontrol"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "süre"
},
"model_negative_prompt": {
"name": "negatif_istem"
},
"model_prompt": {
"name": "istem"
},
"model_ratio": {
"name": "oran"
},
"model_resolution": {
"name": "çözünürlük"
},
"prompt_extend": {
"name": "istem_genişlet",
"tooltip": "İstemin yapay zeka yardımıyla geliştirilip geliştirilmeyeceği."
},
"seed": {
"name": "tohum",
"tooltip": "Oluşturma için kullanılacak tohum."
},
"watermark": {
"name": "filigran",
"tooltip": "Sonuca yapay zeka tarafından oluşturulan bir filigran eklenip eklenmeyeceği."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoContinuationApi": {
"description": "Bir videoyu kaldığı yerden devam ettirin, isteğe bağlı olarak son kare kontrolüyle.",
"display_name": "Wan 2.7 Video Devamı",
"inputs": {
"control_after_generate": {
"name": "oluşturduktan sonra kontrol"
},
"first_clip": {
"name": "ilk_klip",
"tooltip": "Devam edilecek giriş videosu. Süre: 2-10 sn. Çıktı en-boy oranı bu videodan alınır."
},
"last_frame": {
"name": "son_kare",
"tooltip": "Son kare görseli. Devam bu kareye doğru geçiş yapacaktır."
},
"model": {
"name": "model"
},
"model_duration": {
"name": "süre"
},
"model_negative_prompt": {
"name": "negatif_istem"
},
"model_prompt": {
"name": "istem"
},
"model_resolution": {
"name": "çözünürlük"
},
"prompt_extend": {
"name": "istem_genişlet",
"tooltip": "İstemin yapay zeka yardımıyla geliştirilip geliştirilmeyeceği."
},
"seed": {
"name": "tohum",
"tooltip": "Oluşturma için kullanılacak tohum."
},
"watermark": {
"name": "filigran",
"tooltip": "Sonuca yapay zeka tarafından oluşturulan bir filigran eklenip eklenmeyeceği."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoEditApi": {
"description": "Bir videoyu metin talimatları, referans görseller veya stil transferi ile düzenleyin.",
"display_name": "Wan 2.7 Video Düzenleme",
"inputs": {
"audio_setting": {
"name": "ses_ayarı",
"tooltip": "'auto': model, isteme göre sesi yeniden oluşturup oluşturmayacağına karar verir. 'origin': giriş videosundaki orijinal sesi korur."
},
"control_after_generate": {
"name": "oluşturduktan sonra kontrol"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "süre"
},
"model_prompt": {
"name": "istem"
},
"model_ratio": {
"name": "oran"
},
"model_resolution": {
"name": "çözünürlük"
},
"seed": {
"name": "tohum",
"tooltip": "Oluşturma için kullanılacak tohum."
},
"video": {
"name": "video",
"tooltip": "Düzenlenecek video."
},
"watermark": {
"name": "filigran",
"tooltip": "Sonuca yapay zeka tarafından oluşturulan bir filigran eklenip eklenmeyeceği."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanAnimateToVideo": {
"display_name": "WanAnimateToVideo",
"inputs": {

View File

@@ -2527,23 +2527,6 @@
"inputsNone": "無輸入",
"inputsNoneTooltip": "此節點沒有輸入",
"locateNode": "在畫布上定位節點",
"missingMedia": {
"audio": "音訊",
"cancelSelection": "取消選取",
"collapseNodes": "隱藏引用節點",
"confirmSelection": "確認選取",
"expandNodes": "顯示引用節點",
"image": "影像",
"locateNode": "定位節點",
"missingMediaTitle": "缺少輸入",
"or": "或",
"selectedFromLibrary": "已從媒體庫選取",
"uploadFile": "上傳{type}",
"uploaded": "已上傳",
"uploading": "正在上傳...",
"useFromLibrary": "從媒體庫使用",
"video": "影片"
},
"missingModels": {
"alreadyExistsInCategory": "此模型已存在於「{category}」中",
"assetLoadTimeout": "模型偵測逾時。請嘗試重新載入工作流程。",

View File

@@ -17951,241 +17951,6 @@
}
}
},
"Wan2ImageToVideoApi": {
"description": "從首幀圖像產生影片,可選擇加入末幀圖像與音訊。",
"display_name": "Wan 2.7 圖像轉影片",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "用於驅動影片生成的音訊如對嘴、節奏動作。時長2秒至30秒。若未提供模型會自動產生相符的背景音樂或音效。"
},
"control_after_generate": {
"name": "control after generate"
},
"first_frame": {
"name": "first_frame",
"tooltip": "首幀圖像。輸出長寬比將依此圖像決定。"
},
"last_frame": {
"name": "last_frame",
"tooltip": "末幀圖像。模型將產生從首幀到末幀的過渡影片。"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "是否使用 AI 協助增強提示詞。"
},
"seed": {
"name": "seed",
"tooltip": "用於生成的隨機種子。"
},
"watermark": {
"name": "watermark",
"tooltip": "是否在結果中加入 AI 產生的浮水印。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2ReferenceVideoApi": {
"description": "根據參考素材生成包含人物或物件的影片。支援單一角色表演與多角色互動。",
"display_name": "Wan 2.7 參考生成影片",
"inputs": {
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"seed": {
"name": "seed",
"tooltip": "用於生成的隨機種子。"
},
"watermark": {
"name": "watermark",
"tooltip": "是否在結果中加入 AI 產生的浮水印。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2TextToVideoApi": {
"description": "使用 Wan 2.7 模型根據文字提示生成影片。",
"display_name": "Wan 2.7 文字轉影片",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "用於驅動影片生成的音訊如對嘴、節奏同步動作。時長3秒至30秒。若未提供模型會自動生成匹配的背景音樂或音效。"
},
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "是否使用 AI 協助增強提示詞。"
},
"seed": {
"name": "seed",
"tooltip": "用於生成的種子值。"
},
"watermark": {
"name": "watermark",
"tooltip": "是否在結果中加入 AI 生成的浮水印。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoContinuationApi": {
"description": "從影片結束處繼續生成影片,可選擇最後一幀控制。",
"display_name": "Wan 2.7 影片續接",
"inputs": {
"control_after_generate": {
"name": "control after generate"
},
"first_clip": {
"name": "first_clip",
"tooltip": "要續接的輸入影片。時長2秒至10秒。輸出長寬比將依此影片決定。"
},
"last_frame": {
"name": "last_frame",
"tooltip": "最後一幀圖像。續接內容將朝此幀過渡。"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "是否使用 AI 協助增強提示詞。"
},
"seed": {
"name": "seed",
"tooltip": "用於生成的種子值。"
},
"watermark": {
"name": "watermark",
"tooltip": "是否在結果中加入 AI 生成的浮水印。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoEditApi": {
"description": "使用文字指令、參考圖像或風格轉換編輯影片。",
"display_name": "Wan 2.7 影片編輯",
"inputs": {
"audio_setting": {
"name": "audio_setting",
"tooltip": "「auto」模型根據提示詞決定是否重新生成音訊。「origin」保留輸入影片的原始音訊。"
},
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"seed": {
"name": "seed",
"tooltip": "用於生成的種子值。"
},
"video": {
"name": "video",
"tooltip": "要編輯的影片。"
},
"watermark": {
"name": "watermark",
"tooltip": "是否在結果中加入 AI 生成的浮水印。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanAnimateToVideo": {
"display_name": "WanAnimateToVideo",
"inputs": {

View File

@@ -2527,23 +2527,6 @@
"inputsNone": "无输入",
"inputsNoneTooltip": "节点没有输入",
"locateNode": "在画布上定位节点",
"missingMedia": {
"audio": "音频",
"cancelSelection": "取消选择",
"collapseNodes": "隐藏引用节点",
"confirmSelection": "确认选择",
"expandNodes": "显示引用节点",
"image": "图像",
"locateNode": "定位节点",
"missingMediaTitle": "缺少输入",
"or": "或",
"selectedFromLibrary": "已从素材库选择",
"uploadFile": "上传{type}",
"uploaded": "已上传",
"uploading": "正在上传...",
"useFromLibrary": "从素材库选择",
"video": "视频"
},
"missingModels": {
"alreadyExistsInCategory": "该模型已存在于“{category}”中",
"assetLoadTimeout": "模型检测超时。请尝试重新加载工作流。",

View File

@@ -17951,241 +17951,6 @@
}
}
},
"Wan2ImageToVideoApi": {
"description": "根据首帧图像生成视频,可选末帧图像和音频。",
"display_name": "Wan 2.7 图像转视频",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "用于驱动视频生成的音频如对口型、节奏匹配动作。时长2秒-30秒。如未提供模型会自动生成匹配的背景音乐或音效。"
},
"control_after_generate": {
"name": "control after generate"
},
"first_frame": {
"name": "first_frame",
"tooltip": "首帧图像。输出的宽高比由该图像决定。"
},
"last_frame": {
"name": "last_frame",
"tooltip": "末帧图像。模型将生成从首帧到末帧的过渡视频。"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "是否使用 AI 辅助增强提示词。"
},
"seed": {
"name": "seed",
"tooltip": "用于生成的种子。"
},
"watermark": {
"name": "watermark",
"tooltip": "是否为结果添加 AI 生成的水印。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2ReferenceVideoApi": {
"description": "根据参考素材生成包含人物或物体的视频。支持单角色表演和多角色互动。",
"display_name": "Wan 2.7 参考生成视频",
"inputs": {
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"seed": {
"name": "seed",
"tooltip": "用于生成的种子。"
},
"watermark": {
"name": "watermark",
"tooltip": "是否为结果添加 AI 生成的水印。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2TextToVideoApi": {
"description": "使用 Wan 2.7 模型根据文本提示生成视频。",
"display_name": "Wan 2.7 文本转视频",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "用于驱动视频生成的音频如对口型、节奏匹配动作。时长3秒-30秒。如未提供模型会自动生成匹配的背景音乐或音效。"
},
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "是否使用 AI 辅助增强提示词。"
},
"seed": {
"name": "seed",
"tooltip": "用于生成的种子。"
},
"watermark": {
"name": "watermark",
"tooltip": "是否为结果添加 AI 生成的水印。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoContinuationApi": {
"description": "从视频结束处继续生成视频,可选用最后一帧进行控制。",
"display_name": "Wan 2.7 视频续写",
"inputs": {
"control_after_generate": {
"name": "control after generate"
},
"first_clip": {
"name": "first_clip",
"tooltip": "要续写的视频。时长2秒-10秒。输出宽高比将根据该视频确定。"
},
"last_frame": {
"name": "last_frame",
"tooltip": "最后一帧图像。续写内容将过渡到该帧。"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "是否使用 AI 辅助增强提示词。"
},
"seed": {
"name": "seed",
"tooltip": "用于生成的种子。"
},
"watermark": {
"name": "watermark",
"tooltip": "是否为结果添加 AI 生成的水印。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoEditApi": {
"description": "通过文本指令、参考图像或风格迁移编辑视频。",
"display_name": "Wan 2.7 视频编辑",
"inputs": {
"audio_setting": {
"name": "audio_setting",
"tooltip": "“auto”模型根据提示词决定是否重新生成音频。“origin”保留输入视频的原始音频。"
},
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"seed": {
"name": "seed",
"tooltip": "用于生成的种子。"
},
"video": {
"name": "video",
"tooltip": "要编辑的视频。"
},
"watermark": {
"name": "watermark",
"tooltip": "是否为结果添加 AI 生成的水印。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanAnimateToVideo": {
"display_name": "Wan动画转视频",
"inputs": {

View File

@@ -9,23 +9,9 @@ import type { ComfyWorkflowJSON } from '@/platform/workflow/validation/schemas/w
import type { MissingModelCandidate } from '@/platform/missingModel/types'
import type { MissingNodeType } from '@/types/comfy'
/** An item within an input group. */
export interface InputGroupItem {
  // Stable key identifying the promoted input (built by the inputItemKey helper).
  key: string
  // Shared id linking this item with its paired partner; absent when unpaired.
  pairId?: string
}
/** A named group of inputs that renders as a collapsible accordion. */
export interface InputGroup {
  // Unique group id (a generated UUID — see inputGroupStore.createGroup).
  id: string
  // Display name; null means the UI derives an automatic name.
  name: string | null
  // Ordered items shown inside the accordion.
  items: InputGroupItem[]
}
/** Linear (builder/app mode) data persisted on the workflow graph's `extra`. */
export interface LinearData {
  // Promoted inputs as [nodeId, widgetName] tuples, in display order.
  inputs: [NodeId, string][]
  // Node ids surfaced as outputs — NOTE(review): exact semantics not visible here; confirm against appModeStore.
  outputs: NodeId[]
  // Optional accordion groupings of the promoted inputs; omitted when no groups exist.
  inputGroups?: InputGroup[]
}
export interface PendingWarnings {

View File

@@ -5,7 +5,6 @@ import { ref, useTemplateRef } from 'vue'
import { useI18n } from 'vue-i18n'
import AppModeWidgetList from '@/components/builder/AppModeWidgetList.vue'
import InputGroupAccordion from '@/components/builder/InputGroupAccordion.vue'
import Loader from '@/components/loader/Loader.vue'
import ScrubableNumberInput from '@/components/common/ScrubableNumberInput.vue'
import Popover from '@/components/ui/Popover.vue'
@@ -20,7 +19,6 @@ import { useCommandStore } from '@/stores/commandStore'
import { useQueueSettingsStore } from '@/stores/queueStore'
import { useAppMode } from '@/composables/useAppMode'
import { useAppModeStore } from '@/stores/appModeStore'
import { useInputGroupStore } from '@/stores/inputGroupStore'
const { t } = useI18n()
const commandStore = useCommandStore()
const { batchCount } = storeToRefs(useQueueSettingsStore())
@@ -29,7 +27,6 @@ const { isActiveSubscription } = useBillingContext()
const workflowStore = useWorkflowStore()
const { isBuilderMode } = useAppMode()
const appModeStore = useAppModeStore()
const inputGroupStore = useInputGroupStore()
const { hasOutputs } = storeToRefs(appModeStore)
const { toastTo, mobile } = defineProps<{
@@ -104,20 +101,6 @@ function handleDragDrop(e: DragEvent) {
class="grow scroll-shadows-comfy-menu-bg overflow-y-auto contain-size"
>
<AppModeWidgetList ref="widgetListRef" :mobile />
<InputGroupAccordion
v-for="(group, idx) in inputGroupStore.inputGroups"
:key="group.id"
:group
:position="
inputGroupStore.inputGroups.length === 1
? 'only'
: idx === 0
? 'first'
: idx === inputGroupStore.inputGroups.length - 1
? 'last'
: 'middle'
"
/>
</section>
<Teleport
v-if="!jobToastTimeout || pendingJobQueues > 0"

View File

@@ -86,7 +86,6 @@ export const useAppModeStore = defineStore('appMode', () => {
if (!graph) return
const extra = (graph.extra ??= {})
extra.linearData = {
...(extra.linearData as Partial<LinearData>),
inputs: [...data.inputs],
outputs: [...data.outputs]
}

View File

@@ -1,360 +0,0 @@
import { createTestingPinia } from '@pinia/testing'
import { setActivePinia } from 'pinia'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import type { LGraphNode, NodeId } from '@/lib/litegraph/src/LGraphNode'
import type {
LinearData,
LoadedComfyWorkflow
} from '@/platform/workflow/management/stores/comfyWorkflow'
import { ComfyWorkflow as ComfyWorkflowClass } from '@/platform/workflow/management/stores/comfyWorkflow'
import { useWorkflowStore } from '@/platform/workflow/management/stores/workflowStore'
import { app } from '@/scripts/app'
import { createMockChangeTracker } from '@/utils/__tests__/litegraphTestUtils'
// Module mocks for everything the store touches at import/run time.
// NOTE(review): relies on vitest hoisting vi.mock() calls above imports,
// so the store import at the bottom still receives these mocks.

// Minimal app with a root graph carrying an `extra` bag (persistence target),
// one node, and an EventTarget for the 'configured' listener.
vi.mock('@/scripts/app', () => ({
  app: {
    rootGraph: { extra: {}, nodes: [{ id: 1 }], events: new EventTarget() }
  }
}))
// Keep the real litegraph utils but make node resolution always miss.
vi.mock('@/utils/litegraphUtil', async (importOriginal) => ({
  ...(await importOriginal()),
  resolveNode: vi.fn<(id: NodeId) => LGraphNode | undefined>(() => undefined)
}))
// Writable canvas stub.
vi.mock('@/renderer/core/canvas/canvasStore', () => ({
  useCanvasStore: () => ({
    getCanvas: () => ({ read_only: false })
  })
}))
// No-op empty-workflow dialog.
vi.mock('@/components/builder/useEmptyWorkflowDialog', () => ({
  useEmptyWorkflowDialog: () => ({ show: vi.fn() })
}))
// Settings always read as false and writes are swallowed.
vi.mock('@/platform/settings/settingStore', () => ({
  useSettingStore: () => ({
    get: vi.fn(() => false),
    set: vi.fn()
  })
}))

import { useInputGroupStore } from './inputGroupStore'
/**
 * Build a minimal LoadedComfyWorkflow for tests.
 *
 * Constructs a real ComfyWorkflow, attaches a mock change tracker and empty
 * JSON content, then sets the requested active mode (defaults to
 * 'builder:inputs' so the store's builder-mode persistence path is active).
 * The final cast to LoadedComfyWorkflow is acceptable in tests because the
 * fields the store reads (changeTracker, content, activeMode) are populated.
 */
function createWorkflow(
  activeMode: string = 'builder:inputs'
): LoadedComfyWorkflow {
  const workflow = new ComfyWorkflowClass({
    path: 'workflows/test.json',
    modified: Date.now(),
    size: 100
  })
  // Mock tracker lets persistGroups() call checkState() without a real graph.
  workflow.changeTracker = createMockChangeTracker()
  workflow.content = '{}'
  workflow.originalContent = '{}'
  workflow.activeMode = activeMode as LoadedComfyWorkflow['activeMode']
  return workflow as LoadedComfyWorkflow
}
// Unit tests for the input-group store: group CRUD, item membership,
// ordering, pairing, cross-group moves, and graph persistence.
describe('inputGroupStore', () => {
  let store: ReturnType<typeof useInputGroupStore>

  beforeEach(() => {
    // Fresh pinia + graph extra per test; stubActions: false runs real actions.
    setActivePinia(createTestingPinia({ stubActions: false }))
    vi.mocked(app.rootGraph).extra = {}
    // Set active workflow in builder mode so persistence works
    const workflowStore = useWorkflowStore()
    workflowStore.activeWorkflow = createWorkflow()
    store = useInputGroupStore()
    vi.clearAllMocks()
  })

  describe('createGroup', () => {
    it('creates an empty group with generated id', () => {
      const id = store.createGroup()
      expect(store.inputGroups).toHaveLength(1)
      expect(store.inputGroups[0]).toMatchObject({
        id,
        name: null,
        items: []
      })
    })
    it('creates a group with a name', () => {
      store.createGroup('Dimensions')
      expect(store.inputGroups[0].name).toBe('Dimensions')
    })
  })

  describe('deleteGroup', () => {
    it('removes a group by id', () => {
      const id = store.createGroup()
      store.deleteGroup(id)
      expect(store.inputGroups).toHaveLength(0)
    })
    it('does nothing for unknown id', () => {
      store.createGroup()
      store.deleteGroup('nonexistent')
      expect(store.inputGroups).toHaveLength(1)
    })
  })

  describe('addItemToGroup', () => {
    it('adds an item to a group', () => {
      const id = store.createGroup()
      store.addItemToGroup(id, 'input:1:steps')
      expect(store.inputGroups[0].items).toHaveLength(1)
      expect(store.inputGroups[0].items[0].key).toBe('input:1:steps')
    })
    it('does not add duplicate items', () => {
      const id = store.createGroup()
      store.addItemToGroup(id, 'input:1:steps')
      store.addItemToGroup(id, 'input:1:steps')
      expect(store.inputGroups[0].items).toHaveLength(1)
    })
    // An item lives in at most one group: adding elsewhere moves it.
    it('moves item from one group to another', () => {
      const g1 = store.createGroup()
      const g2 = store.createGroup()
      store.addItemToGroup(g1, 'input:1:steps')
      store.addItemToGroup(g2, 'input:1:steps')
      // g1 was auto-deleted (became empty)
      expect(store.inputGroups).toHaveLength(1)
      expect(store.inputGroups[0].id).toBe(g2)
      expect(store.inputGroups[0].items[0].key).toBe('input:1:steps')
    })
  })

  describe('removeItemFromGroup', () => {
    it('removes an item from a group', () => {
      const id = store.createGroup()
      store.addItemToGroup(id, 'input:1:steps')
      store.addItemToGroup(id, 'input:1:cfg')
      store.removeItemFromGroup(id, 'input:1:steps')
      expect(store.inputGroups[0].items).toHaveLength(1)
      expect(store.inputGroups[0].items[0].key).toBe('input:1:cfg')
    })
    it('deletes group when last item is removed', () => {
      const id = store.createGroup()
      store.addItemToGroup(id, 'input:1:steps')
      store.removeItemFromGroup(id, 'input:1:steps')
      expect(store.inputGroups).toHaveLength(0)
    })
    // Removing one half of a pair dissolves the pair for the survivor too.
    it('clears pair when paired item is removed', () => {
      const id = store.createGroup()
      store.addItemToGroup(id, 'input:1:width')
      store.addItemToGroup(id, 'input:1:height')
      store.addItemToGroup(id, 'input:1:steps')
      store.pairItemsInGroup(id, 'input:1:width', 'input:1:height')
      store.removeItemFromGroup(id, 'input:1:width')
      const remaining = store.inputGroups[0].items
      expect(remaining.every((i) => i.pairId === undefined)).toBe(true)
    })
  })

  describe('renameGroup', () => {
    it('renames a group', () => {
      const id = store.createGroup()
      store.renameGroup(id, 'Dimensions')
      expect(store.inputGroups[0].name).toBe('Dimensions')
    })
    it('sets name to null to use auto-name', () => {
      const id = store.createGroup('Old Name')
      store.renameGroup(id, null)
      expect(store.inputGroups[0].name).toBeNull()
    })
  })

  describe('reorderWithinGroup', () => {
    it('moves an item before another', () => {
      const id = store.createGroup()
      store.addItemToGroup(id, 'input:1:a')
      store.addItemToGroup(id, 'input:1:b')
      store.addItemToGroup(id, 'input:1:c')
      store.reorderWithinGroup(id, 'input:1:c', 'input:1:a', 'before')
      const keys = store.inputGroups[0].items.map((i) => i.key)
      expect(keys).toEqual(['input:1:c', 'input:1:a', 'input:1:b'])
    })
    it('moves an item after another', () => {
      const id = store.createGroup()
      store.addItemToGroup(id, 'input:1:a')
      store.addItemToGroup(id, 'input:1:b')
      store.addItemToGroup(id, 'input:1:c')
      store.reorderWithinGroup(id, 'input:1:a', 'input:1:c', 'after')
      const keys = store.inputGroups[0].items.map((i) => i.key)
      expect(keys).toEqual(['input:1:b', 'input:1:c', 'input:1:a'])
    })
  })

  describe('pairItemsInGroup / unpairItem', () => {
    it('pairs two items with a shared pairId', () => {
      const id = store.createGroup()
      store.addItemToGroup(id, 'input:1:width')
      store.addItemToGroup(id, 'input:1:height')
      store.pairItemsInGroup(id, 'input:1:width', 'input:1:height')
      const items = store.inputGroups[0].items
      expect(items[0].pairId).toBeDefined()
      expect(items[0].pairId).toBe(items[1].pairId)
    })
    it('unpairs an item and clears partner pairId', () => {
      const id = store.createGroup()
      store.addItemToGroup(id, 'input:1:width')
      store.addItemToGroup(id, 'input:1:height')
      store.pairItemsInGroup(id, 'input:1:width', 'input:1:height')
      store.unpairItem(id, 'input:1:width')
      const items = store.inputGroups[0].items
      expect(items.every((i) => i.pairId === undefined)).toBe(true)
    })
  })

  describe('replaceInPair', () => {
    it('swaps a dragged item into an existing pair slot', () => {
      const id = store.createGroup()
      store.addItemToGroup(id, 'input:1:a')
      store.addItemToGroup(id, 'input:1:b')
      store.addItemToGroup(id, 'input:1:c')
      store.pairItemsInGroup(id, 'input:1:a', 'input:1:b')
      store.replaceInPair(id, 'input:1:b', 'input:1:c')
      const items = store.inputGroups[0].items
      const aItem = items.find((i) => i.key === 'input:1:a')!
      const cItem = items.find((i) => i.key === 'input:1:c')!
      const bItem = items.find((i) => i.key === 'input:1:b')!
      // c took b's pair slot
      expect(cItem.pairId).toBe(aItem.pairId)
      // b is now unpaired
      expect(bItem.pairId).toBeUndefined()
    })
    it('does nothing when target has no pairId', () => {
      const id = store.createGroup()
      store.addItemToGroup(id, 'input:1:a')
      store.addItemToGroup(id, 'input:1:b')
      store.replaceInPair(id, 'input:1:a', 'input:1:b')
      const items = store.inputGroups[0].items
      expect(items.every((i) => i.pairId === undefined)).toBe(true)
    })
  })

  describe('moveItemToGroupAt', () => {
    it('moves item from one group to another before target', () => {
      const g1 = store.createGroup()
      const g2 = store.createGroup()
      store.addItemToGroup(g1, 'input:1:x')
      store.addItemToGroup(g2, 'input:1:a')
      store.addItemToGroup(g2, 'input:1:b')
      store.moveItemToGroupAt(g2, 'input:1:x', 'input:1:a', 'before')
      expect(store.inputGroups).toHaveLength(1)
      const keys = store.inputGroups[0].items.map((i) => i.key)
      expect(keys).toEqual(['input:1:x', 'input:1:a', 'input:1:b'])
    })
    it('moves item after target', () => {
      const g1 = store.createGroup()
      const g2 = store.createGroup()
      store.addItemToGroup(g1, 'input:1:x')
      store.addItemToGroup(g2, 'input:1:a')
      store.addItemToGroup(g2, 'input:1:b')
      store.moveItemToGroupAt(g2, 'input:1:x', 'input:1:a', 'after')
      const keys = store.inputGroups[0].items.map((i) => i.key)
      expect(keys).toEqual(['input:1:a', 'input:1:x', 'input:1:b'])
    })
    // Dropping on the center of a target pairs the two items.
    it('pairs items when position is center', () => {
      const g1 = store.createGroup()
      const g2 = store.createGroup()
      store.addItemToGroup(g1, 'input:1:x')
      store.addItemToGroup(g2, 'input:1:a')
      store.moveItemToGroupAt(g2, 'input:1:x', 'input:1:a', 'center')
      const items = store.inputGroups[0].items
      expect(items[0].pairId).toBeDefined()
      expect(items[0].pairId).toBe(items[1].pairId)
    })
    it('deletes empty source group', () => {
      const g1 = store.createGroup()
      const g2 = store.createGroup()
      store.addItemToGroup(g1, 'input:1:x')
      store.addItemToGroup(g2, 'input:1:a')
      store.moveItemToGroupAt(g2, 'input:1:x', 'input:1:a', 'before')
      expect(store.inputGroups).toHaveLength(1)
      expect(store.inputGroups[0].id).toBe(g2)
    })
  })

  describe('groupedItemKeys / isGrouped', () => {
    it('tracks which items are in groups', () => {
      const id = store.createGroup()
      store.addItemToGroup(id, 'input:1:steps')
      expect(store.groupedItemKeys.has('input:1:steps')).toBe(true)
      expect(store.isGrouped(1, 'steps')).toBe(true)
      expect(store.isGrouped(1, 'cfg')).toBe(false)
    })
  })

  describe('persistence', () => {
    // Writes flow into app.rootGraph.extra.linearData (the mocked graph).
    it('persists groups to linearData on graph extra', () => {
      const workflowStore = useWorkflowStore()
      workflowStore.activeWorkflow = createWorkflow()
      const id = store.createGroup('Test')
      store.addItemToGroup(id, 'input:1:steps')
      const linearData = app.rootGraph.extra.linearData as LinearData
      expect(linearData?.inputGroups).toBeDefined()
      expect(linearData.inputGroups).toHaveLength(1)
      expect(linearData.inputGroups![0].name).toBe('Test')
    })
    it('clears inputGroups from linearData when empty', () => {
      const workflowStore = useWorkflowStore()
      workflowStore.activeWorkflow = createWorkflow()
      const id = store.createGroup()
      store.deleteGroup(id)
      const linearData = app.rootGraph.extra.linearData as LinearData
      expect(linearData?.inputGroups).toBeUndefined()
    })
  })
})

View File

@@ -1,277 +0,0 @@
import { useEventListener } from '@vueuse/core'
import { defineStore } from 'pinia'
import { computed, ref, watch } from 'vue'
import { inputItemKey } from '@/components/builder/itemKeyHelper'
import { useAppMode } from '@/composables/useAppMode'
import type { NodeId } from '@/lib/litegraph/src/LGraphNode'
import type {
InputGroup,
LinearData
} from '@/platform/workflow/management/stores/comfyWorkflow'
import { useWorkflowStore } from '@/platform/workflow/management/stores/workflowStore'
import { app } from '@/scripts/app'
import { ChangeTracker } from '@/scripts/changeTracker'
export const useInputGroupStore = defineStore('inputGroup', () => {
const workflowStore = useWorkflowStore()
const { isBuilderMode, isAppMode } = useAppMode()

// Reactive source of truth for all accordion groups of the active workflow.
const inputGroups = ref<InputGroup[]>([])

// Flat set of every item key that belongs to some group (for O(1) lookups).
const groupedItemKeys = computed(() => {
  const keys = new Set<string>()
  for (const group of inputGroups.value) {
    for (const item of group.items) keys.add(item.key)
  }
  return keys
})
/** Whether the given node/widget input already belongs to a group. */
function isGrouped(nodeId: NodeId, widgetName: string): boolean {
  const key = inputItemKey(nodeId, widgetName)
  return groupedItemKeys.value.has(key)
}
// ── Persistence ────────────────────────────────────────────────────
/**
 * Replace the in-memory groups with a copy of the given persisted groups.
 * Each group object and its items array are cloned so later store mutations
 * never alias the persisted source; undefined clears all groups.
 */
function loadGroups(groups: InputGroup[] | undefined) {
  if (!groups) {
    inputGroups.value = []
    return
  }
  inputGroups.value = groups.map((group) => ({
    ...group,
    items: [...group.items]
  }))
}
/**
 * Write the current groups into the root graph's `extra.linearData` and
 * notify the workflow's change tracker.
 * Skipped outside builder/app mode and while a graph load is in progress.
 */
function persistGroups() {
  if (
    (!isBuilderMode.value && !isAppMode.value) ||
    ChangeTracker.isLoadingGraph
  )
    return
  const graph = app.rootGraph
  if (!graph) return
  // Lazily create graph.extra and its linearData bucket.
  const extra = (graph.extra ??= {})
  const linearData = ((extra.linearData as LinearData | undefined) ??= {
    inputs: [],
    outputs: []
  })
  // Deep-clone via JSON so the graph never aliases the store's reactive
  // objects; drop the field entirely when there are no groups.
  linearData.inputGroups = inputGroups.value.length
    ? JSON.parse(JSON.stringify(inputGroups.value))
    : undefined
  // Let the change tracker pick up the modified graph state.
  workflowStore.activeWorkflow?.changeTracker?.checkState()
}
/** Rehydrate groups from whatever is currently stored on the root graph. */
function reloadFromGraph() {
  const extra = app.rootGraph?.extra
  const persisted = extra?.linearData as LinearData | undefined
  loadGroups(persisted?.inputGroups)
}
// Rehydrate groups whenever the active workflow changes (immediate so the
// initial workflow is loaded too)…
watch(
  () => workflowStore.activeWorkflow,
  (workflow) => {
    const linearData = workflow?.changeTracker?.activeState?.extra
      ?.linearData as LinearData | undefined
    loadGroups(linearData?.inputGroups)
  },
  { immediate: true }
)
// …and whenever the root graph emits 'configured' (i.e. finished loading
// a graph state).
useEventListener(() => app.rootGraph?.events, 'configured', reloadFromGraph)
// ── Helpers ────────────────────────────────────────────────────────
/** Look up a group by id; undefined when no group matches. */
function findGroup(groupId: string): InputGroup | undefined {
  for (const group of inputGroups.value) {
    if (group.id === groupId) return group
  }
  return undefined
}
// ── Actions ────────────────────────────────────────────────────────
function createGroup(name?: string): string {
const id = crypto.randomUUID()
inputGroups.value.push({ id, name: name ?? null, items: [] })
persistGroups()
return id
}
/** Remove a group from the list without persisting (internal helper). */
function deleteGroupInternal(groupId: string) {
  const index = inputGroups.value.findIndex((group) => group.id === groupId)
  if (index === -1) return
  inputGroups.value.splice(index, 1)
}
/** Public deletion: remove the group (no-op for unknown ids) and persist. */
function deleteGroup(groupId: string) {
  deleteGroupInternal(groupId)
  persistGroups()
}
/** Rename a group and persist; name null restores the auto-generated name. */
function renameGroup(groupId: string, name: string | null) {
  const target = findGroup(groupId)
  if (target) {
    target.name = name
    persistGroups()
  }
}
/**
 * Append an item to a group and persist.
 * An item lives in at most one group, so it is first detached from any
 * other group; a duplicate add to the same group is a no-op.
 */
function addItemToGroup(groupId: string, itemKey: string) {
  const group = findGroup(groupId)
  if (!group) return
  const alreadyPresent = group.items.some((item) => item.key === itemKey)
  if (alreadyPresent) return
  removeItemFromAllGroups(itemKey)
  group.items.push({ key: itemKey })
  persistGroups()
}
/**
 * Remove an item from a group and persist.
 * Any pairing the item was part of is dissolved first, and a group left
 * empty by the removal is deleted.
 */
function removeItemFromGroup(groupId: string, itemKey: string) {
  const group = findGroup(groupId)
  if (!group) return
  const index = group.items.findIndex((item) => item.key === itemKey)
  if (index === -1) return
  const { pairId } = group.items[index]
  if (pairId) clearPair(group, pairId)
  group.items.splice(index, 1)
  if (!group.items.length) deleteGroupInternal(groupId)
  persistGroups()
}
/**
 * Detach an item from every group it appears in (dissolving its pairing),
 * without persisting. Emptied groups are collected during the sweep and
 * deleted afterwards so the list is never mutated mid-iteration.
 */
function removeItemFromAllGroups(itemKey: string) {
  const emptiedIds: string[] = []
  for (const group of inputGroups.value) {
    const index = group.items.findIndex((item) => item.key === itemKey)
    if (index === -1) continue
    const { pairId } = group.items[index]
    if (pairId) clearPair(group, pairId)
    group.items.splice(index, 1)
    if (group.items.length === 0) emptiedIds.push(group.id)
  }
  for (const groupId of emptiedIds) deleteGroupInternal(groupId)
}
/**
 * Move an item within a group so it sits before/after another item, then
 * persist. No-op when either key is missing or both keys are the same.
 */
function reorderWithinGroup(
  groupId: string,
  fromKey: string,
  toKey: string,
  position: 'before' | 'after'
) {
  const group = findGroup(groupId)
  if (!group) return
  const fromIdx = group.items.findIndex((i) => i.key === fromKey)
  const toIdx = group.items.findIndex((i) => i.key === toKey)
  if (fromIdx === -1 || toIdx === -1 || fromIdx === toIdx) return
  const [moved] = group.items.splice(fromIdx, 1)
  // Re-locate the anchor ONCE after removal (removing the moved item may
  // have shifted its index); the original scanned twice inside a ternary.
  const anchorIdx = group.items.findIndex((i) => i.key === toKey)
  const insertIdx = position === 'before' ? anchorIdx : anchorIdx + 1
  group.items.splice(insertIdx, 0, moved)
  persistGroups()
}
/**
 * Pair two items inside a group under a freshly minted pairId, dissolving
 * any pair either item previously belonged to.
 *
 * Guarded no-op when either key is missing from the group or the two keys
 * are equal — otherwise the new pairId would end up held by fewer than two
 * distinct items (a dangling or self pair).
 */
function pairItemsInGroup(groupId: string, keyA: string, keyB: string) {
  const group = findGroup(groupId)
  if (!group) return
  const itemA = group.items.find((i) => i.key === keyA)
  const itemB = group.items.find((i) => i.key === keyB)
  if (!itemA || !itemB || keyA === keyB) return
  if (itemA.pairId) clearPair(group, itemA.pairId)
  if (itemB.pairId) clearPair(group, itemB.pairId)
  const pairId = crypto.randomUUID()
  // Item keys are unique within a group (addItemToGroup guards duplicates),
  // so tagging the two found items directly is equivalent to scanning all.
  itemA.pairId = pairId
  itemB.pairId = pairId
  persistGroups()
}
/**
 * Swap a dragged item into an existing pair slot, evicting the target.
 * 1. Detach replacement from its current position (and dissolve its old pair)
 * 2. Insert replacement next to the target, inheriting the target's pairId
 * 3. Unpair the evicted target
 *
 * Guarded no-op when the replacement already shares the target's pairId
 * (including replacement === target): proceeding would clear the target's
 * pairId in the dissolution step and then leave the replacement holding a
 * pairId no other item shares.
 */
function replaceInPair(
  groupId: string,
  targetKey: string,
  replacementKey: string
) {
  const group = findGroup(groupId)
  if (!group) return
  const targetItem = group.items.find((i) => i.key === targetKey)
  if (!targetItem?.pairId) return
  const pairId = targetItem.pairId
  const repIdx = group.items.findIndex((i) => i.key === replacementKey)
  if (repIdx === -1) return
  // Replacement is already in this pair — nothing sensible to swap.
  if (group.items[repIdx].pairId === pairId) return
  const [moved] = group.items.splice(repIdx, 1)
  // Dissolve the replacement's previous pair, if any.
  if (moved.pairId) {
    for (const i of group.items) {
      if (i.pairId === moved.pairId) i.pairId = undefined
    }
  }
  // Re-locate the target: the splice above may have shifted its index.
  const targetIdx = group.items.findIndex((i) => i.key === targetKey)
  moved.pairId = pairId
  group.items.splice(targetIdx, 0, moved)
  targetItem.pairId = undefined
  persistGroups()
}
/**
 * Move an item from any group into a target group near a specific item.
 *
 * `position` controls placement relative to `targetKey`:
 * - 'before' / 'after': insert on that side of the target item
 * - 'center': insert adjacent to the target and pair the two items
 */
function moveItemToGroupAt(
  groupId: string,
  itemKey: string,
  targetKey: string,
  position: 'before' | 'center' | 'after'
) {
  const group = findGroup(groupId)
  if (!group) return
  // Guard: dropping an item onto itself is a no-op. Without this, a
  // sole-member group would be deleted out from under us by
  // removeItemFromAllGroups (we would then push into a detached group
  // object that persistGroups never sees), and 'center' would pair the
  // item with itself.
  if (itemKey === targetKey) return
  // An item lives in at most one group — detach it everywhere first.
  removeItemFromAllGroups(itemKey)
  if (!group.items.some((i) => i.key === itemKey)) {
    group.items.push({ key: itemKey })
  }
  const dragIdx = group.items.findIndex((i) => i.key === itemKey)
  const targetIdx = group.items.findIndex((i) => i.key === targetKey)
  if (dragIdx !== -1 && targetIdx !== -1 && dragIdx !== targetIdx) {
    const [moved] = group.items.splice(dragIdx, 1)
    // Re-locate the target: removing the moved item may have shifted it.
    const newTargetIdx = group.items.findIndex((i) => i.key === targetKey)
    const insertIdx = position === 'after' ? newTargetIdx + 1 : newTargetIdx
    group.items.splice(insertIdx, 0, moved)
  }
  if (position === 'center') {
    const itemA = group.items.find((i) => i.key === itemKey)
    const itemB = group.items.find((i) => i.key === targetKey)
    // Only mint a pairId when both endpoints exist in this group; otherwise
    // a single item would be left holding a dangling pairId.
    if (itemA && itemB) {
      if (itemA.pairId) clearPair(group, itemA.pairId)
      if (itemB.pairId) clearPair(group, itemB.pairId)
      const pairId = crypto.randomUUID()
      itemA.pairId = pairId
      itemB.pairId = pairId
    }
  }
  persistGroups()
}
/** Dissolve the pair that the given item belongs to, if any. */
function unpairItem(groupId: string, itemKey: string) {
  const group = findGroup(groupId)
  const target = group?.items.find((entry) => entry.key === itemKey)
  if (!group || !target?.pairId) return
  clearPair(group, target.pairId)
  persistGroups()
}
/** Strip the given pairId from every item in the group, dissolving the pair. */
function clearPair(group: InputGroup, pairId: string) {
  group.items
    .filter((entry) => entry.pairId === pairId)
    .forEach((entry) => {
      entry.pairId = undefined
    })
}
// Public surface of the input-groups composable: reactive grouping state
// plus the mutation helpers. Every exported mutator persists its change
// via persistGroups() before returning.
return {
inputGroups,
groupedItemKeys,
isGrouped,
findGroup,
createGroup,
deleteGroup,
renameGroup,
addItemToGroup,
removeItemFromGroup,
reorderWithinGroup,
moveItemToGroupAt,
pairItemsInGroup,
replaceInPair,
unpairItem
}
})