Compare commits

...

9 Commits

Author SHA1 Message Date
Johnpaul
dda9cf68f2 Merge remote-tracking branch 'origin/main' into fix/coderabbit-issue-9226 2026-04-06 18:22:04 +01:00
Dante
64f75f0727 test(assets): add E2E tests for delete confirmation flow (#10785)
## Summary

Add E2E Playwright tests for the asset delete confirmation dialog flow.

## Changes

- **What**: New `Assets sidebar - delete confirmation` describe block in
`assets.spec.ts` covering right-click delete showing confirmation
dialog, confirming delete removes asset with success toast, and
cancelling delete preserves asset. Added `mockDeleteHistory()` to
`AssetsHelper` to intercept POST `/api/history` delete payloads and
update mock state.

## Review Focus

Tests use existing `ConfirmDialog` page object and `AssetsHelper` mock
infrastructure. The `mockDeleteHistory` handler removes jobs from the
in-memory mock array so subsequent `/api/jobs` fetches reflect the
deletion.

Fixes #10781

┆Issue is synchronized with this [Notion
page](https://www.notion.so/PR-10785-test-assets-add-E2E-tests-for-delete-confirmation-flow-3356d73d365081fb90c8e2a69de3a666)
by [Unito](https://www.unito.io)
2026-04-06 10:17:23 -07:00
Johnpaul
e93476174f fix: use AbortSignal.timeout for upload timeout
Replace manual AbortController+setTimeout with AbortSignal.timeout(),
fixing unreachable clearTimeout on error path and distinguishing
TimeoutError from AbortError for future cancel support.
2026-04-06 18:17:00 +01:00
Dante
0d535631a5 refactor(test): extract dialog page objects from inline getByRole usage (#10822)
## Summary

Extract inline `getByRole('dialog')` calls across E2E tests into
reusable page objects.

## Changes

- **What**: Extract `ConfirmDialog` class from `ComfyPage.ts` into
`browser_tests/fixtures/components/ConfirmDialog.ts` with new `save`
button locator. Add `MediaLightbox` and `TemplatesDialog` page objects.
Refactor 4 test files to use these page objects instead of raw dialog
locators.
- **Skipped**: `appModeDropdownClipping.spec.ts` uses
`getByRole('dialog')` for a PrimeVue Popover (not a true dialog), left
as-is.

## Review Focus

The `ConfirmDialog.click()` method now supports a `save` action used by
`workflowPersistence.spec.ts`, which also waits for the dialog mask to
disappear and workflow service to settle.

Fixes #10723

┆Issue is synchronized with this [Notion
page](https://www.notion.so/PR-10822-refactor-test-extract-dialog-page-objects-from-inline-getByRole-usage-3366d73d365081b3bc0ee7ef0ddce658)
by [Unito](https://www.unito.io)
2026-04-06 09:49:32 -07:00
Comfy Org PR Bot
6c1bf7a3cf 1.43.12 (#10782)
Patch version increment to 1.43.12

**Base branch:** `main`

---------

Co-authored-by: christian-byrne <72887196+christian-byrne@users.noreply.github.com>
Co-authored-by: github-actions <github-actions@github.com>
2026-04-04 23:48:20 -07:00
Benjamin Lu
b61e15293c test: address review comments on new browser tests (#10852) 2026-04-04 19:26:55 -07:00
Dante
899660b135 test: add queue overlay and workflow search E2E tests (#10802)
## Summary
- Add queue overlay E2E tests: toggle, filter tabs, completed filter,
close (5 tests)
- Add workflow sidebar search E2E tests: search input, filter by name,
clear, no matches (4 tests)
- Fix AssetsHelper mock timestamps from seconds to milliseconds
(matching backend's `int(time.time() * 1000)`)
- Type AssetsHelper response pagination with `JobsListResponse` from
`@comfyorg/ingest-types`

## Test plan
- [ ] CI passes all Playwright shards
- [ ] `pnpm typecheck:browser` passes
- [ ] `pnpm lint` passes

┆Issue is synchronized with this [Notion
page](https://www.notion.so/PR-10802-test-add-queue-overlay-and-workflow-search-E2E-tests-3356d73d365081018df8c7061c0854ee)
by [Unito](https://www.unito.io)

---------

Co-authored-by: Benjamin Lu <benjaminlu1107@gmail.com>
2026-04-04 13:15:17 -07:00
Johnpaul Chiwetelu
fb559ac5ac Merge branch 'main' into fix/coderabbit-issue-9226 2026-03-10 02:21:27 +01:00
CodeRabbit Fixer
c971cb5646 fix: Add timeout and abort mechanism for image upload (#9226)
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-06 17:49:33 +01:00
38 changed files with 3435 additions and 89 deletions

View File

@@ -14,9 +14,12 @@ import { VueNodeHelpers } from '@e2e/fixtures/VueNodeHelpers'
import { BottomPanel } from '@e2e/fixtures/components/BottomPanel'
import { ComfyNodeSearchBox } from '@e2e/fixtures/components/ComfyNodeSearchBox'
import { ComfyNodeSearchBoxV2 } from '@e2e/fixtures/components/ComfyNodeSearchBoxV2'
import { ConfirmDialog } from '@e2e/fixtures/components/ConfirmDialog'
import { ContextMenu } from '@e2e/fixtures/components/ContextMenu'
import { MediaLightbox } from '@e2e/fixtures/components/MediaLightbox'
import { QueuePanel } from '@e2e/fixtures/components/QueuePanel'
import { SettingDialog } from '@e2e/fixtures/components/SettingDialog'
import { TemplatesDialog } from '@e2e/fixtures/components/TemplatesDialog'
import {
AssetsSidebarTab,
ModelLibrarySidebarTab,
@@ -131,48 +134,6 @@ class ComfyMenu {
}
}
type KeysOfType<T, Match> = {
[K in keyof T]: T[K] extends Match ? K : never
}[keyof T]
class ConfirmDialog {
public readonly root: Locator
public readonly delete: Locator
public readonly overwrite: Locator
public readonly reject: Locator
public readonly confirm: Locator
constructor(public readonly page: Page) {
this.root = page.getByRole('dialog')
this.delete = this.root.getByRole('button', { name: 'Delete' })
this.overwrite = this.root.getByRole('button', { name: 'Overwrite' })
this.reject = this.root.getByRole('button', { name: 'Cancel' })
this.confirm = this.root.getByRole('button', { name: 'Confirm' })
}
async click(locator: KeysOfType<ConfirmDialog, Locator>) {
const loc = this[locator]
await loc.waitFor({ state: 'visible' })
await loc.click()
// Wait for the dialog mask to disappear after confirming
const mask = this.page.locator('.p-dialog-mask')
const count = await mask.count()
if (count > 0) {
await mask.first().waitFor({ state: 'hidden', timeout: 3000 })
}
// Wait for workflow service to finish if it's busy
await this.page.waitForFunction(
() =>
(window.app?.extensionManager as WorkspaceStore | undefined)?.workflow
?.isBusy === false,
undefined,
{ timeout: 3000 }
)
}
}
export class ComfyPage {
public readonly url: string
// All canvas position operations are based on default view of canvas.
@@ -196,6 +157,8 @@ export class ComfyPage {
public readonly templates: ComfyTemplates
public readonly settingDialog: SettingDialog
public readonly confirmDialog: ConfirmDialog
public readonly templatesDialog: TemplatesDialog
public readonly mediaLightbox: MediaLightbox
public readonly vueNodes: VueNodeHelpers
public readonly appMode: AppModeHelper
public readonly subgraph: SubgraphHelper
@@ -244,6 +207,8 @@ export class ComfyPage {
this.templates = new ComfyTemplates(page)
this.settingDialog = new SettingDialog(page, this)
this.confirmDialog = new ConfirmDialog(page)
this.templatesDialog = new TemplatesDialog(page)
this.mediaLightbox = new MediaLightbox(page)
this.vueNodes = new VueNodeHelpers(page)
this.appMode = new AppModeHelper(this)
this.subgraph = new SubgraphHelper(this)

View File

@@ -0,0 +1,44 @@
import type { Locator, Page } from '@playwright/test'
import type { WorkspaceStore } from '../../types/globals'
/** Keys of `T` whose property type extends `Match` — e.g. every `Locator` field of a page object. */
type KeysOfType<T, Match> = {
[K in keyof T]: T[K] extends Match ? K : never
}[keyof T]
/**
 * Page object for the shared confirmation dialog.
 *
 * Exposes one `Locator` per action button plus `root` for the dialog itself.
 * Use `click()` rather than clicking a button locator directly: it also waits
 * for the dialog to close and for the workflow service to go idle.
 */
export class ConfirmDialog {
  public readonly root: Locator
  public readonly delete: Locator
  public readonly overwrite: Locator
  public readonly reject: Locator
  public readonly confirm: Locator
  public readonly save: Locator

  constructor(public readonly page: Page) {
    this.root = page.getByRole('dialog')
    const button = (name: string) => this.root.getByRole('button', { name })
    this.delete = button('Delete')
    this.overwrite = button('Overwrite')
    this.reject = button('Cancel')
    this.confirm = button('Confirm')
    this.save = button('Save')
  }

  /**
   * Click the named dialog button, then wait for the UI to settle.
   * @param locator key of one of this object's `Locator` fields.
   */
  async click(locator: KeysOfType<ConfirmDialog, Locator>) {
    const target = this[locator]
    await target.waitFor({ state: 'visible' })
    await target.click()
    // Wait for this confirm dialog to close. The timeout is swallowed on
    // purpose: another dialog (e.g. save-as) may open immediately afterwards
    // and keep a role=dialog element visible.
    await this.root.waitFor({ state: 'hidden', timeout: 5000 }).catch(() => {})
    // Give the workflow service a chance to finish any in-flight work.
    await this.page.waitForFunction(
      () =>
        (window.app?.extensionManager as WorkspaceStore | undefined)?.workflow
          ?.isBusy === false,
      undefined,
      { timeout: 3000 }
    )
  }
}

View File

@@ -0,0 +1,11 @@
import type { Locator, Page } from '@playwright/test'
/**
 * Page object for the media lightbox (gallery) dialog.
 * `root` is the dialog itself; `closeButton` is its "Close"-labelled control.
 */
export class MediaLightbox {
  public readonly root: Locator
  public readonly closeButton: Locator

  constructor(public readonly page: Page) {
    const dialog = page.getByRole('dialog')
    this.root = dialog
    this.closeButton = dialog.getByLabel('Close')
  }
}

View File

@@ -0,0 +1,19 @@
import type { Locator, Page } from '@playwright/test'
/** Page object for the workflow templates dialog. */
export class TemplatesDialog {
  public readonly root: Locator

  constructor(public readonly page: Page) {
    this.root = page.getByRole('dialog')
  }

  /** Narrow `root` to the dialog containing a heading that exactly matches `name`. */
  filterByHeading(name: string): Locator {
    const heading = this.page.getByRole('heading', { name, exact: true })
    return this.root.filter({ has: heading })
  }

  /** Locate a combobox inside the dialog by its accessible name. */
  getCombobox(name: RegExp | string): Locator {
    return this.root.getByRole('combobox', { name })
  }
}

View File

@@ -1,20 +1,22 @@
import type { Page, Route } from '@playwright/test'
import type { JobsListResponse } from '@comfyorg/ingest-types'
import type { RawJobListItem } from '../../../src/platform/remote/comfyui/jobs/jobTypes'
const jobsListRoutePattern = /\/api\/jobs(?:\?.*)?$/
const inputFilesRoutePattern = /\/internal\/files\/input(?:\?.*)?$/
const historyRoutePattern = /\/api\/history$/
/** Factory to create a mock completed job with preview output. */
export function createMockJob(
overrides: Partial<RawJobListItem> & { id: string }
): RawJobListItem {
const now = Date.now() / 1000
const now = Date.now()
return {
status: 'completed',
create_time: now,
execution_start_time: now,
execution_end_time: now + 5,
execution_end_time: now + 5000,
preview_output: {
filename: `output_${overrides.id}.png`,
subfolder: '',
@@ -33,13 +35,13 @@ export function createMockJobs(
count: number,
baseOverrides?: Partial<RawJobListItem>
): RawJobListItem[] {
const now = Date.now() / 1000
const now = Date.now()
return Array.from({ length: count }, (_, i) =>
createMockJob({
id: `job-${String(i + 1).padStart(3, '0')}`,
create_time: now - i * 60,
execution_start_time: now - i * 60,
execution_end_time: now - i * 60 + 5 + i,
create_time: now - i * 60_000,
execution_start_time: now - i * 60_000,
execution_end_time: now - i * 60_000 + 5000 + i * 1000,
preview_output: {
filename: `image_${String(i + 1).padStart(3, '0')}.png`,
subfolder: '',
@@ -88,6 +90,8 @@ export class AssetsHelper {
private jobsRouteHandler: ((route: Route) => Promise<void>) | null = null
private inputFilesRouteHandler: ((route: Route) => Promise<void>) | null =
null
private deleteHistoryRouteHandler: ((route: Route) => Promise<void>) | null =
null
private generatedJobs: RawJobListItem[] = []
private importedFiles: string[] = []
@@ -143,18 +147,23 @@ export class AssetsHelper {
const limit = parseLimit(url, total)
const visibleJobs = filteredJobs.slice(offset, offset + limit)
const response = {
jobs: visibleJobs,
pagination: {
offset,
limit,
total,
has_more: offset + visibleJobs.length < total
}
} satisfies {
jobs: unknown[]
pagination: JobsListResponse['pagination']
}
await route.fulfill({
status: 200,
contentType: 'application/json',
body: JSON.stringify({
jobs: visibleJobs,
pagination: {
offset,
limit,
total,
has_more: offset + visibleJobs.length < total
}
})
body: JSON.stringify(response)
})
}
@@ -179,6 +188,36 @@ export class AssetsHelper {
await this.page.route(inputFilesRoutePattern, this.inputFilesRouteHandler)
}
/**
 * Mock the POST /api/history endpoint used for deleting history items.
 * On receiving a `{ delete: [id] }` payload, removes matching jobs from
 * the in-memory mock state so subsequent /api/jobs fetches reflect the
 * deletion.
 */
async mockDeleteHistory(): Promise<void> {
  // Idempotent: only one handler per helper instance.
  if (this.deleteHistoryRouteHandler) return
  this.deleteHistoryRouteHandler = async (route: Route) => {
    const request = route.request()
    // Only intercept deletes; let other verbs through to the real backend.
    if (request.method() !== 'POST') {
      await route.continue()
      return
    }
    // NOTE(review): postDataJSON() throws on a non-JSON body — acceptable
    // here since the frontend always posts JSON to this endpoint.
    const body = request.postDataJSON() as { delete?: string[] }
    if (body.delete) {
      const idsToRemove = new Set(body.delete)
      this.generatedJobs = this.generatedJobs.filter(
        (job) => !idsToRemove.has(job.id)
      )
    }
    // Declare the content type explicitly, consistent with the jobs-list
    // route handler's fulfilled responses.
    await route.fulfill({
      status: 200,
      contentType: 'application/json',
      body: '{}'
    })
  }
  await this.page.route(historyRoutePattern, this.deleteHistoryRouteHandler)
}
async mockEmptyState(): Promise<void> {
await this.mockOutputHistory([])
await this.mockInputFiles([])
@@ -200,5 +239,13 @@ export class AssetsHelper {
)
this.inputFilesRouteHandler = null
}
if (this.deleteHistoryRouteHandler) {
await this.page.unroute(
historyRoutePattern,
this.deleteHistoryRouteHandler
)
this.deleteHistoryRouteHandler = null
}
}
}

View File

@@ -18,15 +18,13 @@ test.describe('Confirm dialog text wrapping', { tag: ['@mobile'] }, () => {
.catch(() => {})
}, longFilename)
const dialog = comfyPage.page.getByRole('dialog')
await expect(dialog).toBeVisible()
const { root, confirm, reject } = comfyPage.confirmDialog
await expect(root).toBeVisible()
const confirmButton = dialog.getByRole('button', { name: 'Confirm' })
await expect(confirmButton).toBeVisible()
await expect(confirmButton).toBeInViewport()
await expect(confirm).toBeVisible()
await expect(confirm).toBeInViewport()
const cancelButton = dialog.getByRole('button', { name: 'Cancel' })
await expect(cancelButton).toBeVisible()
await expect(cancelButton).toBeInViewport()
await expect(reject).toBeVisible()
await expect(reject).toBeInViewport()
})
})

View File

@@ -0,0 +1,111 @@
import { expect } from '@playwright/test'
import { comfyPageFixture as test } from '@e2e/fixtures/ComfyPage'
import { createMockJob } from '@e2e/fixtures/helpers/AssetsHelper'
import { TestIds } from '@e2e/fixtures/selectors'
import type { RawJobListItem } from '@/platform/remote/comfyui/jobs/jobTypes'
const now = Date.now()

// Fixture table: offsets are milliseconds back from `now`.
const JOB_FIXTURES = [
  { id: 'job-completed-1', status: 'completed', start: 60_000, end: 50_000, outputs: 2 },
  { id: 'job-completed-2', status: 'completed', start: 120_000, end: 115_000, outputs: 1 },
  { id: 'job-failed-1', status: 'failed', start: 30_000, end: 28_000, outputs: 0 }
] as const

/** Two completed jobs and one failed job, with millisecond timestamps. */
const MOCK_JOBS: RawJobListItem[] = JOB_FIXTURES.map((fixture) =>
  createMockJob({
    id: fixture.id,
    status: fixture.status,
    create_time: now - fixture.start,
    execution_start_time: now - fixture.start,
    execution_end_time: now - fixture.end,
    outputs_count: fixture.outputs
  })
)
test.describe('Queue overlay', () => {
  test.beforeEach(async ({ comfyPage }) => {
    await comfyPage.assets.mockOutputHistory(MOCK_JOBS)
    await comfyPage.settings.setSetting('Comfy.Queue.QPOV2', false)
    await comfyPage.setup()
  })

  test.afterEach(async ({ comfyPage }) => {
    await comfyPage.assets.clearMocks()
  })

  test('Toggle button opens expanded queue overlay', async ({ comfyPage }) => {
    await comfyPage.page.getByTestId(TestIds.queue.overlayToggle).click()
    // The expanded overlay renders one element per mocked job.
    await expect(comfyPage.page.locator('[data-job-id]').first()).toBeVisible()
  })

  test('Overlay shows filter tabs (All, Completed)', async ({ comfyPage }) => {
    await comfyPage.page.getByTestId(TestIds.queue.overlayToggle).click()
    for (const tabName of ['All', 'Completed']) {
      await expect(
        comfyPage.page.getByRole('button', { name: tabName, exact: true })
      ).toBeVisible()
    }
  })

  test('Overlay shows Failed tab when failed jobs exist', async ({
    comfyPage
  }) => {
    await comfyPage.page.getByTestId(TestIds.queue.overlayToggle).click()
    // Wait for job items first so the filter tab bar has rendered.
    await expect(comfyPage.page.locator('[data-job-id]').first()).toBeVisible()
    await expect(
      comfyPage.page.getByRole('button', { name: 'Failed', exact: true })
    ).toBeVisible()
  })

  test('Completed filter shows only completed jobs', async ({ comfyPage }) => {
    await comfyPage.page.getByTestId(TestIds.queue.overlayToggle).click()
    await expect(comfyPage.page.locator('[data-job-id]').first()).toBeVisible()
    await comfyPage.page
      .getByRole('button', { name: 'Completed', exact: true })
      .click()
    const completedJob = comfyPage.page.locator('[data-job-id="job-completed-1"]')
    const failedJob = comfyPage.page.locator('[data-job-id="job-failed-1"]')
    await expect(completedJob).toBeVisible()
    await expect(failedJob).not.toBeVisible()
  })

  test('Toggling overlay again closes it', async ({ comfyPage }) => {
    const overlayToggle = comfyPage.page.getByTestId(TestIds.queue.overlayToggle)
    await overlayToggle.click()
    await expect(comfyPage.page.locator('[data-job-id]').first()).toBeVisible()
    await overlayToggle.click()
    await expect(
      comfyPage.page.locator('[data-job-id]').first()
    ).not.toBeVisible()
  })
})

View File

@@ -41,30 +41,28 @@ test.describe('MediaLightbox', { tag: ['@slow'] }, () => {
await assetCard.hover()
await assetCard.getByLabel('Zoom in').click()
const gallery = comfyPage.page.getByRole('dialog')
await expect(gallery).toBeVisible()
return { gallery }
const { root } = comfyPage.mediaLightbox
await expect(root).toBeVisible()
}
test('opens gallery and shows dialog with close button', async ({
comfyPage
}) => {
const { gallery } = await runAndOpenGallery(comfyPage)
await expect(gallery.getByLabel('Close')).toBeVisible()
await runAndOpenGallery(comfyPage)
await expect(comfyPage.mediaLightbox.closeButton).toBeVisible()
})
test('closes gallery on Escape key', async ({ comfyPage }) => {
await runAndOpenGallery(comfyPage)
await comfyPage.page.keyboard.press('Escape')
await expect(comfyPage.page.getByRole('dialog')).not.toBeVisible()
await expect(comfyPage.mediaLightbox.root).not.toBeVisible()
})
test('closes gallery when clicking close button', async ({ comfyPage }) => {
const { gallery } = await runAndOpenGallery(comfyPage)
await runAndOpenGallery(comfyPage)
await gallery.getByLabel('Close').click()
await expect(comfyPage.page.getByRole('dialog')).not.toBeVisible()
await comfyPage.mediaLightbox.closeButton.click()
await expect(comfyPage.mediaLightbox.root).not.toBeVisible()
})
})

View File

@@ -676,3 +676,83 @@ test.describe('Assets sidebar - settings menu', () => {
await expect(tab.gridViewOption).toBeVisible()
})
})
// ==========================================================================
// 11. Delete confirmation
// ==========================================================================
test.describe('Assets sidebar - delete confirmation', () => {
  test.beforeEach(async ({ comfyPage }) => {
    await comfyPage.assets.mockOutputHistory(SAMPLE_JOBS)
    await comfyPage.assets.mockDeleteHistory()
    await comfyPage.assets.mockInputFiles([])
    await comfyPage.setup()
  })

  test.afterEach(async ({ comfyPage }) => {
    await comfyPage.assets.clearMocks()
  })

  test('Right-click delete shows confirmation dialog', async ({
    comfyPage
  }) => {
    const tab = comfyPage.menu.assetsTab
    await tab.open()
    await tab.waitForAssets()
    // Open the context menu on the first asset and pick Delete.
    await tab.assetCards.first().click({ button: 'right' })
    await tab.contextMenuItem('Delete').click()
    const { root } = comfyPage.confirmDialog
    await expect(root).toBeVisible()
    await expect(root.getByText('Delete this asset?')).toBeVisible()
    await expect(
      root.getByText('This asset will be permanently removed.')
    ).toBeVisible()
  })

  test('Confirming delete removes asset and shows success toast', async ({
    comfyPage
  }) => {
    const tab = comfyPage.menu.assetsTab
    await tab.open()
    await tab.waitForAssets()
    const countBefore = await tab.assetCards.count()
    await tab.assetCards.first().click({ button: 'right' })
    await tab.contextMenuItem('Delete').click()
    const { root, delete: deleteButton } = comfyPage.confirmDialog
    await expect(root).toBeVisible()
    await deleteButton.click()
    await expect(root).not.toBeVisible()
    // The mocked history route drops the job, so one fewer card renders.
    await expect(tab.assetCards).toHaveCount(countBefore - 1, {
      timeout: 5000
    })
    const toast = comfyPage.page.locator('.p-toast-message-success')
    await expect(toast).toBeVisible({ timeout: 5000 })
  })

  test('Cancelling delete preserves asset', async ({ comfyPage }) => {
    const tab = comfyPage.menu.assetsTab
    await tab.open()
    await tab.waitForAssets()
    const countBefore = await tab.assetCards.count()
    await tab.assetCards.first().click({ button: 'right' })
    await tab.contextMenuItem('Delete').click()
    const { root, reject } = comfyPage.confirmDialog
    await expect(root).toBeVisible()
    await reject.click()
    await expect(root).not.toBeVisible()
    await expect(tab.assetCards).toHaveCount(countBefore)
  })
})

View File

@@ -0,0 +1,65 @@
import type { Page } from '@playwright/test'
import { expect } from '@playwright/test'
import { comfyPageFixture as test } from '@e2e/fixtures/ComfyPage'
import { TestIds } from '@e2e/fixtures/selectors'
/** Locate a workflow label in whatever panel is visible (browse or search). */
function findWorkflow(page: Page, name: string) {
  const sidebar = page.getByTestId(TestIds.sidebar.workflows)
  return sidebar.locator('.node-label', { hasText: name })
}
test.describe('Workflow sidebar - search', () => {
  test.beforeEach(async ({ comfyPage }) => {
    // Two saved workflows with distinct prefixes for filter assertions.
    await comfyPage.workflow.setupWorkflowsDirectory({
      'alpha-workflow.json': 'default.json',
      'beta-workflow.json': 'default.json'
    })
  })

  test('Search filters saved workflows by name', async ({ comfyPage }) => {
    await comfyPage.menu.workflowsTab.open()
    const search = comfyPage.page.getByPlaceholder('Search Workflow...')
    await search.fill('alpha')
    await expect(findWorkflow(comfyPage.page, 'alpha-workflow')).toBeVisible()
    await expect(
      findWorkflow(comfyPage.page, 'beta-workflow')
    ).not.toBeVisible()
  })

  test('Clearing search restores all workflows', async ({ comfyPage }) => {
    const tab = comfyPage.menu.workflowsTab
    await tab.open()
    const search = comfyPage.page.getByPlaceholder('Search Workflow...')
    await search.fill('alpha')
    await expect(
      findWorkflow(comfyPage.page, 'beta-workflow')
    ).not.toBeVisible()
    // Emptying the field leaves search mode and shows the browse panel again.
    await search.fill('')
    await expect(tab.getPersistedItem('alpha-workflow')).toBeVisible()
    await expect(tab.getPersistedItem('beta-workflow')).toBeVisible()
  })

  test('Search with no matches shows empty results', async ({ comfyPage }) => {
    await comfyPage.menu.workflowsTab.open()
    const search = comfyPage.page.getByPlaceholder('Search Workflow...')
    await search.fill('nonexistent_xyz')
    for (const name of ['alpha-workflow', 'beta-workflow']) {
      await expect(findWorkflow(comfyPage.page, name)).not.toBeVisible()
    }
  })
})

View File

@@ -116,9 +116,7 @@ test.describe('Templates', { tag: ['@slow', '@workflow'] }, () => {
await comfyPage.command.executeCommand('Comfy.BrowseTemplates')
const dialog = comfyPage.page.getByRole('dialog').filter({
has: comfyPage.page.getByRole('heading', { name: 'Modèles', exact: true })
})
const dialog = comfyPage.templatesDialog.filterByHeading('Modèles')
await expect(dialog).toBeVisible()
// Validate that French-localized strings from the templates index are rendered
@@ -220,8 +218,7 @@ test.describe('Templates', { tag: ['@slow', '@workflow'] }, () => {
await expect(comfyPage.templates.content).toBeVisible()
// Wait for filter bar select components to render
const dialog = comfyPage.page.getByRole('dialog')
const sortBySelect = dialog.getByRole('combobox', { name: /Sort/ })
const sortBySelect = comfyPage.templatesDialog.getCombobox(/Sort/)
await expect(sortBySelect).toBeVisible()
// Screenshot the filter bar containing MultiSelect and SingleSelect

View File

@@ -460,11 +460,8 @@ test.describe('Workflow Persistence', () => {
.getWorkflowTab('Unsaved Workflow')
.click({ button: 'middle' })
// Click "Save" in the dirty close dialog (scoped to dialog)
const dialog = comfyPage.page.getByRole('dialog')
const saveButton = dialog.getByRole('button', { name: 'Save' })
await saveButton.waitFor({ state: 'visible' })
await saveButton.click()
// Click "Save" in the dirty close dialog
await comfyPage.confirmDialog.click('save')
// Fill in the filename dialog
const saveDialog = comfyPage.menu.topbar.getSaveDialog()

View File

@@ -1,6 +1,6 @@
{
"name": "@comfyorg/comfyui-frontend",
"version": "1.43.11",
"version": "1.43.12",
"private": true,
"description": "Official front-end implementation of ComfyUI",
"homepage": "https://comfy.org",

View File

@@ -9,6 +9,7 @@ import { api } from '@/scripts/api'
import { useAssetsStore } from '@/stores/assetsStore'
const PASTED_IMAGE_EXPIRY_MS = 2000
const UPLOAD_TIMEOUT_MS = 120_000
interface ImageUploadFormFields {
/**
@@ -30,7 +31,8 @@ const uploadFile = async (
const resp = await api.fetchApi('/upload/image', {
method: 'POST',
body
body,
signal: AbortSignal.timeout(UPLOAD_TIMEOUT_MS)
})
if (resp.status !== 200) {
@@ -88,7 +90,11 @@ export const useNodeImageUpload = (
if (!path) return
return path
} catch (error) {
useToastStore().addAlert(String(error))
if (error instanceof DOMException && error.name === 'TimeoutError') {
useToastStore().addAlert(t('g.uploadTimedOut'))
} else {
useToastStore().addAlert(String(error))
}
}
}

View File

@@ -2527,6 +2527,23 @@
"inputsNone": "لا توجد مدخلات",
"inputsNoneTooltip": "العقدة ليس لديها مدخلات",
"locateNode": "تحديد موقع العقدة على اللوحة",
"missingMedia": {
"audio": "الصوتيات",
"cancelSelection": "إلغاء الاختيار",
"collapseNodes": "إخفاء العقد المشار إليها",
"confirmSelection": "تأكيد الاختيار",
"expandNodes": "عرض العقد المشار إليها",
"image": "الصور",
"locateNode": "تحديد موقع العقدة",
"missingMediaTitle": "المدخلات المفقودة",
"or": "أو",
"selectedFromLibrary": "تم الاختيار من المكتبة",
"uploadFile": "رفع {type}",
"uploaded": "تم الرفع",
"uploading": "جاري الرفع...",
"useFromLibrary": "استخدام من المكتبة",
"video": "الفيديوهات"
},
"missingModels": {
"alreadyExistsInCategory": "هذا النموذج موجود بالفعل في \"{category}\"",
"assetLoadTimeout": "انتهت مهلة اكتشاف النموذج. حاول إعادة تحميل سير العمل.",

View File

@@ -17951,6 +17951,241 @@
}
}
},
"Wan2ImageToVideoApi": {
"description": "إنشاء فيديو من صورة الإطار الأول، مع إمكانية إضافة صورة الإطار الأخير وصوت اختياري.",
"display_name": "وان 2.7 من صورة إلى فيديو",
"inputs": {
"audio": {
"name": "الصوت",
"tooltip": "الصوت المستخدم لتوجيه توليد الفيديو (مثل مزامنة الشفاه أو حركة متوافقة مع الإيقاع). المدة: ۲-۳۰ ثانية. إذا لم يتم توفيره، يقوم النموذج تلقائيًا بإنشاء موسيقى خلفية أو مؤثرات صوتية مناسبة."
},
"control_after_generate": {
"name": "التحكم بعد التوليد"
},
"first_frame": {
"name": "الإطار الأول",
"tooltip": "صورة الإطار الأول. يتم اشتقاق نسبة العرض إلى الارتفاع للإخراج من هذه الصورة."
},
"last_frame": {
"name": "الإطار الأخير",
"tooltip": "صورة الإطار الأخير. يقوم النموذج بإنشاء فيديو ينتقل من الإطار الأول إلى الأخير."
},
"model": {
"name": "النموذج"
},
"model_duration": {
"name": "المدة"
},
"model_negative_prompt": {
"name": "توجيه سلبي"
},
"model_prompt": {
"name": "توجيه"
},
"model_resolution": {
"name": "الدقة"
},
"prompt_extend": {
"name": "تعزيز التوجيه",
"tooltip": "ما إذا كان سيتم تعزيز التوجيه بمساعدة الذكاء الاصطناعي."
},
"seed": {
"name": "البذرة",
"tooltip": "البذرة المستخدمة في التوليد."
},
"watermark": {
"name": "علامة مائية",
"tooltip": "ما إذا كان سيتم إضافة علامة مائية مولدة بالذكاء الاصطناعي إلى النتيجة."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2ReferenceVideoApi": {
"description": "إنشاء فيديو يعرض شخصًا أو كائنًا من مواد مرجعية. يدعم أداء شخصية واحدة وتفاعل عدة شخصيات.",
"display_name": "وان 2.7 من مرجع إلى فيديو",
"inputs": {
"control_after_generate": {
"name": "التحكم بعد التوليد"
},
"model": {
"name": "النموذج"
},
"model_duration": {
"name": "المدة"
},
"model_negative_prompt": {
"name": "توجيه سلبي"
},
"model_prompt": {
"name": "توجيه"
},
"model_ratio": {
"name": "النسبة"
},
"model_resolution": {
"name": "الدقة"
},
"seed": {
"name": "البذرة",
"tooltip": "البذرة المستخدمة في التوليد."
},
"watermark": {
"name": "علامة مائية",
"tooltip": "ما إذا كان سيتم إضافة علامة مائية مولدة بالذكاء الاصطناعي إلى النتيجة."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2TextToVideoApi": {
"description": "ينشئ فيديو بناءً على وصف نصي باستخدام نموذج وان 2.7.",
"display_name": "وان 2.7 تحويل النص إلى فيديو",
"inputs": {
"audio": {
"name": "الصوت",
"tooltip": "الصوت المستخدم لتوجيه توليد الفيديو (مثل مزامنة الشفاه أو الحركة المتوافقة مع الإيقاع). المدة: ٣-٣٠ ثانية. إذا لم يتم توفيره، سيقوم النموذج تلقائيًا بإنشاء موسيقى خلفية أو مؤثرات صوتية مناسبة."
},
"control_after_generate": {
"name": "التحكم بعد التوليد"
},
"model": {
"name": "النموذج"
},
"model_duration": {
"name": "المدة"
},
"model_negative_prompt": {
"name": "الوصف السلبي"
},
"model_prompt": {
"name": "الوصف"
},
"model_ratio": {
"name": "النسبة"
},
"model_resolution": {
"name": "الدقة"
},
"prompt_extend": {
"name": "توسيع الوصف",
"tooltip": "ما إذا كان سيتم تعزيز الوصف بمساعدة الذكاء الاصطناعي."
},
"seed": {
"name": "البذرة",
"tooltip": "البذرة المستخدمة في التوليد."
},
"watermark": {
"name": "علامة مائية",
"tooltip": "ما إذا كان سيتم إضافة علامة مائية مولدة بالذكاء الاصطناعي إلى النتيجة."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoContinuationApi": {
"description": "استكمال الفيديو من حيث توقف، مع إمكانية التحكم في الإطار الأخير.",
"display_name": "وان 2.7 استكمال الفيديو",
"inputs": {
"control_after_generate": {
"name": "التحكم بعد التوليد"
},
"first_clip": {
"name": "المقطع الأول",
"tooltip": "الفيديو المدخل للاستكمال منه. المدة: ٢-١٠ ثوانٍ. يتم اشتقاق نسبة العرض إلى الارتفاع من هذا الفيديو."
},
"last_frame": {
"name": "الإطار الأخير",
"tooltip": "صورة الإطار الأخير. سيتم الانتقال في الاستكمال نحو هذا الإطار."
},
"model": {
"name": "النموذج"
},
"model_duration": {
"name": "المدة"
},
"model_negative_prompt": {
"name": "الوصف السلبي"
},
"model_prompt": {
"name": "الوصف"
},
"model_resolution": {
"name": "الدقة"
},
"prompt_extend": {
"name": "توسيع الوصف",
"tooltip": "ما إذا كان سيتم تعزيز الوصف بمساعدة الذكاء الاصطناعي."
},
"seed": {
"name": "البذرة",
"tooltip": "البذرة المستخدمة في التوليد."
},
"watermark": {
"name": "علامة مائية",
"tooltip": "ما إذا كان سيتم إضافة علامة مائية مولدة بالذكاء الاصطناعي إلى النتيجة."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoEditApi": {
"description": "تحرير فيديو باستخدام تعليمات نصية أو صور مرجعية أو نقل النمط.",
"display_name": "وان 2.7 تحرير الفيديو",
"inputs": {
"audio_setting": {
"name": "إعداد الصوت",
"tooltip": "'تلقائي': يقرر النموذج ما إذا كان سيعيد توليد الصوت بناءً على الوصف. 'الأصلي': الحفاظ على الصوت الأصلي من الفيديو المدخل."
},
"control_after_generate": {
"name": "التحكم بعد التوليد"
},
"model": {
"name": "النموذج"
},
"model_duration": {
"name": "المدة"
},
"model_prompt": {
"name": "الوصف"
},
"model_ratio": {
"name": "النسبة"
},
"model_resolution": {
"name": "الدقة"
},
"seed": {
"name": "البذرة",
"tooltip": "البذرة المستخدمة في التوليد."
},
"video": {
"name": "الفيديو",
"tooltip": "الفيديو المراد تحريره."
},
"watermark": {
"name": "علامة مائية",
"tooltip": "ما إذا كان سيتم إضافة علامة مائية مولدة بالذكاء الاصطناعي إلى النتيجة."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanAnimateToVideo": {
"display_name": "WanAnimateToVideo",
"inputs": {

View File

@@ -199,6 +199,7 @@
"control_before_generate": "control before generate",
"choose_file_to_upload": "choose file to upload",
"uploadAlreadyInProgress": "Upload already in progress",
"uploadTimedOut": "Upload timed out. Please try again.",
"capture": "capture",
"nodes": "Nodes",
"nodesCount": "{count} node | {count} nodes",
@@ -1743,8 +1744,8 @@
"Tripo": "Tripo",
"Veo": "Veo",
"Vidu": "Vidu",
"camera": "camera",
"Wan": "Wan",
"camera": "camera",
"WaveSpeed": "WaveSpeed",
"zimage": "zimage"
},

View File

@@ -17976,6 +17976,241 @@
}
}
},
"Wan2ImageToVideoApi": {
"display_name": "Wan 2.7 Image to Video",
"description": "Generate a video from a first-frame image, with optional last-frame image and audio.",
"inputs": {
"model": {
"name": "model"
},
"first_frame": {
"name": "first_frame",
"tooltip": "First frame image. The output aspect ratio is derived from this image."
},
"seed": {
"name": "seed",
"tooltip": "Seed to use for generation."
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "Whether to enhance the prompt with AI assistance."
},
"watermark": {
"name": "watermark",
"tooltip": "Whether to add an AI-generated watermark to the result."
},
"last_frame": {
"name": "last_frame",
"tooltip": "Last frame image. The model generates a video transitioning from first to last frame."
},
"audio": {
"name": "audio",
"tooltip": "Audio for driving video generation (e.g., lip sync, beat-matched motion). Duration: 2s-30s. If not provided, the model automatically generates matching background music or sound effects."
},
"control_after_generate": {
"name": "control after generate"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolution"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2ReferenceVideoApi": {
"display_name": "Wan 2.7 Reference to Video",
"description": "Generate a video featuring a person or object from reference materials. Supports single-character performances and multi-character interactions.",
"inputs": {
"model": {
"name": "model"
},
"seed": {
"name": "seed",
"tooltip": "Seed to use for generation."
},
"watermark": {
"name": "watermark",
"tooltip": "Whether to add an AI-generated watermark to the result."
},
"control_after_generate": {
"name": "control after generate"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2TextToVideoApi": {
"display_name": "Wan 2.7 Text to Video",
"description": "Generates a video based on a text prompt using the Wan 2.7 model.",
"inputs": {
"model": {
"name": "model"
},
"seed": {
"name": "seed",
"tooltip": "Seed to use for generation."
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "Whether to enhance the prompt with AI assistance."
},
"watermark": {
"name": "watermark",
"tooltip": "Whether to add an AI-generated watermark to the result."
},
"audio": {
"name": "audio",
"tooltip": "Audio for driving video generation (e.g., lip sync, beat-matched motion). Duration: 3s-30s. If not provided, the model automatically generates matching background music or sound effects."
},
"control_after_generate": {
"name": "control after generate"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoContinuationApi": {
"display_name": "Wan 2.7 Video Continuation",
"description": "Continue a video from where it left off, with optional last-frame control.",
"inputs": {
"model": {
"name": "model"
},
"first_clip": {
"name": "first_clip",
"tooltip": "Input video to continue from. Duration: 2s-10s. The output aspect ratio is derived from this video."
},
"seed": {
"name": "seed",
"tooltip": "Seed to use for generation."
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "Whether to enhance the prompt with AI assistance."
},
"watermark": {
"name": "watermark",
"tooltip": "Whether to add an AI-generated watermark to the result."
},
"last_frame": {
"name": "last_frame",
"tooltip": "Last frame image. The continuation will transition towards this frame."
},
"control_after_generate": {
"name": "control after generate"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolution"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoEditApi": {
"display_name": "Wan 2.7 Video Edit",
"description": "Edit a video using text instructions, reference images, or style transfer.",
"inputs": {
"model": {
"name": "model"
},
"video": {
"name": "video",
"tooltip": "The video to edit."
},
"seed": {
"name": "seed",
"tooltip": "Seed to use for generation."
},
"audio_setting": {
"name": "audio_setting",
"tooltip": "'auto': model decides whether to regenerate audio based on the prompt. 'origin': preserve the original audio from the input video."
},
"watermark": {
"name": "watermark",
"tooltip": "Whether to add an AI-generated watermark to the result."
},
"control_after_generate": {
"name": "control after generate"
},
"model_duration": {
"name": "duration"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanAnimateToVideo": {
"display_name": "WanAnimateToVideo",
"inputs": {

View File

@@ -2527,6 +2527,23 @@
"inputsNone": "SIN ENTRADAS",
"inputsNoneTooltip": "El nodo no tiene entradas",
"locateNode": "Localizar nodo en el lienzo",
"missingMedia": {
"audio": "Audio",
"cancelSelection": "Cancelar selección",
"collapseNodes": "Ocultar nodos de referencia",
"confirmSelection": "Confirmar selección",
"expandNodes": "Mostrar nodos de referencia",
"image": "Imágenes",
"locateNode": "Localizar nodo",
"missingMediaTitle": "Entradas faltantes",
"or": "O",
"selectedFromLibrary": "Seleccionado de la biblioteca",
"uploadFile": "Subir {type}",
"uploaded": "Subido",
"uploading": "Subiendo...",
"useFromLibrary": "Usar de la biblioteca",
"video": "Videos"
},
"missingModels": {
"alreadyExistsInCategory": "Este modelo ya existe en \"{category}\"",
"assetLoadTimeout": "El tiempo de detección del modelo se agotó. Intenta recargar el flujo de trabajo.",

View File

@@ -17951,6 +17951,241 @@
}
}
},
"Wan2ImageToVideoApi": {
"description": "Genera un video a partir de una imagen del primer fotograma, con opción de imagen del último fotograma y audio.",
"display_name": "Wan 2.7 Imagen a Video",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "Audio para guiar la generación del video (por ejemplo, sincronización labial, movimiento al ritmo). Duración: 2s-30s. Si no se proporciona, el modelo genera automáticamente música de fondo o efectos de sonido acordes."
},
"control_after_generate": {
"name": "controlar después de generar"
},
"first_frame": {
"name": "primer_fotograma",
"tooltip": "Imagen del primer fotograma. La relación de aspecto de salida se deriva de esta imagen."
},
"last_frame": {
"name": "último_fotograma",
"tooltip": "Imagen del último fotograma. El modelo genera un video que transiciona del primer al último fotograma."
},
"model": {
"name": "modelo"
},
"model_duration": {
"name": "duración"
},
"model_negative_prompt": {
"name": "prompt_negativo"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolución"
},
"prompt_extend": {
"name": "extender_prompt",
"tooltip": "Si se debe mejorar el prompt con asistencia de IA."
},
"seed": {
"name": "semilla",
"tooltip": "Semilla para usar en la generación."
},
"watermark": {
"name": "marca_de_agua",
"tooltip": "Si se debe añadir una marca de agua generada por IA al resultado."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2ReferenceVideoApi": {
"description": "Genera un video con una persona u objeto a partir de materiales de referencia. Soporta actuaciones de un solo personaje e interacciones entre varios personajes.",
"display_name": "Wan 2.7 Referencia a Video",
"inputs": {
"control_after_generate": {
"name": "controlar después de generar"
},
"model": {
"name": "modelo"
},
"model_duration": {
"name": "duración"
},
"model_negative_prompt": {
"name": "prompt_negativo"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "relación"
},
"model_resolution": {
"name": "resolución"
},
"seed": {
"name": "semilla",
"tooltip": "Semilla para usar en la generación."
},
"watermark": {
"name": "marca_de_agua",
"tooltip": "Si se debe añadir una marca de agua generada por IA al resultado."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2TextToVideoApi": {
"description": "Genera un video a partir de un texto usando el modelo Wan 2.7.",
"display_name": "Wan 2.7 Texto a Video",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "Audio para guiar la generación del video (por ejemplo, sincronización labial, movimiento al ritmo). Duración: 3s-30s. Si no se proporciona, el modelo genera automáticamente música de fondo o efectos de sonido acordes."
},
"control_after_generate": {
"name": "controlar después de generar"
},
"model": {
"name": "modelo"
},
"model_duration": {
"name": "duración"
},
"model_negative_prompt": {
"name": "prompt_negativo"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "relación de aspecto"
},
"model_resolution": {
"name": "resolución"
},
"prompt_extend": {
"name": "extender_prompt",
"tooltip": "Si se debe mejorar el prompt con asistencia de IA."
},
"seed": {
"name": "semilla",
"tooltip": "Semilla para usar en la generación."
},
"watermark": {
"name": "marca de agua",
"tooltip": "Si se debe añadir una marca de agua generada por IA al resultado."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoContinuationApi": {
"description": "Continúa un video desde donde terminó, con control opcional del último fotograma.",
"display_name": "Wan 2.7 Continuación de Video",
"inputs": {
"control_after_generate": {
"name": "controlar después de generar"
},
"first_clip": {
"name": "primer_clip",
"tooltip": "Video de entrada desde el cual continuar. Duración: 2s-10s. La relación de aspecto de salida se deriva de este video."
},
"last_frame": {
"name": "último_fotograma",
"tooltip": "Imagen del último fotograma. La continuación hará la transición hacia este fotograma."
},
"model": {
"name": "modelo"
},
"model_duration": {
"name": "duración"
},
"model_negative_prompt": {
"name": "prompt_negativo"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolución"
},
"prompt_extend": {
"name": "extender_prompt",
"tooltip": "Si se debe mejorar el prompt con asistencia de IA."
},
"seed": {
"name": "semilla",
"tooltip": "Semilla para usar en la generación."
},
"watermark": {
"name": "marca de agua",
"tooltip": "Si se debe añadir una marca de agua generada por IA al resultado."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoEditApi": {
"description": "Edita un video usando instrucciones de texto, imágenes de referencia o transferencia de estilo.",
"display_name": "Wan 2.7 Edición de Video",
"inputs": {
"audio_setting": {
"name": "configuración_de_audio",
"tooltip": "'auto': el modelo decide si regenerar el audio según el prompt. 'origin': conserva el audio original del video de entrada."
},
"control_after_generate": {
"name": "controlar después de generar"
},
"model": {
"name": "modelo"
},
"model_duration": {
"name": "duración"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "relación de aspecto"
},
"model_resolution": {
"name": "resolución"
},
"seed": {
"name": "semilla",
"tooltip": "Semilla para usar en la generación."
},
"video": {
"name": "video",
"tooltip": "El video a editar."
},
"watermark": {
"name": "marca de agua",
"tooltip": "Si se debe añadir una marca de agua generada por IA al resultado."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanAnimateToVideo": {
"display_name": "WanAnimateToVideo",
"inputs": {

View File

@@ -2527,6 +2527,23 @@
"inputsNone": "بدون ورودی",
"inputsNoneTooltip": "این نود ورودی ندارد",
"locateNode": "یافتن node در canvas",
"missingMedia": {
"audio": "صدا",
"cancelSelection": "لغو انتخاب",
"collapseNodes": "پنهان کردن nodeهای ارجاع‌دهنده",
"confirmSelection": "تأیید انتخاب",
"expandNodes": "نمایش nodeهای ارجاع‌دهنده",
"image": "تصاویر",
"locateNode": "یافتن node",
"missingMediaTitle": "ورودی‌های گمشده",
"or": "یا",
"selectedFromLibrary": "انتخاب شده از کتابخانه",
"uploadFile": "بارگذاری {type}",
"uploaded": "بارگذاری شد",
"uploading": "در حال بارگذاری...",
"useFromLibrary": "استفاده از کتابخانه",
"video": "ویدیوها"
},
"missingModels": {
"alreadyExistsInCategory": "این مدل قبلاً در «{category}» وجود دارد",
"assetLoadTimeout": "شناسایی مدل زمان‌بر شد. لطفاً workflow را مجدداً بارگذاری کنید.",

View File

@@ -17951,6 +17951,241 @@
}
}
},
"Wan2ImageToVideoApi": {
"description": "تولید ویدیو از تصویر فریم اول، با امکان افزودن تصویر فریم آخر و صدا به صورت اختیاری.",
"display_name": "وان ۲.۷ تبدیل تصویر به ویدیو",
"inputs": {
"audio": {
"name": "صدا",
"tooltip": "صدا برای هدایت تولید ویدیو (مثلاً هماهنگی لب، حرکت هماهنگ با ضرب‌آهنگ). مدت زمان: ۲ تا ۳۰ ثانیه. در صورت عدم ارائه، مدل به طور خودکار موسیقی پس‌زمینه یا افکت صوتی مناسب تولید می‌کند."
},
"control_after_generate": {
"name": "کنترل پس از تولید"
},
"first_frame": {
"name": "فریم اول",
"tooltip": "تصویر فریم اول. نسبت ابعاد خروجی از این تصویر استخراج می‌شود."
},
"last_frame": {
"name": "فریم آخر",
"tooltip": "تصویر فریم آخر. مدل ویدیویی با انتقال از فریم اول به فریم آخر تولید می‌کند."
},
"model": {
"name": "مدل"
},
"model_duration": {
"name": "مدت زمان"
},
"model_negative_prompt": {
"name": "پرامپت منفی"
},
"model_prompt": {
"name": "پرامپت"
},
"model_resolution": {
"name": "رزولوشن"
},
"prompt_extend": {
"name": "گسترش پرامپت",
"tooltip": "آیا پرامپت با کمک هوش مصنوعی بهبود یابد یا خیر."
},
"seed": {
"name": "بذر",
"tooltip": "بذر مورد استفاده برای تولید."
},
"watermark": {
"name": "واترمارک",
"tooltip": "آیا واترمارک تولیدشده توسط هوش مصنوعی به نتیجه اضافه شود یا خیر."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2ReferenceVideoApi": {
"description": "تولید ویدیو با حضور یک شخص یا شیء بر اساس مواد مرجع. پشتیبانی از اجرای تک‌نفره و تعامل چندنفره.",
"display_name": "وان ۲.۷ مرجع به ویدیو",
"inputs": {
"control_after_generate": {
"name": "کنترل پس از تولید"
},
"model": {
"name": "مدل"
},
"model_duration": {
"name": "مدت زمان"
},
"model_negative_prompt": {
"name": "پرامپت منفی"
},
"model_prompt": {
"name": "پرامپت"
},
"model_ratio": {
"name": "نسبت تصویر"
},
"model_resolution": {
"name": "رزولوشن"
},
"seed": {
"name": "بذر",
"tooltip": "بذر مورد استفاده برای تولید."
},
"watermark": {
"name": "واترمارک",
"tooltip": "آیا واترمارک تولیدشده توسط هوش مصنوعی به نتیجه اضافه شود یا خیر."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2TextToVideoApi": {
"description": "تولید ویدیو بر اساس یک پرامپت متنی با استفاده از مدل وان ۲.۷.",
"display_name": "وان ۲.۷ تبدیل متن به ویدیو",
"inputs": {
"audio": {
"name": "صدا",
"tooltip": "صدا برای هدایت تولید ویدیو (مثلاً هماهنگی لب، حرکت مطابق با ضرب). مدت زمان: ۳ تا ۳۰ ثانیه. در صورت عدم ارائه، مدل به طور خودکار موسیقی پس‌زمینه یا افکت صوتی مناسب تولید می‌کند."
},
"control_after_generate": {
"name": "کنترل پس از تولید"
},
"model": {
"name": "مدل"
},
"model_duration": {
"name": "مدت زمان"
},
"model_negative_prompt": {
"name": "پرامپت منفی"
},
"model_prompt": {
"name": "پرامپت"
},
"model_ratio": {
"name": "نسبت تصویر"
},
"model_resolution": {
"name": "وضوح"
},
"prompt_extend": {
"name": "گسترش پرامپت",
"tooltip": "آیا پرامپت با کمک هوش مصنوعی بهبود یابد."
},
"seed": {
"name": "بذر",
"tooltip": "بذر مورد استفاده برای تولید."
},
"watermark": {
"name": "واترمارک",
"tooltip": "آیا واترمارک تولیدشده توسط هوش مصنوعی به نتیجه اضافه شود."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoContinuationApi": {
"description": "ادامه دادن یک ویدیو از جایی که متوقف شده است، با امکان کنترل فریم آخر.",
"display_name": "وان ۲.۷ ادامه ویدیو",
"inputs": {
"control_after_generate": {
"name": "کنترل پس از تولید"
},
"first_clip": {
"name": "کلیپ اول",
"tooltip": "ویدیوی ورودی برای ادامه دادن. مدت زمان: ۲ تا ۱۰ ثانیه. نسبت تصویر خروجی از این ویدیو گرفته می‌شود."
},
"last_frame": {
"name": "فریم آخر",
"tooltip": "تصویر فریم آخر. ادامه ویدیو به سمت این فریم انتقال می‌یابد."
},
"model": {
"name": "مدل"
},
"model_duration": {
"name": "مدت زمان"
},
"model_negative_prompt": {
"name": "پرامپت منفی"
},
"model_prompt": {
"name": "پرامپت"
},
"model_resolution": {
"name": "وضوح"
},
"prompt_extend": {
"name": "گسترش پرامپت",
"tooltip": "آیا پرامپت با کمک هوش مصنوعی بهبود یابد."
},
"seed": {
"name": "بذر",
"tooltip": "بذر مورد استفاده برای تولید."
},
"watermark": {
"name": "واترمارک",
"tooltip": "آیا واترمارک تولیدشده توسط هوش مصنوعی به نتیجه اضافه شود."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoEditApi": {
"description": "ویرایش ویدیو با استفاده از دستور متنی، تصاویر مرجع یا انتقال سبک.",
"display_name": "وان ۲.۷ ویرایش ویدیو",
"inputs": {
"audio_setting": {
"name": "تنظیمات صدا",
"tooltip": "'auto': مدل تصمیم می‌گیرد که آیا صدا بر اساس پرامپت بازتولید شود یا نه. 'origin': صدای اصلی ویدیوی ورودی حفظ می‌شود."
},
"control_after_generate": {
"name": "کنترل پس از تولید"
},
"model": {
"name": "مدل"
},
"model_duration": {
"name": "مدت زمان"
},
"model_prompt": {
"name": "پرامپت"
},
"model_ratio": {
"name": "نسبت تصویر"
},
"model_resolution": {
"name": "وضوح"
},
"seed": {
"name": "بذر",
"tooltip": "بذر مورد استفاده برای تولید."
},
"video": {
"name": "ویدیو",
"tooltip": "ویدیویی که باید ویرایش شود."
},
"watermark": {
"name": "واترمارک",
"tooltip": "آیا واترمارک تولیدشده توسط هوش مصنوعی به نتیجه اضافه شود."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanAnimateToVideo": {
"display_name": "WanAnimateToVideo",
"inputs": {

View File

@@ -2527,6 +2527,23 @@
"inputsNone": "AUCUNE ENTRÉE",
"inputsNoneTooltip": "Le nœud n'a pas d'entrées",
"locateNode": "Localiser le nœud sur le canevas",
"missingMedia": {
"audio": "Audio",
"cancelSelection": "Annuler la sélection",
"collapseNodes": "Masquer les nœuds de référence",
"confirmSelection": "Confirmer la sélection",
"expandNodes": "Afficher les nœuds de référence",
"image": "Images",
"locateNode": "Localiser le nœud",
"missingMediaTitle": "Entrées manquantes",
"or": "OU",
"selectedFromLibrary": "Sélectionné depuis la bibliothèque",
"uploadFile": "Téléverser {type}",
"uploaded": "Téléversé",
"uploading": "Téléversement en cours...",
"useFromLibrary": "Utiliser depuis la bibliothèque",
"video": "Vidéos"
},
"missingModels": {
"alreadyExistsInCategory": "Ce modèle existe déjà dans « {category} »",
"assetLoadTimeout": "Le délai de détection du modèle est dépassé. Essayez de recharger le workflow.",

View File

@@ -17951,6 +17951,241 @@
}
}
},
"Wan2ImageToVideoApi": {
"description": "Générez une vidéo à partir d'une image de première image, avec une image de dernière image et un audio optionnels.",
"display_name": "Wan 2.7 Image vers Vidéo",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "Audio pour guider la génération vidéo (ex : synchronisation labiale, mouvement sur le rythme). Durée : 2s-30s. Si non fourni, le modèle génère automatiquement une musique de fond ou des effets sonores adaptés."
},
"control_after_generate": {
"name": "contrôle après génération"
},
"first_frame": {
"name": "première image",
"tooltip": "Image de la première image. Le format de sortie est dérivé de cette image."
},
"last_frame": {
"name": "dernière image",
"tooltip": "Image de la dernière image. Le modèle génère une vidéo passant de la première à la dernière image."
},
"model": {
"name": "modèle"
},
"model_duration": {
"name": "durée"
},
"model_negative_prompt": {
"name": "prompt négatif"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "résolution"
},
"prompt_extend": {
"name": "extension de prompt",
"tooltip": "Permet d'améliorer le prompt avec l'aide de l'IA."
},
"seed": {
"name": "graine",
"tooltip": "Graine à utiliser pour la génération."
},
"watermark": {
"name": "filigrane",
"tooltip": "Ajouter ou non un filigrane généré par IA au résultat."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2ReferenceVideoApi": {
"description": "Générez une vidéo mettant en scène une personne ou un objet à partir de références. Prend en charge les performances à un personnage et les interactions multi-personnages.",
"display_name": "Wan 2.7 Référence vers Vidéo",
"inputs": {
"control_after_generate": {
"name": "contrôle après génération"
},
"model": {
"name": "modèle"
},
"model_duration": {
"name": "durée"
},
"model_negative_prompt": {
"name": "prompt négatif"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "résolution"
},
"seed": {
"name": "graine",
"tooltip": "Graine à utiliser pour la génération."
},
"watermark": {
"name": "filigrane",
"tooltip": "Ajouter ou non un filigrane généré par IA au résultat."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2TextToVideoApi": {
"description": "Génère une vidéo à partir d'une invite textuelle en utilisant le modèle Wan 2.7.",
"display_name": "Wan 2.7 Texte en Vidéo",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "Audio pour guider la génération vidéo (ex : synchronisation labiale, mouvement sur le rythme). Durée : 3s-30s. Si non fourni, le modèle génère automatiquement une musique de fond ou des effets sonores adaptés."
},
"control_after_generate": {
"name": "contrôle après génération"
},
"model": {
"name": "modèle"
},
"model_duration": {
"name": "durée"
},
"model_negative_prompt": {
"name": "invite négative"
},
"model_prompt": {
"name": "invite"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "résolution"
},
"prompt_extend": {
"name": "extension d'invite",
"tooltip": "Améliorer l'invite avec l'assistance de l'IA."
},
"seed": {
"name": "graine",
"tooltip": "Graine à utiliser pour la génération."
},
"watermark": {
"name": "filigrane",
"tooltip": "Ajouter un filigrane généré par IA au résultat."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoContinuationApi": {
"description": "Continue une vidéo à partir de l'endroit où elle s'est arrêtée, avec un contrôle optionnel de la dernière image.",
"display_name": "Wan 2.7 Continuation Vidéo",
"inputs": {
"control_after_generate": {
"name": "contrôle après génération"
},
"first_clip": {
"name": "premier clip",
"tooltip": "Vidéo d'entrée à continuer. Durée : 2s-10s. Le ratio de sortie est dérivé de cette vidéo."
},
"last_frame": {
"name": "dernière image",
"tooltip": "Image de la dernière frame. La continuation effectuera une transition vers cette image."
},
"model": {
"name": "modèle"
},
"model_duration": {
"name": "durée"
},
"model_negative_prompt": {
"name": "invite négative"
},
"model_prompt": {
"name": "invite"
},
"model_resolution": {
"name": "résolution"
},
"prompt_extend": {
"name": "extension d'invite",
"tooltip": "Améliorer l'invite avec l'assistance de l'IA."
},
"seed": {
"name": "graine",
"tooltip": "Graine à utiliser pour la génération."
},
"watermark": {
"name": "filigrane",
"tooltip": "Ajouter un filigrane généré par IA au résultat."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoEditApi": {
"description": "Éditez une vidéo à l'aide d'instructions textuelles, d'images de référence ou de transfert de style.",
"display_name": "Wan 2.7 Édition Vidéo",
"inputs": {
"audio_setting": {
"name": "paramètre audio",
"tooltip": "'auto' : le modèle décide de régénérer ou non l'audio selon l'invite. 'origin' : préserve l'audio original de la vidéo d'entrée."
},
"control_after_generate": {
"name": "contrôle après génération"
},
"model": {
"name": "modèle"
},
"model_duration": {
"name": "durée"
},
"model_prompt": {
"name": "invite"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "résolution"
},
"seed": {
"name": "graine",
"tooltip": "Graine à utiliser pour la génération."
},
"video": {
"name": "vidéo",
"tooltip": "La vidéo à éditer."
},
"watermark": {
"name": "filigrane",
"tooltip": "Ajouter un filigrane généré par IA au résultat."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanAnimateToVideo": {
"display_name": "WanAnimateToVideo",
"inputs": {

View File

@@ -2527,6 +2527,23 @@
"inputsNone": "入力なし",
"inputsNoneTooltip": "このノードには入力がありません",
"locateNode": "キャンバス上でノードを探す",
"missingMedia": {
"audio": "音声",
"cancelSelection": "選択をキャンセル",
"collapseNodes": "参照ノードを非表示",
"confirmSelection": "選択を確定",
"expandNodes": "参照ノードを表示",
"image": "画像",
"locateNode": "ノードを特定",
"missingMediaTitle": "入力がありません",
"or": "または",
"selectedFromLibrary": "ライブラリから選択済み",
"uploadFile": "{type}をアップロード",
"uploaded": "アップロード完了",
"uploading": "アップロード中...",
"useFromLibrary": "ライブラリから使用",
"video": "動画"
},
"missingModels": {
"alreadyExistsInCategory": "このモデルはすでに「{category}」に存在します",
"assetLoadTimeout": "モデルの検出がタイムアウトしました。ワークフローを再読み込みしてください。",

View File

@@ -17951,6 +17951,241 @@
}
}
},
"Wan2ImageToVideoApi": {
"description": "最初のフレーム画像から動画を生成します。オプションで最後のフレーム画像や音声も指定できます。",
"display_name": "Wan 2.7 画像から動画へ",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "動画生成を制御する音声(例:リップシンク、ビートに合わせた動き)。長さ:2秒~30秒。指定しない場合は、モデルが自動的に合うBGMや効果音を生成します。"
},
"control_after_generate": {
"name": "control after generate"
},
"first_frame": {
"name": "first_frame",
"tooltip": "最初のフレーム画像。この画像から出力動画のアスペクト比が決まります。"
},
"last_frame": {
"name": "last_frame",
"tooltip": "最後のフレーム画像。最初から最後のフレームへと遷移する動画を生成します。"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "AIによるプロンプトの強化を行うかどうか。"
},
"seed": {
"name": "seed",
"tooltip": "生成に使用するシード値。"
},
"watermark": {
"name": "watermark",
"tooltip": "AI生成のウォーターマークを結果に追加するかどうか。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2ReferenceVideoApi": {
"description": "リファレンス素材から人物や物体を特徴とする動画を生成します。単一キャラクターの演技や複数キャラクターのインタラクションに対応しています。",
"display_name": "Wan 2.7 リファレンスから動画へ",
"inputs": {
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"seed": {
"name": "seed",
"tooltip": "生成に使用するシード値。"
},
"watermark": {
"name": "watermark",
"tooltip": "AI生成のウォーターマークを結果に追加するかどうか。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2TextToVideoApi": {
"description": "Wan 2.7モデルを使用してテキストプロンプトに基づいたビデオを生成します。",
"display_name": "Wan 2.7 テキストからビデオへ",
"inputs": {
"audio": {
"name": "オーディオ",
"tooltip": "ビデオ生成を駆動する音声(例:リップシンク、ビートに合わせた動き)。長さ:3秒~30秒。未指定の場合、モデルが自動的に合うBGMや効果音を生成します。"
},
"control_after_generate": {
"name": "生成後のコントロール"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "長さ"
},
"model_negative_prompt": {
"name": "ネガティブプロンプト"
},
"model_prompt": {
"name": "プロンプト"
},
"model_ratio": {
"name": "アスペクト比"
},
"model_resolution": {
"name": "解像度"
},
"prompt_extend": {
"name": "プロンプト拡張",
"tooltip": "AIアシストでプロンプトを強化するかどうか。"
},
"seed": {
"name": "シード値",
"tooltip": "生成に使用するシード値。"
},
"watermark": {
"name": "ウォーターマーク",
"tooltip": "AI生成のウォーターマークを結果に追加するかどうか。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoContinuationApi": {
"description": "ビデオの続きから生成を行い、オプションでラストフレーム制御も可能です。",
"display_name": "Wan 2.7 ビデオ継続生成",
"inputs": {
"control_after_generate": {
"name": "生成後のコントロール"
},
"first_clip": {
"name": "最初のクリップ",
"tooltip": "継続元となる入力ビデオ。長さ:2秒~10秒。出力のアスペクト比はこのビデオから取得されます。"
},
"last_frame": {
"name": "ラストフレーム",
"tooltip": "ラストフレーム画像。継続生成はこのフレームへと遷移します。"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "長さ"
},
"model_negative_prompt": {
"name": "ネガティブプロンプト"
},
"model_prompt": {
"name": "プロンプト"
},
"model_resolution": {
"name": "解像度"
},
"prompt_extend": {
"name": "プロンプト拡張",
"tooltip": "AIアシストでプロンプトを強化するかどうか。"
},
"seed": {
"name": "シード値",
"tooltip": "生成に使用するシード値。"
},
"watermark": {
"name": "ウォーターマーク",
"tooltip": "AI生成のウォーターマークを結果に追加するかどうか。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoEditApi": {
"description": "テキスト指示、参照画像、またはスタイル転送を使ってビデオを編集します。",
"display_name": "Wan 2.7 ビデオ編集",
"inputs": {
"audio_setting": {
"name": "オーディオ設定",
"tooltip": "「auto」:プロンプトに基づきモデルが音声再生成の有無を判断。「origin」:入力ビデオの元の音声を保持。"
},
"control_after_generate": {
"name": "生成後のコントロール"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "長さ"
},
"model_prompt": {
"name": "プロンプト"
},
"model_ratio": {
"name": "アスペクト比"
},
"model_resolution": {
"name": "解像度"
},
"seed": {
"name": "シード値",
"tooltip": "生成に使用するシード値。"
},
"video": {
"name": "ビデオ",
"tooltip": "編集対象のビデオ。"
},
"watermark": {
"name": "ウォーターマーク",
"tooltip": "AI生成のウォーターマークを結果に追加するかどうか。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanAnimateToVideo": {
"display_name": "WanAnimateToVideo",
"inputs": {

View File

@@ -2527,6 +2527,23 @@
"inputsNone": "입력 없음",
"inputsNoneTooltip": "노드에 입력이 없습니다",
"locateNode": "캔버스에서 노드 찾기",
"missingMedia": {
"audio": "오디오",
"cancelSelection": "선택 취소",
"collapseNodes": "참조 노드 숨기기",
"confirmSelection": "선택 확인",
"expandNodes": "참조 노드 표시",
"image": "이미지",
"locateNode": "노드 위치 찾기",
"missingMediaTitle": "입력 누락",
"or": "또는",
"selectedFromLibrary": "라이브러리에서 선택됨",
"uploadFile": "{type} 업로드",
"uploaded": "업로드 완료",
"uploading": "업로드 중...",
"useFromLibrary": "라이브러리에서 사용",
"video": "비디오"
},
"missingModels": {
"alreadyExistsInCategory": "이 모델은 이미 \"{category}\"에 존재합니다",
"assetLoadTimeout": "모델 감지 시간이 초과되었습니다. 워크플로우를 다시 불러와 보세요.",

View File

@@ -17951,6 +17951,241 @@
}
}
},
"Wan2ImageToVideoApi": {
"description": "첫 프레임 이미지를 기반으로 비디오를 생성합니다. 마지막 프레임 이미지와 오디오를 선택적으로 추가할 수 있습니다.",
"display_name": "Wan 2.7 이미지 → 비디오",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "비디오 생성에 사용할 오디오입니다(예: 립싱크, 비트에 맞춘 동작). 길이: 2초~30초. 제공하지 않으면 모델이 자동으로 배경 음악 또는 효과음을 생성합니다."
},
"control_after_generate": {
"name": "control after generate"
},
"first_frame": {
"name": "first_frame",
"tooltip": "첫 프레임 이미지입니다. 출력 비율은 이 이미지에서 결정됩니다."
},
"last_frame": {
"name": "last_frame",
"tooltip": "마지막 프레임 이미지입니다. 모델이 첫 프레임에서 마지막 프레임으로 전환되는 비디오를 생성합니다."
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "AI의 도움으로 프롬프트를 확장할지 여부입니다."
},
"seed": {
"name": "seed",
"tooltip": "생성에 사용할 시드 값입니다."
},
"watermark": {
"name": "watermark",
"tooltip": "AI가 생성한 워터마크를 결과에 추가할지 여부입니다."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2ReferenceVideoApi": {
"description": "레퍼런스 자료를 기반으로 인물 또는 오브젝트가 등장하는 비디오를 생성합니다. 단일 캐릭터 연기와 다중 캐릭터 상호작용을 지원합니다.",
"display_name": "Wan 2.7 레퍼런스 → 비디오",
"inputs": {
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"seed": {
"name": "seed",
"tooltip": "생성에 사용할 시드 값입니다."
},
"watermark": {
"name": "watermark",
"tooltip": "AI가 생성한 워터마크를 결과에 추가할지 여부입니다."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2TextToVideoApi": {
"description": "Wan 2.7 모델을 사용하여 텍스트 프롬프트 기반으로 비디오를 생성합니다.",
"display_name": "Wan 2.7 텍스트 → 비디오",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "비디오 생성에 사용할 오디오(예: 립싱크, 비트에 맞춘 동작). 길이: 3초~30초. 제공하지 않으면 모델이 자동으로 배경 음악이나 효과음을 생성합니다."
},
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "AI의 도움으로 프롬프트를 확장할지 여부입니다."
},
"seed": {
"name": "seed",
"tooltip": "생성에 사용할 시드입니다."
},
"watermark": {
"name": "watermark",
"tooltip": "AI가 생성한 워터마크를 결과에 추가할지 여부입니다."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoContinuationApi": {
"description": "비디오가 끝난 지점부터 이어서 생성하며, 마지막 프레임 제어도 선택적으로 지원합니다.",
"display_name": "Wan 2.7 비디오 연속 생성",
"inputs": {
"control_after_generate": {
"name": "control after generate"
},
"first_clip": {
"name": "first_clip",
"tooltip": "이어 생성할 입력 비디오입니다. 길이: 2초~10초. 출력 비율은 이 비디오에서 파생됩니다."
},
"last_frame": {
"name": "last_frame",
"tooltip": "마지막 프레임 이미지입니다. 이어지는 비디오는 이 프레임으로 전환됩니다."
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "AI의 도움으로 프롬프트를 확장할지 여부입니다."
},
"seed": {
"name": "seed",
"tooltip": "생성에 사용할 시드입니다."
},
"watermark": {
"name": "watermark",
"tooltip": "AI가 생성한 워터마크를 결과에 추가할지 여부입니다."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoEditApi": {
"description": "텍스트 지시, 참조 이미지 또는 스타일 전송을 사용하여 비디오를 편집합니다.",
"display_name": "Wan 2.7 비디오 편집",
"inputs": {
"audio_setting": {
"name": "audio_setting",
"tooltip": "'auto': 프롬프트에 따라 오디오를 재생성할지 모델이 결정합니다. 'origin': 입력 비디오의 원본 오디오를 유지합니다."
},
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"seed": {
"name": "seed",
"tooltip": "생성에 사용할 시드입니다."
},
"video": {
"name": "video",
"tooltip": "편집할 비디오입니다."
},
"watermark": {
"name": "watermark",
"tooltip": "AI가 생성한 워터마크를 결과에 추가할지 여부입니다."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanAnimateToVideo": {
"display_name": "완애니메이트투비디오",
"inputs": {

View File

@@ -2527,6 +2527,23 @@
"inputsNone": "SEM ENTRADAS",
"inputsNoneTooltip": "O nó não possui entradas",
"locateNode": "Localizar nó no canvas",
"missingMedia": {
"audio": "Áudio",
"cancelSelection": "Cancelar seleção",
"collapseNodes": "Ocultar nós de referência",
"confirmSelection": "Confirmar seleção",
"expandNodes": "Mostrar nós de referência",
"image": "Imagens",
"locateNode": "Localizar nó",
"missingMediaTitle": "Entradas Ausentes",
"or": "OU",
"selectedFromLibrary": "Selecionado da biblioteca",
"uploadFile": "Enviar {type}",
"uploaded": "Enviado",
"uploading": "Enviando...",
"useFromLibrary": "Usar da Biblioteca",
"video": "Vídeos"
},
"missingModels": {
"alreadyExistsInCategory": "Este modelo já existe em \"{category}\"",
"assetLoadTimeout": "Tempo esgotado na detecção do modelo. Tente recarregar o fluxo de trabalho.",

View File

@@ -17951,6 +17951,241 @@
}
}
},
"Wan2ImageToVideoApi": {
"description": "Gere um vídeo a partir de uma imagem do primeiro quadro, com opção de imagem do último quadro e áudio.",
"display_name": "Wan 2.7 Imagem para Vídeo",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "Áudio para direcionar a geração do vídeo (ex: sincronização labial, movimento no ritmo da batida). Duração: 2s-30s. Se não for fornecido, o modelo gera automaticamente música de fundo ou efeitos sonoros correspondentes."
},
"control_after_generate": {
"name": "control after generate"
},
"first_frame": {
"name": "first_frame",
"tooltip": "Imagem do primeiro quadro. A proporção de aspecto de saída é derivada desta imagem."
},
"last_frame": {
"name": "last_frame",
"tooltip": "Imagem do último quadro. O modelo gera um vídeo fazendo a transição do primeiro para o último quadro."
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "Se deseja aprimorar o prompt com assistência de IA."
},
"seed": {
"name": "seed",
"tooltip": "Seed para usar na geração."
},
"watermark": {
"name": "watermark",
"tooltip": "Se deseja adicionar uma marca d'água gerada por IA ao resultado."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2ReferenceVideoApi": {
"description": "Gere um vídeo apresentando uma pessoa ou objeto a partir de materiais de referência. Suporta performances de personagem único e interações entre múltiplos personagens.",
"display_name": "Wan 2.7 Referência para Vídeo",
"inputs": {
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"seed": {
"name": "seed",
"tooltip": "Seed para usar na geração."
},
"watermark": {
"name": "watermark",
"tooltip": "Se deseja adicionar uma marca d'água gerada por IA ao resultado."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2TextToVideoApi": {
"description": "Gera um vídeo com base em um prompt de texto usando o modelo Wan 2.7.",
"display_name": "Wan 2.7 Texto para Vídeo",
"inputs": {
"audio": {
"name": "áudio",
"tooltip": "Áudio para direcionar a geração do vídeo (ex: sincronização labial, movimento no ritmo da música). Duração: 3s-30s. Se não fornecido, o modelo gera automaticamente música de fundo ou efeitos sonoros compatíveis."
},
"control_after_generate": {
"name": "controle após gerar"
},
"model": {
"name": "modelo"
},
"model_duration": {
"name": "duração"
},
"model_negative_prompt": {
"name": "prompt_negativo"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "proporção"
},
"model_resolution": {
"name": "resolução"
},
"prompt_extend": {
"name": "estender_prompt",
"tooltip": "Se deseja aprimorar o prompt com assistência de IA."
},
"seed": {
"name": "semente",
"tooltip": "Semente a ser usada para a geração."
},
"watermark": {
"name": "marca_d'água",
"tooltip": "Se deseja adicionar uma marca d'água gerada por IA ao resultado."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoContinuationApi": {
"description": "Continue um vídeo de onde parou, com controle opcional do último quadro.",
"display_name": "Wan 2.7 Continuação de Vídeo",
"inputs": {
"control_after_generate": {
"name": "controle após gerar"
},
"first_clip": {
"name": "primeiro_clip",
"tooltip": "Vídeo de entrada para continuar. Duração: 2s-10s. A proporção de aspecto de saída é derivada deste vídeo."
},
"last_frame": {
"name": "último_quadro",
"tooltip": "Imagem do último quadro. A continuação fará a transição para este quadro."
},
"model": {
"name": "modelo"
},
"model_duration": {
"name": "duração"
},
"model_negative_prompt": {
"name": "prompt_negativo"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolução"
},
"prompt_extend": {
"name": "estender_prompt",
"tooltip": "Se deseja aprimorar o prompt com assistência de IA."
},
"seed": {
"name": "semente",
"tooltip": "Semente a ser usada para a geração."
},
"watermark": {
"name": "marca_d'água",
"tooltip": "Se deseja adicionar uma marca d'água gerada por IA ao resultado."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoEditApi": {
"description": "Edite um vídeo usando instruções de texto, imagens de referência ou transferência de estilo.",
"display_name": "Wan 2.7 Edição de Vídeo",
"inputs": {
"audio_setting": {
"name": "configuração_de_áudio",
"tooltip": "'auto': o modelo decide se deve regenerar o áudio com base no prompt. 'origin': preserva o áudio original do vídeo de entrada."
},
"control_after_generate": {
"name": "controle após gerar"
},
"model": {
"name": "modelo"
},
"model_duration": {
"name": "duração"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "proporção"
},
"model_resolution": {
"name": "resolução"
},
"seed": {
"name": "semente",
"tooltip": "Semente a ser usada para a geração."
},
"video": {
"name": "vídeo",
"tooltip": "O vídeo a ser editado."
},
"watermark": {
"name": "marca_d'água",
"tooltip": "Se deseja adicionar uma marca d'água gerada por IA ao resultado."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanAnimateToVideo": {
"display_name": "WanAnimateToVideo",
"inputs": {

View File

@@ -2527,6 +2527,23 @@
"inputsNone": "НЕТ ВХОДОВ",
"inputsNoneTooltip": "Узел не имеет входов",
"locateNode": "Найти узел на холсте",
"missingMedia": {
"audio": "Аудио",
"cancelSelection": "Отменить выбор",
"collapseNodes": "Скрыть ссылающиеся узлы",
"confirmSelection": "Подтвердить выбор",
"expandNodes": "Показать ссылающиеся узлы",
"image": "Изображения",
"locateNode": "Найти узел",
"missingMediaTitle": "Отсутствующие входные данные",
"or": "ИЛИ",
"selectedFromLibrary": "Выбрано из библиотеки",
"uploadFile": "Загрузить {type}",
"uploaded": "Загружено",
"uploading": "Загрузка...",
"useFromLibrary": "Использовать из библиотеки",
"video": "Видео"
},
"missingModels": {
"alreadyExistsInCategory": "Эта модель уже существует в «{category}»",
"assetLoadTimeout": "Время ожидания обнаружения модели истекло. Попробуйте перезагрузить рабочий процесс.",

View File

@@ -17951,6 +17951,241 @@
}
}
},
"Wan2ImageToVideoApi": {
"description": "Создайте видео из изображения первого кадра с возможностью добавить изображение последнего кадра и аудио.",
"display_name": "Wan 2.7 Изображение в Видео",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "Аудио для управления генерацией видео (например, синхронизация губ, движение в ритм музыки). Длительность: 2–30 сек. Если не указано, модель автоматически сгенерирует подходящую фоновую музыку или звуковые эффекты."
},
"control_after_generate": {
"name": "control after generate"
},
"first_frame": {
"name": "first_frame",
"tooltip": "Изображение первого кадра. Соотношение сторон результата определяется этим изображением."
},
"last_frame": {
"name": "last_frame",
"tooltip": "Изображение последнего кадра. Модель создаёт видео с переходом от первого к последнему кадру."
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "Улучшить ли промпт с помощью AI."
},
"seed": {
"name": "seed",
"tooltip": "Сид для генерации."
},
"watermark": {
"name": "watermark",
"tooltip": "Добавлять ли AI-водяной знак к результату."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2ReferenceVideoApi": {
"description": "Создайте видео с участием человека или объекта на основе референсных материалов. Поддерживает выступления одного персонажа и взаимодействие нескольких персонажей.",
"display_name": "Wan 2.7 Референс в Видео",
"inputs": {
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"seed": {
"name": "seed",
"tooltip": "Сид для генерации."
},
"watermark": {
"name": "watermark",
"tooltip": "Добавлять ли AI-водяной знак к результату."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2TextToVideoApi": {
"description": "Генерирует видео на основе текстового запроса с использованием модели Wan 2.7.",
"display_name": "Wan 2.7 Текст в видео",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "Аудио для управления генерацией видео (например, синхронизация губ, движение в ритм музыки). Длительность: 3–30 сек. Если не указано, модель автоматически сгенерирует подходящую фоновую музыку или звуковые эффекты."
},
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "Улучшать ли запрос с помощью ИИ."
},
"seed": {
"name": "seed",
"tooltip": "Зерно для генерации."
},
"watermark": {
"name": "watermark",
"tooltip": "Добавлять ли водяной знак, сгенерированный ИИ, к результату."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoContinuationApi": {
"description": "Продолжает видео с того места, где оно закончилось, с возможностью контроля последнего кадра.",
"display_name": "Wan 2.7 Продолжение видео",
"inputs": {
"control_after_generate": {
"name": "control after generate"
},
"first_clip": {
"name": "first_clip",
"tooltip": "Входное видео для продолжения. Длительность: 2–10 сек. Соотношение сторон результата определяется этим видео."
},
"last_frame": {
"name": "last_frame",
"tooltip": "Изображение последнего кадра. Продолжение будет переходить к этому кадру."
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "Улучшать ли запрос с помощью ИИ."
},
"seed": {
"name": "seed",
"tooltip": "Зерно для генерации."
},
"watermark": {
"name": "watermark",
"tooltip": "Добавлять ли водяной знак, сгенерированный ИИ, к результату."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoEditApi": {
"description": "Редактируйте видео с помощью текстовых инструкций, референсных изображений или переноса стиля.",
"display_name": "Wan 2.7 Редактирование видео",
"inputs": {
"audio_setting": {
"name": "audio_setting",
"tooltip": "'auto': модель решает, нужно ли перегенерировать аудио на основе запроса. 'origin': сохранить оригинальное аудио из входного видео."
},
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"seed": {
"name": "seed",
"tooltip": "Зерно для генерации."
},
"video": {
"name": "video",
"tooltip": "Видео для редактирования."
},
"watermark": {
"name": "watermark",
"tooltip": "Добавлять ли водяной знак, сгенерированный ИИ, к результату."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanAnimateToVideo": {
"display_name": "WanAnimateToVideo",
"inputs": {

View File

@@ -2527,6 +2527,23 @@
"inputsNone": "GİRİŞ YOK",
"inputsNoneTooltip": "Düğümün girişi yok",
"locateNode": "Düğümü tuvalde bul",
"missingMedia": {
"audio": "Ses",
"cancelSelection": "Seçimi iptal et",
"collapseNodes": "Referans veren düğümleri gizle",
"confirmSelection": "Seçimi onayla",
"expandNodes": "Referans veren düğümleri göster",
"image": "Görseller",
"locateNode": "Düğümü bul",
"missingMediaTitle": "Eksik Girdiler",
"or": "VEYA",
"selectedFromLibrary": "Kütüphaneden seçildi",
"uploadFile": "{type} Yükle",
"uploaded": "Yüklendi",
"uploading": "Yükleniyor...",
"useFromLibrary": "Kütüphaneden Kullan",
"video": "Videolar"
},
"missingModels": {
"alreadyExistsInCategory": "Bu model zaten \"{category}\" içinde mevcut",
"assetLoadTimeout": "Model algılama zaman aşımına uğradı. Lütfen iş akışını yeniden yüklemeyi deneyin.",

View File

@@ -17951,6 +17951,241 @@
}
}
},
"Wan2ImageToVideoApi": {
"description": "İlk kare görüntüsünden bir video oluşturun, isteğe bağlı olarak son kare görüntüsü ve ses ekleyin.",
"display_name": "Wan 2.7 Görüntüden Videoya",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "Video oluşturmayı yönlendiren ses (ör. dudak senkronizasyonu, ritme uygun hareket). Süre: 2-30 saniye. Sağlanmazsa, model otomatik olarak uyumlu arka plan müziği veya ses efektleri üretir."
},
"control_after_generate": {
"name": "oluşturduktan sonra kontrol et"
},
"first_frame": {
"name": "first_frame",
"tooltip": "İlk kare görüntüsü. Çıktı en-boy oranı bu görüntüden alınır."
},
"last_frame": {
"name": "last_frame",
"tooltip": "Son kare görüntüsü. Model, ilk kareden son kareye geçiş yapan bir video oluşturur."
},
"model": {
"name": "model"
},
"model_duration": {
"name": "süre"
},
"model_negative_prompt": {
"name": "negatif_istek"
},
"model_prompt": {
"name": "istek"
},
"model_resolution": {
"name": "çözünürlük"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "İsteğin yapay zeka yardımıyla geliştirilip geliştirilmeyeceği."
},
"seed": {
"name": "seed",
"tooltip": "Oluşturma için kullanılacak tohum."
},
"watermark": {
"name": "watermark",
"tooltip": "Sonuca yapay zeka tarafından oluşturulan bir filigran eklenip eklenmeyeceği."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2ReferenceVideoApi": {
"description": "Referans materyallerden bir kişi veya nesne içeren video oluşturun. Tek karakterli performansları ve çok karakterli etkileşimleri destekler.",
"display_name": "Wan 2.7 Referanstan Videoya",
"inputs": {
"control_after_generate": {
"name": "oluşturduktan sonra kontrol et"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "süre"
},
"model_negative_prompt": {
"name": "negatif_istek"
},
"model_prompt": {
"name": "istek"
},
"model_ratio": {
"name": "oran"
},
"model_resolution": {
"name": "çözünürlük"
},
"seed": {
"name": "seed",
"tooltip": "Oluşturma için kullanılacak tohum."
},
"watermark": {
"name": "watermark",
"tooltip": "Sonuca yapay zeka tarafından oluşturulan bir filigran eklenip eklenmeyeceği."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2TextToVideoApi": {
"description": "Wan 2.7 modeli kullanılarak bir metin istemine dayalı video oluşturur.",
"display_name": "Wan 2.7 Metinden Videoya",
"inputs": {
"audio": {
"name": "ses",
"tooltip": "Video oluşturmayı yönlendirmek için ses (ör. dudak senkronizasyonu, ritme uygun hareket). Süre: 3-30 sn. Sağlanmazsa, model otomatik olarak uygun arka plan müziği veya ses efektleri üretir."
},
"control_after_generate": {
"name": "oluşturduktan sonra kontrol"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "süre"
},
"model_negative_prompt": {
"name": "negatif_istem"
},
"model_prompt": {
"name": "istem"
},
"model_ratio": {
"name": "oran"
},
"model_resolution": {
"name": "çözünürlük"
},
"prompt_extend": {
"name": "istem_genişlet",
"tooltip": "İstemin yapay zeka yardımıyla geliştirilip geliştirilmeyeceği."
},
"seed": {
"name": "tohum",
"tooltip": "Oluşturma için kullanılacak tohum."
},
"watermark": {
"name": "filigran",
"tooltip": "Sonuca yapay zeka tarafından oluşturulan bir filigran eklenip eklenmeyeceği."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoContinuationApi": {
"description": "Bir videoyu kaldığı yerden devam ettirin, isteğe bağlı olarak son kare kontrolüyle.",
"display_name": "Wan 2.7 Video Devamı",
"inputs": {
"control_after_generate": {
"name": "oluşturduktan sonra kontrol"
},
"first_clip": {
"name": "ilk_klip",
"tooltip": "Devam edilecek giriş videosu. Süre: 2-10 sn. Çıktı en-boy oranı bu videodan alınır."
},
"last_frame": {
"name": "son_kare",
"tooltip": "Son kare görseli. Devam bu kareye doğru geçiş yapacaktır."
},
"model": {
"name": "model"
},
"model_duration": {
"name": "süre"
},
"model_negative_prompt": {
"name": "negatif_istem"
},
"model_prompt": {
"name": "istem"
},
"model_resolution": {
"name": "çözünürlük"
},
"prompt_extend": {
"name": "istem_genişlet",
"tooltip": "İstemin yapay zeka yardımıyla geliştirilip geliştirilmeyeceği."
},
"seed": {
"name": "tohum",
"tooltip": "Oluşturma için kullanılacak tohum."
},
"watermark": {
"name": "filigran",
"tooltip": "Sonuca yapay zeka tarafından oluşturulan bir filigran eklenip eklenmeyeceği."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoEditApi": {
"description": "Bir videoyu metin talimatları, referans görseller veya stil transferi ile düzenleyin.",
"display_name": "Wan 2.7 Video Düzenleme",
"inputs": {
"audio_setting": {
"name": "ses_ayarı",
"tooltip": "'auto': model, isteme göre sesi yeniden oluşturup oluşturmayacağına karar verir. 'origin': giriş videosundaki orijinal sesi korur."
},
"control_after_generate": {
"name": "oluşturduktan sonra kontrol"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "süre"
},
"model_prompt": {
"name": "istem"
},
"model_ratio": {
"name": "oran"
},
"model_resolution": {
"name": "çözünürlük"
},
"seed": {
"name": "tohum",
"tooltip": "Oluşturma için kullanılacak tohum."
},
"video": {
"name": "video",
"tooltip": "Düzenlenecek video."
},
"watermark": {
"name": "filigran",
"tooltip": "Sonuca yapay zeka tarafından oluşturulan bir filigran eklenip eklenmeyeceği."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanAnimateToVideo": {
"display_name": "WanAnimateToVideo",
"inputs": {

View File

@@ -2527,6 +2527,23 @@
"inputsNone": "無輸入",
"inputsNoneTooltip": "此節點沒有輸入",
"locateNode": "在畫布上定位節點",
"missingMedia": {
"audio": "音訊",
"cancelSelection": "取消選取",
"collapseNodes": "隱藏引用節點",
"confirmSelection": "確認選取",
"expandNodes": "顯示引用節點",
"image": "影像",
"locateNode": "定位節點",
"missingMediaTitle": "缺少輸入",
"or": "或",
"selectedFromLibrary": "已從媒體庫選取",
"uploadFile": "上傳{type}",
"uploaded": "已上傳",
"uploading": "正在上傳...",
"useFromLibrary": "從媒體庫使用",
"video": "影片"
},
"missingModels": {
"alreadyExistsInCategory": "此模型已存在於「{category}」中",
"assetLoadTimeout": "模型偵測逾時。請嘗試重新載入工作流程。",

View File

@@ -17951,6 +17951,241 @@
}
}
},
"Wan2ImageToVideoApi": {
"description": "從首幀圖像產生影片,可選擇加入末幀圖像與音訊。",
"display_name": "Wan 2.7 圖像轉影片",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "用於驅動影片生成的音訊(如對嘴、節奏動作)。時長:2秒至30秒。若未提供,模型會自動產生相符的背景音樂或音效。"
},
"control_after_generate": {
"name": "control after generate"
},
"first_frame": {
"name": "first_frame",
"tooltip": "首幀圖像。輸出長寬比將依此圖像決定。"
},
"last_frame": {
"name": "last_frame",
"tooltip": "末幀圖像。模型將產生從首幀到末幀的過渡影片。"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "是否使用 AI 協助增強提示詞。"
},
"seed": {
"name": "seed",
"tooltip": "用於生成的隨機種子。"
},
"watermark": {
"name": "watermark",
"tooltip": "是否在結果中加入 AI 產生的浮水印。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2ReferenceVideoApi": {
"description": "根據參考素材生成包含人物或物件的影片。支援單一角色表演與多角色互動。",
"display_name": "Wan 2.7 參考生成影片",
"inputs": {
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"seed": {
"name": "seed",
"tooltip": "用於生成的隨機種子。"
},
"watermark": {
"name": "watermark",
"tooltip": "是否在結果中加入 AI 產生的浮水印。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2TextToVideoApi": {
"description": "使用 Wan 2.7 模型根據文字提示生成影片。",
"display_name": "Wan 2.7 文字轉影片",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "用於驅動影片生成的音訊(如對嘴、節奏同步動作)。時長:3秒至30秒。若未提供,模型會自動生成匹配的背景音樂或音效。"
},
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "是否使用 AI 協助增強提示詞。"
},
"seed": {
"name": "seed",
"tooltip": "用於生成的種子值。"
},
"watermark": {
"name": "watermark",
"tooltip": "是否在結果中加入 AI 生成的浮水印。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoContinuationApi": {
"description": "從影片結束處繼續生成影片,可選擇最後一幀控制。",
"display_name": "Wan 2.7 影片續接",
"inputs": {
"control_after_generate": {
"name": "control after generate"
},
"first_clip": {
"name": "first_clip",
"tooltip": "要續接的輸入影片。時長:2秒至10秒。輸出長寬比將依此影片決定。"
},
"last_frame": {
"name": "last_frame",
"tooltip": "最後一幀圖像。續接內容將朝此幀過渡。"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "是否使用 AI 協助增強提示詞。"
},
"seed": {
"name": "seed",
"tooltip": "用於生成的種子值。"
},
"watermark": {
"name": "watermark",
"tooltip": "是否在結果中加入 AI 生成的浮水印。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoEditApi": {
"description": "使用文字指令、參考圖像或風格轉換編輯影片。",
"display_name": "Wan 2.7 影片編輯",
"inputs": {
"audio_setting": {
"name": "audio_setting",
"tooltip": "「auto」:模型根據提示詞決定是否重新生成音訊。「origin」:保留輸入影片的原始音訊。"
},
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"seed": {
"name": "seed",
"tooltip": "用於生成的種子值。"
},
"video": {
"name": "video",
"tooltip": "要編輯的影片。"
},
"watermark": {
"name": "watermark",
"tooltip": "是否在結果中加入 AI 生成的浮水印。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanAnimateToVideo": {
"display_name": "WanAnimateToVideo",
"inputs": {

View File

@@ -2527,6 +2527,23 @@
"inputsNone": "无输入",
"inputsNoneTooltip": "节点没有输入",
"locateNode": "在画布上定位节点",
"missingMedia": {
"audio": "音频",
"cancelSelection": "取消选择",
"collapseNodes": "隐藏引用节点",
"confirmSelection": "确认选择",
"expandNodes": "显示引用节点",
"image": "图像",
"locateNode": "定位节点",
"missingMediaTitle": "缺少输入",
"or": "或",
"selectedFromLibrary": "已从素材库选择",
"uploadFile": "上传{type}",
"uploaded": "已上传",
"uploading": "正在上传...",
"useFromLibrary": "从素材库选择",
"video": "视频"
},
"missingModels": {
"alreadyExistsInCategory": "该模型已存在于“{category}”中",
"assetLoadTimeout": "模型检测超时。请尝试重新加载工作流。",

View File

@@ -17951,6 +17951,241 @@
}
}
},
"Wan2ImageToVideoApi": {
"description": "根据首帧图像生成视频,可选末帧图像和音频。",
"display_name": "Wan 2.7 图像转视频",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "用于驱动视频生成的音频(如对口型、节奏匹配动作)。时长:2秒-30秒。如未提供,模型会自动生成匹配的背景音乐或音效。"
},
"control_after_generate": {
"name": "control after generate"
},
"first_frame": {
"name": "first_frame",
"tooltip": "首帧图像。输出的宽高比由该图像决定。"
},
"last_frame": {
"name": "last_frame",
"tooltip": "末帧图像。模型将生成从首帧到末帧的过渡视频。"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "是否使用 AI 辅助增强提示词。"
},
"seed": {
"name": "seed",
"tooltip": "用于生成的种子。"
},
"watermark": {
"name": "watermark",
"tooltip": "是否为结果添加 AI 生成的水印。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2ReferenceVideoApi": {
"description": "根据参考素材生成包含人物或物体的视频。支持单角色表演和多角色互动。",
"display_name": "Wan 2.7 参考生成视频",
"inputs": {
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"seed": {
"name": "seed",
"tooltip": "用于生成的种子。"
},
"watermark": {
"name": "watermark",
"tooltip": "是否为结果添加 AI 生成的水印。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2TextToVideoApi": {
"description": "使用 Wan 2.7 模型根据文本提示生成视频。",
"display_name": "Wan 2.7 文本转视频",
"inputs": {
"audio": {
"name": "audio",
"tooltip": "用于驱动视频生成的音频(如对口型、节奏匹配动作)。时长:3秒-30秒。如未提供,模型会自动生成匹配的背景音乐或音效。"
},
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "是否使用 AI 辅助增强提示词。"
},
"seed": {
"name": "seed",
"tooltip": "用于生成的种子。"
},
"watermark": {
"name": "watermark",
"tooltip": "是否为结果添加 AI 生成的水印。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoContinuationApi": {
"description": "从视频结束处继续生成视频,可选用最后一帧进行控制。",
"display_name": "Wan 2.7 视频续写",
"inputs": {
"control_after_generate": {
"name": "control after generate"
},
"first_clip": {
"name": "first_clip",
"tooltip": "要续写的视频。时长:2秒-10秒。输出宽高比将根据该视频确定。"
},
"last_frame": {
"name": "last_frame",
"tooltip": "最后一帧图像。续写内容将过渡到该帧。"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_negative_prompt": {
"name": "negative_prompt"
},
"model_prompt": {
"name": "prompt"
},
"model_resolution": {
"name": "resolution"
},
"prompt_extend": {
"name": "prompt_extend",
"tooltip": "是否使用 AI 辅助增强提示词。"
},
"seed": {
"name": "seed",
"tooltip": "用于生成的种子。"
},
"watermark": {
"name": "watermark",
"tooltip": "是否为结果添加 AI 生成的水印。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Wan2VideoEditApi": {
"description": "通过文本指令、参考图像或风格迁移编辑视频。",
"display_name": "Wan 2.7 视频编辑",
"inputs": {
"audio_setting": {
"name": "audio_setting",
"tooltip": "“auto”:模型根据提示词决定是否重新生成音频。“origin”:保留输入视频的原始音频。"
},
"control_after_generate": {
"name": "control after generate"
},
"model": {
"name": "model"
},
"model_duration": {
"name": "duration"
},
"model_prompt": {
"name": "prompt"
},
"model_ratio": {
"name": "ratio"
},
"model_resolution": {
"name": "resolution"
},
"seed": {
"name": "seed",
"tooltip": "用于生成的种子。"
},
"video": {
"name": "video",
"tooltip": "要编辑的视频。"
},
"watermark": {
"name": "watermark",
"tooltip": "是否为结果添加 AI 生成的水印。"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanAnimateToVideo": {
"display_name": "Wan动画转视频",
"inputs": {