Compare commits

..

20 Commits

Author SHA1 Message Date
github-actions
68df807248 [automated] Update test expectations 2026-04-14 16:38:43 +00:00
Johnpaul
7a01408ecf test: hide legacy floating menu globally via CSS in setup()
Hide .comfy-menu.no-drag.comfy-menu-manual-pos via addStyleTag in
ComfyPage.setup(). This runs after every page load/reload, so the
menu is always hidden in screenshots without needing per-test
closeMenu() calls.

Remove all closeMenu() calls from 16 test files.
2026-04-14 17:30:49 +01:00
github-actions
485de05548 [automated] Update test expectations 2026-04-14 14:16:46 +00:00
Johnpaul Chiwetelu
464d04b709 Merge branch 'main' into feat/targeted-screenshot-tests 2026-04-14 15:10:27 +01:00
Johnpaul
3d4461f6ca fix: add per-test closeMenu for restore-workflow screenshot tests
File-level closeMenu was removed (interfered with Top menu tests),
so add it at the start of the two restore-workflow tests that take
screenshots with Disabled menu.
2026-04-14 15:04:19 +01:00
github-actions
b391383e78 [automated] Update test expectations 2026-04-14 00:02:08 +00:00
Johnpaul Chiwetelu
2d5d5c556d Merge branch 'main' into feat/targeted-screenshot-tests 2026-04-14 00:56:02 +01:00
Johnpaul
bdd977c231 fix: remove closeMenu from file-level beforeEach where tests switch to Top
closeMenu() in file-level beforeEach interferes with tests that
switch UseNewMenu to 'Top' in describe-level beforeEach. The click
consumes DOM events needed by group node conversion and prevents
error overlays from appearing.

Remove from: groupNode, interaction, primitiveNode file-level.
Keep per-setup() closeMenu calls where specifically needed.
2026-04-14 00:54:52 +01:00
github-actions
7e6fdef0f4 [automated] Update test expectations 2026-04-13 23:34:57 +00:00
Johnpaul Chiwetelu
81ddb96218 Merge branch 'main' into feat/targeted-screenshot-tests 2026-04-14 00:26:56 +01:00
Johnpaul
dc851e2e21 fix: make closeMenu resilient to non-clickable button
Button can exist in DOM but not be actionable (e.g. on mobile where
it's off-screen). Use isVisible() check and catch click failures
with a short timeout to avoid blocking tests.
2026-04-14 00:19:51 +01:00
Johnpaul
d6a13a3162 test: target execution and saveImageAndWebp screenshots
- execution: clip to CLIP Text Encode nodes instead of full canvas
- saveImageAndWebp: screenshot each node individually instead of canvas
2026-04-13 23:55:23 +01:00
github-actions
01ceaad837 [automated] Update test expectations 2026-04-13 22:24:18 +00:00
Johnpaul
7c3a16b16c test: close menu after setup() calls and in rerouteNode beforeEach
- colorPalette: add closeMenu() after setup() that reloads page
- interaction: add closeMenu() after setup({ clearStorage: false })
- rerouteNode: add closeMenu() in Native Reroute describe beforeEach
- Remove orphaned animated-image-preview golden images
2026-04-13 23:15:35 +01:00
github-actions
fd25ea2969 [automated] Update test expectations 2026-04-13 21:58:34 +00:00
Johnpaul
9c92669988 test: make closeMenu safe, use canvas for saveImageAndWebp
- closeMenu() now checks if close button exists before clicking
- saveImageAndWebp screenshots canvas instead of full page
- Delete widget golden images for regeneration
2026-04-13 22:49:34 +01:00
Johnpaul
fc9e442e36 test: close floating menu in beforeEach for remaining canvas tests
Close the legacy floating menu before screenshots in 12 more test
files that use UseNewMenu='Disabled'. This removes the menu overlay
from canvas screenshots, reducing visual noise and screenshot size.

Delete 147 golden images that need regeneration without the menu.
2026-04-13 22:44:41 +01:00
github-actions
346a4fbb64 [automated] Update test expectations 2026-04-13 21:30:12 +00:00
Johnpaul
9484b153f9 merge: resolve conflict in colors.spec.ts with main 2026-04-13 22:22:32 +01:00
Johnpaul
9b7c35dae1 test: use targeted screenshots instead of full-canvas captures
Screenshot tests now clip to specific nodes/regions instead of
capturing the entire canvas. This reduces screenshot size and
makes tests more focused on what they actually verify.

- Add getNodeClipRegion() helper for computing page-coordinate
  clip regions from LiteGraph node positions
- Convert 28 full-canvas screenshots to targeted clips across
  widget, nodeDisplay, primitiveNode, noteNode, recordAudio specs
- Use Vue node locators for mute, bypass, and color state tests
- Close floating menu in beforeEach for tests using UseNewMenu=Disabled
- Delete old golden images (will be regenerated in CI)
2026-04-13 22:16:11 +01:00
83 changed files with 247 additions and 818 deletions

View File

@@ -322,6 +322,9 @@ export class ComfyPage {
window.app && window.app.extensionManager
)
await this.page.locator('.p-blockui-mask').waitFor({ state: 'hidden' })
await this.page.addStyleTag({
content: '.comfy-menu.no-drag.comfy-menu-manual-pos { display: none; }'
})
await this.nextFrame()
}
@@ -371,8 +374,11 @@ export class ComfyPage {
}
async closeMenu() {
await this.page.locator('button.comfy-close-menu-btn').click()
await this.nextFrame()
const btn = this.page.locator('button.comfy-close-menu-btn')
if (await btn.isVisible()) {
await btn.click({ timeout: 2000 }).catch(() => {})
await this.nextFrame()
}
}
async clickDialogButton(prompt: string, buttonText: string = 'Yes') {

View File

@@ -0,0 +1,61 @@
import type { NodeId } from '@/platform/workflow/validation/schemas/workflowSchema'
import type { ComfyPage } from '@e2e/fixtures/ComfyPage'
/**
 * Compute a clip region encompassing one or more nodes on the canvas.
 * Returns page-level coordinates for use with
 * `page.toHaveScreenshot({ clip })`.
 *
 * Accounts for zoom scale, pan offset, title bar height, and
 * canvas element position on page. The result is clamped to the
 * canvas bounding box so the clip never extends past the viewport
 * (Playwright rejects clip rectangles outside the page).
 *
 * @param comfyPage - fixture wrapping the Playwright page and canvas
 * @param nodeIds - LiteGraph node ids to include in the region
 * @param padding - extra pixels added on every side (default 40)
 * @throws if the canvas element is not visible or a node id is unknown
 */
export async function getNodeClipRegion(
  comfyPage: ComfyPage,
  nodeIds: NodeId[],
  padding = 40
): Promise<{ x: number; y: number; width: number; height: number }> {
  const canvasBox = await comfyPage.canvas.boundingBox()
  if (!canvasBox) throw new Error('Canvas element not visible')
  const region = await comfyPage.page.evaluate(
    ([ids, pad]) => {
      const canvas = window.app!.canvas
      const ds = canvas.ds
      let minX = Infinity
      let minY = Infinity
      let maxX = -Infinity
      let maxY = -Infinity
      for (const id of ids) {
        const node = canvas.graph!.getNodeById(id)
        if (!node) throw new Error(`Node ${id} not found`)
        // Convert graph-space position to canvas pixels (applies scale/offset).
        const pos = ds.convertOffsetToCanvas([node.pos[0], node.pos[1]])
        const scaledWidth = node.size[0] * ds.scale
        const scaledHeight = node.size[1] * ds.scale
        // node.pos is the top of the node body; the title bar sits above it.
        const titleHeight = window.LiteGraph!.NODE_TITLE_HEIGHT * ds.scale
        minX = Math.min(minX, pos[0])
        minY = Math.min(minY, pos[1] - titleHeight)
        maxX = Math.max(maxX, pos[0] + scaledWidth)
        maxY = Math.max(maxY, pos[1] + scaledHeight)
      }
      // Clamp the origin at 0 while keeping the far edges anchored at
      // maxX/maxY + pad. (Keeping the full padded width after clamping
      // would silently shift the right/bottom edge outward.)
      const x = Math.max(0, minX - pad)
      const y = Math.max(0, minY - pad)
      return {
        x,
        y,
        width: maxX + pad - x,
        height: maxY + pad - y
      }
    },
    [nodeIds, padding] as const
  )
  // Translate canvas-relative coordinates to page coordinates, then clamp
  // the region to the canvas box so the clip stays inside the viewport.
  const x = Math.max(0, canvasBox.x + region.x)
  const y = Math.max(0, canvasBox.y + region.y)
  return {
    x,
    y,
    width: Math.min(region.width, canvasBox.x + canvasBox.width - x),
    height: Math.min(region.height, canvasBox.y + canvasBox.height - y)
  }
}

View File

@@ -2,6 +2,7 @@ import { expect } from '@playwright/test'
import { comfyPageFixture as test } from '@e2e/fixtures/ComfyPage'
import { TestIds } from '@e2e/fixtures/selectors'
import { getNodeClipRegion } from '@e2e/fixtures/utils/screenshotClip'
test.beforeEach(async ({ comfyPage }) => {
await comfyPage.settings.setSetting('Comfy.UseNewMenu', 'Disabled')
@@ -33,8 +34,16 @@ test.describe('Execution', { tag: ['@smoke', '@workflow'] }, () => {
.getByTestId(TestIds.dialogs.errorOverlayDismiss)
.click()
await errorOverlay.waitFor({ state: 'hidden' })
await expect(comfyPage.canvas).toHaveScreenshot(
'execution-error-unconnected-slot.png'
const nodes = await comfyPage.nodeOps.getNodeRefsByTitle(
'CLIP Text Encode (Prompt)'
)
const clip = await getNodeClipRegion(
comfyPage,
nodes.map((n) => n.id)
)
await expect(comfyPage.page).toHaveScreenshot(
'execution-error-unconnected-slot.png',
{ clip }
)
}
)

Binary file not shown.

Before

Width:  |  Height:  |  Size: 100 KiB

After

Width:  |  Height:  |  Size: 30 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 80 KiB

After

Width:  |  Height:  |  Size: 80 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 95 KiB

After

Width:  |  Height:  |  Size: 95 KiB

View File

@@ -781,7 +781,6 @@ test.describe('Canvas Interaction', { tag: '@screenshot' }, () => {
})
test('@mobile Can pan with touch', async ({ comfyPage }) => {
await comfyPage.closeMenu()
await comfyPage.canvasOps.panWithTouch({ x: 200, y: 200 })
await expect(comfyPage.canvas).toHaveScreenshot('panned-touch.png')
})

Binary file not shown.

Before

Width:  |  Height:  |  Size: 37 KiB

After

Width:  |  Height:  |  Size: 35 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 62 KiB

After

Width:  |  Height:  |  Size: 58 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 57 KiB

After

Width:  |  Height:  |  Size: 52 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 42 KiB

After

Width:  |  Height:  |  Size: 40 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 54 KiB

After

Width:  |  Height:  |  Size: 50 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 37 KiB

After

Width:  |  Height:  |  Size: 35 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 54 KiB

After

Width:  |  Height:  |  Size: 50 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 42 KiB

After

Width:  |  Height:  |  Size: 40 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 8.7 KiB

After

Width:  |  Height:  |  Size: 26 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 97 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 45 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 100 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 45 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 56 KiB

After

Width:  |  Height:  |  Size: 51 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 56 KiB

After

Width:  |  Height:  |  Size: 51 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 39 KiB

After

Width:  |  Height:  |  Size: 37 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 67 KiB

After

Width:  |  Height:  |  Size: 63 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 74 KiB

After

Width:  |  Height:  |  Size: 69 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 41 KiB

After

Width:  |  Height:  |  Size: 39 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 32 KiB

After

Width:  |  Height:  |  Size: 30 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 104 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 69 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 120 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 57 KiB

After

Width:  |  Height:  |  Size: 52 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 57 KiB

After

Width:  |  Height:  |  Size: 52 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 57 KiB

After

Width:  |  Height:  |  Size: 52 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 44 KiB

After

Width:  |  Height:  |  Size: 42 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 57 KiB

After

Width:  |  Height:  |  Size: 53 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 42 KiB

After

Width:  |  Height:  |  Size: 40 KiB

View File

@@ -2,6 +2,7 @@ import { expect } from '@playwright/test'
import { comfyPageFixture as test } from '@e2e/fixtures/ComfyPage'
import { TestIds } from '@e2e/fixtures/selectors'
import { getNodeClipRegion } from '@e2e/fixtures/utils/screenshotClip'
test.beforeEach(async ({ comfyPage }) => {
await comfyPage.settings.setSetting('Comfy.UseNewMenu', 'Disabled')
@@ -12,27 +13,47 @@ test.beforeEach(async ({ comfyPage }) => {
test.describe('Optional input', { tag: ['@screenshot', '@node'] }, () => {
test('No shape specified', async ({ comfyPage }) => {
await comfyPage.workflow.loadWorkflow('inputs/optional_input_no_shape')
await expect(comfyPage.canvas).toHaveScreenshot('optional_input.png')
const node = (await comfyPage.nodeOps.getFirstNodeRef())!
const clip = await getNodeClipRegion(comfyPage, [node.id])
await expect(comfyPage.page).toHaveScreenshot('optional_input.png', {
clip
})
})
test('Wrong shape specified', async ({ comfyPage }) => {
await comfyPage.workflow.loadWorkflow('inputs/optional_input_wrong_shape')
await expect(comfyPage.canvas).toHaveScreenshot('optional_input.png')
const node = (await comfyPage.nodeOps.getFirstNodeRef())!
const clip = await getNodeClipRegion(comfyPage, [node.id])
await expect(comfyPage.page).toHaveScreenshot('optional_input.png', {
clip
})
})
test('Correct shape specified', async ({ comfyPage }) => {
await comfyPage.workflow.loadWorkflow('inputs/optional_input_correct_shape')
await expect(comfyPage.canvas).toHaveScreenshot('optional_input.png')
const node = (await comfyPage.nodeOps.getFirstNodeRef())!
const clip = await getNodeClipRegion(comfyPage, [node.id])
await expect(comfyPage.page).toHaveScreenshot('optional_input.png', {
clip
})
})
test('Force input', async ({ comfyPage }) => {
await comfyPage.workflow.loadWorkflow('inputs/force_input')
await expect(comfyPage.canvas).toHaveScreenshot('force_input.png')
const node = (await comfyPage.nodeOps.getFirstNodeRef())!
const clip = await getNodeClipRegion(comfyPage, [node.id])
await expect(comfyPage.page).toHaveScreenshot('force_input.png', {
clip
})
})
test('Default input', async ({ comfyPage }) => {
await comfyPage.workflow.loadWorkflow('inputs/default_input')
await expect(comfyPage.canvas).toHaveScreenshot('default_input.png')
const node = (await comfyPage.nodeOps.getFirstNodeRef())!
const clip = await getNodeClipRegion(comfyPage, [node.id])
await expect(comfyPage.page).toHaveScreenshot('default_input.png', {
clip
})
})
test('Only optional inputs', async ({ comfyPage }) => {
@@ -74,22 +95,32 @@ test.describe('Optional input', { tag: ['@screenshot', '@node'] }, () => {
test('slider', async ({ comfyPage }) => {
await comfyPage.workflow.loadWorkflow('inputs/simple_slider')
await expect(comfyPage.canvas).toHaveScreenshot('simple_slider.png')
const node = (await comfyPage.nodeOps.getFirstNodeRef())!
const clip = await getNodeClipRegion(comfyPage, [node.id])
await expect(comfyPage.page).toHaveScreenshot('simple_slider.png', {
clip
})
})
test('unknown converted widget', async ({ comfyPage }) => {
await comfyPage.workflow.loadWorkflow(
'missing/missing_nodes_converted_widget'
)
await expect(comfyPage.canvas).toHaveScreenshot(
'missing_nodes_converted_widget.png'
const node = (await comfyPage.nodeOps.getFirstNodeRef())!
const clip = await getNodeClipRegion(comfyPage, [node.id])
await expect(comfyPage.page).toHaveScreenshot(
'missing_nodes_converted_widget.png',
{ clip }
)
})
test('dynamically added input', async ({ comfyPage }) => {
await comfyPage.workflow.loadWorkflow('inputs/dynamically_added_input')
await expect(comfyPage.canvas).toHaveScreenshot(
'dynamically_added_input.png'
const node = (await comfyPage.nodeOps.getFirstNodeRef())!
const clip = await getNodeClipRegion(comfyPage, [node.id])
await expect(comfyPage.page).toHaveScreenshot(
'dynamically_added_input.png',
{ clip }
)
})
})

Binary file not shown.

Before

Width:  |  Height:  |  Size: 43 KiB

After

Width:  |  Height:  |  Size: 7.1 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 58 KiB

After

Width:  |  Height:  |  Size: 16 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 42 KiB

After

Width:  |  Height:  |  Size: 6.3 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 58 KiB

After

Width:  |  Height:  |  Size: 15 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 42 KiB

After

Width:  |  Height:  |  Size: 5.9 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 40 KiB

After

Width:  |  Height:  |  Size: 4.7 KiB

View File

@@ -150,7 +150,6 @@ test.describe('Node search box', { tag: '@node' }, () => {
})
test('@mobile Can trigger on empty canvas tap', async ({ comfyPage }) => {
await comfyPage.closeMenu()
await comfyPage.workflow.loadWorkflow('nodes/single_ksampler')
const screenCenter = {
x: 200,

View File

@@ -1,6 +1,8 @@
import { expect } from '@playwright/test'
import type { NodeId } from '@/platform/workflow/validation/schemas/workflowSchema'
import { comfyPageFixture as test } from '@e2e/fixtures/ComfyPage'
import { getNodeClipRegion } from '@e2e/fixtures/utils/screenshotClip'
test.beforeEach(async ({ comfyPage }) => {
await comfyPage.settings.setSetting('Comfy.UseNewMenu', 'Disabled')
@@ -9,6 +11,7 @@ test.beforeEach(async ({ comfyPage }) => {
test.describe('Note Node', { tag: '@node' }, () => {
test('Can load node nodes', { tag: '@screenshot' }, async ({ comfyPage }) => {
await comfyPage.workflow.loadWorkflow('nodes/note_nodes')
await expect(comfyPage.canvas).toHaveScreenshot('note_nodes.png')
const clip = await getNodeClipRegion(comfyPage, [1, 2] as NodeId[])
await expect(comfyPage.page).toHaveScreenshot('note_nodes.png', { clip })
})
})

Binary file not shown.

Before

Width:  |  Height:  |  Size: 43 KiB

After

Width:  |  Height:  |  Size: 8.9 KiB

View File

@@ -2,6 +2,7 @@ import { expect } from '@playwright/test'
import { comfyPageFixture as test } from '@e2e/fixtures/ComfyPage'
import { TestIds } from '@e2e/fixtures/selectors'
import { getNodeClipRegion } from '@e2e/fixtures/utils/screenshotClip'
import type { NodeReference } from '@e2e/fixtures/utils/litegraphUtils'
test.beforeEach(async ({ comfyPage }) => {
@@ -11,7 +12,11 @@ test.beforeEach(async ({ comfyPage }) => {
test.describe('Primitive Node', { tag: ['@screenshot', '@node'] }, () => {
test('Can load with correct size', async ({ comfyPage }) => {
await comfyPage.workflow.loadWorkflow('primitive/primitive_node')
await expect(comfyPage.canvas).toHaveScreenshot('primitive_node.png')
const node = (await comfyPage.nodeOps.getFirstNodeRef())!
const clip = await getNodeClipRegion(comfyPage, [node.id])
await expect(comfyPage.page).toHaveScreenshot('primitive_node.png', {
clip
})
})
// When link is dropped on widget, it should automatically convert the widget
@@ -26,8 +31,13 @@ test.describe('Primitive Node', { tag: ['@screenshot', '@node'] }, () => {
await comfyPage.nodeOps.getNodeRefById(2)
// Connect the output of the primitive node to the input of first widget of the ksampler node
await primitiveNode.connectWidget(0, ksamplerNode, 0)
await expect(comfyPage.canvas).toHaveScreenshot(
'primitive_node_connected.png'
const clip = await getNodeClipRegion(comfyPage, [
primitiveNode.id,
ksamplerNode.id
])
await expect(comfyPage.page).toHaveScreenshot(
'primitive_node_connected.png',
{ clip }
)
})
@@ -40,8 +50,13 @@ test.describe('Primitive Node', { tag: ['@screenshot', '@node'] }, () => {
const clipEncoderNode: NodeReference =
await comfyPage.nodeOps.getNodeRefById(2)
await primitiveNode.connectWidget(0, clipEncoderNode, 0)
await expect(comfyPage.canvas).toHaveScreenshot(
'primitive_node_connected_dom_widget.png'
const clip = await getNodeClipRegion(comfyPage, [
primitiveNode.id,
clipEncoderNode.id
])
await expect(comfyPage.page).toHaveScreenshot(
'primitive_node_connected_dom_widget.png',
{ clip }
)
})
@@ -54,8 +69,13 @@ test.describe('Primitive Node', { tag: ['@screenshot', '@node'] }, () => {
const ksamplerNode: NodeReference =
await comfyPage.nodeOps.getNodeRefById(2)
await primitiveNode.connectWidget(0, ksamplerNode, 0)
await expect(comfyPage.canvas).toHaveScreenshot(
'static_primitive_connected.png'
const clip = await getNodeClipRegion(comfyPage, [
primitiveNode.id,
ksamplerNode.id
])
await expect(comfyPage.page).toHaveScreenshot(
'static_primitive_connected.png',
{ clip }
)
})

Binary file not shown.

Before

Width:  |  Height:  |  Size: 65 KiB

After

Width:  |  Height:  |  Size: 18 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 58 KiB

After

Width:  |  Height:  |  Size: 28 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 47 KiB

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 58 KiB

After

Width:  |  Height:  |  Size: 27 KiB

View File

@@ -1,6 +1,7 @@
import { expect } from '@playwright/test'
import { comfyPageFixture as test } from '@e2e/fixtures/ComfyPage'
import { getNodeClipRegion } from '@e2e/fixtures/utils/screenshotClip'
test.beforeEach(async ({ comfyPage }) => {
await comfyPage.settings.setSetting('Comfy.UseNewMenu', 'Disabled')
@@ -30,6 +31,10 @@ test.describe('Record Audio Node', { tag: '@screenshot' }, () => {
.toBe(1)
// Take a screenshot of the canvas with the RecordAudio node
await expect(comfyPage.canvas).toHaveScreenshot('record_audio_node.png')
const nodes = await comfyPage.nodeOps.getNodeRefsByType('RecordAudio')
const clip = await getNodeClipRegion(comfyPage, [nodes[0].id])
await expect(comfyPage.page).toHaveScreenshot('record_audio_node.png', {
clip
})
})
})

Binary file not shown.

Before

Width:  |  Height:  |  Size: 95 KiB

After

Width:  |  Height:  |  Size: 5.9 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 26 KiB

After

Width:  |  Height:  |  Size: 26 KiB

View File

@@ -29,9 +29,8 @@ test.describe(
timeout: 30000
})
await expect(comfyPage.page).toHaveScreenshot(
'save-image-and-webm-preview.png'
)
await expect(saveImageNode).toHaveScreenshot('save-image-preview.png')
await expect(saveWebmNode).toHaveScreenshot('save-webm-preview.png')
})
}
)

Binary file not shown.

After

Width:  |  Height:  |  Size: 14 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 19 KiB

View File

@@ -25,9 +25,9 @@ test.describe('Vue Node Bypass', { tag: '@vue-nodes' }, () => {
.getByTestId('node-inner-wrapper')
await expect(checkpointNode).toHaveClass(BYPASS_CLASS)
await comfyPage.nextFrame()
await expect(comfyPage.canvas).toHaveScreenshot(
'vue-node-bypassed-state.png'
)
await expect(
comfyPage.vueNodes.getNodeByTitle('Load Checkpoint')
).toHaveScreenshot('vue-node-bypassed-state.png')
await comfyPage.page.keyboard.press(BYPASS_HOTKEY)
await expect(checkpointNode).not.toHaveClass(BYPASS_CLASS)

Binary file not shown.

Before

Width:  |  Height:  |  Size: 108 KiB

After

Width:  |  Height:  |  Size: 11 KiB

View File

@@ -34,7 +34,7 @@ test.describe(
.getByTestId(TestIds.selectionToolbox.colorBlue)
.click()
await expect(comfyPage.canvas).toHaveScreenshot(
await expect(loadCheckpointNode).toHaveScreenshot(
'vue-node-custom-color-blue.png'
)
})

Binary file not shown.

Before

Width:  |  Height:  |  Size: 107 KiB

After

Width:  |  Height:  |  Size: 8.8 KiB

View File

@@ -17,9 +17,7 @@ test.describe('Vue Node Mute', { tag: '@vue-nodes' }, () => {
const checkpointNode =
comfyPage.vueNodes.getNodeByTitle('Load Checkpoint')
await expect(checkpointNode).toHaveCSS('opacity', MUTE_OPACITY)
await expect(comfyPage.canvas).toHaveScreenshot(
'vue-node-muted-state.png'
)
await expect(checkpointNode).toHaveScreenshot('vue-node-muted-state.png')
await comfyPage.page.keyboard.press(MUTE_HOTKEY)
await expect(checkpointNode).not.toHaveCSS('opacity', MUTE_OPACITY)

Binary file not shown.

Before

Width:  |  Height:  |  Size: 107 KiB

After

Width:  |  Height:  |  Size: 11 KiB

View File

@@ -2,6 +2,7 @@ import { expect } from '@playwright/test'
import { comfyPageFixture as test } from '@e2e/fixtures/ComfyPage'
import { DefaultGraphPositions } from '@e2e/fixtures/constants/defaultGraphPositions'
import { getNodeClipRegion } from '@e2e/fixtures/utils/screenshotClip'
test.beforeEach(async ({ comfyPage }) => {
await comfyPage.settings.setSetting('Comfy.UseNewMenu', 'Disabled')
@@ -15,18 +16,29 @@ test.describe('Combo text widget', { tag: ['@screenshot', '@widget'] }, () => {
0.2,
1
)
await expect(comfyPage.canvas).toHaveScreenshot(
'load-checkpoint-resized-min-width.png'
const loadCheckpointNode = (
await comfyPage.nodeOps.getNodeRefsByTitle('Load Checkpoint')
)[0]
const loadCheckpointClip = await getNodeClipRegion(comfyPage, [
loadCheckpointNode.id
])
await expect(comfyPage.page).toHaveScreenshot(
'load-checkpoint-resized-min-width.png',
{ clip: loadCheckpointClip }
)
await comfyPage.closeMenu()
await comfyPage.nodeOps.resizeNode(
DefaultGraphPositions.ksampler.pos,
DefaultGraphPositions.ksampler.size,
0.2,
1
)
await expect(comfyPage.canvas).toHaveScreenshot(
`ksampler-resized-min-width.png`
const ksamplerNode = (
await comfyPage.nodeOps.getNodeRefsByTitle('KSampler')
)[0]
const ksamplerClip = await getNodeClipRegion(comfyPage, [ksamplerNode.id])
await expect(comfyPage.page).toHaveScreenshot(
`ksampler-resized-min-width.png`,
{ clip: ksamplerClip }
)
})
@@ -37,8 +49,13 @@ test.describe('Combo text widget', { tag: ['@screenshot', '@widget'] }, () => {
0.8,
0.8
)
await expect(comfyPage.canvas).toHaveScreenshot(
'empty-latent-resized-80-percent.png'
const emptyLatentNode = (
await comfyPage.nodeOps.getNodeRefsByTitle('Empty Latent Image')
)[0]
const clip = await getNodeClipRegion(comfyPage, [emptyLatentNode.id])
await expect(comfyPage.page).toHaveScreenshot(
'empty-latent-resized-80-percent.png',
{ clip }
)
})
@@ -50,7 +67,13 @@ test.describe('Combo text widget', { tag: ['@screenshot', '@widget'] }, () => {
1,
true
)
await expect(comfyPage.canvas).toHaveScreenshot('resized-to-original.png')
const loadCheckpointNode = (
await comfyPage.nodeOps.getNodeRefsByTitle('Load Checkpoint')
)[0]
const clip = await getNodeClipRegion(comfyPage, [loadCheckpointNode.id])
await expect(comfyPage.page).toHaveScreenshot('resized-to-original.png', {
clip
})
})
test('should refresh combo values of optional inputs', async ({
@@ -105,12 +128,16 @@ test.describe('Combo text widget', { tag: ['@screenshot', '@widget'] }, () => {
test.describe('Boolean widget', { tag: ['@screenshot', '@widget'] }, () => {
test('Can toggle', async ({ comfyPage }) => {
await comfyPage.workflow.loadWorkflow('widgets/boolean_widget')
await expect(comfyPage.canvas).toHaveScreenshot('boolean_widget.png')
const node = (await comfyPage.nodeOps.getFirstNodeRef())!
const clip = await getNodeClipRegion(comfyPage, [node.id])
await expect(comfyPage.page).toHaveScreenshot('boolean_widget.png', {
clip
})
const widget = await node.getWidget(0)
await widget.click()
await expect(comfyPage.canvas).toHaveScreenshot(
'boolean_widget_toggled.png'
await expect(comfyPage.page).toHaveScreenshot(
'boolean_widget_toggled.png',
{ clip }
)
})
})
@@ -129,7 +156,10 @@ test.describe('Slider widget', { tag: ['@screenshot', '@widget'] }, () => {
}
})
await widget.dragHorizontal(50)
await expect(comfyPage.canvas).toHaveScreenshot('slider_widget_dragged.png')
const clip = await getNodeClipRegion(comfyPage, [node.id])
await expect(comfyPage.page).toHaveScreenshot('slider_widget_dragged.png', {
clip
})
await expect
.poll(() => comfyPage.page.evaluate(() => window.widgetValue))
@@ -151,7 +181,10 @@ test.describe('Number widget', { tag: ['@screenshot', '@widget'] }, () => {
}
})
await widget.dragHorizontal(50)
await expect(comfyPage.canvas).toHaveScreenshot('seed_widget_dragged.png')
const clip = await getNodeClipRegion(comfyPage, [node.id])
await expect(comfyPage.page).toHaveScreenshot('seed_widget_dragged.png', {
clip
})
await expect
.poll(() => comfyPage.page.evaluate(() => window.widgetValue))
@@ -179,8 +212,10 @@ test.describe(
.poll(async () => (await node.getSize()).height)
.toBeGreaterThan(initialSize.height)
await expect(comfyPage.canvas).toHaveScreenshot(
'ksampler_widget_added.png'
const clip = await getNodeClipRegion(comfyPage, [node.id])
await expect(comfyPage.page).toHaveScreenshot(
'ksampler_widget_added.png',
{ clip }
)
})
}
@@ -189,8 +224,11 @@ test.describe(
test.describe('Image widget', { tag: ['@screenshot', '@widget'] }, () => {
test('Can load image', async ({ comfyPage }) => {
await comfyPage.workflow.loadWorkflow('widgets/load_image_widget')
await expect(comfyPage.canvas).toHaveScreenshot('load_image_widget.png', {
maxDiffPixels: 50
const nodes = await comfyPage.nodeOps.getNodeRefsByType('LoadImage')
const clip = await getNodeClipRegion(comfyPage, [nodes[0].id])
await expect(comfyPage.page).toHaveScreenshot('load_image_widget.png', {
maxDiffPixels: 50,
clip
})
})
@@ -208,8 +246,10 @@ test.describe('Image widget', { tag: ['@screenshot', '@widget'] }, () => {
})
// Expect the image preview to change automatically
await expect(comfyPage.canvas).toHaveScreenshot(
'image_preview_drag_and_dropped.png'
const clip = await getNodeClipRegion(comfyPage, [loadImageNode.id])
await expect(comfyPage.page).toHaveScreenshot(
'image_preview_drag_and_dropped.png',
{ clip }
)
// Expect the filename combo value to be updated
@@ -264,9 +304,10 @@ test.describe('Image widget', { tag: ['@screenshot', '@widget'] }, () => {
await comfyPage.nextFrame()
// Expect the image preview to change automatically
await expect(comfyPage.canvas).toHaveScreenshot(
const clip = await getNodeClipRegion(comfyPage, [loadImageNode.id])
await expect(comfyPage.page).toHaveScreenshot(
'image_preview_changed_by_combo_value.png',
{ maxDiffPixels: 50 }
{ maxDiffPixels: 50, clip }
)
// Expect the filename combo value to be updated
@@ -403,7 +444,11 @@ test.describe('Load audio widget', { tag: ['@screenshot', '@widget'] }, () => {
test('Can load audio', async ({ comfyPage }) => {
await comfyPage.workflow.loadWorkflow('widgets/load_audio_widget')
await expect(comfyPage.page.locator('.comfy-audio')).toBeVisible()
await expect(comfyPage.canvas).toHaveScreenshot('load_audio_widget.png')
const node = (await comfyPage.nodeOps.getFirstNodeRef())!
const clip = await getNodeClipRegion(comfyPage, [node.id])
await expect(comfyPage.page).toHaveScreenshot('load_audio_widget.png', {
clip
})
})
})

Binary file not shown.

Before

Width:  |  Height:  |  Size: 41 KiB

After

Width:  |  Height:  |  Size: 5.1 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 41 KiB

After

Width:  |  Height:  |  Size: 5.4 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 91 KiB

After

Width:  |  Height:  |  Size: 7.8 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 46 KiB

After

Width:  |  Height:  |  Size: 13 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 49 KiB

After

Width:  |  Height:  |  Size: 13 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 84 KiB

After

Width:  |  Height:  |  Size: 28 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 58 KiB

After

Width:  |  Height:  |  Size: 16 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 45 KiB

After

Width:  |  Height:  |  Size: 8.9 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 97 KiB

After

Width:  |  Height:  |  Size: 9.9 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 55 KiB

After

Width:  |  Height:  |  Size: 17 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 91 KiB

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 43 KiB

After

Width:  |  Height:  |  Size: 7.0 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 41 KiB

After

Width:  |  Height:  |  Size: 4.9 KiB

View File

@@ -1376,26 +1376,6 @@ export interface paths {
patch?: never;
trace?: never;
};
"/admin/nodeversions": {
parameters: {
query?: never;
header?: never;
path?: never;
cookie?: never;
};
/**
* Admin list all node versions with optional filters
* @description Admin-only endpoint to list all node versions with support for including deleted versions. Only admins can access this endpoint.
*/
get: operations["adminListAllNodeVersions"];
put?: never;
post?: never;
delete?: never;
options?: never;
head?: never;
patch?: never;
trace?: never;
};
"/customers/admin/coupons": {
parameters: {
query?: never;
@@ -2102,26 +2082,6 @@ export interface paths {
patch?: never;
trace?: never;
};
"/proxy/kling/v1/general/advanced-presets-elements": {
parameters: {
query?: never;
header?: never;
path?: never;
cookie?: never;
};
/**
* KlingAI Query Presets Elements
* @description Retrieves a list of advanced preset elements from Kling AI.
*/
get: operations["klingGetPresetsElements"];
put?: never;
post?: never;
delete?: never;
options?: never;
head?: never;
patch?: never;
trace?: never;
};
"/proxy/kling/v1/images/generations": {
parameters: {
query?: never;
@@ -3984,22 +3944,6 @@ export interface paths {
patch?: never;
trace?: never;
};
"/proxy/byteplus-seedance2/api/v3/contents/generations/tasks/{task_id}": {
parameters: {
query?: never;
header?: never;
path?: never;
cookie?: never;
};
get: operations["byteplusSeedance2VideoGenerationQuery"];
put?: never;
post?: never;
delete?: never;
options?: never;
head?: never;
patch?: never;
trace?: never;
};
"/proxy/wan/api/v1/services/aigc/video-generation/video-synthesis": {
parameters: {
query?: never;
@@ -5354,26 +5298,6 @@ export interface paths {
patch?: never;
trace?: never;
};
"/proxy/elevenlabs/v1/shared-voices": {
parameters: {
query?: never;
header?: never;
path?: never;
cookie?: never;
};
/**
* List Shared Voices
* @description Retrieves a list of shared voices from the ElevenLabs voice library.
*/
get: operations["ElevenLabsGetSharedVoices"];
put?: never;
post?: never;
delete?: never;
options?: never;
head?: never;
patch?: never;
trace?: never;
};
"/proxy/elevenlabs/v1/sound-generation": {
parameters: {
query?: never;
@@ -5681,51 +5605,6 @@ export interface paths {
patch?: never;
trace?: never;
};
"/proxy/sonilo/v2m/generate": {
parameters: {
query?: never;
header?: never;
path?: never;
cookie?: never;
};
get?: never;
put?: never;
/**
* Generate music from video
* @description Generate music from a video using Sonilo video-to-music AI.
* Accepts either a video file upload or a video URL, with an optional prompt.
* Returns a streaming NDJSON response with duration, titles, audio chunks, and completion events.
* Max video duration: 6 minutes. Max upload size: 1024MB.
*/
post: operations["soniloVideoToMusicGenerate"];
delete?: never;
options?: never;
head?: never;
patch?: never;
trace?: never;
};
"/proxy/sonilo/t2m/generate": {
parameters: {
query?: never;
header?: never;
path?: never;
cookie?: never;
};
get?: never;
put?: never;
/**
* Generate music from text prompt
* @description Generate music from a text prompt using Sonilo text-to-music AI.
* Requires a prompt describing the desired music. Duration is optional and will be inferred if not provided.
* Returns a streaming NDJSON response with duration, titles, audio chunks, and completion events.
*/
post: operations["soniloTextToMusicGenerate"];
delete?: never;
options?: never;
head?: never;
patch?: never;
trace?: never;
};
}
export type webhooks = Record<string, never>;
export interface components {
@@ -9738,124 +9617,6 @@ export interface components {
*/
model_id: string;
};
ElevenLabsSharedVoice: {
public_owner_id: string;
voice_id: string;
date_unix: number;
name: string;
accent: string;
gender: string;
age: string;
descriptive: string;
use_case: string;
/** @enum {string} */
category: "generated" | "cloned" | "premade" | "professional" | "famous" | "high_quality";
language?: string | null;
locale?: string | null;
description?: string | null;
preview_url?: string | null;
usage_character_count_1y: number;
usage_character_count_7d: number;
play_api_usage_character_count_1y: number;
cloned_by_count: number;
/** Format: double */
rate?: number | null;
/**
* Format: double
* @description The rate of the voice in USD per 1000 credits. null if default.
*/
fiat_rate?: number | null;
free_users_allowed: boolean;
live_moderation_enabled: boolean;
featured: boolean;
/** @description The verified languages of the voice. */
verified_languages?: components["schemas"]["ElevenLabsVerifiedVoiceLanguage"][] | null;
notice_period?: number | null;
instagram_username?: string | null;
twitter_username?: string | null;
youtube_username?: string | null;
tiktok_username?: string | null;
image_url?: string | null;
is_added_by_user?: boolean | null;
is_bookmarked?: boolean | null;
};
ElevenLabsVerifiedVoiceLanguage: {
/** @description The language of the voice. */
language: string;
/** @description The voice's model ID. */
model_id: string;
/** @description The voice's accent, if applicable. */
accent?: string | null;
/** @description The voice's locale, if applicable. */
locale?: string | null;
/** @description The voice's preview URL, if applicable. */
preview_url?: string | null;
};
ElevenLabsGetSharedVoicesResponse: {
/** @description The list of shared voices */
voices: components["schemas"]["ElevenLabsSharedVoice"][];
/** @description Whether there are more shared voices in subsequent pages. */
has_more: boolean;
/**
* @description The total number of shared voices matching the query.
* @default 0
*/
total_count: number;
last_sort_id?: string | null;
};
KlingPresetsElementsResponse: {
code?: number;
message?: string;
request_id?: string;
data?: components["schemas"]["KlingPresetsElementTask"][];
};
KlingPresetsElementTask: {
task_id?: string;
/** @enum {string} */
task_status?: "submitted" | "processing" | "succeed" | "failed";
task_status_msg?: string;
task_info?: {
external_task_id?: string;
};
task_result?: {
elements?: components["schemas"]["KlingPresetsElement"][];
};
final_unit_deduction?: string;
/** @description Task creation time, Unix timestamp in ms */
created_at?: number;
/** @description Task update time, Unix timestamp in ms */
updated_at?: number;
};
KlingPresetsElement: {
/** Format: int64 */
element_id?: number;
element_name?: string;
element_description?: string;
reference_type?: string;
element_image_list?: {
frontal_image?: string;
refer_images?: {
image_url?: string;
}[];
};
element_video_list?: {
refer_videos?: {
video_url?: string;
}[];
};
element_voice_info?: {
voice_id?: string;
voice_name?: string;
trial_url?: string;
owned_by?: string;
};
tag_list?: {
id?: string;
name?: string;
description?: string;
}[];
owned_by?: string;
};
KlingErrorResponse: {
/**
* @description - 1000: Authentication failed
@@ -14085,7 +13846,7 @@ export interface components {
* @description The ID of the model to call. Available models include seedance-1-5-pro-251215, seedance-1-0-pro-250528, seedance-1-0-pro-fast-251015, seedance-1-0-lite-t2v-250428, seedance-1-0-lite-i2v-250428
* @enum {string}
*/
model: "seedance-1-5-pro-251215" | "seedance-1-0-pro-250528" | "seedance-1-0-lite-t2v-250428" | "seedance-1-0-lite-i2v-250428" | "seedance-1-0-pro-fast-251015" | "dreamina-seedance-2-0-260128" | "dreamina-seedance-2-0-fast-260128";
model: "seedance-1-5-pro-251215" | "seedance-1-0-pro-250528" | "seedance-1-0-lite-t2v-250428" | "seedance-1-0-lite-i2v-250428" | "seedance-1-0-pro-fast-251015";
/** @description The input content for the model to generate a video */
content: components["schemas"]["BytePlusVideoGenerationContent"][];
/**
@@ -14101,46 +13862,19 @@ export interface components {
*/
return_last_frame: boolean;
/**
* @description Supported by Seedance 2.0, 2.0 fast, and 1.5 pro. Whether the generated video includes audio synchronized with the visuals.
* true: The model outputs a video with synchronized audio.
* @description Only supported by Seedance 1.5 pro. Whether the generated video includes audio synchronized with the visuals.
* true: The model outputs a video with synchronized audio. Seedance 1.5 pro can automatically generate matching voice, sound effects, or background music based on the prompt and visual content. It is recommended to enclose dialogue in double quotes. Example: A man stops a woman and says, "Remember, never point your finger at the moon."
* false: The model outputs a silent video.
* @default true
*/
generate_audio: boolean;
/**
* @description Video resolution. Seedance 2.0 & 2.0 fast, 1.5 pro, 1.0 lite default: 720p. Seedance 1.0 pro & pro-fast default: 1080p.
* Note: Seedance 2.0 & 2.0 fast do not support 1080p.
* @enum {string}
*/
resolution?: "480p" | "720p" | "1080p";
/**
* @description Aspect ratio of the generated video. Seedance 2.0 & 2.0 fast, 1.5 pro default: adaptive.
* @enum {string}
*/
ratio?: "16:9" | "4:3" | "1:1" | "3:4" | "9:16" | "21:9" | "adaptive";
/** @description Video duration in seconds. Seedance 2.0 & 2.0 fast: [4,15] or -1 (auto). Seedance 1.5 pro: [4,12] or -1. Seedance 1.0: [2,12]. */
duration?: number;
/** @description Seed integer for controlling randomness. Range: [-1, 2^32-1]. -1 uses a random seed. */
seed?: number;
/**
* @description Whether the generated video includes a watermark.
* @default false
*/
watermark: boolean;
/**
* @description Service tier for processing. Seedance 2.0 & 2.0 fast do not support flex (offline inference).
* @enum {string}
*/
service_tier?: "default" | "flex";
/** @description Task timeout threshold in seconds. Default 172800 (48h). Range: [3600, 259200]. */
execution_expires_after?: number;
};
BytePlusVideoGenerationContent: {
/**
* @description The type of the input content
* @enum {string}
*/
type: "text" | "image_url" | "video_url" | "audio_url";
type: "text" | "image_url";
/**
* @description The input text information for the model. Includes text prompt and optional parameters.
*
@@ -14160,40 +13894,12 @@ export interface components {
text?: string;
image_url?: {
/**
* @description Image content for image-to-video generation (when type is "image_url")
* @description Image content for image-to-video generation (when type is "image")
* Image URL: Make sure that the image URL is accessible.
* Base64-encoded content: Format must be data:image/<format>;base64,<content>
* Asset ID: Format asset://<ASSET_ID>
*/
url?: string;
};
/** @description Input video object. Only Seedance 2.0 & 2.0 fast support video input. */
video_url?: {
/**
* @description Video URL or Asset ID.
* Video URL: Public URL of the video (mp4, mov).
* Asset ID: Format asset://<ASSET_ID>
*/
url?: string;
};
/** @description Input audio object. Only Seedance 2.0 & 2.0 fast support audio input. Cannot be used alone - must include at least 1 image or video. */
audio_url?: {
/**
* @description Audio URL, Base64 encoding, or Asset ID.
* Audio URL: Public URL of the audio (wav, mp3).
* Base64: Format data:audio/<format>;base64,<content>
* Asset ID: Format asset://<ASSET_ID>
*/
url?: string;
};
/**
* @description The role/position of the content item.
* For images: first_frame, last_frame, or reference_image.
* For videos: reference_video (Seedance 2.0 & 2.0 fast only).
* For audio: reference_audio (Seedance 2.0 & 2.0 fast only).
* @enum {string}
*/
role?: "first_frame" | "last_frame" | "reference_image" | "reference_video" | "reference_audio";
};
BytePlusVideoGenerationResponse: {
/** @description The ID of the video generation task */
@@ -14208,7 +13914,7 @@ export interface components {
* @description The state of the task
* @enum {string}
*/
status?: "queued" | "running" | "cancelled" | "succeeded" | "failed" | "expired";
status?: "queued" | "running" | "cancelled" | "succeeded" | "failed";
/** @description The error information. If the task succeeds, null is returned. If the task fails, the error information is returned. */
error?: {
/** @description The error code */
@@ -14238,7 +13944,7 @@ export interface components {
* @description The ID of the model to call
* @enum {string}
*/
model: "wan2.5-t2v-preview" | "wan2.5-i2v-preview" | "wan2.6-t2v" | "wan2.6-i2v" | "wan2.6-r2v" | "wan2.7-i2v" | "wan2.7-t2v" | "wan2.7-r2v" | "wan2.7-videoedit";
model: "wan2.5-t2v-preview" | "wan2.5-i2v-preview" | "wan2.6-t2v" | "wan2.6-i2v" | "wan2.6-r2v";
/** @description Enter basic information, such as prompt words, etc. */
input: {
/**
@@ -14246,7 +13952,7 @@ export interface components {
* For wan2.6-r2v with multiple reference videos, use 'character1', 'character2', etc. to refer to subjects
* in the order of reference videos. Example: "Character1 sings on the roadside, Character2 dances beside it"
*/
prompt?: string;
prompt: string;
/** @description Reverse prompt words are used to describe content that you do not want to see in the video screen */
negative_prompt?: string;
/** @description Audio file download URL. Supported formats: mp3 and wav. Cannot be used with reference_video_urls. */
@@ -14267,23 +13973,6 @@ export interface components {
* Billing: Based on actual reference duration used.
*/
reference_video_urls?: string[];
/**
* @description Media asset list for wan2.7 models. Specifies reference materials (image, audio, video)
* for video generation. Each element contains a type and url field.
* Supported type values vary by model:
* - wan2.7-i2v: first_frame, last_frame, driving_audio, first_clip
* - wan2.7-r2v: reference_image, reference_video
* - wan2.7-videoedit: video, reference_image
*/
media?: {
/**
* @description Media asset type
* @enum {string}
*/
type: "first_frame" | "last_frame" | "driving_audio" | "first_clip" | "reference_image" | "reference_video" | "video";
/** @description URL of the media file (public HTTP/HTTPS URL or OSS temporary URL) */
url: string;
}[];
};
/** @description Video processing parameters */
parameters?: {
@@ -14296,10 +13985,9 @@ export interface components {
*/
size?: string;
/**
* @description Resolution level. Supported values vary by model:
* @description Resolution level for I2V models. Supported values vary by model:
* - wan2.5-i2v-preview: 480P, 720P, 1080P
* - wan2.6-i2v: 720P, 1080P only (no 480P support)
* - wan2.7 models (i2v, t2v, r2v, videoedit): 720P, 1080P (default 1080P)
* @enum {string}
*/
resolution?: "480P" | "720P" | "1080P";
@@ -14308,11 +13996,10 @@ export interface components {
* - wan2.5 models: 5 or 10 seconds
* - wan2.6-t2v, wan2.6-i2v: 5, 10, or 15 seconds
* - wan2.6-r2v: 5 or 10 seconds only (no 15s support)
* - wan2.7-i2v, wan2.7-t2v: integer in [2, 15]
* - wan2.7-r2v, wan2.7-videoedit: integer in [2, 10]
* @default 5
* @enum {integer}
*/
duration?: number;
duration?: 5 | 10 | 15;
/**
* @description Is it enabled prompt intelligent rewriting. Default is true
* @default true
@@ -14320,27 +14007,13 @@ export interface components {
prompt_extend?: boolean;
/**
* @description Intelligent multi-lens control. Only active when prompt_extend is enabled.
* For wan2.6 and wan2.7-r2v models.
* - single: Single-shot video (default)
* - multi: Multi-shot video
* @default single
* For wan2.6 models only.
* - multi: Intelligent disassembly into multiple lenses (default)
* - single: Single lens generation
* @default multi
* @enum {string}
*/
shot_type?: "multi" | "single";
/**
* @description Aspect ratio of the generated video. For wan2.7 models only.
* If not provided, defaults based on the resolution tier.
* @enum {string}
*/
ratio?: "16:9" | "9:16" | "1:1" | "4:3" | "3:4";
/**
* @description Video audio setting for wan2.7-videoedit model.
* - auto (default): Model intelligently judges based on prompt content
* - origin: Forcefully preserve the original audio from the input video
* @default auto
* @enum {string}
*/
audio_setting?: "auto" | "origin";
/** @description Random number seed, used to control the randomness of the model generated content */
seed?: number;
/**
@@ -16606,72 +16279,6 @@ export interface components {
};
};
};
SoniloVideoToMusicRequest: {
/**
* Format: binary
* @description Multipart file part; e.g. video/mp4.
*/
video: string;
/** @description Optional text prompt to guide music generation. */
prompt?: string;
} | {
/**
* Format: uri
* @description Public http(s) URL of the video.
*/
video_url: string;
/** @description Optional text prompt to guide music generation. */
prompt?: string;
};
SoniloTextToMusicRequest: {
/** @description Text prompt describing the desired music. */
prompt: string;
/** @description Target duration in seconds. Will be inferred if not provided. */
duration?: number;
};
/** @description A single NDJSON event from the Sonilo streaming response. */
SoniloStreamEvent: {
/** @enum {string} */
type: "title";
stream_index: number;
prompt_index: number;
copy_index: number;
title: string;
display_tags: string[];
} | {
/** @enum {string} */
type: "audio_chunk";
sample_rate: number;
channels: number;
stream_index: number;
num_streams: number;
/** @description Base64-encoded AAC in fMP4 fragments; concatenate per stream_index. */
data: string;
} | {
/** @enum {string} */
type: "generated_audio";
sample_rate: number;
channels: number;
duration_sec_by_stream: number[];
billing_rate_per_sec: number;
billing: number;
} | {
/** @enum {string} */
type: "complete";
} | {
/** @enum {string} */
type: "error";
code?: string;
message: string;
};
SoniloErrorResponse: {
detail?: {
/** @description Error code */
code?: string;
/** @description Human-readable error message */
message?: string;
};
};
};
responses: never;
parameters: {
@@ -20439,82 +20046,6 @@ export interface operations {
};
};
};
adminListAllNodeVersions: {
parameters: {
query?: {
nodeId?: string;
statuses?: components["schemas"]["NodeVersionStatus"][];
include_status_reason?: boolean;
/** @description The page number to retrieve. */
page?: number;
/** @description The number of items to include per page. */
pageSize?: number;
/** @description search for status_reason, case insensitive */
status_reason?: string;
/** @description Include soft-deleted node versions in the results */
include_deleted?: boolean;
};
header?: never;
path?: never;
cookie?: never;
};
requestBody?: never;
responses: {
/** @description List of all node versions */
200: {
headers: {
[name: string]: unknown;
};
content: {
"application/json": {
/** @description Total number of node versions available */
total?: number;
versions?: components["schemas"]["NodeVersion"][];
/** @description Current page number */
page?: number;
/** @description Maximum number of node versions per page. Maximum is 100. */
pageSize?: number;
/** @description Total number of pages available */
totalPages?: number;
};
};
};
/** @description Invalid input, object invalid */
400: {
headers: {
[name: string]: unknown;
};
content: {
"application/json": components["schemas"]["ErrorResponse"];
};
};
/** @description Unauthorized */
401: {
headers: {
[name: string]: unknown;
};
content?: never;
};
/** @description Forbidden */
403: {
headers: {
[name: string]: unknown;
};
content: {
"application/json": components["schemas"]["ErrorResponse"];
};
};
/** @description Internal server error */
500: {
headers: {
[name: string]: unknown;
};
content: {
"application/json": components["schemas"]["ErrorResponse"];
};
};
};
};
listCoupons: {
parameters: {
query?: {
@@ -24399,67 +23930,6 @@ export interface operations {
};
};
};
klingGetPresetsElements: {
parameters: {
query?: {
/** @description Page number. Value range: [1, 1000]. */
pageNum?: number;
/** @description Data volume per page. Value range: [1, 500]. */
pageSize?: number;
};
header?: never;
path?: never;
cookie?: never;
};
requestBody?: never;
responses: {
/** @description Presets elements retrieved successfully */
200: {
headers: {
[name: string]: unknown;
};
content: {
"application/json": components["schemas"]["KlingPresetsElementsResponse"];
};
};
/** @description Bad Request */
400: {
headers: {
[name: string]: unknown;
};
content: {
"application/json": components["schemas"]["KlingErrorResponse"];
};
};
/** @description Authentication failed */
401: {
headers: {
[name: string]: unknown;
};
content: {
"application/json": components["schemas"]["KlingErrorResponse"];
};
};
/** @description Rate limit exceeded */
429: {
headers: {
[name: string]: unknown;
};
content: {
"application/json": components["schemas"]["KlingErrorResponse"];
};
};
/** @description Internal server error */
500: {
headers: {
[name: string]: unknown;
};
content: {
"application/json": components["schemas"]["KlingErrorResponse"];
};
};
};
};
klingImageGenerationsQueryTaskList: {
parameters: {
query?: {
@@ -30142,38 +29612,6 @@ export interface operations {
};
};
};
byteplusSeedance2VideoGenerationQuery: {
parameters: {
query?: never;
header?: never;
path: {
/** @description The ID of the Seedance 2.0 video generation task to query */
task_id: string;
};
cookie?: never;
};
requestBody?: never;
responses: {
/** @description Video generation task information retrieved successfully */
200: {
headers: {
[name: string]: unknown;
};
content: {
"application/json": components["schemas"]["BytePlusVideoGenerationQueryResponse"];
};
};
/** @description Error 4xx/5xx */
default: {
headers: {
[name: string]: unknown;
};
content: {
"application/json": components["schemas"]["ErrorResponse"];
};
};
};
};
wanVideoGeneration: {
parameters: {
query?: never;
@@ -33403,79 +32841,6 @@ export interface operations {
};
};
};
ElevenLabsGetSharedVoices: {
parameters: {
query?: {
/** @description How many shared voices to return at maximum. Can not exceed 100, defaults to 30. */
page_size?: number;
/** @description Voice category used for filtering. One of: professional, famous, high_quality. */
category?: string;
/** @description Gender used for filtering */
gender?: string;
/** @description Age used for filtering */
age?: string;
/** @description Accent used for filtering */
accent?: string;
/** @description Language used for filtering */
language?: string;
/** @description Locale used for filtering */
locale?: string;
/** @description Search term used for filtering */
search?: string;
/** @description Use-case used for filtering */
use_cases?: string[];
/** @description Descriptives used for filtering */
descriptives?: string[];
/** @description Filter featured voices */
featured?: boolean;
/** @description Filter voices with a minimum notice period of the given number of days. */
min_notice_period_days?: number;
/** @description Include/exclude voices with custom rates */
include_custom_rates?: boolean;
/** @description Include/exclude voices that are live moderated */
include_live_moderated?: boolean;
/** @description Filter voices that are enabled for the reader app */
reader_app_enabled?: boolean;
/** @description Filter voices by public owner ID */
owner_id?: string;
/** @description Sort criteria */
sort?: string;
/** @description Page number */
page?: number;
};
header?: never;
path?: never;
cookie?: never;
};
requestBody?: never;
responses: {
/** @description Shared voices retrieved successfully */
200: {
headers: {
[name: string]: unknown;
};
content: {
"application/json": components["schemas"]["ElevenLabsGetSharedVoicesResponse"];
};
};
/** @description Unauthorized */
401: {
headers: {
[name: string]: unknown;
};
content?: never;
};
/** @description Validation Error */
422: {
headers: {
[name: string]: unknown;
};
content: {
"application/json": components["schemas"]["ElevenLabsValidationError"];
};
};
};
};
ElevenLabsSoundGeneration: {
parameters: {
query?: {
@@ -34099,115 +33464,4 @@ export interface operations {
};
};
};
soniloVideoToMusicGenerate: {
parameters: {
query?: never;
header?: never;
path?: never;
cookie?: never;
};
requestBody: {
content: {
"multipart/form-data": components["schemas"]["SoniloVideoToMusicRequest"];
};
};
responses: {
/** @description OK - Streaming NDJSON response with audio generation events */
200: {
headers: {
[name: string]: unknown;
};
content: {
"application/x-ndjson": components["schemas"]["SoniloStreamEvent"];
};
};
/** @description Bad Request */
400: {
headers: {
[name: string]: unknown;
};
content: {
"application/json": components["schemas"]["SoniloErrorResponse"];
};
};
/** @description Unauthorized - Invalid or missing API key */
401: {
headers: {
[name: string]: unknown;
};
content: {
"application/json": components["schemas"]["SoniloErrorResponse"];
};
};
/** @description Payload Too Large - Video exceeds size limit */
413: {
headers: {
[name: string]: unknown;
};
content: {
"application/json": components["schemas"]["SoniloErrorResponse"];
};
};
/** @description Internal Server Error */
500: {
headers: {
[name: string]: unknown;
};
content: {
"application/json": components["schemas"]["SoniloErrorResponse"];
};
};
};
};
soniloTextToMusicGenerate: {
parameters: {
query?: never;
header?: never;
path?: never;
cookie?: never;
};
requestBody: {
content: {
"multipart/form-data": components["schemas"]["SoniloTextToMusicRequest"];
};
};
responses: {
/** @description OK - Streaming NDJSON response with audio generation events */
200: {
headers: {
[name: string]: unknown;
};
content: {
"application/x-ndjson": components["schemas"]["SoniloStreamEvent"];
};
};
/** @description Bad Request */
400: {
headers: {
[name: string]: unknown;
};
content: {
"application/json": components["schemas"]["SoniloErrorResponse"];
};
};
/** @description Unauthorized - Invalid or missing API key */
401: {
headers: {
[name: string]: unknown;
};
content: {
"application/json": components["schemas"]["SoniloErrorResponse"];
};
};
/** @description Internal Server Error */
500: {
headers: {
[name: string]: unknown;
};
content: {
"application/json": components["schemas"]["SoniloErrorResponse"];
};
};
};
};
}