mirror of
https://github.com/Comfy-Org/ComfyUI_frontend.git
synced 2026-04-25 08:49:36 +00:00
refactor: encapsulate error extraction in TaskItemImpl getters (#7650)
## Summary - Add `errorMessage` and `executionError` getters to `TaskItemImpl` that extract error info from status messages - Update `useJobErrorReporting` composable to use these getters instead of standalone function - Remove the standalone `extractExecutionError` function This encapsulates error extraction within `TaskItemImpl`, preparing for the Jobs API migration where the underlying data format will change but the getter interface will remain stable. ## Test plan - [x] All existing tests pass - [x] New tests added for `TaskItemImpl.errorMessage` and `TaskItemImpl.executionError` getters - [x] TypeScript, lint, and knip checks pass 🤖 Generated with [Claude Code](https://claude.com/claude-code) ┆Issue is synchronized with this [Notion page](https://www.notion.so/PR-7650-refactor-encapsulate-error-extraction-in-TaskItemImpl-getters-2ce6d73d365081caae33dcc7e1e07720) by [Unito](https://www.unito.io) --------- Co-authored-by: Claude <noreply@anthropic.com> Co-authored-by: Christian Byrne <cbyrne@comfy.org>
This commit is contained in:
@@ -32,7 +32,6 @@ export function mapTaskOutputToAssetItem(
|
||||
subfolder: output.subfolder,
|
||||
executionTimeInSeconds: taskItem.executionTimeInSeconds,
|
||||
format: output.format,
|
||||
workflow: taskItem.workflow,
|
||||
create_time: taskItem.createTime
|
||||
}
|
||||
|
||||
|
||||
@@ -1,380 +0,0 @@
|
||||
/**
|
||||
* @fileoverview Test fixtures for history tests.
|
||||
*/
|
||||
import type { HistoryResponseV2 } from '@/platform/remote/comfyui/history/types/historyV2Types'
|
||||
import type { HistoryTaskItem } from '@/schemas/apiSchema'
|
||||
|
||||
/**
 * V1 API raw response format (object with prompt IDs as keys).
 * This is the shape returned by `/history` before `fetchHistoryV1`
 * converts it to an array and stamps `taskType: 'History'` on each item.
 */
export const historyV1RawResponse: Record<
  string,
  Omit<HistoryTaskItem, 'taskType'>
> = {
  // Fully-populated item: embedded workflow metadata, one image output,
  // both execution_start and execution_success status messages, and meta.
  'complete-item-id': {
    prompt: [
      24,
      'complete-item-id',
      {},
      {
        client_id: 'test-client',
        extra_pnginfo: {
          workflow: {
            id: '44f0c9f9-b5a7-48de-99fc-7e80c1570241',
            revision: 0,
            last_node_id: 9,
            last_link_id: 9,
            nodes: [],
            links: [],
            groups: [],
            config: {},
            extra: {},
            version: 0.4
          }
        }
      },
      ['9']
    ],
    outputs: {
      '9': {
        images: [
          {
            filename: 'test.png',
            subfolder: '',
            type: 'output'
          }
        ]
      }
    },
    status: {
      status_str: 'success',
      completed: true,
      messages: [
        [
          'execution_start',
          { prompt_id: 'complete-item-id', timestamp: 1234567890 }
        ],
        [
          'execution_success',
          { prompt_id: 'complete-item-id', timestamp: 1234567900 }
        ]
      ]
    },
    meta: {
      '9': {
        node_id: '9',
        display_node: '9'
      }
    }
  },
  // Item whose optional `status` is explicitly undefined and whose output
  // node has an empty image list — exercises absent-status handling.
  'no-status-id': {
    prompt: [
      23,
      'no-status-id',
      {},
      {
        client_id: 'inference'
      },
      ['10']
    ],
    outputs: {
      '10': {
        images: []
      }
    },
    status: undefined,
    meta: {
      '10': {
        node_id: '10',
        display_node: '10'
      }
    }
  }
}
|
||||
|
||||
/**
 * V2 response with multiple edge cases:
 * - Item 0: Complete with all fields
 * - Item 1: Missing optional status field
 * - Item 2: Missing optional meta field
 * - Item 3: Multiple output nodes
 */
export const historyV2Fixture: HistoryResponseV2 = {
  history: [
    // Item 0: complete item — workflow metadata, one image output,
    // success status with execution messages, and per-node meta.
    {
      prompt_id: 'complete-item-id',
      prompt: {
        priority: 24,
        prompt_id: 'complete-item-id',
        extra_data: {
          client_id: 'test-client',
          extra_pnginfo: {
            workflow: {
              id: '44f0c9f9-b5a7-48de-99fc-7e80c1570241',
              revision: 0,
              last_node_id: 9,
              last_link_id: 9,
              nodes: [],
              links: [],
              groups: [],
              config: {},
              extra: {},
              version: 0.4
            }
          }
        }
      },
      outputs: {
        '9': {
          images: [
            {
              filename: 'test.png',
              subfolder: '',
              type: 'output'
            }
          ]
        }
      },
      status: {
        status_str: 'success',
        completed: true,
        messages: [
          [
            'execution_start',
            { prompt_id: 'complete-item-id', timestamp: 1234567890 }
          ],
          [
            'execution_success',
            { prompt_id: 'complete-item-id', timestamp: 1234567900 }
          ]
        ]
      },
      meta: {
        '9': {
          node_id: '9',
          display_node: '9'
        }
      }
    },
    // Item 1: optional `status` field entirely absent.
    {
      prompt_id: 'no-status-id',
      prompt: {
        priority: 23,
        prompt_id: 'no-status-id',
        extra_data: {
          client_id: 'inference'
        }
      },
      outputs: {
        '10': {
          images: []
        }
      },
      meta: {
        '10': {
          node_id: '10',
          display_node: '10'
        }
      }
    },
    // Item 2: optional `meta` field absent; errored run with no messages.
    {
      prompt_id: 'no-meta-id',
      prompt: {
        priority: 22,
        prompt_id: 'no-meta-id',
        extra_data: {
          client_id: 'web-ui'
        }
      },
      outputs: {
        '11': {
          audio: []
        }
      },
      status: {
        status_str: 'error',
        completed: false,
        messages: []
      }
    },
    // Item 3: multiple output nodes with mixed media types (images + video).
    {
      prompt_id: 'multi-output-id',
      prompt: {
        priority: 21,
        prompt_id: 'multi-output-id',
        extra_data: {
          client_id: 'batch-processor'
        }
      },
      outputs: {
        '3': {
          images: [{ filename: 'img1.png', type: 'output', subfolder: '' }]
        },
        '9': {
          images: [{ filename: 'img2.png', type: 'output', subfolder: '' }]
        },
        '12': {
          video: [{ filename: 'video.mp4', type: 'output', subfolder: '' }]
        }
      },
      status: {
        status_str: 'success',
        completed: true,
        messages: []
      },
      meta: {
        '3': { node_id: '3', display_node: '3' },
        '9': { node_id: '9', display_node: '9' },
        '12': { node_id: '12', display_node: '12' }
      }
    }
  ]
}
|
||||
|
||||
/**
 * Expected V1 transformation of historyV2Fixture.
 * Priority is now synthetic based on execution_success timestamp:
 * - complete-item-id: has timestamp → priority 1 (only one with timestamp)
 * - no-status-id: no status → priority 0
 * - no-meta-id: empty messages → priority 0
 * - multi-output-id: empty messages → priority 0
 */
export const expectedV1Fixture: HistoryTaskItem[] = [
  // complete-item-id: only item carrying an execution_success timestamp,
  // so it gets synthetic priority 1 and sorts first.
  {
    taskType: 'History',
    prompt: [
      1,
      'complete-item-id',
      {},
      {
        client_id: 'test-client',
        extra_pnginfo: {
          workflow: {
            id: '44f0c9f9-b5a7-48de-99fc-7e80c1570241',
            revision: 0,
            last_node_id: 9,
            last_link_id: 9,
            nodes: [],
            links: [],
            groups: [],
            config: {},
            extra: {},
            version: 0.4
          }
        }
      },
      ['9']
    ],
    outputs: {
      '9': {
        images: [
          {
            filename: 'test.png',
            subfolder: '',
            type: 'output'
          }
        ]
      }
    },
    status: {
      status_str: 'success',
      completed: true,
      messages: [
        [
          'execution_start',
          { prompt_id: 'complete-item-id', timestamp: 1234567890 }
        ],
        [
          'execution_success',
          { prompt_id: 'complete-item-id', timestamp: 1234567900 }
        ]
      ]
    },
    meta: {
      '9': {
        node_id: '9',
        display_node: '9'
      }
    }
  },
  // no-status-id: absent V2 status maps to `status: undefined`, priority 0.
  {
    taskType: 'History',
    prompt: [
      0,
      'no-status-id',
      {},
      {
        client_id: 'inference'
      },
      ['10']
    ],
    outputs: {
      '10': {
        images: []
      }
    },
    status: undefined,
    meta: {
      '10': {
        node_id: '10',
        display_node: '10'
      }
    }
  },
  // no-meta-id: absent V2 meta maps to `meta: undefined`; empty message
  // list means no timestamp, so priority 0.
  {
    taskType: 'History',
    prompt: [
      0,
      'no-meta-id',
      {},
      {
        client_id: 'web-ui'
      },
      ['11']
    ],
    outputs: {
      '11': {
        audio: []
      }
    },
    status: {
      status_str: 'error',
      completed: false,
      messages: []
    },
    meta: undefined
  },
  // multi-output-id: output node IDs are derived from the outputs map keys.
  {
    taskType: 'History',
    prompt: [
      0,
      'multi-output-id',
      {},
      {
        client_id: 'batch-processor'
      },
      ['3', '9', '12']
    ],
    outputs: {
      '3': {
        images: [{ filename: 'img1.png', type: 'output', subfolder: '' }]
      },
      '9': {
        images: [{ filename: 'img2.png', type: 'output', subfolder: '' }]
      },
      '12': {
        video: [{ filename: 'video.mp4', type: 'output', subfolder: '' }]
      }
    },
    status: {
      status_str: 'success',
      completed: true,
      messages: []
    },
    meta: {
      '3': { node_id: '3', display_node: '3' },
      '9': { node_id: '9', display_node: '9' },
      '12': { node_id: '12', display_node: '12' }
    }
  }
]
|
||||
@@ -1,434 +0,0 @@
|
||||
/**
|
||||
* @fileoverview Unit tests for V2 to V1 history adapter.
|
||||
*/
|
||||
import { describe, expect, it } from 'vitest'
|
||||
|
||||
import { mapHistoryV2toHistory } from '@/platform/remote/comfyui/history/adapters/v2ToV1Adapter'
|
||||
import { zRawHistoryItemV2 } from '@/platform/remote/comfyui/history/types/historyV2Types'
|
||||
import type { HistoryResponseV2 } from '@/platform/remote/comfyui/history/types/historyV2Types'
|
||||
|
||||
import {
|
||||
expectedV1Fixture,
|
||||
historyV2Fixture
|
||||
} from '@/platform/remote/comfyui/history/__fixtures__/historyFixtures'
|
||||
import type { HistoryTaskItem } from '@/platform/remote/comfyui/history/types/historyV1Types'
|
||||
|
||||
// Three items: two with execution_success timestamps (1000 and 2000) and one
// without any timestamp — exercises priority-0 assignment for untimestamped
// items alongside normal descending priority for timestamped ones.
const historyV2WithMissingTimestamp: HistoryResponseV2 = {
  history: [
    {
      prompt_id: 'item-timestamp-1000',
      prompt: {
        priority: 0,
        prompt_id: 'item-timestamp-1000',
        extra_data: {
          client_id: 'test-client'
        }
      },
      outputs: {
        '1': {
          images: [{ filename: 'test1.png', type: 'output', subfolder: '' }]
        }
      },
      status: {
        status_str: 'success',
        completed: true,
        messages: [
          [
            'execution_success',
            { prompt_id: 'item-timestamp-1000', timestamp: 1000 }
          ]
        ]
      }
    },
    {
      prompt_id: 'item-timestamp-2000',
      prompt: {
        priority: 0,
        prompt_id: 'item-timestamp-2000',
        extra_data: {
          client_id: 'test-client'
        }
      },
      outputs: {
        '2': {
          images: [{ filename: 'test2.png', type: 'output', subfolder: '' }]
        }
      },
      status: {
        status_str: 'success',
        completed: true,
        messages: [
          [
            'execution_success',
            { prompt_id: 'item-timestamp-2000', timestamp: 2000 }
          ]
        ]
      }
    },
    // No execution_success message → no timestamp → expected priority 0.
    {
      prompt_id: 'item-no-timestamp',
      prompt: {
        priority: 0,
        prompt_id: 'item-no-timestamp',
        extra_data: {
          client_id: 'test-client'
        }
      },
      outputs: {
        '3': {
          images: [{ filename: 'test3.png', type: 'output', subfolder: '' }]
        }
      },
      status: {
        status_str: 'success',
        completed: true,
        messages: []
      }
    }
  ]
}

// Five timestamped items deliberately listed out of chronological order —
// exercises sorting (newest first) plus descending synthetic priority
// assignment (timestamp 5000 → priority 5, ..., 1000 → priority 1).
const historyV2FiveItemsSorting: HistoryResponseV2 = {
  history: [
    {
      prompt_id: 'item-timestamp-3000',
      prompt: {
        priority: 0,
        prompt_id: 'item-timestamp-3000',
        extra_data: { client_id: 'test-client' }
      },
      outputs: {
        '1': {
          images: [{ filename: 'test1.png', type: 'output', subfolder: '' }]
        }
      },
      status: {
        status_str: 'success',
        completed: true,
        messages: [
          [
            'execution_success',
            { prompt_id: 'item-timestamp-3000', timestamp: 3000 }
          ]
        ]
      }
    },
    {
      prompt_id: 'item-timestamp-1000',
      prompt: {
        priority: 0,
        prompt_id: 'item-timestamp-1000',
        extra_data: { client_id: 'test-client' }
      },
      outputs: {
        '2': {
          images: [{ filename: 'test2.png', type: 'output', subfolder: '' }]
        }
      },
      status: {
        status_str: 'success',
        completed: true,
        messages: [
          [
            'execution_success',
            { prompt_id: 'item-timestamp-1000', timestamp: 1000 }
          ]
        ]
      }
    },
    {
      prompt_id: 'item-timestamp-5000',
      prompt: {
        priority: 0,
        prompt_id: 'item-timestamp-5000',
        extra_data: { client_id: 'test-client' }
      },
      outputs: {
        '3': {
          images: [{ filename: 'test3.png', type: 'output', subfolder: '' }]
        }
      },
      status: {
        status_str: 'success',
        completed: true,
        messages: [
          [
            'execution_success',
            { prompt_id: 'item-timestamp-5000', timestamp: 5000 }
          ]
        ]
      }
    },
    {
      prompt_id: 'item-timestamp-2000',
      prompt: {
        priority: 0,
        prompt_id: 'item-timestamp-2000',
        extra_data: { client_id: 'test-client' }
      },
      outputs: {
        '4': {
          images: [{ filename: 'test4.png', type: 'output', subfolder: '' }]
        }
      },
      status: {
        status_str: 'success',
        completed: true,
        messages: [
          [
            'execution_success',
            { prompt_id: 'item-timestamp-2000', timestamp: 2000 }
          ]
        ]
      }
    },
    {
      prompt_id: 'item-timestamp-4000',
      prompt: {
        priority: 0,
        prompt_id: 'item-timestamp-4000',
        extra_data: { client_id: 'test-client' }
      },
      outputs: {
        '5': {
          images: [{ filename: 'test5.png', type: 'output', subfolder: '' }]
        }
      },
      status: {
        status_str: 'success',
        completed: true,
        messages: [
          [
            'execution_success',
            { prompt_id: 'item-timestamp-4000', timestamp: 4000 }
          ]
        ]
      }
    }
  ]
}

// Three items all with empty message lists — none has a timestamp, so every
// item should receive synthetic priority 0.
const historyV2MultipleNoTimestamp: HistoryResponseV2 = {
  history: [
    {
      prompt_id: 'item-no-timestamp-1',
      prompt: {
        priority: 0,
        prompt_id: 'item-no-timestamp-1',
        extra_data: { client_id: 'test-client' }
      },
      outputs: {
        '1': {
          images: [{ filename: 'test1.png', type: 'output', subfolder: '' }]
        }
      },
      status: {
        status_str: 'success',
        completed: true,
        messages: []
      }
    },
    {
      prompt_id: 'item-no-timestamp-2',
      prompt: {
        priority: 0,
        prompt_id: 'item-no-timestamp-2',
        extra_data: { client_id: 'test-client' }
      },
      outputs: {
        '2': {
          images: [{ filename: 'test2.png', type: 'output', subfolder: '' }]
        }
      },
      status: {
        status_str: 'success',
        completed: true,
        messages: []
      }
    },
    {
      prompt_id: 'item-no-timestamp-3',
      prompt: {
        priority: 0,
        prompt_id: 'item-no-timestamp-3',
        extra_data: { client_id: 'test-client' }
      },
      outputs: {
        '3': {
          images: [{ filename: 'test3.png', type: 'output', subfolder: '' }]
        }
      },
      status: {
        status_str: 'success',
        completed: true,
        messages: []
      }
    }
  ]
}
|
||||
|
||||
function findResultByPromptId(
|
||||
result: HistoryTaskItem[],
|
||||
promptId: string
|
||||
): HistoryTaskItem {
|
||||
const item = result.find((item) => item.prompt[1] === promptId)
|
||||
if (!item) {
|
||||
throw new Error(`Expected item with promptId ${promptId} not found`)
|
||||
}
|
||||
return item
|
||||
}
|
||||
|
||||
// Test suite for the V2 → V1 history adapter. NOTE(review): `history` below
// is computed once per describe body, so assertions within a group share one
// adapter invocation — keep that in mind when adding tests.
describe('mapHistoryV2toHistory', () => {
  describe('fixture validation', () => {
    it('should have valid fixture data', () => {
      // Validate all items in the fixture to ensure test data is correct
      historyV2Fixture.history.forEach((item: unknown) => {
        expect(() => zRawHistoryItemV2.parse(item)).not.toThrow()
      })
    })
  })

  describe('given a complete V2 history response with edge cases', () => {
    const history = mapHistoryV2toHistory(historyV2Fixture)

    it('should transform all items to V1 format with correct structure', () => {
      expect(history).toEqual(expectedV1Fixture)
    })

    it('should add taskType "History" to all items', () => {
      history.forEach((item) => {
        expect(item.taskType).toBe('History')
      })
    })

    it('should transform prompt to V1 tuple [priority, id, {}, extra_data, outputNodeIds]', () => {
      const firstItem = history[0]

      expect(firstItem.prompt[0]).toBe(1) // Synthetic priority based on timestamp
      expect(firstItem.prompt[1]).toBe('complete-item-id')
      expect(firstItem.prompt[2]).toEqual({}) // history v2 does not return this data
      expect(firstItem.prompt[3]).toMatchObject({ client_id: 'test-client' })
      expect(firstItem.prompt[4]).toEqual(['9'])
    })

    it('should handle missing optional status field', () => {
      expect(history[1].prompt[1]).toBe('no-status-id')
      expect(history[1].status).toBeUndefined()
    })

    it('should handle missing optional meta field', () => {
      expect(history[2].prompt[1]).toBe('no-meta-id')
      expect(history[2].meta).toBeUndefined()
    })

    it('should derive output node IDs from outputs object keys', () => {
      const multiOutputItem = history[3]

      // Key order of an outputs object is not guaranteed, so only membership
      // and count are asserted here.
      expect(multiOutputItem.prompt[4]).toEqual(
        expect.arrayContaining(['3', '9', '12'])
      )
      expect(multiOutputItem.prompt[4]).toHaveLength(3)
    })
  })

  describe('given empty history array', () => {
    it('should return empty array', () => {
      const emptyResponse: HistoryResponseV2 = { history: [] }
      const history = mapHistoryV2toHistory(emptyResponse)

      expect(history).toEqual([])
    })
  })

  describe('given empty outputs object', () => {
    it('should return empty array for output node IDs', () => {
      const v2Response: HistoryResponseV2 = {
        history: [
          {
            prompt_id: 'test-id',
            prompt: {
              priority: 0,
              prompt_id: 'test-id',
              extra_data: { client_id: 'test' }
            },
            outputs: {}
          }
        ]
      }

      const history = mapHistoryV2toHistory(v2Response)

      expect(history[0].prompt[4]).toEqual([])
    })
  })

  describe('given missing client_id', () => {
    it('should accept history items without client_id', () => {
      const v2Response: HistoryResponseV2 = {
        history: [
          {
            prompt_id: 'test-id',
            prompt: {
              priority: 0,
              prompt_id: 'test-id',
              extra_data: {}
            },
            outputs: {}
          }
        ]
      }

      const history = mapHistoryV2toHistory(v2Response)

      expect(history[0].prompt[3].client_id).toBeUndefined()
    })
  })

  describe('timestamp-based priority assignment', () => {
    it('assigns priority 0 to items without execution_success timestamp', () => {
      const result = mapHistoryV2toHistory(historyV2WithMissingTimestamp)

      expect(result).toHaveLength(3)

      const item1000 = findResultByPromptId(result, 'item-timestamp-1000')
      const item2000 = findResultByPromptId(result, 'item-timestamp-2000')
      const itemNoTimestamp = findResultByPromptId(result, 'item-no-timestamp')

      // Timestamped items rank by recency; the untimestamped one falls to 0.
      expect(item2000.prompt[0]).toBe(2)
      expect(item1000.prompt[0]).toBe(1)
      expect(itemNoTimestamp.prompt[0]).toBe(0)
    })

    it('correctly sorts and assigns priorities for multiple items', () => {
      const result = mapHistoryV2toHistory(historyV2FiveItemsSorting)

      expect(result).toHaveLength(5)

      const item1000 = findResultByPromptId(result, 'item-timestamp-1000')
      const item2000 = findResultByPromptId(result, 'item-timestamp-2000')
      const item3000 = findResultByPromptId(result, 'item-timestamp-3000')
      const item4000 = findResultByPromptId(result, 'item-timestamp-4000')
      const item5000 = findResultByPromptId(result, 'item-timestamp-5000')

      expect(item5000.prompt[0]).toBe(5)
      expect(item4000.prompt[0]).toBe(4)
      expect(item3000.prompt[0]).toBe(3)
      expect(item2000.prompt[0]).toBe(2)
      expect(item1000.prompt[0]).toBe(1)
    })

    it('assigns priority 0 to all items when multiple items lack timestamps', () => {
      const result = mapHistoryV2toHistory(historyV2MultipleNoTimestamp)

      expect(result).toHaveLength(3)

      const item1 = findResultByPromptId(result, 'item-no-timestamp-1')
      const item2 = findResultByPromptId(result, 'item-no-timestamp-2')
      const item3 = findResultByPromptId(result, 'item-no-timestamp-3')

      expect(item1.prompt[0]).toBe(0)
      expect(item2.prompt[0]).toBe(0)
      expect(item3.prompt[0]).toBe(0)
    })
  })
})
|
||||
@@ -1,74 +0,0 @@
|
||||
/**
|
||||
* @fileoverview Adapter to convert V2 history format to V1 format
|
||||
* @module platform/remote/comfyui/history/adapters/v2ToV1Adapter
|
||||
*/
|
||||
import type { HistoryTaskItem, TaskPrompt } from '../types/historyV1Types'
|
||||
import type {
|
||||
HistoryResponseV2,
|
||||
RawHistoryItemV2,
|
||||
TaskOutput,
|
||||
TaskPromptV2
|
||||
} from '../types/historyV2Types'
|
||||
|
||||
function mapPromptV2toV1(
|
||||
promptV2: TaskPromptV2,
|
||||
outputs: TaskOutput,
|
||||
syntheticPriority: number,
|
||||
createTime?: number
|
||||
): TaskPrompt {
|
||||
const extraData = {
|
||||
...(promptV2.extra_data ?? {}),
|
||||
...(typeof createTime === 'number' ? { create_time: createTime } : {})
|
||||
}
|
||||
return [
|
||||
syntheticPriority,
|
||||
promptV2.prompt_id,
|
||||
{},
|
||||
extraData,
|
||||
Object.keys(outputs)
|
||||
]
|
||||
}
|
||||
|
||||
function getExecutionSuccessTimestamp(item: RawHistoryItemV2): number {
|
||||
return (
|
||||
item.status?.messages?.find((m) => m[0] === 'execution_success')?.[1]
|
||||
?.timestamp ?? 0
|
||||
)
|
||||
}
|
||||
|
||||
export function mapHistoryV2toHistory(
|
||||
historyV2Response: HistoryResponseV2
|
||||
): HistoryTaskItem[] {
|
||||
const { history } = historyV2Response
|
||||
|
||||
// Sort by execution_success timestamp, descending (newest first)
|
||||
history.sort((a, b) => {
|
||||
return getExecutionSuccessTimestamp(b) - getExecutionSuccessTimestamp(a)
|
||||
})
|
||||
|
||||
// Count items with valid timestamps for synthetic priority calculation
|
||||
const countWithTimestamps = history.filter(
|
||||
(item) => getExecutionSuccessTimestamp(item) > 0
|
||||
).length
|
||||
|
||||
return history.map((item, index): HistoryTaskItem => {
|
||||
const { prompt, outputs, status, meta } = item
|
||||
const timestamp = getExecutionSuccessTimestamp(item)
|
||||
|
||||
// Items with timestamps get priority based on sorted position (highest first)
|
||||
const syntheticPriority = timestamp > 0 ? countWithTimestamps - index : 0
|
||||
|
||||
return {
|
||||
taskType: 'History' as const,
|
||||
prompt: mapPromptV2toV1(
|
||||
prompt,
|
||||
outputs,
|
||||
syntheticPriority,
|
||||
item.create_time
|
||||
),
|
||||
status,
|
||||
outputs,
|
||||
meta
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -1,52 +0,0 @@
|
||||
/**
|
||||
* @fileoverview Unit tests for V1 history fetcher.
|
||||
*/
|
||||
import { describe, expect, it, vi } from 'vitest'
|
||||
|
||||
import { fetchHistoryV1 } from '@/platform/remote/comfyui/history/fetchers/fetchHistoryV1'
|
||||
|
||||
import { historyV1RawResponse } from '@/platform/remote/comfyui/history/__fixtures__/historyFixtures'
|
||||
|
||||
// Test suite for the V1 history fetcher: URL construction and the
// object → array-with-taskType transformation.
describe('fetchHistoryV1', () => {
  // fetchApi stub resolving with the canned V1 raw response fixture.
  const mockFetchApi = vi.fn().mockResolvedValue({
    json: async () => historyV1RawResponse
  })

  it('should fetch from /history endpoint with default max_items', async () => {
    await fetchHistoryV1(mockFetchApi)

    expect(mockFetchApi).toHaveBeenCalledWith('/history?max_items=200')
  })

  it('should fetch with custom max_items parameter', async () => {
    await fetchHistoryV1(mockFetchApi, 50)

    expect(mockFetchApi).toHaveBeenCalledWith('/history?max_items=50')
  })

  it('should transform object response to array with taskType and preserve fields', async () => {
    const result = await fetchHistoryV1(mockFetchApi)

    expect(result.History).toHaveLength(2)
    result.History.forEach((item) => {
      expect(item.taskType).toBe('History')
    })
    expect(result.History[0]).toMatchObject({
      taskType: 'History',
      prompt: [24, 'complete-item-id', {}, expect.any(Object), ['9']],
      outputs: expect.any(Object),
      status: expect.any(Object),
      meta: expect.any(Object)
    })
  })

  it('should handle empty response object', async () => {
    const emptyMock = vi.fn().mockResolvedValue({
      json: async () => ({})
    })

    const result = await fetchHistoryV1(emptyMock)

    expect(result.History).toEqual([])
  })
})
|
||||
@@ -1,51 +0,0 @@
|
||||
/**
|
||||
* @fileoverview V1 History Fetcher - Desktop/localhost API
|
||||
* @module platform/remote/comfyui/history/fetchers/fetchHistoryV1
|
||||
*
|
||||
* Fetches history directly from V1 API endpoint.
|
||||
* Used by desktop and localhost distributions.
|
||||
*/
|
||||
|
||||
import type {
|
||||
HistoryTaskItem,
|
||||
HistoryV1Response
|
||||
} from '../types/historyV1Types'
|
||||
|
||||
/**
|
||||
* Fetches history from V1 API endpoint
|
||||
* @param api - API instance with fetchApi method
|
||||
* @param maxItems - Maximum number of history items to fetch
|
||||
* @param offset - Offset for pagination (must be non-negative integer)
|
||||
* @returns Promise resolving to V1 history response
|
||||
* @throws Error if offset is invalid (negative or non-integer)
|
||||
*/
|
||||
export async function fetchHistoryV1(
|
||||
fetchApi: (url: string) => Promise<Response>,
|
||||
maxItems: number = 200,
|
||||
offset?: number
|
||||
): Promise<HistoryV1Response> {
|
||||
// Validate offset parameter
|
||||
if (offset !== undefined && (offset < 0 || !Number.isInteger(offset))) {
|
||||
throw new Error(
|
||||
`Invalid offset parameter: ${offset}. Must be a non-negative integer.`
|
||||
)
|
||||
}
|
||||
|
||||
const params = new URLSearchParams({ max_items: maxItems.toString() })
|
||||
if (offset !== undefined) {
|
||||
params.set('offset', offset.toString())
|
||||
}
|
||||
const url = `/history?${params.toString()}`
|
||||
const res = await fetchApi(url)
|
||||
const json: Record<
|
||||
string,
|
||||
Omit<HistoryTaskItem, 'taskType'>
|
||||
> = await res.json()
|
||||
|
||||
return {
|
||||
History: Object.values(json).map((item) => ({
|
||||
...item,
|
||||
taskType: 'History'
|
||||
}))
|
||||
}
|
||||
}
|
||||
@@ -1,41 +0,0 @@
|
||||
/**
|
||||
* @fileoverview Unit tests for V2 history fetcher.
|
||||
*/
|
||||
import { describe, expect, it, vi } from 'vitest'
|
||||
|
||||
import { fetchHistoryV2 } from '@/platform/remote/comfyui/history/fetchers/fetchHistoryV2'
|
||||
|
||||
import {
|
||||
expectedV1Fixture,
|
||||
historyV2Fixture
|
||||
} from '@/platform/remote/comfyui/history/__fixtures__/historyFixtures'
|
||||
|
||||
// Test suite for the V2 history fetcher: URL construction and end-to-end
// adaptation of the V2 payload into the V1-compatible response shape.
describe('fetchHistoryV2', () => {
  // fetchApi stub resolving with the canned V2 fixture.
  const mockFetchApi = vi.fn().mockResolvedValue({
    json: async () => historyV2Fixture
  })

  it('should fetch from /history_v2 endpoint with default max_items', async () => {
    await fetchHistoryV2(mockFetchApi)

    expect(mockFetchApi).toHaveBeenCalledWith('/history_v2?max_items=200')
  })

  it('should fetch with custom max_items parameter', async () => {
    await fetchHistoryV2(mockFetchApi, 50)

    expect(mockFetchApi).toHaveBeenCalledWith('/history_v2?max_items=50')
  })

  it('should adapt V2 response to V1-compatible format', async () => {
    const result = await fetchHistoryV2(mockFetchApi)

    expect(result.History).toEqual(expectedV1Fixture)
    expect(result).toHaveProperty('History')
    expect(Array.isArray(result.History)).toBe(true)
    result.History.forEach((item) => {
      expect(item.taskType).toBe('History')
      expect(item.prompt).toHaveLength(5)
    })
  })
})
|
||||
@@ -1,42 +0,0 @@
|
||||
/**
|
||||
* @fileoverview V2 History Fetcher - Cloud API with adapter
|
||||
* @module platform/remote/comfyui/history/fetchers/fetchHistoryV2
|
||||
*
|
||||
* Fetches history from V2 API endpoint and converts to V1 format.
|
||||
* Used exclusively by cloud distribution.
|
||||
*/
|
||||
|
||||
import { mapHistoryV2toHistory } from '../adapters/v2ToV1Adapter'
|
||||
import type { HistoryV1Response } from '../types/historyV1Types'
|
||||
import type { HistoryResponseV2 } from '../types/historyV2Types'
|
||||
|
||||
/**
|
||||
* Fetches history from V2 API endpoint and adapts to V1 format
|
||||
* @param fetchApi - API instance with fetchApi method
|
||||
* @param maxItems - Maximum number of history items to fetch
|
||||
* @param offset - Offset for pagination (must be non-negative integer)
|
||||
* @returns Promise resolving to V1 history response (adapted from V2)
|
||||
* @throws Error if offset is invalid (negative or non-integer)
|
||||
*/
|
||||
export async function fetchHistoryV2(
|
||||
fetchApi: (url: string) => Promise<Response>,
|
||||
maxItems: number = 200,
|
||||
offset?: number
|
||||
): Promise<HistoryV1Response> {
|
||||
// Validate offset parameter
|
||||
if (offset !== undefined && (offset < 0 || !Number.isInteger(offset))) {
|
||||
throw new Error(
|
||||
`Invalid offset parameter: ${offset}. Must be a non-negative integer.`
|
||||
)
|
||||
}
|
||||
|
||||
const params = new URLSearchParams({ max_items: maxItems.toString() })
|
||||
if (offset !== undefined) {
|
||||
params.set('offset', offset.toString())
|
||||
}
|
||||
const url = `/history_v2?${params.toString()}`
|
||||
const res = await fetchApi(url)
|
||||
const rawData: HistoryResponseV2 = await res.json()
|
||||
const adaptedHistory = mapHistoryV2toHistory(rawData)
|
||||
return { History: adaptedHistory }
|
||||
}
|
||||
@@ -1,29 +0,0 @@
|
||||
/**
|
||||
* @fileoverview History API module - Distribution-aware exports
|
||||
* @module platform/remote/comfyui/history
|
||||
*
|
||||
* This module provides a unified history fetching interface that automatically
|
||||
* uses the correct implementation based on build-time distribution constant.
|
||||
*
|
||||
* - Cloud builds: Uses V2 API with adapter (tree-shakes V1 fetcher)
|
||||
* - Desktop/localhost builds: Uses V1 API directly (tree-shakes V2 fetcher + adapter)
|
||||
*
|
||||
* The rest of the application only needs to import from this module and use
|
||||
* V1 types - all distribution-specific details are encapsulated here.
|
||||
*/
|
||||
|
||||
import { isCloud } from '@/platform/distribution/types'
|
||||
import { fetchHistoryV1 } from './fetchers/fetchHistoryV1'
|
||||
import { fetchHistoryV2 } from './fetchers/fetchHistoryV2'
|
||||
|
||||
/**
|
||||
* Fetches history using the appropriate API for the current distribution.
|
||||
* Build-time constant enables dead code elimination - only one implementation
|
||||
* will be included in the final bundle.
|
||||
*/
|
||||
export const fetchHistory = isCloud ? fetchHistoryV2 : fetchHistoryV1
|
||||
|
||||
/**
|
||||
* Export only V1 types publicly - consumers don't need to know about V2
|
||||
*/
|
||||
export type * from './types'
|
||||
@@ -1,335 +0,0 @@
|
||||
/**
|
||||
* @fileoverview Tests for history reconciliation (V1 and V2)
|
||||
*/
|
||||
import { beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
|
||||
import { reconcileHistory } from '@/platform/remote/comfyui/history/reconciliation'
|
||||
import type { TaskItem } from '@/schemas/apiSchema'
|
||||
|
||||
// Mock distribution types
|
||||
vi.mock('@/platform/distribution/types', () => ({
|
||||
isCloud: false,
|
||||
isDesktop: true
|
||||
}))
|
||||
|
||||
function createHistoryItem(promptId: string, queueIndex = 0): TaskItem {
|
||||
return {
|
||||
taskType: 'History',
|
||||
prompt: [queueIndex, promptId, {}, {}, []],
|
||||
status: { status_str: 'success', completed: true, messages: [] },
|
||||
outputs: {}
|
||||
}
|
||||
}
|
||||
|
||||
function getAllPromptIds(result: TaskItem[]): string[] {
|
||||
return result.map((item) => item.prompt[1])
|
||||
}
|
||||
|
||||
describe('reconcileHistory (V1)', () => {
|
||||
beforeEach(async () => {
|
||||
const distTypes = await import('@/platform/distribution/types')
|
||||
vi.mocked(distTypes).isCloud = false
|
||||
})
|
||||
|
||||
describe('when filtering by queueIndex', () => {
|
||||
it('should retain items with queueIndex greater than lastKnownQueueIndex', () => {
|
||||
const serverHistory = [
|
||||
createHistoryItem('new-1', 11),
|
||||
createHistoryItem('new-2', 10),
|
||||
createHistoryItem('old', 5)
|
||||
]
|
||||
const clientHistory = [createHistoryItem('old', 5)]
|
||||
|
||||
const result = reconcileHistory(serverHistory, clientHistory, 10, 9)
|
||||
|
||||
const promptIds = getAllPromptIds(result)
|
||||
expect(promptIds).toHaveLength(3)
|
||||
expect(promptIds).toContain('new-1')
|
||||
expect(promptIds).toContain('new-2')
|
||||
expect(promptIds).toContain('old')
|
||||
})
|
||||
|
||||
it('should evict items with queueIndex less than or equal to lastKnownQueueIndex', () => {
|
||||
const serverHistory = [
|
||||
createHistoryItem('new', 11),
|
||||
createHistoryItem('existing', 10),
|
||||
createHistoryItem('old-should-not-appear', 5)
|
||||
]
|
||||
const clientHistory = [createHistoryItem('existing', 10)]
|
||||
|
||||
const result = reconcileHistory(serverHistory, clientHistory, 10, 10)
|
||||
|
||||
const promptIds = getAllPromptIds(result)
|
||||
expect(promptIds).toHaveLength(2)
|
||||
expect(promptIds).toContain('new')
|
||||
expect(promptIds).toContain('existing')
|
||||
expect(promptIds).not.toContain('old-should-not-appear')
|
||||
})
|
||||
|
||||
it('should retain all server items when lastKnownQueueIndex is undefined', () => {
|
||||
const serverHistory = [
|
||||
createHistoryItem('item-1', 5),
|
||||
createHistoryItem('item-2', 4)
|
||||
]
|
||||
|
||||
const result = reconcileHistory(serverHistory, [], 10, undefined)
|
||||
|
||||
expect(result).toHaveLength(2)
|
||||
expect(result[0].prompt[1]).toBe('item-1')
|
||||
expect(result[1].prompt[1]).toBe('item-2')
|
||||
})
|
||||
})
|
||||
|
||||
describe('when reconciling with existing client items', () => {
|
||||
it('should retain client items that still exist on server', () => {
|
||||
const serverHistory = [
|
||||
createHistoryItem('new', 11),
|
||||
createHistoryItem('existing-1', 9),
|
||||
createHistoryItem('existing-2', 8)
|
||||
]
|
||||
const clientHistory = [
|
||||
createHistoryItem('existing-1', 9),
|
||||
createHistoryItem('existing-2', 8)
|
||||
]
|
||||
|
||||
const result = reconcileHistory(serverHistory, clientHistory, 10, 10)
|
||||
|
||||
const promptIds = getAllPromptIds(result)
|
||||
expect(promptIds).toHaveLength(3)
|
||||
expect(promptIds).toContain('new')
|
||||
expect(promptIds).toContain('existing-1')
|
||||
expect(promptIds).toContain('existing-2')
|
||||
})
|
||||
|
||||
it('should evict client items that no longer exist on server', () => {
|
||||
const serverHistory = [
|
||||
createHistoryItem('new', 11),
|
||||
createHistoryItem('keep', 9)
|
||||
]
|
||||
const clientHistory = [
|
||||
createHistoryItem('keep', 9),
|
||||
createHistoryItem('removed-from-server', 8)
|
||||
]
|
||||
|
||||
const result = reconcileHistory(serverHistory, clientHistory, 10, 10)
|
||||
|
||||
const promptIds = getAllPromptIds(result)
|
||||
expect(promptIds).toHaveLength(2)
|
||||
expect(promptIds).toContain('new')
|
||||
expect(promptIds).toContain('keep')
|
||||
expect(promptIds).not.toContain('removed-from-server')
|
||||
})
|
||||
})
|
||||
|
||||
describe('when limiting the result count', () => {
|
||||
it('should respect the maxItems constraint', () => {
|
||||
const serverHistory = Array.from({ length: 10 }, (_, i) =>
|
||||
createHistoryItem(`item-${i}`, 20 + i)
|
||||
)
|
||||
|
||||
const result = reconcileHistory(serverHistory, [], 5, 15)
|
||||
|
||||
const promptIds = getAllPromptIds(result)
|
||||
expect(promptIds).toHaveLength(5)
|
||||
})
|
||||
|
||||
it('should evict lowest priority items when exceeding capacity', () => {
|
||||
const serverHistory = [
|
||||
createHistoryItem('new-1', 13),
|
||||
createHistoryItem('new-2', 12),
|
||||
createHistoryItem('new-3', 11),
|
||||
createHistoryItem('existing', 9)
|
||||
]
|
||||
const clientHistory = [createHistoryItem('existing', 9)]
|
||||
|
||||
const result = reconcileHistory(serverHistory, clientHistory, 2, 10)
|
||||
|
||||
expect(result).toHaveLength(2)
|
||||
expect(result[0].prompt[1]).toBe('new-1')
|
||||
expect(result[1].prompt[1]).toBe('new-2')
|
||||
})
|
||||
})
|
||||
|
||||
describe('when handling empty collections', () => {
|
||||
it('should return all server items when client history is empty', () => {
|
||||
const serverHistory = [
|
||||
createHistoryItem('item-1', 10),
|
||||
createHistoryItem('item-2', 9)
|
||||
]
|
||||
|
||||
const result = reconcileHistory(serverHistory, [], 10, 8)
|
||||
|
||||
const promptIds = getAllPromptIds(result)
|
||||
expect(promptIds).toHaveLength(2)
|
||||
})
|
||||
|
||||
it('should return empty result when server history is empty', () => {
|
||||
const clientHistory = [createHistoryItem('item-1', 5)]
|
||||
|
||||
const result = reconcileHistory([], clientHistory, 10, 5)
|
||||
|
||||
expect(result).toHaveLength(0)
|
||||
})
|
||||
|
||||
it('should return empty result when both collections are empty', () => {
|
||||
const result = reconcileHistory([], [], 10, undefined)
|
||||
|
||||
expect(result).toHaveLength(0)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('reconcileHistory (V2/Cloud)', () => {
|
||||
beforeEach(async () => {
|
||||
const distTypes = await import('@/platform/distribution/types')
|
||||
vi.mocked(distTypes).isCloud = true
|
||||
})
|
||||
|
||||
describe('when adding new items from server', () => {
|
||||
it('should retain items with promptIds not present in client history', () => {
|
||||
const serverHistory = [
|
||||
createHistoryItem('new-item'),
|
||||
createHistoryItem('existing-item')
|
||||
]
|
||||
const clientHistory = [createHistoryItem('existing-item')]
|
||||
|
||||
const result = reconcileHistory(serverHistory, clientHistory, 10)
|
||||
|
||||
const promptIds = getAllPromptIds(result)
|
||||
expect(promptIds).toHaveLength(2)
|
||||
expect(promptIds).toContain('new-item')
|
||||
expect(promptIds).toContain('existing-item')
|
||||
})
|
||||
|
||||
it('should respect priority ordering when retaining multiple new items', () => {
|
||||
const serverHistory = [
|
||||
createHistoryItem('new-1'),
|
||||
createHistoryItem('new-2'),
|
||||
createHistoryItem('existing')
|
||||
]
|
||||
const clientHistory = [createHistoryItem('existing')]
|
||||
|
||||
const result = reconcileHistory(serverHistory, clientHistory, 10)
|
||||
|
||||
const promptIds = getAllPromptIds(result)
|
||||
expect(promptIds).toHaveLength(3)
|
||||
expect(promptIds).toContain('new-1')
|
||||
expect(promptIds).toContain('new-2')
|
||||
expect(promptIds).toContain('existing')
|
||||
})
|
||||
})
|
||||
|
||||
describe('when reconciling with existing client items', () => {
|
||||
it('should retain client items that still exist on server', () => {
|
||||
const serverHistory = [
|
||||
createHistoryItem('item-1'),
|
||||
createHistoryItem('item-2')
|
||||
]
|
||||
const clientHistory = [
|
||||
createHistoryItem('item-1'),
|
||||
createHistoryItem('item-2')
|
||||
]
|
||||
|
||||
const result = reconcileHistory(serverHistory, clientHistory, 10)
|
||||
|
||||
const promptIds = getAllPromptIds(result)
|
||||
expect(promptIds).toHaveLength(2)
|
||||
expect(promptIds).toContain('item-1')
|
||||
expect(promptIds).toContain('item-2')
|
||||
})
|
||||
|
||||
it('should evict client items that no longer exist on server', () => {
|
||||
const serverHistory = [createHistoryItem('item-1')]
|
||||
const clientHistory = [
|
||||
createHistoryItem('item-1'),
|
||||
createHistoryItem('old-item')
|
||||
]
|
||||
|
||||
const result = reconcileHistory(serverHistory, clientHistory, 10)
|
||||
|
||||
const promptIds = getAllPromptIds(result)
|
||||
expect(promptIds).toHaveLength(1)
|
||||
expect(promptIds).toContain('item-1')
|
||||
expect(promptIds).not.toContain('old-item')
|
||||
})
|
||||
})
|
||||
|
||||
describe('when detecting new items by promptId', () => {
|
||||
it('should retain new items regardless of queueIndex values', () => {
|
||||
const serverHistory = [
|
||||
createHistoryItem('existing', 100),
|
||||
createHistoryItem('new-item', 50)
|
||||
]
|
||||
const clientHistory = [createHistoryItem('existing', 100)]
|
||||
|
||||
const result = reconcileHistory(serverHistory, clientHistory, 10)
|
||||
|
||||
const promptIds = getAllPromptIds(result)
|
||||
expect(promptIds).toContain('new-item')
|
||||
expect(promptIds).toContain('existing')
|
||||
})
|
||||
})
|
||||
|
||||
describe('when limiting the result count', () => {
|
||||
it('should respect the maxItems constraint', () => {
|
||||
const serverHistory = Array.from({ length: 10 }, (_, i) =>
|
||||
createHistoryItem(`server-${i}`)
|
||||
)
|
||||
const clientHistory = Array.from({ length: 5 }, (_, i) =>
|
||||
createHistoryItem(`client-${i}`)
|
||||
)
|
||||
|
||||
const result = reconcileHistory(serverHistory, clientHistory, 5)
|
||||
|
||||
const promptIds = getAllPromptIds(result)
|
||||
expect(promptIds).toHaveLength(5)
|
||||
})
|
||||
|
||||
it('should evict lowest priority items when exceeding capacity', () => {
|
||||
const serverHistory = [
|
||||
createHistoryItem('new-1'),
|
||||
createHistoryItem('new-2'),
|
||||
createHistoryItem('existing')
|
||||
]
|
||||
const clientHistory = [createHistoryItem('existing')]
|
||||
|
||||
const result = reconcileHistory(serverHistory, clientHistory, 2)
|
||||
|
||||
expect(result).toHaveLength(2)
|
||||
expect(result[0].prompt[1]).toBe('new-1')
|
||||
expect(result[1].prompt[1]).toBe('new-2')
|
||||
})
|
||||
})
|
||||
|
||||
describe('when handling empty collections', () => {
|
||||
it('should return all server items when client history is empty', () => {
|
||||
const serverHistory = [
|
||||
createHistoryItem('item-1'),
|
||||
createHistoryItem('item-2')
|
||||
]
|
||||
|
||||
const result = reconcileHistory(serverHistory, [], 10)
|
||||
|
||||
expect(result).toHaveLength(2)
|
||||
expect(result[0].prompt[1]).toBe('item-1')
|
||||
expect(result[1].prompt[1]).toBe('item-2')
|
||||
})
|
||||
|
||||
it('should return empty result when server history is empty', () => {
|
||||
const clientHistory = [
|
||||
createHistoryItem('item-1'),
|
||||
createHistoryItem('item-2')
|
||||
]
|
||||
|
||||
const result = reconcileHistory([], clientHistory, 10)
|
||||
|
||||
expect(result).toHaveLength(0)
|
||||
})
|
||||
|
||||
it('should return empty result when both collections are empty', () => {
|
||||
const result = reconcileHistory([], [], 10)
|
||||
|
||||
expect(result).toHaveLength(0)
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -1,122 +0,0 @@
|
||||
/**
|
||||
* @fileoverview History reconciliation for V1 and V2 APIs
|
||||
* @module platform/remote/comfyui/history/reconciliation
|
||||
*
|
||||
* Returns list of items that should be displayed, sorted by queueIndex (newest first).
|
||||
* Caller is responsible for mapping to their own class instances.
|
||||
*
|
||||
* V1: QueueIndex-based filtering for stable monotonic indices
|
||||
* V2: PromptId-based merging for synthetic priorities (V2 assigns synthetic
|
||||
* priorities after timestamp sorting, so new items may have lower priority
|
||||
* than existing items)
|
||||
*/
|
||||
import { isCloud } from '@/platform/distribution/types'
|
||||
import type { TaskItem } from '@/schemas/apiSchema'
|
||||
|
||||
/**
|
||||
* V1 reconciliation: QueueIndex-based filtering works because V1 has stable,
|
||||
* monotonically increasing queue indices.
|
||||
*
|
||||
* Sort order: Sorts serverHistory by queueIndex descending (newest first) to ensure
|
||||
* consistent ordering. JavaScript .filter() maintains iteration order, so filtered
|
||||
* results remain sorted. clientHistory is assumed already sorted from previous update.
|
||||
*
|
||||
* @returns All items to display, sorted by queueIndex descending (newest first)
|
||||
*/
|
||||
function reconcileHistoryV1(
|
||||
serverHistory: TaskItem[],
|
||||
clientHistory: TaskItem[],
|
||||
maxItems: number,
|
||||
lastKnownQueueIndex: number | undefined
|
||||
): TaskItem[] {
|
||||
const sortedServerHistory = serverHistory.sort(
|
||||
(a, b) => b.prompt[0] - a.prompt[0]
|
||||
)
|
||||
|
||||
const serverPromptIds = new Set(
|
||||
sortedServerHistory.map((item) => item.prompt[1])
|
||||
)
|
||||
|
||||
// If undefined, treat as initial sync (all items are new)
|
||||
const itemsAddedSinceLastSync =
|
||||
lastKnownQueueIndex === undefined
|
||||
? sortedServerHistory
|
||||
: sortedServerHistory.filter(
|
||||
(item) => item.prompt[0] > lastKnownQueueIndex
|
||||
)
|
||||
|
||||
const clientItemsStillOnServer = clientHistory.filter((item) =>
|
||||
serverPromptIds.has(item.prompt[1])
|
||||
)
|
||||
|
||||
// Merge new and reused items, sort by queueIndex descending, limit to maxItems
|
||||
return [...itemsAddedSinceLastSync, ...clientItemsStillOnServer]
|
||||
.sort((a, b) => b.prompt[0] - a.prompt[0])
|
||||
.slice(0, maxItems)
|
||||
}
|
||||
|
||||
/**
|
||||
* V2 reconciliation: PromptId-based merging because V2 assigns synthetic
|
||||
* priorities after sorting by timestamp.
|
||||
*
|
||||
* Sort order: Sorts serverHistory by queueIndex descending (newest first) to ensure
|
||||
* consistent ordering. JavaScript .filter() maintains iteration order, so filtered
|
||||
* results remain sorted. clientHistory is assumed already sorted from previous update.
|
||||
*
|
||||
* @returns All items to display, sorted by queueIndex descending (newest first)
|
||||
*/
|
||||
function reconcileHistoryV2(
|
||||
serverHistory: TaskItem[],
|
||||
clientHistory: TaskItem[],
|
||||
maxItems: number
|
||||
): TaskItem[] {
|
||||
const sortedServerHistory = serverHistory.sort(
|
||||
(a, b) => b.prompt[0] - a.prompt[0]
|
||||
)
|
||||
|
||||
const serverPromptIds = new Set(
|
||||
sortedServerHistory.map((item) => item.prompt[1])
|
||||
)
|
||||
const clientPromptIds = new Set(clientHistory.map((item) => item.prompt[1]))
|
||||
|
||||
const newItems = sortedServerHistory.filter(
|
||||
(item) => !clientPromptIds.has(item.prompt[1])
|
||||
)
|
||||
|
||||
const clientItemsStillOnServer = clientHistory.filter((item) =>
|
||||
serverPromptIds.has(item.prompt[1])
|
||||
)
|
||||
|
||||
// Merge new and reused items, sort by queueIndex descending, limit to maxItems
|
||||
return [...newItems, ...clientItemsStillOnServer]
|
||||
.sort((a, b) => b.prompt[0] - a.prompt[0])
|
||||
.slice(0, maxItems)
|
||||
}
|
||||
|
||||
/**
|
||||
* Reconciles server history with client history.
|
||||
* Automatically uses V1 (queueIndex-based) or V2 (promptId-based) algorithm based on
|
||||
* distribution type.
|
||||
*
|
||||
* @param serverHistory - Server's current history items
|
||||
* @param clientHistory - Client's existing history items
|
||||
* @param maxItems - Maximum number of items to return
|
||||
* @param lastKnownQueueIndex - Last queue index seen (V1 only, optional for V2)
|
||||
* @returns All items that should be displayed, sorted by queueIndex descending
|
||||
*/
|
||||
export function reconcileHistory(
|
||||
serverHistory: TaskItem[],
|
||||
clientHistory: TaskItem[],
|
||||
maxItems: number,
|
||||
lastKnownQueueIndex?: number
|
||||
): TaskItem[] {
|
||||
if (isCloud) {
|
||||
return reconcileHistoryV2(serverHistory, clientHistory, maxItems)
|
||||
}
|
||||
return reconcileHistoryV1(
|
||||
serverHistory,
|
||||
clientHistory,
|
||||
maxItems,
|
||||
lastKnownQueueIndex
|
||||
)
|
||||
}
|
||||
@@ -1,15 +0,0 @@
|
||||
/**
|
||||
* @fileoverview History V1 types - Public interface used throughout the app
|
||||
* @module platform/remote/comfyui/history/types/historyV1Types
|
||||
*
|
||||
* These types represent the V1 history format that the application expects.
|
||||
* Both desktop (direct V1 API) and cloud (V2 API + adapter) return data in this format.
|
||||
*/
|
||||
|
||||
import type { HistoryTaskItem, TaskPrompt } from '@/schemas/apiSchema'
|
||||
|
||||
export interface HistoryV1Response {
|
||||
History: HistoryTaskItem[]
|
||||
}
|
||||
|
||||
export type { HistoryTaskItem, TaskPrompt }
|
||||
@@ -1,46 +0,0 @@
|
||||
/**
|
||||
* @fileoverview History V2 types and schemas - Internal cloud API format
|
||||
* @module platform/remote/comfyui/history/types/historyV2Types
|
||||
*
|
||||
* These types and schemas represent the V2 history format returned by the cloud API.
|
||||
* They are only used internally and are converted to V1 format via adapter.
|
||||
*
|
||||
* IMPORTANT: These types should NOT be used outside this history module.
|
||||
*/
|
||||
|
||||
import { z } from 'zod'
|
||||
|
||||
import {
|
||||
zExtraData,
|
||||
zPromptId,
|
||||
zQueueIndex,
|
||||
zStatus,
|
||||
zTaskMeta,
|
||||
zTaskOutput
|
||||
} from '@/schemas/apiSchema'
|
||||
|
||||
const zTaskPromptV2 = z.object({
|
||||
priority: zQueueIndex,
|
||||
prompt_id: zPromptId,
|
||||
extra_data: zExtraData
|
||||
})
|
||||
|
||||
const zRawHistoryItemV2 = z.object({
|
||||
prompt_id: zPromptId,
|
||||
prompt: zTaskPromptV2,
|
||||
status: zStatus.optional(),
|
||||
outputs: zTaskOutput,
|
||||
meta: zTaskMeta.optional(),
|
||||
create_time: z.number().int().optional()
|
||||
})
|
||||
|
||||
const zHistoryResponseV2 = z.object({
|
||||
history: z.array(zRawHistoryItemV2)
|
||||
})
|
||||
|
||||
export type TaskPromptV2 = z.infer<typeof zTaskPromptV2>
|
||||
export type RawHistoryItemV2 = z.infer<typeof zRawHistoryItemV2>
|
||||
export type HistoryResponseV2 = z.infer<typeof zHistoryResponseV2>
|
||||
export type TaskOutput = z.infer<typeof zTaskOutput>
|
||||
|
||||
export { zRawHistoryItemV2 }
|
||||
@@ -1,9 +0,0 @@
|
||||
/**
|
||||
* @fileoverview Public history types export
|
||||
* @module platform/remote/comfyui/history/types
|
||||
*
|
||||
* Only V1 types are exported publicly - the rest of the app
|
||||
* should never need to know about V2 types or implementation details.
|
||||
*/
|
||||
|
||||
export type * from './historyV1Types'
|
||||
@@ -16,17 +16,13 @@ type JobsListResponse = z.infer<typeof zJobsListResponse>
|
||||
|
||||
function createMockJob(
|
||||
id: string,
|
||||
status: 'pending' | 'in_progress' | 'completed' = 'completed',
|
||||
status: 'pending' | 'in_progress' | 'completed' | 'failed' = 'completed',
|
||||
overrides: Partial<RawJobListItem> = {}
|
||||
): RawJobListItem {
|
||||
return {
|
||||
id,
|
||||
status,
|
||||
create_time: Date.now(),
|
||||
execution_start_time: null,
|
||||
execution_end_time: null,
|
||||
preview_output: null,
|
||||
outputs_count: 0,
|
||||
...overrides
|
||||
}
|
||||
}
|
||||
@@ -63,7 +59,7 @@ describe('fetchJobs', () => {
|
||||
const result = await fetchHistory(mockFetch)
|
||||
|
||||
expect(mockFetch).toHaveBeenCalledWith(
|
||||
'/jobs?status=completed&limit=200&offset=0'
|
||||
'/jobs?status=completed,failed,cancelled&limit=200&offset=0'
|
||||
)
|
||||
expect(result).toHaveLength(2)
|
||||
expect(result[0].id).toBe('job1')
|
||||
@@ -113,7 +109,7 @@ describe('fetchJobs', () => {
|
||||
const result = await fetchHistory(mockFetch, 200, 5)
|
||||
|
||||
expect(mockFetch).toHaveBeenCalledWith(
|
||||
'/jobs?status=completed&limit=200&offset=5'
|
||||
'/jobs?status=completed,failed,cancelled&limit=200&offset=5'
|
||||
)
|
||||
// Priority base is total - offset = 10 - 5 = 5
|
||||
expect(result[0].priority).toBe(5) // (total - offset) - 0
|
||||
|
||||
@@ -68,7 +68,7 @@ function assignPriority(
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetches history (completed jobs)
|
||||
* Fetches history (terminal state jobs: completed, failed, cancelled)
|
||||
* Assigns synthetic priority starting from total (lower than queue jobs).
|
||||
*/
|
||||
export async function fetchHistory(
|
||||
@@ -78,7 +78,7 @@ export async function fetchHistory(
|
||||
): Promise<JobListItem[]> {
|
||||
const { jobs, total } = await fetchJobsRaw(
|
||||
fetchApi,
|
||||
['completed'],
|
||||
['completed', 'failed', 'cancelled'],
|
||||
maxItems,
|
||||
offset
|
||||
)
|
||||
|
||||
@@ -21,12 +21,15 @@ const zJobStatus = z.enum([
|
||||
const zPreviewOutput = z.object({
|
||||
filename: z.string(),
|
||||
subfolder: z.string(),
|
||||
type: resultItemType
|
||||
type: resultItemType,
|
||||
nodeId: z.string(),
|
||||
mediaType: z.string()
|
||||
})
|
||||
|
||||
/**
|
||||
* Execution error details for error jobs.
|
||||
* Contains the same structure as ExecutionErrorWsMessage from WebSocket.
|
||||
* Execution error from Jobs API.
|
||||
* Similar to ExecutionErrorWsMessage but with optional prompt_id/timestamp/executed
|
||||
* since these may not be present in stored errors or infrastructure-generated errors.
|
||||
*/
|
||||
const zExecutionError = z
|
||||
.object({
|
||||
@@ -43,6 +46,8 @@ const zExecutionError = z
|
||||
})
|
||||
.passthrough()
|
||||
|
||||
export type ExecutionError = z.infer<typeof zExecutionError>
|
||||
|
||||
/**
|
||||
* Raw job from API - uses passthrough to allow extra fields
|
||||
*/
|
||||
@@ -105,3 +110,9 @@ export type RawJobListItem = z.infer<typeof zRawJobListItem>
|
||||
/** Job list item with priority always set (server-provided or synthetic) */
|
||||
export type JobListItem = RawJobListItem & { priority: number }
|
||||
export type JobDetail = z.infer<typeof zJobDetail>
|
||||
|
||||
/** Task type used in the API (queue vs history endpoints) */
|
||||
export type APITaskType = 'queue' | 'history'
|
||||
|
||||
/** Internal task type derived from job status for UI display */
|
||||
export type TaskType = 'Running' | 'Pending' | 'History'
|
||||
|
||||
@@ -1,11 +1,13 @@
|
||||
import { describe, expect, it, vi } from 'vitest'
|
||||
|
||||
import type { JobDetail } from '@/platform/remote/comfyui/jobs/jobTypes'
|
||||
import type { ComfyWorkflowJSON } from '@/platform/workflow/validation/schemas/workflowSchema'
|
||||
import { getWorkflowFromHistory } from '@/platform/workflow/cloud/getWorkflowFromHistory'
|
||||
import {
|
||||
extractWorkflow,
|
||||
fetchJobDetail
|
||||
} from '@/platform/remote/comfyui/jobs/fetchJobs'
|
||||
|
||||
const mockWorkflow: ComfyWorkflowJSON = {
|
||||
id: 'test-workflow-id',
|
||||
revision: 0,
|
||||
last_node_id: 5,
|
||||
last_link_id: 3,
|
||||
nodes: [],
|
||||
@@ -16,75 +18,63 @@ const mockWorkflow: ComfyWorkflowJSON = {
|
||||
version: 0.4
|
||||
}
|
||||
|
||||
const mockHistoryResponse = {
|
||||
'test-prompt-id': {
|
||||
prompt: {
|
||||
priority: 1,
|
||||
prompt_id: 'test-prompt-id',
|
||||
extra_data: {
|
||||
client_id: 'test-client',
|
||||
extra_pnginfo: {
|
||||
workflow: mockWorkflow
|
||||
}
|
||||
// Jobs API detail response structure (matches actual /jobs/{id} response)
|
||||
// workflow is nested at: workflow.extra_data.extra_pnginfo.workflow
|
||||
const mockJobDetailResponse: JobDetail = {
|
||||
id: 'test-prompt-id',
|
||||
status: 'completed',
|
||||
create_time: 1234567890,
|
||||
update_time: 1234567900,
|
||||
workflow: {
|
||||
extra_data: {
|
||||
extra_pnginfo: {
|
||||
workflow: mockWorkflow
|
||||
}
|
||||
},
|
||||
outputs: {},
|
||||
status: {
|
||||
status_str: 'success',
|
||||
completed: true,
|
||||
messages: []
|
||||
}
|
||||
},
|
||||
outputs: {
|
||||
'20': {
|
||||
images: [
|
||||
{ filename: 'test.png', subfolder: '', type: 'output' },
|
||||
{ filename: 'test2.png', subfolder: '', type: 'output' }
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
describe('getWorkflowFromHistory', () => {
|
||||
it('should fetch workflow from /history_v2/{prompt_id} endpoint', async () => {
|
||||
describe('fetchJobDetail', () => {
|
||||
it('should fetch job detail from /jobs/{prompt_id} endpoint', async () => {
|
||||
const mockFetchApi = vi.fn().mockResolvedValue({
|
||||
json: async () => mockHistoryResponse
|
||||
ok: true,
|
||||
json: async () => mockJobDetailResponse
|
||||
})
|
||||
|
||||
await getWorkflowFromHistory(mockFetchApi, 'test-prompt-id')
|
||||
await fetchJobDetail(mockFetchApi, 'test-prompt-id')
|
||||
|
||||
expect(mockFetchApi).toHaveBeenCalledWith('/history_v2/test-prompt-id')
|
||||
expect(mockFetchApi).toHaveBeenCalledWith('/jobs/test-prompt-id')
|
||||
})
|
||||
|
||||
it('should extract and return workflow from response', async () => {
|
||||
it('should return job detail with workflow and outputs', async () => {
|
||||
const mockFetchApi = vi.fn().mockResolvedValue({
|
||||
json: async () => mockHistoryResponse
|
||||
ok: true,
|
||||
json: async () => mockJobDetailResponse
|
||||
})
|
||||
|
||||
const result = await getWorkflowFromHistory(mockFetchApi, 'test-prompt-id')
|
||||
const result = await fetchJobDetail(mockFetchApi, 'test-prompt-id')
|
||||
|
||||
expect(result).toEqual(mockWorkflow)
|
||||
expect(result).toBeDefined()
|
||||
expect(result?.id).toBe('test-prompt-id')
|
||||
expect(result?.outputs).toEqual(mockJobDetailResponse.outputs)
|
||||
expect(result?.workflow).toBeDefined()
|
||||
})
|
||||
|
||||
it('should return undefined when prompt_id not found in response', async () => {
|
||||
it('should return undefined when job not found (non-OK response)', async () => {
|
||||
const mockFetchApi = vi.fn().mockResolvedValue({
|
||||
json: async () => ({})
|
||||
ok: false,
|
||||
status: 404
|
||||
})
|
||||
|
||||
const result = await getWorkflowFromHistory(mockFetchApi, 'nonexistent-id')
|
||||
|
||||
expect(result).toBeUndefined()
|
||||
})
|
||||
|
||||
it('should return undefined when workflow is missing from extra_pnginfo', async () => {
|
||||
const mockFetchApi = vi.fn().mockResolvedValue({
|
||||
json: async () => ({
|
||||
'test-prompt-id': {
|
||||
prompt: {
|
||||
priority: 1,
|
||||
prompt_id: 'test-prompt-id',
|
||||
extra_data: {
|
||||
client_id: 'test-client'
|
||||
}
|
||||
},
|
||||
outputs: {}
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
const result = await getWorkflowFromHistory(mockFetchApi, 'test-prompt-id')
|
||||
const result = await fetchJobDetail(mockFetchApi, 'nonexistent-id')
|
||||
|
||||
expect(result).toBeUndefined()
|
||||
})
|
||||
@@ -92,19 +82,45 @@ describe('getWorkflowFromHistory', () => {
|
||||
it('should handle fetch errors gracefully', async () => {
|
||||
const mockFetchApi = vi.fn().mockRejectedValue(new Error('Network error'))
|
||||
|
||||
const result = await getWorkflowFromHistory(mockFetchApi, 'test-prompt-id')
|
||||
const result = await fetchJobDetail(mockFetchApi, 'test-prompt-id')
|
||||
|
||||
expect(result).toBeUndefined()
|
||||
})
|
||||
|
||||
it('should handle malformed JSON responses', async () => {
|
||||
const mockFetchApi = vi.fn().mockResolvedValue({
|
||||
ok: true,
|
||||
json: async () => {
|
||||
throw new Error('Invalid JSON')
|
||||
}
|
||||
})
|
||||
|
||||
const result = await getWorkflowFromHistory(mockFetchApi, 'test-prompt-id')
|
||||
const result = await fetchJobDetail(mockFetchApi, 'test-prompt-id')
|
||||
|
||||
expect(result).toBeUndefined()
|
||||
})
|
||||
})
|
||||
|
||||
describe('extractWorkflow', () => {
|
||||
it('should extract workflow from job detail', async () => {
|
||||
const result = await extractWorkflow(mockJobDetailResponse)
|
||||
|
||||
expect(result).toEqual(mockWorkflow)
|
||||
})
|
||||
|
||||
it('should return undefined when job is undefined', async () => {
|
||||
const result = await extractWorkflow(undefined)
|
||||
|
||||
expect(result).toBeUndefined()
|
||||
})
|
||||
|
||||
it('should return undefined when workflow is missing', async () => {
|
||||
const jobWithoutWorkflow: JobDetail = {
|
||||
...mockJobDetailResponse,
|
||||
workflow: {}
|
||||
}
|
||||
|
||||
const result = await extractWorkflow(jobWithoutWorkflow)
|
||||
|
||||
expect(result).toBeUndefined()
|
||||
})
|
||||
|
||||
@@ -1,21 +0,0 @@
|
||||
import type { ComfyWorkflowJSON } from '@/platform/workflow/validation/schemas/workflowSchema'
|
||||
import type { PromptId } from '@/schemas/apiSchema'
|
||||
|
||||
export async function getWorkflowFromHistory(
|
||||
fetchApi: (url: string) => Promise<Response>,
|
||||
promptId: PromptId
|
||||
): Promise<ComfyWorkflowJSON | undefined> {
|
||||
try {
|
||||
const res = await fetchApi(`/history_v2/${promptId}`)
|
||||
const json = await res.json()
|
||||
|
||||
const historyItem = json[promptId]
|
||||
if (!historyItem) return undefined
|
||||
|
||||
const workflow = historyItem.prompt?.extra_data?.extra_pnginfo?.workflow
|
||||
return workflow ?? undefined
|
||||
} catch (error) {
|
||||
console.error(`Failed to fetch workflow for prompt ${promptId}:`, error)
|
||||
return undefined
|
||||
}
|
||||
}
|
||||
@@ -1,10 +0,0 @@
|
||||
/**
|
||||
* Cloud: Fetches workflow by prompt_id. Desktop: Returns undefined (workflows already in history).
|
||||
*/
|
||||
import { isCloud } from '@/platform/distribution/types'
|
||||
|
||||
import { getWorkflowFromHistory as cloudImpl } from './getWorkflowFromHistory'
|
||||
|
||||
export const getWorkflowFromHistory = isCloud
|
||||
? cloudImpl
|
||||
: async () => undefined
|
||||
@@ -8,17 +8,18 @@ import type { AssetItem } from '@/platform/assets/schemas/assetSchema'
|
||||
import { getOutputAssetMetadata } from '@/platform/assets/schemas/assetMetadataSchema'
|
||||
import { getAssetUrl } from '@/platform/assets/utils/assetUrlUtil'
|
||||
import { getWorkflowDataFromFile } from '@/scripts/metadata/parser'
|
||||
import { getJobWorkflow } from '@/services/jobOutputCache'
|
||||
|
||||
/**
|
||||
* Extract workflow from AssetItem (async - may need file fetch)
|
||||
* Tries metadata first (for output assets), then falls back to extracting from file
|
||||
* This supports both output assets (with embedded metadata) and input assets (PNG with workflow)
|
||||
* Extract workflow from AssetItem using jobs API
|
||||
* For output assets: uses jobs API (getJobWorkflow)
|
||||
* For input assets: extracts from file metadata
|
||||
*
|
||||
* @param asset The asset item to extract workflow from
|
||||
* @returns WorkflowSource with workflow and generated filename
|
||||
*
|
||||
* @example
|
||||
* const asset = { name: 'output.png', user_metadata: { workflow: {...} } }
|
||||
* const asset = { name: 'output.png', user_metadata: { promptId: '123' } }
|
||||
* const { workflow, filename } = await extractWorkflowFromAsset(asset)
|
||||
*/
|
||||
export async function extractWorkflowFromAsset(asset: AssetItem): Promise<{
|
||||
@@ -27,17 +28,14 @@ export async function extractWorkflowFromAsset(asset: AssetItem): Promise<{
|
||||
}> {
|
||||
const baseFilename = asset.name.replace(/\.[^/.]+$/, '.json')
|
||||
|
||||
// Strategy 1: Try metadata first (for output assets)
|
||||
// For output assets: use jobs API (with caching and validation)
|
||||
const metadata = getOutputAssetMetadata(asset.user_metadata)
|
||||
if (metadata?.workflow) {
|
||||
return {
|
||||
workflow: metadata.workflow as ComfyWorkflowJSON,
|
||||
filename: baseFilename
|
||||
}
|
||||
if (metadata?.promptId) {
|
||||
const workflow = await getJobWorkflow(metadata.promptId)
|
||||
return { workflow: workflow ?? null, filename: baseFilename }
|
||||
}
|
||||
|
||||
// Strategy 2: Try extracting from file (for input assets with embedded workflow)
|
||||
// This supports PNG, WEBP, FLAC, and other formats with metadata
|
||||
// For input assets: extract from file metadata (PNG/WEBP/FLAC with embedded workflow)
|
||||
try {
|
||||
const fileUrl = getAssetUrl(asset)
|
||||
const response = await fetch(fileUrl)
|
||||
|
||||
Reference in New Issue
Block a user