refactor: encapsulate error extraction in TaskItemImpl getters (#7650)

## Summary
- Add `errorMessage` and `executionError` getters to `TaskItemImpl` that
extract error info from status messages
- Update the `useJobErrorReporting` composable to use these getters
instead of the standalone function
- Remove the standalone `extractExecutionError` function

This encapsulates error extraction within `TaskItemImpl`, preparing for
the Jobs API migration where the underlying data format will change but
the getter interface will remain stable.
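
For reference, a minimal sketch of the getter surface this refactor introduces. This is not the actual `TaskItemImpl` implementation; the internal extraction (from status messages today, or from the job's `execution_error` field after the Jobs API migration) is an assumption, and the field names below (`execution_error`, `exception_message`) are taken from the fixtures and tests in the diff that follows.

```ts
// Sketch only: illustrates the getters added in this PR, assuming the
// underlying job payload may carry an `execution_error` object.
interface JobErrorLike {
  exception_message: string
  // ...other fields (node_id, node_type, traceback, etc.)
}

interface JobLike {
  execution_error?: JobErrorLike
}

class TaskItemSketch {
  constructor(private readonly job: JobLike) {}

  /** Structured error payload, when the job carries one. */
  get executionError(): JobErrorLike | undefined {
    return this.job.execution_error
  }

  /** Plain message for copy/display; empty string when there is no error. */
  get errorMessage(): string {
    return this.executionError?.exception_message ?? ''
  }
}
```

Consumers such as `useJobErrorReporting` then read `task.errorMessage` and `task.executionError` directly (see the composable diff below) instead of calling the removed `extractExecutionError` helper.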

## Test plan
- [x] All existing tests pass
- [x] New tests added for `TaskItemImpl.errorMessage` and
`TaskItemImpl.executionError` getters
- [x] TypeScript, lint, and knip checks pass

🤖 Generated with [Claude Code](https://claude.com/claude-code)


---------

Co-authored-by: Claude <noreply@anthropic.com>
Co-authored-by: Christian Byrne <cbyrne@comfy.org>
Author: ric-yu
Date: 2026-01-15 20:11:22 -08:00
Committed by: GitHub
Parent: 089295606a
Commit: c0a649ef43
46 changed files with 1650 additions and 3067 deletions

View File

@@ -21,7 +21,6 @@ import {
import { Topbar } from './components/Topbar'
import type { Position, Size } from './types'
import { NodeReference, SubgraphSlotReference } from './utils/litegraphUtils'
import TaskHistory from './utils/taskHistory'
dotenv.config()
@@ -146,8 +145,6 @@ class ConfirmDialog {
}
export class ComfyPage {
private _history: TaskHistory | null = null
public readonly url: string
// All canvas position operations are based on default view of canvas.
public readonly canvas: Locator
@@ -301,11 +298,6 @@ export class ComfyPage {
}
}
setupHistory(): TaskHistory {
this._history ??= new TaskHistory(this)
return this._history
}
async setup({
clearStorage = true,
mockReleases = true

View File

@@ -262,7 +262,7 @@ const focusAssetInSidebar = async (item: JobListItem) => {
const inspectJobAsset = wrapWithErrorHandlingAsync(
async (item: JobListItem) => {
openResultGallery(item)
await openResultGallery(item)
await focusAssetInSidebar(item)
}
)

View File

@@ -1,6 +1,9 @@
import type { Meta, StoryObj } from '@storybook/vue3-vite'
import type { TaskStatus } from '@/schemas/apiSchema'
import type {
JobListItem,
JobStatus
} from '@/platform/remote/comfyui/jobs/jobTypes'
import { useExecutionStore } from '@/stores/executionStore'
import { TaskItemImpl, useQueueStore } from '@/stores/queueStore'
@@ -37,91 +40,86 @@ function resetStores() {
exec.nodeProgressStatesByPrompt = {}
}
function makeTask(
id: string,
priority: number,
fields: Partial<JobListItem> & { status: JobStatus; create_time: number }
): TaskItemImpl {
const job: JobListItem = {
id,
priority,
last_state_update: null,
update_time: fields.create_time,
...fields
}
return new TaskItemImpl(job)
}
function makePendingTask(
id: string,
index: number,
createTimeMs?: number
priority: number,
createTimeMs: number
): TaskItemImpl {
const extraData = {
client_id: 'c1',
...(typeof createTimeMs === 'number' ? { create_time: createTimeMs } : {})
}
return new TaskItemImpl('Pending', [index, id, {}, extraData, []])
return makeTask(id, priority, {
status: 'pending',
create_time: createTimeMs
})
}
function makeRunningTask(
id: string,
index: number,
createTimeMs?: number
priority: number,
createTimeMs: number
): TaskItemImpl {
const extraData = {
client_id: 'c1',
...(typeof createTimeMs === 'number' ? { create_time: createTimeMs } : {})
}
return new TaskItemImpl('Running', [index, id, {}, extraData, []])
return makeTask(id, priority, {
status: 'in_progress',
create_time: createTimeMs
})
}
function makeRunningTaskWithStart(
id: string,
index: number,
priority: number,
startedSecondsAgo: number
): TaskItemImpl {
const start = Date.now() - startedSecondsAgo * 1000
const status: TaskStatus = {
status_str: 'success',
completed: false,
messages: [['execution_start', { prompt_id: id, timestamp: start } as any]]
}
return new TaskItemImpl(
'Running',
[index, id, {}, { client_id: 'c1', create_time: start - 5000 }, []],
status
)
return makeTask(id, priority, {
status: 'in_progress',
create_time: start - 5000,
update_time: start
})
}
function makeHistoryTask(
id: string,
index: number,
priority: number,
durationSec: number,
ok: boolean,
errorMessage?: string
): TaskItemImpl {
const start = Date.now() - durationSec * 1000 - 1000
const end = start + durationSec * 1000
const messages: TaskStatus['messages'] = ok
? [
['execution_start', { prompt_id: id, timestamp: start } as any],
['execution_success', { prompt_id: id, timestamp: end } as any]
]
: [
['execution_start', { prompt_id: id, timestamp: start } as any],
[
'execution_error',
{
prompt_id: id,
timestamp: end,
node_id: '1',
node_type: 'Node',
executed: [],
exception_message:
errorMessage || 'Demo error: Node failed during execution',
exception_type: 'RuntimeError',
traceback: [],
current_inputs: {},
current_outputs: {}
} as any
]
]
const status: TaskStatus = {
status_str: ok ? 'success' : 'error',
completed: true,
messages
}
return new TaskItemImpl(
'History',
[index, id, {}, { client_id: 'c1', create_time: start }, []],
status
)
const now = Date.now()
const executionEndTime = now
const executionStartTime = now - durationSec * 1000
return makeTask(id, priority, {
status: ok ? 'completed' : 'failed',
create_time: executionStartTime - 5000,
update_time: now,
execution_start_time: executionStartTime,
execution_end_time: executionEndTime,
execution_error: errorMessage
? {
prompt_id: id,
timestamp: now,
node_id: '1',
node_type: 'ExampleNode',
exception_message: errorMessage,
exception_type: 'RuntimeError',
traceback: [],
current_inputs: {},
current_outputs: {}
}
: undefined
})
}
export const Queued: Story = {
@@ -140,8 +138,12 @@ export const Queued: Story = {
makePendingTask(jobId, queueIndex, Date.now() - 90_000)
]
// Add some other pending jobs to give context
queue.pendingTasks.push(makePendingTask('job-older-1', 100))
queue.pendingTasks.push(makePendingTask('job-older-2', 101))
queue.pendingTasks.push(
makePendingTask('job-older-1', 100, Date.now() - 60_000)
)
queue.pendingTasks.push(
makePendingTask('job-older-2', 101, Date.now() - 30_000)
)
// Queued at (in metadata on prompt[4])

View File

@@ -12,7 +12,7 @@
v-for="ji in group.items"
:key="ji.id"
:job-id="ji.id"
:workflow-id="ji.taskRef?.workflow?.id"
:workflow-id="ji.taskRef?.workflowId"
:state="ji.state"
:title="ji.title"
:right-text="ji.meta"

View File

@@ -2,116 +2,49 @@ import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { computed, ref } from 'vue'
import type { ComputedRef } from 'vue'
import type { ExecutionErrorWsMessage } from '@/schemas/apiSchema'
import type { TaskItemImpl } from '@/stores/queueStore'
import type {
JobErrorDialogService,
UseJobErrorReportingOptions
} from '@/components/queue/job/useJobErrorReporting'
import * as jobErrorReporting from '@/components/queue/job/useJobErrorReporting'
import type { JobErrorDialogService } from '@/components/queue/job/useJobErrorReporting'
import { useJobErrorReporting } from '@/components/queue/job/useJobErrorReporting'
import type { ExecutionError } from '@/platform/remote/comfyui/jobs/jobTypes'
const createExecutionErrorMessage = (
overrides: Partial<ExecutionErrorWsMessage> = {}
): ExecutionErrorWsMessage => ({
prompt_id: 'prompt',
timestamp: 100,
node_id: 'node-1',
node_type: 'KSampler',
executed: [],
exception_message: 'default failure',
exception_type: 'RuntimeError',
traceback: ['Trace line'],
current_inputs: {},
current_outputs: {},
...overrides
})
const createTaskWithMessages = (
messages: Array<[string, unknown]> | undefined = []
const createTaskWithError = (
promptId: string,
errorMessage?: string,
executionError?: ExecutionError,
createTime?: number
): TaskItemImpl =>
({
status: {
status_str: 'error',
completed: false,
messages
}
}) as TaskItemImpl
describe('extractExecutionError', () => {
it('returns null when task has no execution error messages', () => {
expect(jobErrorReporting.extractExecutionError(null)).toBeNull()
expect(
jobErrorReporting.extractExecutionError({
status: undefined
} as TaskItemImpl)
).toBeNull()
expect(
jobErrorReporting.extractExecutionError({
status: {
status_str: 'error',
completed: false,
messages: {} as unknown as Array<[string, unknown]>
}
} as TaskItemImpl)
).toBeNull()
expect(
jobErrorReporting.extractExecutionError(createTaskWithMessages([]))
).toBeNull()
expect(
jobErrorReporting.extractExecutionError(
createTaskWithMessages([
['execution_start', { prompt_id: 'prompt', timestamp: 1 }]
] as Array<[string, unknown]>)
)
).toBeNull()
})
it('returns detail and message for execution_error entries', () => {
const detail = createExecutionErrorMessage({ exception_message: 'Kaboom' })
const result = jobErrorReporting.extractExecutionError(
createTaskWithMessages([
['execution_success', { prompt_id: 'prompt', timestamp: 2 }],
['execution_error', detail]
] as Array<[string, unknown]>)
)
expect(result).toEqual({
detail,
message: 'Kaboom'
})
})
it('falls back to an empty message when the tuple lacks detail', () => {
const result = jobErrorReporting.extractExecutionError(
createTaskWithMessages([
['execution_error'] as unknown as [string, ExecutionErrorWsMessage]
])
)
expect(result).toEqual({ detail: undefined, message: '' })
})
})
promptId,
errorMessage,
executionError,
createTime: createTime ?? Date.now()
}) as Partial<TaskItemImpl> as TaskItemImpl
describe('useJobErrorReporting', () => {
let taskState = ref<TaskItemImpl | null>(null)
let taskForJob: ComputedRef<TaskItemImpl | null>
let copyToClipboard: UseJobErrorReportingOptions['copyToClipboard']
let showExecutionErrorDialog: JobErrorDialogService['showExecutionErrorDialog']
let showErrorDialog: JobErrorDialogService['showErrorDialog']
let copyToClipboard: ReturnType<typeof vi.fn>
let showErrorDialog: ReturnType<typeof vi.fn>
let showExecutionErrorDialog: ReturnType<typeof vi.fn>
let dialog: JobErrorDialogService
let composable: ReturnType<typeof jobErrorReporting.useJobErrorReporting>
let composable: ReturnType<typeof useJobErrorReporting>
beforeEach(() => {
vi.clearAllMocks()
taskState = ref<TaskItemImpl | null>(null)
taskForJob = computed(() => taskState.value)
copyToClipboard = vi.fn()
showExecutionErrorDialog = vi.fn()
showErrorDialog = vi.fn()
showExecutionErrorDialog = vi.fn()
dialog = {
showExecutionErrorDialog,
showErrorDialog
}
composable = jobErrorReporting.useJobErrorReporting({
showErrorDialog,
showExecutionErrorDialog
} as unknown as JobErrorDialogService
composable = useJobErrorReporting({
taskForJob,
copyToClipboard,
copyToClipboard: copyToClipboard as (
value: string
) => void | Promise<void>,
dialog
})
})
@@ -121,73 +54,87 @@ describe('useJobErrorReporting', () => {
})
it('exposes a computed message that reflects the current task error', () => {
taskState.value = createTaskWithMessages([
[
'execution_error',
createExecutionErrorMessage({ exception_message: 'First failure' })
]
])
taskState.value = createTaskWithError('job-1', 'First failure')
expect(composable.errorMessageValue.value).toBe('First failure')
taskState.value = createTaskWithMessages([
[
'execution_error',
createExecutionErrorMessage({ exception_message: 'Second failure' })
]
])
taskState.value = createTaskWithError('job-2', 'Second failure')
expect(composable.errorMessageValue.value).toBe('Second failure')
})
it('returns empty string when no error message', () => {
taskState.value = createTaskWithError('job-1')
expect(composable.errorMessageValue.value).toBe('')
})
it('returns empty string when task is null', () => {
taskState.value = null
expect(composable.errorMessageValue.value).toBe('')
})
it('only calls the copy handler when a message exists', () => {
taskState.value = createTaskWithMessages([
[
'execution_error',
createExecutionErrorMessage({ exception_message: 'Clipboard failure' })
]
])
taskState.value = createTaskWithError('job-1', 'Clipboard failure')
composable.copyErrorMessage()
expect(copyToClipboard).toHaveBeenCalledTimes(1)
expect(copyToClipboard).toHaveBeenCalledWith('Clipboard failure')
vi.mocked(copyToClipboard).mockClear()
taskState.value = createTaskWithMessages([])
copyToClipboard.mockClear()
taskState.value = createTaskWithError('job-2')
composable.copyErrorMessage()
expect(copyToClipboard).not.toHaveBeenCalled()
})
it('prefers the detailed execution dialog when detail is available', () => {
const detail = createExecutionErrorMessage({
exception_message: 'Detailed failure'
})
taskState.value = createTaskWithMessages([['execution_error', detail]])
it('shows simple error dialog when only errorMessage present', () => {
taskState.value = createTaskWithError('job-1', 'Queue job error')
composable.reportJobError()
expect(showExecutionErrorDialog).toHaveBeenCalledTimes(1)
expect(showExecutionErrorDialog).toHaveBeenCalledWith(detail)
expect(showErrorDialog).not.toHaveBeenCalled()
})
it('shows a fallback dialog when only a message is available', () => {
const message = 'Queue job error'
taskState.value = createTaskWithMessages([])
const valueSpy = vi
.spyOn(composable.errorMessageValue, 'value', 'get')
.mockReturnValue(message)
expect(composable.errorMessageValue.value).toBe(message)
composable.reportJobError()
expect(showExecutionErrorDialog).not.toHaveBeenCalled()
expect(showErrorDialog).toHaveBeenCalledTimes(1)
const [errorArg, optionsArg] = vi.mocked(showErrorDialog).mock.calls[0]
const [errorArg, optionsArg] = showErrorDialog.mock.calls[0]
expect(errorArg).toBeInstanceOf(Error)
expect(errorArg.message).toBe(message)
expect(errorArg.message).toBe('Queue job error')
expect(optionsArg).toEqual({ reportType: 'queueJobError' })
valueSpy.mockRestore()
expect(showExecutionErrorDialog).not.toHaveBeenCalled()
})
it('does nothing when no error could be extracted', () => {
taskState.value = createTaskWithMessages([])
it('does nothing when no task exists', () => {
taskState.value = null
composable.reportJobError()
expect(showExecutionErrorDialog).not.toHaveBeenCalled()
expect(showErrorDialog).not.toHaveBeenCalled()
expect(showExecutionErrorDialog).not.toHaveBeenCalled()
})
it('shows rich error dialog when execution_error available on task', () => {
const executionError: ExecutionError = {
prompt_id: 'job-1',
timestamp: 12345,
node_id: '5',
node_type: 'KSampler',
executed: ['1', '2'],
exception_message: 'CUDA out of memory',
exception_type: 'RuntimeError',
traceback: ['line 1', 'line 2'],
current_inputs: {},
current_outputs: {}
}
taskState.value = createTaskWithError(
'job-1',
'CUDA out of memory',
executionError,
12345
)
composable.reportJobError()
expect(showExecutionErrorDialog).toHaveBeenCalledTimes(1)
expect(showExecutionErrorDialog).toHaveBeenCalledWith(executionError)
expect(showErrorDialog).not.toHaveBeenCalled()
})
it('does nothing when no error message and no execution_error', () => {
taskState.value = createTaskWithError('job-1')
composable.reportJobError()
expect(showErrorDialog).not.toHaveBeenCalled()
expect(showExecutionErrorDialog).not.toHaveBeenCalled()
})
})

View File

@@ -1,13 +1,13 @@
import { computed } from 'vue'
import type { ComputedRef } from 'vue'
import type { ExecutionErrorWsMessage } from '@/schemas/apiSchema'
import type { ExecutionErrorDialogInput } from '@/services/dialogService'
import type { TaskItemImpl } from '@/stores/queueStore'
type CopyHandler = (value: string) => void | Promise<void>
export type JobErrorDialogService = {
showExecutionErrorDialog: (error: ExecutionErrorWsMessage) => void
showExecutionErrorDialog: (executionError: ExecutionErrorDialogInput) => void
showErrorDialog: (
error: Error,
options?: {
@@ -17,30 +17,7 @@ export type JobErrorDialogService = {
) => void
}
type JobExecutionError = {
detail?: ExecutionErrorWsMessage
message: string
}
export const extractExecutionError = (
task: TaskItemImpl | null
): JobExecutionError | null => {
const status = (task as TaskItemImpl | null)?.status
const messages = (status as { messages?: unknown[] } | undefined)?.messages
if (!Array.isArray(messages) || !messages.length) return null
const record = messages.find((entry: unknown) => {
return Array.isArray(entry) && entry[0] === 'execution_error'
}) as [string, ExecutionErrorWsMessage?] | undefined
if (!record) return null
const detail = record[1]
const message = String(detail?.exception_message ?? '')
return {
detail,
message
}
}
export type UseJobErrorReportingOptions = {
type UseJobErrorReportingOptions = {
taskForJob: ComputedRef<TaskItemImpl | null>
copyToClipboard: CopyHandler
dialog: JobErrorDialogService
@@ -51,10 +28,7 @@ export const useJobErrorReporting = ({
copyToClipboard,
dialog
}: UseJobErrorReportingOptions) => {
const errorMessageValue = computed(() => {
const error = extractExecutionError(taskForJob.value)
return error?.message ?? ''
})
const errorMessageValue = computed(() => taskForJob.value?.errorMessage ?? '')
const copyErrorMessage = () => {
if (errorMessageValue.value) {
@@ -63,11 +37,12 @@ export const useJobErrorReporting = ({
}
const reportJobError = () => {
const error = extractExecutionError(taskForJob.value)
if (error?.detail) {
dialog.showExecutionErrorDialog(error.detail)
const executionError = taskForJob.value?.executionError
if (executionError) {
dialog.showExecutionErrorDialog(executionError)
return
}
if (errorMessageValue.value) {
dialog.showErrorDialog(new Error(errorMessageValue.value), {
reportType: 'queueJobError'

View File

@@ -240,12 +240,19 @@ import type { AssetItem } from '@/platform/assets/schemas/assetSchema'
import type { MediaKind } from '@/platform/assets/schemas/mediaAssetSchema'
import { isCloud } from '@/platform/distribution/types'
import { useSettingStore } from '@/platform/settings/settingStore'
import { getJobDetail } from '@/services/jobOutputCache'
import { useCommandStore } from '@/stores/commandStore'
import { useDialogStore } from '@/stores/dialogStore'
import { ResultItemImpl, useQueueStore } from '@/stores/queueStore'
import { formatDuration, getMediaTypeFromFilename } from '@/utils/formatUtil'
import { cn } from '@/utils/tailwindUtil'
interface JobOutputItem {
filename: string
subfolder: string
type: string
}
const { t, n } = useI18n()
const commandStore = useCommandStore()
const queueStore = useQueueStore()
@@ -492,6 +499,35 @@ function handleContextMenuHide() {
})
}
const handleBulkDownload = (assets: AssetItem[]) => {
downloadMultipleAssets(assets)
clearSelection()
}
const handleBulkDelete = async (assets: AssetItem[]) => {
await deleteMultipleAssets(assets)
clearSelection()
}
const handleClearQueue = async () => {
await commandStore.execute('Comfy.ClearPendingTasks')
}
const handleBulkAddToWorkflow = async (assets: AssetItem[]) => {
await addMultipleToWorkflow(assets)
clearSelection()
}
const handleBulkOpenWorkflow = async (assets: AssetItem[]) => {
await openMultipleWorkflows(assets)
clearSelection()
}
const handleBulkExportWorkflow = async (assets: AssetItem[]) => {
await exportMultipleWorkflows(assets)
clearSelection()
}
const handleZoomClick = (asset: AssetItem) => {
const mediaType = getMediaTypeFromFilename(asset.name)
@@ -519,16 +555,16 @@ const handleZoomClick = (asset: AssetItem) => {
}
}
const enterFolderView = (asset: AssetItem) => {
const enterFolderView = async (asset: AssetItem) => {
const metadata = getOutputAssetMetadata(asset.user_metadata)
if (!metadata) {
console.warn('Invalid output asset metadata')
return
}
const { promptId, allOutputs, executionTimeInSeconds } = metadata
const { promptId, allOutputs, executionTimeInSeconds, outputCount } = metadata
if (!promptId || !Array.isArray(allOutputs) || allOutputs.length === 0) {
if (!promptId) {
console.warn('Missing required folder view data')
return
}
@@ -536,7 +572,48 @@ const enterFolderView = (asset: AssetItem) => {
folderPromptId.value = promptId
folderExecutionTime.value = executionTimeInSeconds
folderAssets.value = allOutputs.map((output) => ({
// Determine which outputs to display
let outputsToDisplay = allOutputs ?? []
// If outputCount indicates more outputs than we have, fetch full outputs
const needsFullOutputs =
typeof outputCount === 'number' &&
outputCount > 1 &&
outputsToDisplay.length < outputCount
if (needsFullOutputs) {
try {
const jobDetail = await getJobDetail(promptId)
if (jobDetail?.outputs) {
// Convert job outputs to ResultItemImpl array
outputsToDisplay = Object.entries(jobDetail.outputs).flatMap(
([nodeId, nodeOutputs]) =>
Object.entries(nodeOutputs).flatMap(([mediaType, items]) =>
(items as JobOutputItem[])
.map(
(item) =>
new ResultItemImpl({
...item,
nodeId,
mediaType
})
)
.filter((r) => r.supportsPreview)
)
)
}
} catch (error) {
console.error('Failed to fetch job detail for folder view:', error)
outputsToDisplay = []
}
}
if (outputsToDisplay.length === 0) {
console.warn('No outputs available for folder view')
return
}
folderAssets.value = outputsToDisplay.map((output) => ({
id: `${output.nodeId}-${output.filename}`,
name: output.filename,
size: 0,
@@ -609,35 +686,6 @@ const handleDeleteSelected = async () => {
clearSelection()
}
const handleBulkDownload = (assets: AssetItem[]) => {
downloadMultipleAssets(assets)
clearSelection()
}
const handleBulkDelete = async (assets: AssetItem[]) => {
await deleteMultipleAssets(assets)
clearSelection()
}
const handleBulkAddToWorkflow = async (assets: AssetItem[]) => {
await addMultipleToWorkflow(assets)
clearSelection()
}
const handleBulkOpenWorkflow = async (assets: AssetItem[]) => {
await openMultipleWorkflows(assets)
clearSelection()
}
const handleBulkExportWorkflow = async (assets: AssetItem[]) => {
await exportMultipleWorkflows(assets)
clearSelection()
}
const handleClearQueue = async () => {
await commandStore.execute('Comfy.ClearPendingTasks')
}
const handleApproachEnd = useDebounceFn(async () => {
if (
activeTab.value === 'output' &&

View File

@@ -16,7 +16,7 @@ type TestTask = {
executionTime?: number
executionEndTimestamp?: number
createTime?: number
workflow?: { id?: string }
workflowId?: string
}
const translations: Record<string, string> = {
@@ -161,7 +161,7 @@ const createTask = (
executionTime: overrides.executionTime,
executionEndTimestamp: overrides.executionEndTimestamp,
createTime: overrides.createTime,
workflow: overrides.workflow
workflowId: overrides.workflowId
})
const mountUseJobList = () => {
@@ -305,7 +305,7 @@ describe('useJobList', () => {
expect(vi.getTimerCount()).toBe(0)
})
it('sorts all tasks by queue index descending', async () => {
it('sorts all tasks by priority descending', async () => {
queueStoreMock.pendingTasks = [
createTask({ promptId: 'p', queueIndex: 1, mockState: 'pending' })
]
@@ -360,13 +360,13 @@ describe('useJobList', () => {
promptId: 'wf-1',
queueIndex: 2,
mockState: 'pending',
workflow: { id: 'workflow-1' }
workflowId: 'workflow-1'
}),
createTask({
promptId: 'wf-2',
queueIndex: 1,
mockState: 'pending',
workflow: { id: 'workflow-2' }
workflowId: 'workflow-2'
})
]

View File

@@ -238,7 +238,7 @@ export function useJobList() {
const activeId = workflowStore.activeWorkflow?.activeState?.id
if (!activeId) return []
entries = entries.filter(({ task }) => {
const wid = task.workflow?.id
const wid = task.workflowId
return !!wid && wid === activeId
})
}

View File

@@ -73,6 +73,7 @@ vi.mock('@/scripts/utils', () => ({
}))
const dialogServiceMock = {
showErrorDialog: vi.fn(),
showExecutionErrorDialog: vi.fn(),
prompt: vi.fn()
}
@@ -103,6 +104,11 @@ vi.mock('@/stores/queueStore', () => ({
useQueueStore: () => queueStoreMock
}))
const getJobWorkflowMock = vi.fn()
vi.mock('@/services/jobOutputCache', () => ({
getJobWorkflow: (...args: any[]) => getJobWorkflowMock(...args)
}))
const createAnnotatedPathMock = vi.fn()
vi.mock('@/utils/createAnnotatedPath', () => ({
createAnnotatedPath: (...args: any[]) => createAnnotatedPathMock(...args)
@@ -132,9 +138,7 @@ const createJobItem = (
title: overrides.title ?? 'Test job',
meta: overrides.meta ?? 'meta',
state: overrides.state ?? 'completed',
taskRef: overrides.taskRef as Partial<TaskItemImpl> | undefined as
| TaskItemImpl
| undefined,
taskRef: overrides.taskRef as TaskItemImpl | undefined,
iconName: overrides.iconName,
iconImageUrl: overrides.iconImageUrl,
showClear: overrides.showClear,
@@ -181,6 +185,8 @@ describe('useJobMenu', () => {
LoadVideo: { id: 'LoadVideo' },
LoadAudio: { id: 'LoadAudio' }
}
// Default: no workflow available via lazy loading
getJobWorkflowMock.mockResolvedValue(undefined)
})
const setCurrentItem = (item: JobListItem | null) => {
@@ -190,10 +196,13 @@ describe('useJobMenu', () => {
it('opens workflow when workflow data exists', async () => {
const { openJobWorkflow } = mountJobMenu()
const workflow = { nodes: [] }
setCurrentItem(createJobItem({ id: '55', taskRef: { workflow } }))
// Mock lazy loading via fetchJobDetail + extractWorkflow
getJobWorkflowMock.mockResolvedValue(workflow)
setCurrentItem(createJobItem({ id: '55' }))
await openJobWorkflow()
expect(getJobWorkflowMock).toHaveBeenCalledWith('55')
expect(workflowStoreMock.createTemporary).toHaveBeenCalledWith(
'Job 55.json',
workflow
@@ -268,11 +277,10 @@ describe('useJobMenu', () => {
it('copies error message from failed job entry', async () => {
const { jobMenuEntries } = mountJobMenu()
const error = { exception_message: 'boom' }
setCurrentItem(
createJobItem({
state: 'failed',
taskRef: { status: { messages: [['execution_error', error]] } } as any
taskRef: { errorMessage: 'Something went wrong' } as any
})
)
@@ -280,31 +288,75 @@ describe('useJobMenu', () => {
const entry = findActionEntry(jobMenuEntries.value, 'copy-error')
await entry?.onClick?.()
expect(copyToClipboardMock).toHaveBeenCalledWith('boom')
expect(copyToClipboardMock).toHaveBeenCalledWith('Something went wrong')
})
it('reports error via dialog when entry triggered', async () => {
it('reports error via rich dialog when execution_error available', async () => {
const executionError = {
prompt_id: 'job-1',
timestamp: 12345,
node_id: '5',
node_type: 'KSampler',
executed: ['1', '2'],
exception_message: 'CUDA out of memory',
exception_type: 'RuntimeError',
traceback: ['line 1', 'line 2'],
current_inputs: {},
current_outputs: {}
}
const { jobMenuEntries } = mountJobMenu()
const error = { exception_message: 'bad', extra: 1 }
setCurrentItem(
createJobItem({
state: 'failed',
taskRef: { status: { messages: [['execution_error', error]] } } as any
taskRef: {
errorMessage: 'CUDA out of memory',
executionError,
createTime: 12345
} as any
})
)
await nextTick()
const entry = findActionEntry(jobMenuEntries.value, 'report-error')
void entry?.onClick?.()
await entry?.onClick?.()
expect(dialogServiceMock.showExecutionErrorDialog).toHaveBeenCalledTimes(1)
expect(dialogServiceMock.showExecutionErrorDialog).toHaveBeenCalledWith(
error
executionError
)
expect(dialogServiceMock.showErrorDialog).not.toHaveBeenCalled()
})
it('falls back to simple error dialog when no execution_error', async () => {
const { jobMenuEntries } = mountJobMenu()
setCurrentItem(
createJobItem({
state: 'failed',
taskRef: { errorMessage: 'Job failed with error' } as any
})
)
await nextTick()
const entry = findActionEntry(jobMenuEntries.value, 'report-error')
await entry?.onClick?.()
expect(dialogServiceMock.showExecutionErrorDialog).not.toHaveBeenCalled()
expect(dialogServiceMock.showErrorDialog).toHaveBeenCalledTimes(1)
const [errorArg, optionsArg] =
dialogServiceMock.showErrorDialog.mock.calls[0]
expect(errorArg).toBeInstanceOf(Error)
expect(errorArg.message).toBe('Job failed with error')
expect(optionsArg).toEqual({ reportType: 'queueJobError' })
})
it('ignores error actions when message missing', async () => {
const { jobMenuEntries } = mountJobMenu()
setCurrentItem(createJobItem({ state: 'failed', taskRef: { status: {} } }))
setCurrentItem(
createJobItem({
state: 'failed',
taskRef: { errorMessage: undefined } as any
})
)
await nextTick()
const copyEntry = findActionEntry(jobMenuEntries.value, 'copy-error')
@@ -313,6 +365,7 @@ describe('useJobMenu', () => {
await reportEntry?.onClick?.()
expect(copyToClipboardMock).not.toHaveBeenCalled()
expect(dialogServiceMock.showErrorDialog).not.toHaveBeenCalled()
expect(dialogServiceMock.showExecutionErrorDialog).not.toHaveBeenCalled()
})
@@ -488,12 +541,13 @@ describe('useJobMenu', () => {
})
it('exports workflow with default filename when prompting disabled', async () => {
const workflow = { foo: 'bar' }
getJobWorkflowMock.mockResolvedValue(workflow)
const { jobMenuEntries } = mountJobMenu()
setCurrentItem(
createJobItem({
id: '7',
state: 'completed',
taskRef: { workflow: { foo: 'bar' } }
state: 'completed'
})
)
@@ -513,11 +567,11 @@ describe('useJobMenu', () => {
it('prompts for filename when setting enabled', async () => {
settingStoreMock.get.mockReturnValue(true)
dialogServiceMock.prompt.mockResolvedValue('custom-name')
getJobWorkflowMock.mockResolvedValue({})
const { jobMenuEntries } = mountJobMenu()
setCurrentItem(
createJobItem({
state: 'completed',
taskRef: { workflow: {} }
state: 'completed'
})
)
@@ -537,12 +591,12 @@ describe('useJobMenu', () => {
it('keeps existing json extension when exporting workflow', async () => {
settingStoreMock.get.mockReturnValue(true)
dialogServiceMock.prompt.mockResolvedValue('existing.json')
getJobWorkflowMock.mockResolvedValue({ foo: 'bar' })
const { jobMenuEntries } = mountJobMenu()
setCurrentItem(
createJobItem({
id: '42',
state: 'completed',
taskRef: { workflow: { foo: 'bar' } }
state: 'completed'
})
)
@@ -558,11 +612,11 @@ describe('useJobMenu', () => {
it('abandons export when prompt cancelled', async () => {
settingStoreMock.get.mockReturnValue(true)
dialogServiceMock.prompt.mockResolvedValue('')
getJobWorkflowMock.mockResolvedValue({})
const { jobMenuEntries } = mountJobMenu()
setCurrentItem(
createJobItem({
state: 'completed',
taskRef: { workflow: {} }
state: 'completed'
})
)
@@ -682,7 +736,12 @@ describe('useJobMenu', () => {
it('returns failed menu entries with error actions', async () => {
const { jobMenuEntries } = mountJobMenu()
setCurrentItem(createJobItem({ state: 'failed', taskRef: { status: {} } }))
setCurrentItem(
createJobItem({
state: 'failed',
taskRef: { errorMessage: 'Some error' } as any
})
)
await nextTick()
expect(jobMenuEntries.value.map((entry) => entry.key)).toEqual([

View File

@@ -9,15 +9,11 @@ import { useMediaAssetActions } from '@/platform/assets/composables/useMediaAsse
import { useSettingStore } from '@/platform/settings/settingStore'
import { useWorkflowService } from '@/platform/workflow/core/services/workflowService'
import { useWorkflowStore } from '@/platform/workflow/management/stores/workflowStore'
import type {
ExecutionErrorWsMessage,
ResultItem,
ResultItemType,
TaskStatus
} from '@/schemas/apiSchema'
import type { ResultItem, ResultItemType } from '@/schemas/apiSchema'
import { api } from '@/scripts/api'
import { downloadBlob } from '@/scripts/utils'
import { useDialogService } from '@/services/dialogService'
import { getJobWorkflow } from '@/services/jobOutputCache'
import { useLitegraphService } from '@/services/litegraphService'
import { useNodeDefStore } from '@/stores/nodeDefStore'
import { useQueueStore } from '@/stores/queueStore'
@@ -59,7 +55,7 @@ export function useJobMenu(
const openJobWorkflow = async (item?: JobListItem | null) => {
const target = resolveItem(item)
if (!target) return
const data = target.taskRef?.workflow
const data = await getJobWorkflow(target.id)
if (!data) return
const filename = `Job ${target.id}.json`
const temp = workflowStore.createTemporary(filename, data)
@@ -83,37 +79,39 @@ export function useJobMenu(
await queueStore.update()
}
const findExecutionError = (
messages: TaskStatus['messages'] | undefined
): ExecutionErrorWsMessage | undefined => {
const errMessage = messages?.find((m) => m[0] === 'execution_error')
if (errMessage && errMessage[0] === 'execution_error') {
return errMessage[1]
}
return undefined
}
const copyErrorMessage = async (item?: JobListItem | null) => {
const target = resolveItem(item)
if (!target) return
const err = findExecutionError(target.taskRef?.status?.messages)
const message = err?.exception_message
if (message) await copyToClipboard(String(message))
const message = target?.taskRef?.errorMessage
if (message) await copyToClipboard(message)
}
const reportError = (item?: JobListItem | null) => {
const target = resolveItem(item)
if (!target) return
const err = findExecutionError(target.taskRef?.status?.messages)
if (err) useDialogService().showExecutionErrorDialog(err)
// Use execution_error from list response if available
const executionError = target.taskRef?.executionError
if (executionError) {
useDialogService().showExecutionErrorDialog(executionError)
return
}
// Fall back to simple error dialog
const message = target.taskRef?.errorMessage
if (message) {
useDialogService().showErrorDialog(new Error(message), {
reportType: 'queueJobError'
})
}
}
// This is very magical only because it matches the respective backend implementation
// There is or will be a better way to do this
const addOutputLoaderNode = async (item?: JobListItem | null) => {
const target = resolveItem(item)
if (!target) return
const result: ResultItemImpl | undefined = target.taskRef?.previewOutput
const addOutputLoaderNode = async () => {
const item = currentMenuItem()
if (!item) return
const result: ResultItemImpl | undefined = item.taskRef?.previewOutput
if (!result) return
let nodeType: 'LoadImage' | 'LoadVideo' | 'LoadAudio' | null = null
@@ -161,10 +159,10 @@ export function useJobMenu(
/**
* Trigger a download of the job's previewable output asset.
*/
const downloadPreviewAsset = (item?: JobListItem | null) => {
const target = resolveItem(item)
if (!target) return
const result: ResultItemImpl | undefined = target.taskRef?.previewOutput
const downloadPreviewAsset = () => {
const item = currentMenuItem()
if (!item) return
const result: ResultItemImpl | undefined = item.taskRef?.previewOutput
if (!result) return
downloadFile(result.url)
}
@@ -172,14 +170,14 @@ export function useJobMenu(
/**
* Export the workflow JSON attached to the job.
*/
const exportJobWorkflow = async (item?: JobListItem | null) => {
const target = resolveItem(item)
if (!target) return
const data = target.taskRef?.workflow
const exportJobWorkflow = async () => {
const item = currentMenuItem()
if (!item) return
const data = await getJobWorkflow(item.id)
if (!data) return
const settingStore = useSettingStore()
let filename = `Job ${target.id}.json`
let filename = `Job ${item.id}.json`
if (settingStore.get('Comfy.PromptFilename')) {
const input = await useDialogService().prompt({
@@ -196,10 +194,10 @@ export function useJobMenu(
downloadBlob(filename, blob)
}
const deleteJobAsset = async (item?: JobListItem | null) => {
const target = resolveItem(item)
if (!target) return
const task = target.taskRef as TaskItemImpl | undefined
const deleteJobAsset = async () => {
const item = currentMenuItem()
if (!item) return
const task = item.taskRef as TaskItemImpl | undefined
const preview = task?.previewOutput
if (!task || !preview) return
@@ -210,8 +208,8 @@ export function useJobMenu(
}
}
const removeFailedJob = async (item?: JobListItem | null) => {
const task = resolveItem(item)?.taskRef as TaskItemImpl | undefined
const removeFailedJob = async () => {
const task = currentMenuItem()?.taskRef as TaskItemImpl | undefined
if (!task) return
await queueStore.delete(task)
}
@@ -242,8 +240,8 @@ export function useJobMenu(
icon: 'icon-[lucide--zoom-in]',
onClick: onInspectAsset
? () => {
const current = resolveItem()
if (current) onInspectAsset(current)
const item = currentMenuItem()
if (item) onInspectAsset(item)
}
: undefined
},
@@ -254,33 +252,33 @@ export function useJobMenu(
'Add to current workflow'
),
icon: 'icon-[comfy--node]',
onClick: () => addOutputLoaderNode(resolveItem())
onClick: addOutputLoaderNode
},
{
key: 'download',
label: st('queue.jobMenu.download', 'Download'),
icon: 'icon-[lucide--download]',
onClick: () => downloadPreviewAsset(resolveItem())
onClick: downloadPreviewAsset
},
{ kind: 'divider', key: 'd1' },
{
key: 'open-workflow',
label: jobMenuOpenWorkflowLabel.value,
icon: 'icon-[comfy--workflow]',
onClick: () => openJobWorkflow(resolveItem())
onClick: openJobWorkflow
},
{
key: 'export-workflow',
label: st('queue.jobMenu.exportWorkflow', 'Export workflow'),
icon: 'icon-[comfy--file-output]',
onClick: () => exportJobWorkflow(resolveItem())
onClick: exportJobWorkflow
},
{ kind: 'divider', key: 'd2' },
{
key: 'copy-id',
label: jobMenuCopyJobIdLabel.value,
icon: 'icon-[lucide--copy]',
onClick: () => copyJobId(resolveItem())
onClick: copyJobId
},
{ kind: 'divider', key: 'd3' },
...(hasDeletableAsset
@@ -289,7 +287,7 @@ export function useJobMenu(
key: 'delete',
label: st('queue.jobMenu.deleteAsset', 'Delete asset'),
icon: 'icon-[lucide--trash-2]',
onClick: () => deleteJobAsset(resolveItem())
onClick: deleteJobAsset
}
]
: [])
@@ -301,33 +299,33 @@ export function useJobMenu(
key: 'open-workflow',
label: jobMenuOpenWorkflowFailedLabel.value,
icon: 'icon-[comfy--workflow]',
onClick: () => openJobWorkflow(resolveItem())
onClick: openJobWorkflow
},
{ kind: 'divider', key: 'd1' },
{
key: 'copy-id',
label: jobMenuCopyJobIdLabel.value,
icon: 'icon-[lucide--copy]',
onClick: () => copyJobId(resolveItem())
onClick: copyJobId
},
{
key: 'copy-error',
label: st('queue.jobMenu.copyErrorMessage', 'Copy error message'),
icon: 'icon-[lucide--copy]',
onClick: () => copyErrorMessage(resolveItem())
onClick: copyErrorMessage
},
{
key: 'report-error',
label: st('queue.jobMenu.reportError', 'Report error'),
icon: 'icon-[lucide--message-circle-warning]',
onClick: () => reportError(resolveItem())
onClick: reportError
},
{ kind: 'divider', key: 'd2' },
{
key: 'delete',
label: st('queue.jobMenu.removeJob', 'Remove job'),
icon: 'icon-[lucide--circle-minus]',
onClick: () => removeFailedJob(resolveItem())
onClick: removeFailedJob
}
]
}
@@ -336,21 +334,21 @@ export function useJobMenu(
key: 'open-workflow',
label: jobMenuOpenWorkflowLabel.value,
icon: 'icon-[comfy--workflow]',
onClick: () => openJobWorkflow(resolveItem())
onClick: openJobWorkflow
},
{ kind: 'divider', key: 'd1' },
{
key: 'copy-id',
label: jobMenuCopyJobIdLabel.value,
icon: 'icon-[lucide--copy]',
onClick: () => copyJobId(resolveItem())
onClick: copyJobId
},
{ kind: 'divider', key: 'd2' },
{
key: 'cancel-job',
label: jobMenuCancelLabel.value,
icon: 'icon-[lucide--x]',
onClick: () => cancelJob(resolveItem())
onClick: cancelJob
}
]
})

View File

@@ -1,35 +1,74 @@
import { describe, it, expect } from 'vitest'
import { createPinia, setActivePinia } from 'pinia'
import { beforeEach, describe, expect, it } from 'vitest'
import { useResultGallery } from '@/composables/queue/useResultGallery'
import type { JobListItem } from '@/composables/queue/useJobList'
import type { JobListItem as JobListViewItem } from '@/composables/queue/useJobList'
import type { JobListItem } from '@/platform/remote/comfyui/jobs/jobTypes'
import { ResultItemImpl, TaskItemImpl } from '@/stores/queueStore'
type PreviewLike = { url: string; supportsPreview: boolean }
const createResultItem = (
url: string,
supportsPreview = true
): ResultItemImpl => {
const item = new ResultItemImpl({
filename: url,
subfolder: '',
type: 'output',
nodeId: 'node-1',
mediaType: supportsPreview ? 'images' : 'unknown'
})
// Override url getter for test matching
Object.defineProperty(item, 'url', { get: () => url })
Object.defineProperty(item, 'supportsPreview', { get: () => supportsPreview })
return item
}
const createPreview = (url: string, supportsPreview = true): PreviewLike => ({
url,
supportsPreview
const createMockJob = (id: string, outputsCount = 1): JobListItem => ({
id,
status: 'completed',
create_time: Date.now(),
preview_output: null,
outputs_count: outputsCount,
priority: 0
})
const createTask = (preview?: PreviewLike) => ({
previewOutput: preview
})
const createTask = (
preview?: ResultItemImpl,
allOutputs?: ResultItemImpl[],
outputsCount = 1
): TaskItemImpl => {
const job = createMockJob(
`task-${Math.random().toString(36).slice(2)}`,
outputsCount
)
const flatOutputs = allOutputs ?? (preview ? [preview] : [])
return new TaskItemImpl(job, {}, flatOutputs)
}
const createJobItem = (id: string, preview?: PreviewLike): JobListItem =>
const createJobViewItem = (
id: string,
taskRef?: TaskItemImpl
): JobListViewItem =>
({
id,
title: `Job ${id}`,
meta: '',
state: 'completed',
showClear: false,
taskRef: preview ? { previewOutput: preview } : undefined
}) as JobListItem
taskRef
}) as JobListViewItem
describe('useResultGallery', () => {
it('collects only previewable outputs and preserves their order', () => {
const previewable = [createPreview('p-1'), createPreview('p-2')]
beforeEach(() => {
setActivePinia(createPinia())
})
it('collects only previewable outputs and preserves their order', async () => {
const previewable = [createResultItem('p-1'), createResultItem('p-2')]
const nonPreviewable = createResultItem('skip-me', false)
const tasks = [
createTask(previewable[0]),
createTask({ url: 'skip-me', supportsPreview: false }),
createTask(nonPreviewable),
createTask(previewable[1]),
createTask()
]
@@ -38,28 +77,28 @@ describe('useResultGallery', () => {
() => tasks
)
onViewItem(createJobItem('job-1', previewable[0]))
await onViewItem(createJobViewItem('job-1', tasks[0]))
expect(galleryItems.value).toEqual(previewable)
expect(galleryItems.value).toEqual([previewable[0]])
expect(galleryActiveIndex.value).toBe(0)
})
it('does not change state when there are no previewable tasks', () => {
it('does not change state when there are no previewable tasks', async () => {
const { galleryItems, galleryActiveIndex, onViewItem } = useResultGallery(
() => []
)
onViewItem(createJobItem('job-missing'))
await onViewItem(createJobViewItem('job-missing'))
expect(galleryItems.value).toEqual([])
expect(galleryActiveIndex.value).toBe(-1)
})
it('activates the index that matches the viewed preview URL', () => {
it('activates the index that matches the viewed preview URL', async () => {
const previewable = [
createPreview('p-1'),
createPreview('p-2'),
createPreview('p-3')
createResultItem('p-1'),
createResultItem('p-2'),
createResultItem('p-3')
]
const tasks = previewable.map((preview) => createTask(preview))
@@ -67,37 +106,66 @@ describe('useResultGallery', () => {
() => tasks
)
onViewItem(createJobItem('job-2', createPreview('p-2')))
await onViewItem(createJobViewItem('job-2', tasks[1]))
expect(galleryItems.value).toEqual(previewable)
expect(galleryActiveIndex.value).toBe(1)
expect(galleryItems.value).toEqual([previewable[1]])
expect(galleryActiveIndex.value).toBe(0)
})
it('defaults to the first entry when the clicked job lacks a preview', () => {
const previewable = [createPreview('p-1'), createPreview('p-2')]
it('defaults to the first entry when the clicked job lacks a preview', async () => {
const previewable = [createResultItem('p-1'), createResultItem('p-2')]
const tasks = previewable.map((preview) => createTask(preview))
const { galleryItems, galleryActiveIndex, onViewItem } = useResultGallery(
() => tasks
)
onViewItem(createJobItem('job-no-preview'))
await onViewItem(createJobViewItem('job-no-preview'))
expect(galleryItems.value).toEqual(previewable)
expect(galleryActiveIndex.value).toBe(0)
})
it('defaults to the first entry when no gallery item matches the preview URL', () => {
const previewable = [createPreview('p-1'), createPreview('p-2')]
it('defaults to the first entry when no gallery item matches the preview URL', async () => {
const previewable = [createResultItem('p-1'), createResultItem('p-2')]
const tasks = previewable.map((preview) => createTask(preview))
const { galleryItems, galleryActiveIndex, onViewItem } = useResultGallery(
() => tasks
)
onViewItem(createJobItem('job-mismatch', createPreview('missing')))
const taskWithMismatchedPreview = createTask(createResultItem('missing'))
await onViewItem(
createJobViewItem('job-mismatch', taskWithMismatchedPreview)
)
expect(galleryItems.value).toEqual(previewable)
expect(galleryItems.value).toEqual([createResultItem('missing')])
expect(galleryActiveIndex.value).toBe(0)
})
it('loads full outputs when task has only preview outputs', async () => {
const previewOutput = createResultItem('preview-1')
const fullOutputs = [
createResultItem('full-1'),
createResultItem('full-2'),
createResultItem('full-3')
]
// Create a task with outputsCount > 1 to trigger lazy loading
const job = createMockJob('task-1', 3)
const task = new TaskItemImpl(job, {}, [previewOutput])
// Mock loadFullOutputs to return full outputs
const loadedTask = new TaskItemImpl(job, {}, fullOutputs)
task.loadFullOutputs = async () => loadedTask
const { galleryItems, galleryActiveIndex, onViewItem } = useResultGallery(
() => [task]
)
await onViewItem(createJobViewItem('job-1', task))
expect(galleryItems.value).toEqual(fullOutputs)
expect(galleryActiveIndex.value).toBe(0)
})
})

View File

@@ -1,39 +1,42 @@
import { ref, shallowRef } from 'vue'
import type { JobListItem } from '@/composables/queue/useJobList'
/** Minimal preview item interface for gallery filtering. */
interface PreviewItem {
url: string
supportsPreview: boolean
}
/** Minimal task interface for gallery preview. */
interface TaskWithPreview<T extends PreviewItem = PreviewItem> {
previewOutput?: T
}
import { findActiveIndex, getOutputsForTask } from '@/services/jobOutputCache'
import type { ResultItemImpl, TaskItemImpl } from '@/stores/queueStore'
/**
* Manages result gallery state and activation for queue items.
*/
export function useResultGallery<T extends PreviewItem>(
getFilteredTasks: () => TaskWithPreview<T>[]
) {
export function useResultGallery(getFilteredTasks: () => TaskItemImpl[]) {
const galleryActiveIndex = ref(-1)
const galleryItems = shallowRef<T[]>([])
const galleryItems = shallowRef<ResultItemImpl[]>([])
const onViewItem = (item: JobListItem) => {
const items: T[] = getFilteredTasks().flatMap((t) => {
const preview = t.previewOutput
return preview && preview.supportsPreview ? [preview] : []
})
async function onViewItem(item: JobListItem) {
const tasks = getFilteredTasks()
if (!tasks.length) return
const targetTask = item.taskRef
const targetOutputs = targetTask
? await getOutputsForTask(targetTask)
: null
// Request was superseded by a newer one
if (targetOutputs === null && targetTask) return
// Use target's outputs if available, otherwise fall back to all previews
const items = targetOutputs?.length
? targetOutputs
: tasks
.map((t) => t.previewOutput)
.filter((o): o is ResultItemImpl => !!o)
if (!items.length) return
galleryItems.value = items
const activeUrl: string | undefined = item.taskRef?.previewOutput?.url
const idx = activeUrl ? items.findIndex((o) => o.url === activeUrl) : 0
galleryActiveIndex.value = idx >= 0 ? idx : 0
galleryActiveIndex.value = findActiveIndex(
items,
item.taskRef?.previewOutput?.url
)
}
return {

View File

@@ -32,7 +32,6 @@ export function mapTaskOutputToAssetItem(
subfolder: output.subfolder,
executionTimeInSeconds: taskItem.executionTimeInSeconds,
format: output.format,
workflow: taskItem.workflow,
create_time: taskItem.createTime
}

View File

@@ -1,380 +0,0 @@
/**
* @fileoverview Test fixtures for history tests.
*/
import type { HistoryResponseV2 } from '@/platform/remote/comfyui/history/types/historyV2Types'
import type { HistoryTaskItem } from '@/schemas/apiSchema'
/**
* V1 API raw response format (object with prompt IDs as keys)
*/
export const historyV1RawResponse: Record<
string,
Omit<HistoryTaskItem, 'taskType'>
> = {
'complete-item-id': {
prompt: [
24,
'complete-item-id',
{},
{
client_id: 'test-client',
extra_pnginfo: {
workflow: {
id: '44f0c9f9-b5a7-48de-99fc-7e80c1570241',
revision: 0,
last_node_id: 9,
last_link_id: 9,
nodes: [],
links: [],
groups: [],
config: {},
extra: {},
version: 0.4
}
}
},
['9']
],
outputs: {
'9': {
images: [
{
filename: 'test.png',
subfolder: '',
type: 'output'
}
]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_start',
{ prompt_id: 'complete-item-id', timestamp: 1234567890 }
],
[
'execution_success',
{ prompt_id: 'complete-item-id', timestamp: 1234567900 }
]
]
},
meta: {
'9': {
node_id: '9',
display_node: '9'
}
}
},
'no-status-id': {
prompt: [
23,
'no-status-id',
{},
{
client_id: 'inference'
},
['10']
],
outputs: {
'10': {
images: []
}
},
status: undefined,
meta: {
'10': {
node_id: '10',
display_node: '10'
}
}
}
}
/**
* V2 response with multiple edge cases:
* - Item 0: Complete with all fields
* - Item 1: Missing optional status field
* - Item 2: Missing optional meta field
* - Item 3: Multiple output nodes
*/
export const historyV2Fixture: HistoryResponseV2 = {
history: [
{
prompt_id: 'complete-item-id',
prompt: {
priority: 24,
prompt_id: 'complete-item-id',
extra_data: {
client_id: 'test-client',
extra_pnginfo: {
workflow: {
id: '44f0c9f9-b5a7-48de-99fc-7e80c1570241',
revision: 0,
last_node_id: 9,
last_link_id: 9,
nodes: [],
links: [],
groups: [],
config: {},
extra: {},
version: 0.4
}
}
}
},
outputs: {
'9': {
images: [
{
filename: 'test.png',
subfolder: '',
type: 'output'
}
]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_start',
{ prompt_id: 'complete-item-id', timestamp: 1234567890 }
],
[
'execution_success',
{ prompt_id: 'complete-item-id', timestamp: 1234567900 }
]
]
},
meta: {
'9': {
node_id: '9',
display_node: '9'
}
}
},
{
prompt_id: 'no-status-id',
prompt: {
priority: 23,
prompt_id: 'no-status-id',
extra_data: {
client_id: 'inference'
}
},
outputs: {
'10': {
images: []
}
},
meta: {
'10': {
node_id: '10',
display_node: '10'
}
}
},
{
prompt_id: 'no-meta-id',
prompt: {
priority: 22,
prompt_id: 'no-meta-id',
extra_data: {
client_id: 'web-ui'
}
},
outputs: {
'11': {
audio: []
}
},
status: {
status_str: 'error',
completed: false,
messages: []
}
},
{
prompt_id: 'multi-output-id',
prompt: {
priority: 21,
prompt_id: 'multi-output-id',
extra_data: {
client_id: 'batch-processor'
}
},
outputs: {
'3': {
images: [{ filename: 'img1.png', type: 'output', subfolder: '' }]
},
'9': {
images: [{ filename: 'img2.png', type: 'output', subfolder: '' }]
},
'12': {
video: [{ filename: 'video.mp4', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: []
},
meta: {
'3': { node_id: '3', display_node: '3' },
'9': { node_id: '9', display_node: '9' },
'12': { node_id: '12', display_node: '12' }
}
}
]
}
/**
* Expected V1 transformation of historyV2Fixture
* Priority is now synthetic based on execution_success timestamp:
* - complete-item-id: has timestamp → priority 1 (only one with timestamp)
* - no-status-id: no status → priority 0
* - no-meta-id: empty messages → priority 0
* - multi-output-id: empty messages → priority 0
*/
export const expectedV1Fixture: HistoryTaskItem[] = [
{
taskType: 'History',
prompt: [
1,
'complete-item-id',
{},
{
client_id: 'test-client',
extra_pnginfo: {
workflow: {
id: '44f0c9f9-b5a7-48de-99fc-7e80c1570241',
revision: 0,
last_node_id: 9,
last_link_id: 9,
nodes: [],
links: [],
groups: [],
config: {},
extra: {},
version: 0.4
}
}
},
['9']
],
outputs: {
'9': {
images: [
{
filename: 'test.png',
subfolder: '',
type: 'output'
}
]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_start',
{ prompt_id: 'complete-item-id', timestamp: 1234567890 }
],
[
'execution_success',
{ prompt_id: 'complete-item-id', timestamp: 1234567900 }
]
]
},
meta: {
'9': {
node_id: '9',
display_node: '9'
}
}
},
{
taskType: 'History',
prompt: [
0,
'no-status-id',
{},
{
client_id: 'inference'
},
['10']
],
outputs: {
'10': {
images: []
}
},
status: undefined,
meta: {
'10': {
node_id: '10',
display_node: '10'
}
}
},
{
taskType: 'History',
prompt: [
0,
'no-meta-id',
{},
{
client_id: 'web-ui'
},
['11']
],
outputs: {
'11': {
audio: []
}
},
status: {
status_str: 'error',
completed: false,
messages: []
},
meta: undefined
},
{
taskType: 'History',
prompt: [
0,
'multi-output-id',
{},
{
client_id: 'batch-processor'
},
['3', '9', '12']
],
outputs: {
'3': {
images: [{ filename: 'img1.png', type: 'output', subfolder: '' }]
},
'9': {
images: [{ filename: 'img2.png', type: 'output', subfolder: '' }]
},
'12': {
video: [{ filename: 'video.mp4', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: []
},
meta: {
'3': { node_id: '3', display_node: '3' },
'9': { node_id: '9', display_node: '9' },
'12': { node_id: '12', display_node: '12' }
}
}
]

View File

@@ -1,434 +0,0 @@
/**
* @fileoverview Unit tests for V2 to V1 history adapter.
*/
import { describe, expect, it } from 'vitest'
import { mapHistoryV2toHistory } from '@/platform/remote/comfyui/history/adapters/v2ToV1Adapter'
import { zRawHistoryItemV2 } from '@/platform/remote/comfyui/history/types/historyV2Types'
import type { HistoryResponseV2 } from '@/platform/remote/comfyui/history/types/historyV2Types'
import {
expectedV1Fixture,
historyV2Fixture
} from '@/platform/remote/comfyui/history/__fixtures__/historyFixtures'
import type { HistoryTaskItem } from '@/platform/remote/comfyui/history/types/historyV1Types'
const historyV2WithMissingTimestamp: HistoryResponseV2 = {
history: [
{
prompt_id: 'item-timestamp-1000',
prompt: {
priority: 0,
prompt_id: 'item-timestamp-1000',
extra_data: {
client_id: 'test-client'
}
},
outputs: {
'1': {
images: [{ filename: 'test1.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_success',
{ prompt_id: 'item-timestamp-1000', timestamp: 1000 }
]
]
}
},
{
prompt_id: 'item-timestamp-2000',
prompt: {
priority: 0,
prompt_id: 'item-timestamp-2000',
extra_data: {
client_id: 'test-client'
}
},
outputs: {
'2': {
images: [{ filename: 'test2.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_success',
{ prompt_id: 'item-timestamp-2000', timestamp: 2000 }
]
]
}
},
{
prompt_id: 'item-no-timestamp',
prompt: {
priority: 0,
prompt_id: 'item-no-timestamp',
extra_data: {
client_id: 'test-client'
}
},
outputs: {
'3': {
images: [{ filename: 'test3.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: []
}
}
]
}
const historyV2FiveItemsSorting: HistoryResponseV2 = {
history: [
{
prompt_id: 'item-timestamp-3000',
prompt: {
priority: 0,
prompt_id: 'item-timestamp-3000',
extra_data: { client_id: 'test-client' }
},
outputs: {
'1': {
images: [{ filename: 'test1.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_success',
{ prompt_id: 'item-timestamp-3000', timestamp: 3000 }
]
]
}
},
{
prompt_id: 'item-timestamp-1000',
prompt: {
priority: 0,
prompt_id: 'item-timestamp-1000',
extra_data: { client_id: 'test-client' }
},
outputs: {
'2': {
images: [{ filename: 'test2.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_success',
{ prompt_id: 'item-timestamp-1000', timestamp: 1000 }
]
]
}
},
{
prompt_id: 'item-timestamp-5000',
prompt: {
priority: 0,
prompt_id: 'item-timestamp-5000',
extra_data: { client_id: 'test-client' }
},
outputs: {
'3': {
images: [{ filename: 'test3.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_success',
{ prompt_id: 'item-timestamp-5000', timestamp: 5000 }
]
]
}
},
{
prompt_id: 'item-timestamp-2000',
prompt: {
priority: 0,
prompt_id: 'item-timestamp-2000',
extra_data: { client_id: 'test-client' }
},
outputs: {
'4': {
images: [{ filename: 'test4.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_success',
{ prompt_id: 'item-timestamp-2000', timestamp: 2000 }
]
]
}
},
{
prompt_id: 'item-timestamp-4000',
prompt: {
priority: 0,
prompt_id: 'item-timestamp-4000',
extra_data: { client_id: 'test-client' }
},
outputs: {
'5': {
images: [{ filename: 'test5.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_success',
{ prompt_id: 'item-timestamp-4000', timestamp: 4000 }
]
]
}
}
]
}
const historyV2MultipleNoTimestamp: HistoryResponseV2 = {
history: [
{
prompt_id: 'item-no-timestamp-1',
prompt: {
priority: 0,
prompt_id: 'item-no-timestamp-1',
extra_data: { client_id: 'test-client' }
},
outputs: {
'1': {
images: [{ filename: 'test1.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: []
}
},
{
prompt_id: 'item-no-timestamp-2',
prompt: {
priority: 0,
prompt_id: 'item-no-timestamp-2',
extra_data: { client_id: 'test-client' }
},
outputs: {
'2': {
images: [{ filename: 'test2.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: []
}
},
{
prompt_id: 'item-no-timestamp-3',
prompt: {
priority: 0,
prompt_id: 'item-no-timestamp-3',
extra_data: { client_id: 'test-client' }
},
outputs: {
'3': {
images: [{ filename: 'test3.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: []
}
}
]
}
function findResultByPromptId(
result: HistoryTaskItem[],
promptId: string
): HistoryTaskItem {
const item = result.find((item) => item.prompt[1] === promptId)
if (!item) {
throw new Error(`Expected item with promptId ${promptId} not found`)
}
return item
}
describe('mapHistoryV2toHistory', () => {
describe('fixture validation', () => {
it('should have valid fixture data', () => {
// Validate all items in the fixture to ensure test data is correct
historyV2Fixture.history.forEach((item: unknown) => {
expect(() => zRawHistoryItemV2.parse(item)).not.toThrow()
})
})
})
describe('given a complete V2 history response with edge cases', () => {
const history = mapHistoryV2toHistory(historyV2Fixture)
it('should transform all items to V1 format with correct structure', () => {
expect(history).toEqual(expectedV1Fixture)
})
it('should add taskType "History" to all items', () => {
history.forEach((item) => {
expect(item.taskType).toBe('History')
})
})
it('should transform prompt to V1 tuple [priority, id, {}, extra_data, outputNodeIds]', () => {
const firstItem = history[0]
expect(firstItem.prompt[0]).toBe(1) // Synthetic priority based on timestamp
expect(firstItem.prompt[1]).toBe('complete-item-id')
expect(firstItem.prompt[2]).toEqual({}) // history v2 does not return this data
expect(firstItem.prompt[3]).toMatchObject({ client_id: 'test-client' })
expect(firstItem.prompt[4]).toEqual(['9'])
})
it('should handle missing optional status field', () => {
expect(history[1].prompt[1]).toBe('no-status-id')
expect(history[1].status).toBeUndefined()
})
it('should handle missing optional meta field', () => {
expect(history[2].prompt[1]).toBe('no-meta-id')
expect(history[2].meta).toBeUndefined()
})
it('should derive output node IDs from outputs object keys', () => {
const multiOutputItem = history[3]
expect(multiOutputItem.prompt[4]).toEqual(
expect.arrayContaining(['3', '9', '12'])
)
expect(multiOutputItem.prompt[4]).toHaveLength(3)
})
})
describe('given empty history array', () => {
it('should return empty array', () => {
const emptyResponse: HistoryResponseV2 = { history: [] }
const history = mapHistoryV2toHistory(emptyResponse)
expect(history).toEqual([])
})
})
describe('given empty outputs object', () => {
it('should return empty array for output node IDs', () => {
const v2Response: HistoryResponseV2 = {
history: [
{
prompt_id: 'test-id',
prompt: {
priority: 0,
prompt_id: 'test-id',
extra_data: { client_id: 'test' }
},
outputs: {}
}
]
}
const history = mapHistoryV2toHistory(v2Response)
expect(history[0].prompt[4]).toEqual([])
})
})
describe('given missing client_id', () => {
it('should accept history items without client_id', () => {
const v2Response: HistoryResponseV2 = {
history: [
{
prompt_id: 'test-id',
prompt: {
priority: 0,
prompt_id: 'test-id',
extra_data: {}
},
outputs: {}
}
]
}
const history = mapHistoryV2toHistory(v2Response)
expect(history[0].prompt[3].client_id).toBeUndefined()
})
})
describe('timestamp-based priority assignment', () => {
it('assigns priority 0 to items without execution_success timestamp', () => {
const result = mapHistoryV2toHistory(historyV2WithMissingTimestamp)
expect(result).toHaveLength(3)
const item1000 = findResultByPromptId(result, 'item-timestamp-1000')
const item2000 = findResultByPromptId(result, 'item-timestamp-2000')
const itemNoTimestamp = findResultByPromptId(result, 'item-no-timestamp')
expect(item2000.prompt[0]).toBe(2)
expect(item1000.prompt[0]).toBe(1)
expect(itemNoTimestamp.prompt[0]).toBe(0)
})
it('correctly sorts and assigns priorities for multiple items', () => {
const result = mapHistoryV2toHistory(historyV2FiveItemsSorting)
expect(result).toHaveLength(5)
const item1000 = findResultByPromptId(result, 'item-timestamp-1000')
const item2000 = findResultByPromptId(result, 'item-timestamp-2000')
const item3000 = findResultByPromptId(result, 'item-timestamp-3000')
const item4000 = findResultByPromptId(result, 'item-timestamp-4000')
const item5000 = findResultByPromptId(result, 'item-timestamp-5000')
expect(item5000.prompt[0]).toBe(5)
expect(item4000.prompt[0]).toBe(4)
expect(item3000.prompt[0]).toBe(3)
expect(item2000.prompt[0]).toBe(2)
expect(item1000.prompt[0]).toBe(1)
})
it('assigns priority 0 to all items when multiple items lack timestamps', () => {
const result = mapHistoryV2toHistory(historyV2MultipleNoTimestamp)
expect(result).toHaveLength(3)
const item1 = findResultByPromptId(result, 'item-no-timestamp-1')
const item2 = findResultByPromptId(result, 'item-no-timestamp-2')
const item3 = findResultByPromptId(result, 'item-no-timestamp-3')
expect(item1.prompt[0]).toBe(0)
expect(item2.prompt[0]).toBe(0)
expect(item3.prompt[0]).toBe(0)
})
})
})

View File

@@ -1,74 +0,0 @@
/**
* @fileoverview Adapter to convert V2 history format to V1 format
* @module platform/remote/comfyui/history/adapters/v2ToV1Adapter
*/
import type { HistoryTaskItem, TaskPrompt } from '../types/historyV1Types'
import type {
HistoryResponseV2,
RawHistoryItemV2,
TaskOutput,
TaskPromptV2
} from '../types/historyV2Types'
function mapPromptV2toV1(
promptV2: TaskPromptV2,
outputs: TaskOutput,
syntheticPriority: number,
createTime?: number
): TaskPrompt {
const extraData = {
...(promptV2.extra_data ?? {}),
...(typeof createTime === 'number' ? { create_time: createTime } : {})
}
return [
syntheticPriority,
promptV2.prompt_id,
{},
extraData,
Object.keys(outputs)
]
}
function getExecutionSuccessTimestamp(item: RawHistoryItemV2): number {
return (
item.status?.messages?.find((m) => m[0] === 'execution_success')?.[1]
?.timestamp ?? 0
)
}
export function mapHistoryV2toHistory(
historyV2Response: HistoryResponseV2
): HistoryTaskItem[] {
const { history } = historyV2Response
// Sort by execution_success timestamp, descending (newest first)
history.sort((a, b) => {
return getExecutionSuccessTimestamp(b) - getExecutionSuccessTimestamp(a)
})
// Count items with valid timestamps for synthetic priority calculation
const countWithTimestamps = history.filter(
(item) => getExecutionSuccessTimestamp(item) > 0
).length
return history.map((item, index): HistoryTaskItem => {
const { prompt, outputs, status, meta } = item
const timestamp = getExecutionSuccessTimestamp(item)
// Items with timestamps get priority based on sorted position (highest first)
const syntheticPriority = timestamp > 0 ? countWithTimestamps - index : 0
return {
taskType: 'History' as const,
prompt: mapPromptV2toV1(
prompt,
outputs,
syntheticPriority,
item.create_time
),
status,
outputs,
meta
}
})
}
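/*
 * Illustrative sketch (not part of the original file; mirrors the
 * "timestamp-based priority assignment" tests above). Given two items with
 * execution_success timestamps and one without, the adapter sorts newest
 * first and counts priorities down, assigning 0 to items lacking a timestamp.
 * The variable names below are hypothetical.
 */
declare const itemAt3000: RawHistoryItemV2 // execution_success timestamp 3000
declare const itemAt1000: RawHistoryItemV2 // execution_success timestamp 1000
declare const itemNoTimestamp: RawHistoryItemV2 // no execution_success message
const exampleHistory = mapHistoryV2toHistory({
  history: [itemAt1000, itemAt3000, itemNoTimestamp]
})
exampleHistory.map((item) => item.prompt[0])
// => [2, 1, 0] (itemAt3000, itemAt1000, itemNoTimestamp after sorting)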

View File

@@ -1,52 +0,0 @@
/**
* @fileoverview Unit tests for V1 history fetcher.
*/
import { describe, expect, it, vi } from 'vitest'
import { fetchHistoryV1 } from '@/platform/remote/comfyui/history/fetchers/fetchHistoryV1'
import { historyV1RawResponse } from '@/platform/remote/comfyui/history/__fixtures__/historyFixtures'
describe('fetchHistoryV1', () => {
const mockFetchApi = vi.fn().mockResolvedValue({
json: async () => historyV1RawResponse
})
it('should fetch from /history endpoint with default max_items', async () => {
await fetchHistoryV1(mockFetchApi)
expect(mockFetchApi).toHaveBeenCalledWith('/history?max_items=200')
})
it('should fetch with custom max_items parameter', async () => {
await fetchHistoryV1(mockFetchApi, 50)
expect(mockFetchApi).toHaveBeenCalledWith('/history?max_items=50')
})
it('should transform object response to array with taskType and preserve fields', async () => {
const result = await fetchHistoryV1(mockFetchApi)
expect(result.History).toHaveLength(2)
result.History.forEach((item) => {
expect(item.taskType).toBe('History')
})
expect(result.History[0]).toMatchObject({
taskType: 'History',
prompt: [24, 'complete-item-id', {}, expect.any(Object), ['9']],
outputs: expect.any(Object),
status: expect.any(Object),
meta: expect.any(Object)
})
})
it('should handle empty response object', async () => {
const emptyMock = vi.fn().mockResolvedValue({
json: async () => ({})
})
const result = await fetchHistoryV1(emptyMock)
expect(result.History).toEqual([])
})
})

View File

@@ -1,51 +0,0 @@
/**
* @fileoverview V1 History Fetcher - Desktop/localhost API
* @module platform/remote/comfyui/history/fetchers/fetchHistoryV1
*
* Fetches history directly from V1 API endpoint.
* Used by desktop and localhost distributions.
*/
import type {
HistoryTaskItem,
HistoryV1Response
} from '../types/historyV1Types'
/**
* Fetches history from V1 API endpoint
* @param fetchApi - Function that performs the request against the ComfyUI API
* @param maxItems - Maximum number of history items to fetch
* @param offset - Offset for pagination (must be non-negative integer)
* @returns Promise resolving to V1 history response
* @throws Error if offset is invalid (negative or non-integer)
*/
export async function fetchHistoryV1(
fetchApi: (url: string) => Promise<Response>,
maxItems: number = 200,
offset?: number
): Promise<HistoryV1Response> {
// Validate offset parameter
if (offset !== undefined && (offset < 0 || !Number.isInteger(offset))) {
throw new Error(
`Invalid offset parameter: ${offset}. Must be a non-negative integer.`
)
}
const params = new URLSearchParams({ max_items: maxItems.toString() })
if (offset !== undefined) {
params.set('offset', offset.toString())
}
const url = `/history?${params.toString()}`
const res = await fetchApi(url)
const json: Record<
string,
Omit<HistoryTaskItem, 'taskType'>
> = await res.json()
return {
History: Object.values(json).map((item) => ({
...item,
taskType: 'History'
}))
}
}

View File

@@ -1,41 +0,0 @@
/**
* @fileoverview Unit tests for V2 history fetcher.
*/
import { describe, expect, it, vi } from 'vitest'
import { fetchHistoryV2 } from '@/platform/remote/comfyui/history/fetchers/fetchHistoryV2'
import {
expectedV1Fixture,
historyV2Fixture
} from '@/platform/remote/comfyui/history/__fixtures__/historyFixtures'
describe('fetchHistoryV2', () => {
const mockFetchApi = vi.fn().mockResolvedValue({
json: async () => historyV2Fixture
})
it('should fetch from /history_v2 endpoint with default max_items', async () => {
await fetchHistoryV2(mockFetchApi)
expect(mockFetchApi).toHaveBeenCalledWith('/history_v2?max_items=200')
})
it('should fetch with custom max_items parameter', async () => {
await fetchHistoryV2(mockFetchApi, 50)
expect(mockFetchApi).toHaveBeenCalledWith('/history_v2?max_items=50')
})
it('should adapt V2 response to V1-compatible format', async () => {
const result = await fetchHistoryV2(mockFetchApi)
expect(result.History).toEqual(expectedV1Fixture)
expect(result).toHaveProperty('History')
expect(Array.isArray(result.History)).toBe(true)
result.History.forEach((item) => {
expect(item.taskType).toBe('History')
expect(item.prompt).toHaveLength(5)
})
})
})

View File

@@ -1,42 +0,0 @@
/**
* @fileoverview V2 History Fetcher - Cloud API with adapter
* @module platform/remote/comfyui/history/fetchers/fetchHistoryV2
*
* Fetches history from V2 API endpoint and converts to V1 format.
* Used exclusively by cloud distribution.
*/
import { mapHistoryV2toHistory } from '../adapters/v2ToV1Adapter'
import type { HistoryV1Response } from '../types/historyV1Types'
import type { HistoryResponseV2 } from '../types/historyV2Types'
/**
* Fetches history from V2 API endpoint and adapts to V1 format
* @param fetchApi - Function that performs the request against the ComfyUI API
* @param maxItems - Maximum number of history items to fetch
* @param offset - Offset for pagination (must be non-negative integer)
* @returns Promise resolving to V1 history response (adapted from V2)
* @throws Error if offset is invalid (negative or non-integer)
*/
export async function fetchHistoryV2(
fetchApi: (url: string) => Promise<Response>,
maxItems: number = 200,
offset?: number
): Promise<HistoryV1Response> {
// Validate offset parameter
if (offset !== undefined && (offset < 0 || !Number.isInteger(offset))) {
throw new Error(
`Invalid offset parameter: ${offset}. Must be a non-negative integer.`
)
}
const params = new URLSearchParams({ max_items: maxItems.toString() })
if (offset !== undefined) {
params.set('offset', offset.toString())
}
const url = `/history_v2?${params.toString()}`
const res = await fetchApi(url)
const rawData: HistoryResponseV2 = await res.json()
const adaptedHistory = mapHistoryV2toHistory(rawData)
return { History: adaptedHistory }
}

View File

@@ -1,29 +0,0 @@
/**
* @fileoverview History API module - Distribution-aware exports
* @module platform/remote/comfyui/history
*
* This module provides a unified history fetching interface that automatically
* uses the correct implementation based on build-time distribution constant.
*
* - Cloud builds: Uses V2 API with adapter (tree-shakes V1 fetcher)
* - Desktop/localhost builds: Uses V1 API directly (tree-shakes V2 fetcher + adapter)
*
* The rest of the application only needs to import from this module and use
* V1 types - all distribution-specific details are encapsulated here.
*/
import { isCloud } from '@/platform/distribution/types'
import { fetchHistoryV1 } from './fetchers/fetchHistoryV1'
import { fetchHistoryV2 } from './fetchers/fetchHistoryV2'
/**
* Fetches history using the appropriate API for the current distribution.
* Build-time constant enables dead code elimination - only one implementation
* will be included in the final bundle.
*/
export const fetchHistory = isCloud ? fetchHistoryV2 : fetchHistoryV1
/**
* Export only V1 types publicly - consumers don't need to know about V2
*/
export type * from './types'
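/*
 * Illustrative usage sketch (not part of the original file): callers receive
 * the same V1-shaped response on every distribution. The `fetchApi` argument
 * is the bound fetch wrapper from the api client, as used elsewhere in this
 * diff; the function name below is hypothetical.
 */
async function exampleLoadHistory(
  fetchApi: (url: string) => Promise<Response>
) {
  // Resolves to { History: HistoryTaskItem[] } on both cloud and desktop.
  const { History } = await fetchHistory(fetchApi, 50)
  return History.map((item) => item.prompt[1]) // prompt ids
}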

View File

@@ -1,335 +0,0 @@
/**
* @fileoverview Tests for history reconciliation (V1 and V2)
*/
import { beforeEach, describe, expect, it, vi } from 'vitest'
import { reconcileHistory } from '@/platform/remote/comfyui/history/reconciliation'
import type { TaskItem } from '@/schemas/apiSchema'
// Mock distribution types
vi.mock('@/platform/distribution/types', () => ({
isCloud: false,
isDesktop: true
}))
function createHistoryItem(promptId: string, queueIndex = 0): TaskItem {
return {
taskType: 'History',
prompt: [queueIndex, promptId, {}, {}, []],
status: { status_str: 'success', completed: true, messages: [] },
outputs: {}
}
}
function getAllPromptIds(result: TaskItem[]): string[] {
return result.map((item) => item.prompt[1])
}
describe('reconcileHistory (V1)', () => {
beforeEach(async () => {
const distTypes = await import('@/platform/distribution/types')
vi.mocked(distTypes).isCloud = false
})
describe('when filtering by queueIndex', () => {
it('should retain items with queueIndex greater than lastKnownQueueIndex', () => {
const serverHistory = [
createHistoryItem('new-1', 11),
createHistoryItem('new-2', 10),
createHistoryItem('old', 5)
]
const clientHistory = [createHistoryItem('old', 5)]
const result = reconcileHistory(serverHistory, clientHistory, 10, 9)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(3)
expect(promptIds).toContain('new-1')
expect(promptIds).toContain('new-2')
expect(promptIds).toContain('old')
})
it('should evict items with queueIndex less than or equal to lastKnownQueueIndex', () => {
const serverHistory = [
createHistoryItem('new', 11),
createHistoryItem('existing', 10),
createHistoryItem('old-should-not-appear', 5)
]
const clientHistory = [createHistoryItem('existing', 10)]
const result = reconcileHistory(serverHistory, clientHistory, 10, 10)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(2)
expect(promptIds).toContain('new')
expect(promptIds).toContain('existing')
expect(promptIds).not.toContain('old-should-not-appear')
})
it('should retain all server items when lastKnownQueueIndex is undefined', () => {
const serverHistory = [
createHistoryItem('item-1', 5),
createHistoryItem('item-2', 4)
]
const result = reconcileHistory(serverHistory, [], 10, undefined)
expect(result).toHaveLength(2)
expect(result[0].prompt[1]).toBe('item-1')
expect(result[1].prompt[1]).toBe('item-2')
})
})
describe('when reconciling with existing client items', () => {
it('should retain client items that still exist on server', () => {
const serverHistory = [
createHistoryItem('new', 11),
createHistoryItem('existing-1', 9),
createHistoryItem('existing-2', 8)
]
const clientHistory = [
createHistoryItem('existing-1', 9),
createHistoryItem('existing-2', 8)
]
const result = reconcileHistory(serverHistory, clientHistory, 10, 10)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(3)
expect(promptIds).toContain('new')
expect(promptIds).toContain('existing-1')
expect(promptIds).toContain('existing-2')
})
it('should evict client items that no longer exist on server', () => {
const serverHistory = [
createHistoryItem('new', 11),
createHistoryItem('keep', 9)
]
const clientHistory = [
createHistoryItem('keep', 9),
createHistoryItem('removed-from-server', 8)
]
const result = reconcileHistory(serverHistory, clientHistory, 10, 10)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(2)
expect(promptIds).toContain('new')
expect(promptIds).toContain('keep')
expect(promptIds).not.toContain('removed-from-server')
})
})
describe('when limiting the result count', () => {
it('should respect the maxItems constraint', () => {
const serverHistory = Array.from({ length: 10 }, (_, i) =>
createHistoryItem(`item-${i}`, 20 + i)
)
const result = reconcileHistory(serverHistory, [], 5, 15)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(5)
})
it('should evict lowest priority items when exceeding capacity', () => {
const serverHistory = [
createHistoryItem('new-1', 13),
createHistoryItem('new-2', 12),
createHistoryItem('new-3', 11),
createHistoryItem('existing', 9)
]
const clientHistory = [createHistoryItem('existing', 9)]
const result = reconcileHistory(serverHistory, clientHistory, 2, 10)
expect(result).toHaveLength(2)
expect(result[0].prompt[1]).toBe('new-1')
expect(result[1].prompt[1]).toBe('new-2')
})
})
describe('when handling empty collections', () => {
it('should return all server items when client history is empty', () => {
const serverHistory = [
createHistoryItem('item-1', 10),
createHistoryItem('item-2', 9)
]
const result = reconcileHistory(serverHistory, [], 10, 8)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(2)
})
it('should return empty result when server history is empty', () => {
const clientHistory = [createHistoryItem('item-1', 5)]
const result = reconcileHistory([], clientHistory, 10, 5)
expect(result).toHaveLength(0)
})
it('should return empty result when both collections are empty', () => {
const result = reconcileHistory([], [], 10, undefined)
expect(result).toHaveLength(0)
})
})
})
describe('reconcileHistory (V2/Cloud)', () => {
beforeEach(async () => {
const distTypes = await import('@/platform/distribution/types')
vi.mocked(distTypes).isCloud = true
})
describe('when adding new items from server', () => {
it('should retain items with promptIds not present in client history', () => {
const serverHistory = [
createHistoryItem('new-item'),
createHistoryItem('existing-item')
]
const clientHistory = [createHistoryItem('existing-item')]
const result = reconcileHistory(serverHistory, clientHistory, 10)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(2)
expect(promptIds).toContain('new-item')
expect(promptIds).toContain('existing-item')
})
it('should respect priority ordering when retaining multiple new items', () => {
const serverHistory = [
createHistoryItem('new-1'),
createHistoryItem('new-2'),
createHistoryItem('existing')
]
const clientHistory = [createHistoryItem('existing')]
const result = reconcileHistory(serverHistory, clientHistory, 10)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(3)
expect(promptIds).toContain('new-1')
expect(promptIds).toContain('new-2')
expect(promptIds).toContain('existing')
})
})
describe('when reconciling with existing client items', () => {
it('should retain client items that still exist on server', () => {
const serverHistory = [
createHistoryItem('item-1'),
createHistoryItem('item-2')
]
const clientHistory = [
createHistoryItem('item-1'),
createHistoryItem('item-2')
]
const result = reconcileHistory(serverHistory, clientHistory, 10)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(2)
expect(promptIds).toContain('item-1')
expect(promptIds).toContain('item-2')
})
it('should evict client items that no longer exist on server', () => {
const serverHistory = [createHistoryItem('item-1')]
const clientHistory = [
createHistoryItem('item-1'),
createHistoryItem('old-item')
]
const result = reconcileHistory(serverHistory, clientHistory, 10)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(1)
expect(promptIds).toContain('item-1')
expect(promptIds).not.toContain('old-item')
})
})
describe('when detecting new items by promptId', () => {
it('should retain new items regardless of queueIndex values', () => {
const serverHistory = [
createHistoryItem('existing', 100),
createHistoryItem('new-item', 50)
]
const clientHistory = [createHistoryItem('existing', 100)]
const result = reconcileHistory(serverHistory, clientHistory, 10)
const promptIds = getAllPromptIds(result)
expect(promptIds).toContain('new-item')
expect(promptIds).toContain('existing')
})
})
describe('when limiting the result count', () => {
it('should respect the maxItems constraint', () => {
const serverHistory = Array.from({ length: 10 }, (_, i) =>
createHistoryItem(`server-${i}`)
)
const clientHistory = Array.from({ length: 5 }, (_, i) =>
createHistoryItem(`client-${i}`)
)
const result = reconcileHistory(serverHistory, clientHistory, 5)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(5)
})
it('should evict lowest priority items when exceeding capacity', () => {
const serverHistory = [
createHistoryItem('new-1'),
createHistoryItem('new-2'),
createHistoryItem('existing')
]
const clientHistory = [createHistoryItem('existing')]
const result = reconcileHistory(serverHistory, clientHistory, 2)
expect(result).toHaveLength(2)
expect(result[0].prompt[1]).toBe('new-1')
expect(result[1].prompt[1]).toBe('new-2')
})
})
describe('when handling empty collections', () => {
it('should return all server items when client history is empty', () => {
const serverHistory = [
createHistoryItem('item-1'),
createHistoryItem('item-2')
]
const result = reconcileHistory(serverHistory, [], 10)
expect(result).toHaveLength(2)
expect(result[0].prompt[1]).toBe('item-1')
expect(result[1].prompt[1]).toBe('item-2')
})
it('should return empty result when server history is empty', () => {
const clientHistory = [
createHistoryItem('item-1'),
createHistoryItem('item-2')
]
const result = reconcileHistory([], clientHistory, 10)
expect(result).toHaveLength(0)
})
it('should return empty result when both collections are empty', () => {
const result = reconcileHistory([], [], 10)
expect(result).toHaveLength(0)
})
})
})

View File

@@ -1,122 +0,0 @@
/**
* @fileoverview History reconciliation for V1 and V2 APIs
* @module platform/remote/comfyui/history/reconciliation
*
* Returns list of items that should be displayed, sorted by queueIndex (newest first).
* Caller is responsible for mapping to their own class instances.
*
* V1: QueueIndex-based filtering for stable monotonic indices
* V2: PromptId-based merging for synthetic priorities (V2 assigns synthetic
* priorities after timestamp sorting, so new items may have lower priority
* than existing items)
*/
import { isCloud } from '@/platform/distribution/types'
import type { TaskItem } from '@/schemas/apiSchema'
/**
* V1 reconciliation: QueueIndex-based filtering works because V1 has stable,
* monotonically increasing queue indices.
*
* Sort order: Sorts serverHistory by queueIndex descending (newest first) to ensure
* consistent ordering. JavaScript .filter() maintains iteration order, so filtered
* results remain sorted. clientHistory is assumed already sorted from previous update.
*
* @returns All items to display, sorted by queueIndex descending (newest first)
*/
function reconcileHistoryV1(
serverHistory: TaskItem[],
clientHistory: TaskItem[],
maxItems: number,
lastKnownQueueIndex: number | undefined
): TaskItem[] {
const sortedServerHistory = serverHistory.sort(
(a, b) => b.prompt[0] - a.prompt[0]
)
const serverPromptIds = new Set(
sortedServerHistory.map((item) => item.prompt[1])
)
// If undefined, treat as initial sync (all items are new)
const itemsAddedSinceLastSync =
lastKnownQueueIndex === undefined
? sortedServerHistory
: sortedServerHistory.filter(
(item) => item.prompt[0] > lastKnownQueueIndex
)
const clientItemsStillOnServer = clientHistory.filter((item) =>
serverPromptIds.has(item.prompt[1])
)
// Merge new and reused items, sort by queueIndex descending, limit to maxItems
return [...itemsAddedSinceLastSync, ...clientItemsStillOnServer]
.sort((a, b) => b.prompt[0] - a.prompt[0])
.slice(0, maxItems)
}
/**
* V2 reconciliation: PromptId-based merging because V2 assigns synthetic
* priorities after sorting by timestamp.
*
* Sort order: Sorts serverHistory by queueIndex descending (newest first) to ensure
* consistent ordering. JavaScript .filter() maintains iteration order, so filtered
* results remain sorted. clientHistory is assumed already sorted from previous update.
*
* @returns All items to display, sorted by queueIndex descending (newest first)
*/
function reconcileHistoryV2(
serverHistory: TaskItem[],
clientHistory: TaskItem[],
maxItems: number
): TaskItem[] {
const sortedServerHistory = serverHistory.sort(
(a, b) => b.prompt[0] - a.prompt[0]
)
const serverPromptIds = new Set(
sortedServerHistory.map((item) => item.prompt[1])
)
const clientPromptIds = new Set(clientHistory.map((item) => item.prompt[1]))
const newItems = sortedServerHistory.filter(
(item) => !clientPromptIds.has(item.prompt[1])
)
const clientItemsStillOnServer = clientHistory.filter((item) =>
serverPromptIds.has(item.prompt[1])
)
// Merge new and reused items, sort by queueIndex descending, limit to maxItems
return [...newItems, ...clientItemsStillOnServer]
.sort((a, b) => b.prompt[0] - a.prompt[0])
.slice(0, maxItems)
}
/**
* Reconciles server history with client history.
* Automatically uses V1 (queueIndex-based) or V2 (promptId-based) algorithm based on
* distribution type.
*
* @param serverHistory - Server's current history items
* @param clientHistory - Client's existing history items
* @param maxItems - Maximum number of items to return
* @param lastKnownQueueIndex - Last queue index seen (used by V1 only; ignored for V2/cloud)
* @returns All items that should be displayed, sorted by queueIndex descending
*/
export function reconcileHistory(
serverHistory: TaskItem[],
clientHistory: TaskItem[],
maxItems: number,
lastKnownQueueIndex?: number
): TaskItem[] {
if (isCloud) {
return reconcileHistoryV2(serverHistory, clientHistory, maxItems)
}
return reconcileHistoryV1(
serverHistory,
clientHistory,
maxItems,
lastKnownQueueIndex
)
}
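/*
 * Illustrative usage sketch (not part of the original file): merging a fresh
 * server snapshot into the client's existing list, keeping at most 200 items.
 * On desktop the lastKnownQueueIndex narrows the new-item filter; on cloud it
 * is ignored. The variable names below are hypothetical.
 */
declare const serverSnapshot: TaskItem[] // latest server history response
declare const currentClientItems: TaskItem[] // items already displayed
declare const lastSeenQueueIndex: number | undefined // from the previous sync
const reconciled = reconcileHistory(
  serverSnapshot,
  currentClientItems,
  200,
  lastSeenQueueIndex
)
// reconciled is sorted by prompt[0] (queueIndex) descending and capped at 200.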

View File

@@ -1,15 +0,0 @@
/**
* @fileoverview History V1 types - Public interface used throughout the app
* @module platform/remote/comfyui/history/types/historyV1Types
*
* These types represent the V1 history format that the application expects.
* Both desktop (direct V1 API) and cloud (V2 API + adapter) return data in this format.
*/
import type { HistoryTaskItem, TaskPrompt } from '@/schemas/apiSchema'
export interface HistoryV1Response {
History: HistoryTaskItem[]
}
export type { HistoryTaskItem, TaskPrompt }

View File

@@ -1,46 +0,0 @@
/**
* @fileoverview History V2 types and schemas - Internal cloud API format
* @module platform/remote/comfyui/history/types/historyV2Types
*
* These types and schemas represent the V2 history format returned by the cloud API.
* They are only used internally and are converted to V1 format via adapter.
*
* IMPORTANT: These types should NOT be used outside this history module.
*/
import { z } from 'zod'
import {
zExtraData,
zPromptId,
zQueueIndex,
zStatus,
zTaskMeta,
zTaskOutput
} from '@/schemas/apiSchema'
const zTaskPromptV2 = z.object({
priority: zQueueIndex,
prompt_id: zPromptId,
extra_data: zExtraData
})
const zRawHistoryItemV2 = z.object({
prompt_id: zPromptId,
prompt: zTaskPromptV2,
status: zStatus.optional(),
outputs: zTaskOutput,
meta: zTaskMeta.optional(),
create_time: z.number().int().optional()
})
const zHistoryResponseV2 = z.object({
history: z.array(zRawHistoryItemV2)
})
export type TaskPromptV2 = z.infer<typeof zTaskPromptV2>
export type RawHistoryItemV2 = z.infer<typeof zRawHistoryItemV2>
export type HistoryResponseV2 = z.infer<typeof zHistoryResponseV2>
export type TaskOutput = z.infer<typeof zTaskOutput>
export { zRawHistoryItemV2 }
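/*
 * Illustrative sketch (not part of the original file): validating a single
 * raw V2 item before handing it to the adapter. The payload is hypothetical;
 * `status`, `meta` and `create_time` are optional per the schema above.
 */
const exampleParse = zRawHistoryItemV2.safeParse({
  prompt_id: 'example-id',
  prompt: { priority: 0, prompt_id: 'example-id', extra_data: {} },
  outputs: {}
})
// exampleParse.success === true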

View File

@@ -1,9 +0,0 @@
/**
* @fileoverview Public history types export
* @module platform/remote/comfyui/history/types
*
* Only V1 types are exported publicly - the rest of the app
* should never need to know about V2 types or implementation details.
*/
export type * from './historyV1Types'

View File

@@ -16,17 +16,13 @@ type JobsListResponse = z.infer<typeof zJobsListResponse>
function createMockJob(
id: string,
status: 'pending' | 'in_progress' | 'completed' = 'completed',
status: 'pending' | 'in_progress' | 'completed' | 'failed' = 'completed',
overrides: Partial<RawJobListItem> = {}
): RawJobListItem {
return {
id,
status,
create_time: Date.now(),
execution_start_time: null,
execution_end_time: null,
preview_output: null,
outputs_count: 0,
...overrides
}
}
@@ -63,7 +59,7 @@ describe('fetchJobs', () => {
const result = await fetchHistory(mockFetch)
expect(mockFetch).toHaveBeenCalledWith(
'/jobs?status=completed&limit=200&offset=0'
'/jobs?status=completed,failed,cancelled&limit=200&offset=0'
)
expect(result).toHaveLength(2)
expect(result[0].id).toBe('job1')
@@ -113,7 +109,7 @@ describe('fetchJobs', () => {
const result = await fetchHistory(mockFetch, 200, 5)
expect(mockFetch).toHaveBeenCalledWith(
'/jobs?status=completed&limit=200&offset=5'
'/jobs?status=completed,failed,cancelled&limit=200&offset=5'
)
// Priority base is total - offset = 10 - 5 = 5
expect(result[0].priority).toBe(5) // (total - offset) - 0

View File

@@ -68,7 +68,7 @@ function assignPriority(
}
/**
* Fetches history (completed jobs)
* Fetches history (terminal state jobs: completed, failed, cancelled)
* Assigns synthetic priority starting from total (lower than queue jobs).
*/
export async function fetchHistory(
@@ -78,7 +78,7 @@ export async function fetchHistory(
): Promise<JobListItem[]> {
const { jobs, total } = await fetchJobsRaw(
fetchApi,
['completed'],
['completed', 'failed', 'cancelled'],
maxItems,
offset
)

View File

@@ -21,12 +21,15 @@ const zJobStatus = z.enum([
const zPreviewOutput = z.object({
filename: z.string(),
subfolder: z.string(),
type: resultItemType
type: resultItemType,
nodeId: z.string(),
mediaType: z.string()
})
/**
* Execution error details for error jobs.
* Contains the same structure as ExecutionErrorWsMessage from WebSocket.
* Execution error from Jobs API.
* Similar to ExecutionErrorWsMessage but with optional prompt_id/timestamp/executed
* since these may not be present in stored errors or infrastructure-generated errors.
*/
const zExecutionError = z
.object({
@@ -43,6 +46,8 @@ const zExecutionError = z
})
.passthrough()
export type ExecutionError = z.infer<typeof zExecutionError>
/**
* Raw job from API - uses passthrough to allow extra fields
*/
@@ -105,3 +110,9 @@ export type RawJobListItem = z.infer<typeof zRawJobListItem>
/** Job list item with priority always set (server-provided or synthetic) */
export type JobListItem = RawJobListItem & { priority: number }
export type JobDetail = z.infer<typeof zJobDetail>
/** Task type used in the API (queue vs history endpoints) */
export type APITaskType = 'queue' | 'history'
/** Internal task type derived from job status for UI display */
export type TaskType = 'Running' | 'Pending' | 'History'

View File

@@ -1,11 +1,13 @@
import { describe, expect, it, vi } from 'vitest'
import type { JobDetail } from '@/platform/remote/comfyui/jobs/jobTypes'
import type { ComfyWorkflowJSON } from '@/platform/workflow/validation/schemas/workflowSchema'
import { getWorkflowFromHistory } from '@/platform/workflow/cloud/getWorkflowFromHistory'
import {
extractWorkflow,
fetchJobDetail
} from '@/platform/remote/comfyui/jobs/fetchJobs'
const mockWorkflow: ComfyWorkflowJSON = {
id: 'test-workflow-id',
revision: 0,
last_node_id: 5,
last_link_id: 3,
nodes: [],
@@ -16,75 +18,63 @@ const mockWorkflow: ComfyWorkflowJSON = {
version: 0.4
}
const mockHistoryResponse = {
'test-prompt-id': {
prompt: {
priority: 1,
prompt_id: 'test-prompt-id',
extra_data: {
client_id: 'test-client',
extra_pnginfo: {
workflow: mockWorkflow
}
// Jobs API detail response structure (matches actual /jobs/{id} response)
// workflow is nested at: workflow.extra_data.extra_pnginfo.workflow
const mockJobDetailResponse: JobDetail = {
id: 'test-prompt-id',
status: 'completed',
create_time: 1234567890,
update_time: 1234567900,
workflow: {
extra_data: {
extra_pnginfo: {
workflow: mockWorkflow
}
},
outputs: {},
status: {
status_str: 'success',
completed: true,
messages: []
}
},
outputs: {
'20': {
images: [
{ filename: 'test.png', subfolder: '', type: 'output' },
{ filename: 'test2.png', subfolder: '', type: 'output' }
]
}
}
}
describe('getWorkflowFromHistory', () => {
it('should fetch workflow from /history_v2/{prompt_id} endpoint', async () => {
describe('fetchJobDetail', () => {
it('should fetch job detail from /jobs/{prompt_id} endpoint', async () => {
const mockFetchApi = vi.fn().mockResolvedValue({
json: async () => mockHistoryResponse
ok: true,
json: async () => mockJobDetailResponse
})
await getWorkflowFromHistory(mockFetchApi, 'test-prompt-id')
await fetchJobDetail(mockFetchApi, 'test-prompt-id')
expect(mockFetchApi).toHaveBeenCalledWith('/history_v2/test-prompt-id')
expect(mockFetchApi).toHaveBeenCalledWith('/jobs/test-prompt-id')
})
it('should extract and return workflow from response', async () => {
it('should return job detail with workflow and outputs', async () => {
const mockFetchApi = vi.fn().mockResolvedValue({
json: async () => mockHistoryResponse
ok: true,
json: async () => mockJobDetailResponse
})
const result = await getWorkflowFromHistory(mockFetchApi, 'test-prompt-id')
const result = await fetchJobDetail(mockFetchApi, 'test-prompt-id')
expect(result).toEqual(mockWorkflow)
expect(result).toBeDefined()
expect(result?.id).toBe('test-prompt-id')
expect(result?.outputs).toEqual(mockJobDetailResponse.outputs)
expect(result?.workflow).toBeDefined()
})
it('should return undefined when prompt_id not found in response', async () => {
it('should return undefined when job not found (non-OK response)', async () => {
const mockFetchApi = vi.fn().mockResolvedValue({
json: async () => ({})
ok: false,
status: 404
})
const result = await getWorkflowFromHistory(mockFetchApi, 'nonexistent-id')
expect(result).toBeUndefined()
})
it('should return undefined when workflow is missing from extra_pnginfo', async () => {
const mockFetchApi = vi.fn().mockResolvedValue({
json: async () => ({
'test-prompt-id': {
prompt: {
priority: 1,
prompt_id: 'test-prompt-id',
extra_data: {
client_id: 'test-client'
}
},
outputs: {}
}
})
})
const result = await getWorkflowFromHistory(mockFetchApi, 'test-prompt-id')
const result = await fetchJobDetail(mockFetchApi, 'nonexistent-id')
expect(result).toBeUndefined()
})
@@ -92,19 +82,45 @@ describe('getWorkflowFromHistory', () => {
it('should handle fetch errors gracefully', async () => {
const mockFetchApi = vi.fn().mockRejectedValue(new Error('Network error'))
const result = await getWorkflowFromHistory(mockFetchApi, 'test-prompt-id')
const result = await fetchJobDetail(mockFetchApi, 'test-prompt-id')
expect(result).toBeUndefined()
})
it('should handle malformed JSON responses', async () => {
const mockFetchApi = vi.fn().mockResolvedValue({
ok: true,
json: async () => {
throw new Error('Invalid JSON')
}
})
const result = await getWorkflowFromHistory(mockFetchApi, 'test-prompt-id')
const result = await fetchJobDetail(mockFetchApi, 'test-prompt-id')
expect(result).toBeUndefined()
})
})
describe('extractWorkflow', () => {
it('should extract workflow from job detail', async () => {
const result = await extractWorkflow(mockJobDetailResponse)
expect(result).toEqual(mockWorkflow)
})
it('should return undefined when job is undefined', async () => {
const result = await extractWorkflow(undefined)
expect(result).toBeUndefined()
})
it('should return undefined when workflow is missing', async () => {
const jobWithoutWorkflow: JobDetail = {
...mockJobDetailResponse,
workflow: {}
}
const result = await extractWorkflow(jobWithoutWorkflow)
expect(result).toBeUndefined()
})

View File

@@ -1,21 +0,0 @@
import type { ComfyWorkflowJSON } from '@/platform/workflow/validation/schemas/workflowSchema'
import type { PromptId } from '@/schemas/apiSchema'
export async function getWorkflowFromHistory(
fetchApi: (url: string) => Promise<Response>,
promptId: PromptId
): Promise<ComfyWorkflowJSON | undefined> {
try {
const res = await fetchApi(`/history_v2/${promptId}`)
const json = await res.json()
const historyItem = json[promptId]
if (!historyItem) return undefined
const workflow = historyItem.prompt?.extra_data?.extra_pnginfo?.workflow
return workflow ?? undefined
} catch (error) {
console.error(`Failed to fetch workflow for prompt ${promptId}:`, error)
return undefined
}
}

View File

@@ -1,10 +0,0 @@
/**
* Cloud: Fetches workflow by prompt_id. Desktop: Returns undefined (workflows already in history).
*/
import { isCloud } from '@/platform/distribution/types'
import { getWorkflowFromHistory as cloudImpl } from './getWorkflowFromHistory'
export const getWorkflowFromHistory = isCloud
? cloudImpl
: async () => undefined

View File

@@ -8,17 +8,18 @@ import type { AssetItem } from '@/platform/assets/schemas/assetSchema'
import { getOutputAssetMetadata } from '@/platform/assets/schemas/assetMetadataSchema'
import { getAssetUrl } from '@/platform/assets/utils/assetUrlUtil'
import { getWorkflowDataFromFile } from '@/scripts/metadata/parser'
import { getJobWorkflow } from '@/services/jobOutputCache'
/**
* Extract workflow from AssetItem (async - may need file fetch)
* Tries metadata first (for output assets), then falls back to extracting from file
* This supports both output assets (with embedded metadata) and input assets (PNG with workflow)
* Extract workflow from AssetItem using jobs API
* For output assets: uses jobs API (getJobWorkflow)
* For input assets: extracts from file metadata
*
* @param asset The asset item to extract workflow from
* @returns WorkflowSource with workflow and generated filename
*
* @example
* const asset = { name: 'output.png', user_metadata: { workflow: {...} } }
* const asset = { name: 'output.png', user_metadata: { promptId: '123' } }
* const { workflow, filename } = await extractWorkflowFromAsset(asset)
*/
export async function extractWorkflowFromAsset(asset: AssetItem): Promise<{
@@ -27,17 +28,14 @@ export async function extractWorkflowFromAsset(asset: AssetItem): Promise<{
}> {
const baseFilename = asset.name.replace(/\.[^/.]+$/, '.json')
// Strategy 1: Try metadata first (for output assets)
// For output assets: use jobs API (with caching and validation)
const metadata = getOutputAssetMetadata(asset.user_metadata)
if (metadata?.workflow) {
return {
workflow: metadata.workflow as ComfyWorkflowJSON,
filename: baseFilename
}
if (metadata?.promptId) {
const workflow = await getJobWorkflow(metadata.promptId)
return { workflow: workflow ?? null, filename: baseFilename }
}
// Strategy 2: Try extracting from file (for input assets with embedded workflow)
// This supports PNG, WEBP, FLAC, and other formats with metadata
// For input assets: extract from file metadata (PNG/WEBP/FLAC with embedded workflow)
try {
const fileUrl = getAssetUrl(asset)
const response = await fetch(fileUrl)

View File

@@ -1,18 +1,14 @@
import { z } from 'zod'
import { LinkMarkerShape } from '@/lib/litegraph/src/litegraph'
import {
zComfyWorkflow,
zNodeId
} from '@/platform/workflow/validation/schemas/workflowSchema'
import { zNodeId } from '@/platform/workflow/validation/schemas/workflowSchema'
import { colorPalettesSchema } from '@/schemas/colorPaletteSchema'
import { zKeybinding } from '@/schemas/keyBindingSchema'
import { NodeBadgeMode } from '@/types/nodeSource'
import { LinkReleaseTriggerAction } from '@/types/searchBoxTypes'
const zNodeType = z.string()
export const zQueueIndex = z.number()
export const zPromptId = z.string()
const zPromptId = z.string()
export type PromptId = z.infer<typeof zPromptId>
export const resultItemType = z.enum(['input', 'output', 'temp'])
export type ResultItemType = z.infer<typeof resultItemType>
@@ -173,136 +169,9 @@ export type AssetDownloadWsMessage = z.infer<typeof zAssetDownloadWsMessage>
export type NotificationWsMessage = z.infer<typeof zNotificationWsMessage>
const zPromptInputItem = z.object({
inputs: z.record(z.string(), z.any()),
class_type: zNodeType
})
const zPromptInputs = z.record(zPromptInputItem)
const zExtraPngInfo = z
.object({
workflow: zComfyWorkflow
})
.passthrough()
export const zExtraData = z
.object({
/** extra_pnginfo can be missing if backend execution gets a validation error. */
extra_pnginfo: zExtraPngInfo.optional(),
client_id: z.string().optional(),
// Cloud/Adapters: creation time in milliseconds when available
create_time: z.number().int().optional()
})
// Allow backend/adapters/extensions to add arbitrary metadata
.passthrough()
const zOutputsToExecute = z.array(zNodeId)
const zExecutionStartMessage = z.tuple([
z.literal('execution_start'),
zExecutionStartWsMessage
])
const zExecutionSuccessMessage = z.tuple([
z.literal('execution_success'),
zExecutionSuccessWsMessage
])
const zExecutionCachedMessage = z.tuple([
z.literal('execution_cached'),
zExecutionCachedWsMessage
])
const zExecutionInterruptedMessage = z.tuple([
z.literal('execution_interrupted'),
zExecutionInterruptedWsMessage
])
const zExecutionErrorMessage = z.tuple([
z.literal('execution_error'),
zExecutionErrorWsMessage
])
const zStatusMessage = z.union([
zExecutionStartMessage,
zExecutionSuccessMessage,
zExecutionCachedMessage,
zExecutionInterruptedMessage,
zExecutionErrorMessage
])
export const zStatus = z.object({
status_str: z.enum(['success', 'error']),
completed: z.boolean(),
messages: z.array(zStatusMessage)
})
const zTaskPrompt = z.tuple([
zQueueIndex,
zPromptId,
zPromptInputs,
zExtraData,
zOutputsToExecute
])
const zRunningTaskItem = z.object({
taskType: z.literal('Running'),
prompt: zTaskPrompt,
// @Deprecated
remove: z.object({
name: z.literal('Cancel'),
cb: z.function()
})
})
const zPendingTaskItem = z.object({
taskType: z.literal('Pending'),
prompt: zTaskPrompt
})
export const zTaskOutput = z.record(zNodeId, zOutputs)
const zNodeOutputsMeta = z.object({
node_id: zNodeId,
display_node: zNodeId,
prompt_id: zPromptId.optional(),
read_node_id: zNodeId.optional()
})
export const zTaskMeta = z.record(zNodeId, zNodeOutputsMeta)
const zHistoryTaskItem = z.object({
taskType: z.literal('History'),
prompt: zTaskPrompt,
status: zStatus.optional(),
outputs: zTaskOutput,
meta: zTaskMeta.optional()
})
const zTaskItem = z.union([
zRunningTaskItem,
zPendingTaskItem,
zHistoryTaskItem
])
const zTaskType = z.union([
z.literal('Running'),
z.literal('Pending'),
z.literal('History')
])
export type TaskType = z.infer<typeof zTaskType>
export type TaskPrompt = z.infer<typeof zTaskPrompt>
export type TaskStatus = z.infer<typeof zStatus>
export type TaskOutput = z.infer<typeof zTaskOutput>
// `/queue`
export type RunningTaskItem = z.infer<typeof zRunningTaskItem>
export type PendingTaskItem = z.infer<typeof zPendingTaskItem>
// `/history`
export type HistoryTaskItem = z.infer<typeof zHistoryTaskItem>
export type TaskItem = z.infer<typeof zTaskItem>
const zEmbeddingsResponse = z.array(z.string())
const zExtensionsResponse = z.array(z.string())
const zError = z.object({

View File

@@ -10,6 +10,7 @@ import type {
} from '@/platform/assets/schemas/assetSchema'
import { isCloud } from '@/platform/distribution/types'
import { useToastStore } from '@/platform/updates/common/toastStore'
import type { IFuseOptions } from 'fuse.js'
import {
type TemplateInfo,
type WorkflowTemplates
@@ -31,16 +32,13 @@ import type {
ExecutionSuccessWsMessage,
ExtensionsResponse,
FeatureFlagsWsMessage,
HistoryTaskItem,
LogsRawResponse,
LogsWsMessage,
NotificationWsMessage,
PendingTaskItem,
ProgressStateWsMessage,
ProgressTextWsMessage,
ProgressWsMessage,
PromptResponse,
RunningTaskItem,
Settings,
StatusWsMessage,
StatusWsMessageStatus,
@@ -49,12 +47,19 @@ import type {
UserDataFullInfo,
PreviewMethod
} from '@/schemas/apiSchema'
import type {
JobDetail,
JobListItem
} from '@/platform/remote/comfyui/jobs/jobTypes'
import type { ComfyNodeDef } from '@/schemas/nodeDefSchema'
import type { useFirebaseAuthStore } from '@/stores/firebaseAuthStore'
import type { AuthHeader } from '@/types/authTypes'
import type { NodeExecutionId } from '@/types/nodeIdentification'
import { fetchHistory } from '@/platform/remote/comfyui/history'
import type { IFuseOptions } from 'fuse.js'
import {
fetchHistory,
fetchJobDetail,
fetchQueue
} from '@/platform/remote/comfyui/jobs/fetchJobs'
interface QueuePromptRequestBody {
client_id: string
@@ -670,7 +675,6 @@ export class ComfyApi extends EventTarget {
case 'logs':
case 'b_preview':
case 'notification':
case 'asset_download':
this.dispatchCustomEvent(msg.type, msg.data)
break
case 'feature_flags':
@@ -893,53 +897,13 @@ export class ComfyApi extends EventTarget {
* @returns The currently running and queued items
*/
async getQueue(): Promise<{
Running: RunningTaskItem[]
Pending: PendingTaskItem[]
Running: JobListItem[]
Pending: JobListItem[]
}> {
try {
const res = await this.fetchApi('/queue')
const data = await res.json()
// Normalize queue tuple shape across backends:
// - Backend (V1): [idx, prompt_id, inputs, extra_data(object), outputs_to_execute(array)]
// - Cloud: [idx, prompt_id, inputs, outputs_to_execute(array), metadata(object{create_time})]
const normalizeQueuePrompt = (prompt: any): any => {
if (!Array.isArray(prompt)) return prompt
// Ensure 5-tuple
const p = prompt.slice(0, 5)
const fourth = p[3]
const fifth = p[4]
// Cloud shape: 4th is array, 5th is metadata object
if (
Array.isArray(fourth) &&
fifth &&
typeof fifth === 'object' &&
!Array.isArray(fifth)
) {
const meta: any = fifth
const extraData = { ...meta }
return [p[0], p[1], p[2], extraData, fourth]
}
// V1 shape already: return as-is
return p
}
return {
// Running action uses a different endpoint for cancelling
Running: data.queue_running.map((prompt: any) => {
const np = normalizeQueuePrompt(prompt)
return {
taskType: 'Running',
prompt: np,
// prompt[1] is the prompt id
remove: { name: 'Cancel', cb: () => api.interrupt(np[1]) }
}
}),
Pending: data.queue_pending.map((prompt: any) => ({
taskType: 'Pending',
prompt: normalizeQueuePrompt(prompt)
}))
}
return await fetchQueue(this.fetchApi.bind(this))
} catch (error) {
console.error(error)
console.error('Failed to fetch queue:', error)
return { Running: [], Pending: [] }
}
}
@@ -951,7 +915,7 @@ export class ComfyApi extends EventTarget {
async getHistory(
max_items: number = 200,
options?: { offset?: number }
): Promise<{ History: HistoryTaskItem[] }> {
): Promise<JobListItem[]> {
try {
return await fetchHistory(
this.fetchApi.bind(this),
@@ -960,10 +924,19 @@ export class ComfyApi extends EventTarget {
)
} catch (error) {
console.error(error)
return { History: [] }
return []
}
}
/**
* Gets detailed job info including outputs and workflow
* @param jobId The job/prompt ID
* @returns Full job details or undefined if not found
*/
async getJobDetail(jobId: string): Promise<JobDetail | undefined> {
return fetchJobDetail(this.fetchApi.bind(this), jobId)
}
/**
* Gets system & device stats
* @returns System stats such as python version, OS, per device info
@@ -1273,29 +1246,6 @@ export class ComfyApi extends EventTarget {
}
}
/**
* Gets the Fuse options from the server.
*
* @returns The Fuse options, or null if not found or invalid
*/
async getFuseOptions(): Promise<IFuseOptions<TemplateInfo> | null> {
try {
const res = await axios.get(
this.fileURL('/templates/fuse_options.json'),
{
headers: {
'Content-Type': 'application/json'
}
}
)
const contentType = res.headers['content-type']
return contentType?.includes('application/json') ? res.data : null
} catch (error) {
console.error('Error loading fuse options:', error)
return null
}
}
/**
* Gets the custom nodes i18n data from the server.
*
@@ -1331,6 +1281,24 @@ export class ComfyApi extends EventTarget {
getServerFeatures(): Record<string, unknown> {
return { ...this.serverFeatureFlags }
}
async getFuseOptions(): Promise<IFuseOptions<TemplateInfo> | null> {
try {
const res = await axios.get(
this.fileURL('/templates/fuse_options.json'),
{
headers: {
'Content-Type': 'application/json'
}
}
)
const contentType = res.headers['content-type']
return contentType?.includes('application/json') ? res.data : null
} catch (error) {
console.error('Error loading fuse options:', error)
return null
}
}
}
export const api = new ComfyApi()

View File

@@ -1,6 +1,6 @@
import { useSettingStore } from '@/platform/settings/settingStore'
import { WORKFLOW_ACCEPT_STRING } from '@/platform/workflow/core/types/formats'
import { type StatusWsMessageStatus, type TaskItem } from '@/schemas/apiSchema'
import { type StatusWsMessageStatus } from '@/schemas/apiSchema'
import { useDialogService } from '@/services/dialogService'
import { isCloud } from '@/platform/distribution/types'
import { useTelemetry } from '@/platform/telemetry'
@@ -33,6 +33,17 @@ type Props = {
type Children = Element[] | Element | string | string[]
/**
* @deprecated Legacy queue item structure from old history API.
* Will be removed when ComfyList is migrated to Jobs API.
*/
interface LegacyQueueItem {
prompt: [unknown, string, unknown, { extra_pnginfo: { workflow: unknown } }]
outputs?: Record<string, unknown>
meta?: Record<string, { display_node?: string }>
remove?: { name: string; cb: () => Promise<void> | void }
}
type ElementType<K extends string> = K extends keyof HTMLElementTagNameMap
? HTMLElementTagNameMap[K]
: HTMLElement
@@ -259,29 +270,28 @@ class ComfyList {
$el('div.comfy-list-items', [
// @ts-expect-error fixme ts strict error
...(this.#reverse ? items[section].reverse() : items[section]).map(
(item: TaskItem) => {
(item: LegacyQueueItem) => {
// Allow items to specify a custom remove action (e.g. for interrupt current prompt)
const removeAction =
'remove' in item
? item.remove
: {
name: 'Delete',
cb: () => api.deleteItem(this.#type, item.prompt[1])
}
const removeAction = item.remove ?? {
name: 'Delete',
cb: () => api.deleteItem(this.#type, item.prompt[1])
}
return $el('div', { textContent: item.prompt[0] + ': ' }, [
$el('button', {
textContent: 'Load',
onclick: async () => {
await app.loadGraphData(
// @ts-expect-error fixme ts strict error
item.prompt[3].extra_pnginfo.workflow,
item.prompt[3].extra_pnginfo.workflow as Parameters<
typeof app.loadGraphData
>[0],
true,
false
)
if ('outputs' in item) {
if ('outputs' in item && item.outputs) {
app.nodeOutputs = {}
for (const [key, value] of Object.entries(item.outputs)) {
const realKey = item['meta']?.[key]?.display_node ?? key
// @ts-expect-error fixme ts strict error
app.nodeOutputs[realKey] = value
}
}

View File

@@ -19,7 +19,6 @@ import { useTelemetry } from '@/platform/telemetry'
import { isCloud } from '@/platform/distribution/types'
import { useSubscription } from '@/platform/cloud/subscription/composables/useSubscription'
import SettingDialogContent from '@/platform/settings/components/SettingDialogContent.vue'
import type { ExecutionErrorWsMessage } from '@/schemas/apiSchema'
import { useDialogStore } from '@/stores/dialogStore'
import type {
DialogComponentProps,
@@ -45,6 +44,18 @@ export type ConfirmationDialogType =
| 'dirtyClose'
| 'reinstall'
/**
* Minimal interface for execution error dialogs.
* Satisfied by both ExecutionErrorWsMessage (WebSocket) and ExecutionError (Jobs API).
*/
export interface ExecutionErrorDialogInput {
exception_type: string
exception_message: string
node_id: string | number
node_type: string
traceback: string[]
}
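/*
 * Illustrative sketch (not part of the original file): both the WebSocket
 * message and the Jobs API error satisfy this minimal interface, so either
 * can be passed to showExecutionErrorDialog below. The values are made up.
 */
const exampleExecutionError: ExecutionErrorDialogInput = {
  exception_type: 'RuntimeError',
  exception_message: 'CUDA out of memory',
  node_id: '12',
  node_type: 'KSampler',
  traceback: ['Traceback (most recent call last): ...']
}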
export const useDialogService = () => {
const dialogStore = useDialogStore()
@@ -115,7 +126,7 @@ export const useDialogService = () => {
})
}
function showExecutionErrorDialog(executionError: ExecutionErrorWsMessage) {
function showExecutionErrorDialog(executionError: ExecutionErrorDialogInput) {
const props: ComponentAttrs<typeof ErrorDialogContent> = {
error: {
exceptionType: executionError.exception_type,

View File

@@ -0,0 +1,278 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
import type {
JobDetail,
JobListItem
} from '@/platform/remote/comfyui/jobs/jobTypes'
import { ResultItemImpl, TaskItemImpl } from '@/stores/queueStore'
vi.mock('@/platform/remote/comfyui/jobs/fetchJobs', () => ({
fetchJobDetail: vi.fn(),
extractWorkflow: vi.fn()
}))
function createResultItem(url: string, supportsPreview = true): ResultItemImpl {
const item = new ResultItemImpl({
filename: url,
subfolder: '',
type: 'output',
nodeId: 'node-1',
mediaType: supportsPreview ? 'images' : 'unknown'
})
Object.defineProperty(item, 'url', { get: () => url })
Object.defineProperty(item, 'supportsPreview', { get: () => supportsPreview })
return item
}
function createMockJob(id: string, outputsCount = 1): JobListItem {
return {
id,
status: 'completed',
create_time: Date.now(),
preview_output: null,
outputs_count: outputsCount,
priority: 0
}
}
function createTask(
preview?: ResultItemImpl,
allOutputs?: ResultItemImpl[],
outputsCount = 1
): TaskItemImpl {
const job = createMockJob(
`task-${Math.random().toString(36).slice(2)}`,
outputsCount
)
const flatOutputs = allOutputs ?? (preview ? [preview] : [])
return new TaskItemImpl(job, {}, flatOutputs)
}
describe('jobOutputCache', () => {
beforeEach(() => {
vi.resetModules()
vi.clearAllMocks()
})
describe('findActiveIndex', () => {
it('returns index of matching URL', async () => {
const { findActiveIndex } = await import('@/services/jobOutputCache')
const items = [
createResultItem('a'),
createResultItem('b'),
createResultItem('c')
]
expect(findActiveIndex(items, 'b')).toBe(1)
})
it('returns 0 when URL not found', async () => {
const { findActiveIndex } = await import('@/services/jobOutputCache')
const items = [createResultItem('a'), createResultItem('b')]
expect(findActiveIndex(items, 'missing')).toBe(0)
})
it('returns 0 when URL is undefined', async () => {
const { findActiveIndex } = await import('@/services/jobOutputCache')
const items = [createResultItem('a'), createResultItem('b')]
expect(findActiveIndex(items, undefined)).toBe(0)
})
})
describe('getOutputsForTask', () => {
it('returns previewable outputs directly when no lazy load needed', async () => {
const { getOutputsForTask } = await import('@/services/jobOutputCache')
const outputs = [createResultItem('p-1'), createResultItem('p-2')]
const task = createTask(undefined, outputs, 1)
const result = await getOutputsForTask(task)
expect(result).toEqual(outputs)
})
it('lazy loads when outputsCount > 1', async () => {
const { getOutputsForTask } = await import('@/services/jobOutputCache')
const previewOutput = createResultItem('preview')
const fullOutputs = [
createResultItem('full-1'),
createResultItem('full-2')
]
const job = createMockJob('task-1', 3)
const task = new TaskItemImpl(job, {}, [previewOutput])
const loadedTask = new TaskItemImpl(job, {}, fullOutputs)
task.loadFullOutputs = vi.fn().mockResolvedValue(loadedTask)
const result = await getOutputsForTask(task)
expect(result).toEqual(fullOutputs)
expect(task.loadFullOutputs).toHaveBeenCalled()
})
it('caches loaded tasks', async () => {
const { getOutputsForTask } = await import('@/services/jobOutputCache')
const fullOutputs = [createResultItem('full-1')]
const job = createMockJob('task-1', 3)
const task = new TaskItemImpl(job, {}, [createResultItem('preview')])
const loadedTask = new TaskItemImpl(job, {}, fullOutputs)
task.loadFullOutputs = vi.fn().mockResolvedValue(loadedTask)
// First call should load
await getOutputsForTask(task)
expect(task.loadFullOutputs).toHaveBeenCalledTimes(1)
// Second call should use cache
await getOutputsForTask(task)
expect(task.loadFullOutputs).toHaveBeenCalledTimes(1)
})
it('falls back to preview outputs on load error', async () => {
const { getOutputsForTask } = await import('@/services/jobOutputCache')
const previewOutput = createResultItem('preview')
const job = createMockJob('task-1', 3)
const task = new TaskItemImpl(job, {}, [previewOutput])
task.loadFullOutputs = vi
.fn()
.mockRejectedValue(new Error('Network error'))
const result = await getOutputsForTask(task)
expect(result).toEqual([previewOutput])
})
it('returns null when request is superseded', async () => {
const { getOutputsForTask } = await import('@/services/jobOutputCache')
const job1 = createMockJob('task-1', 3)
const job2 = createMockJob('task-2', 3)
const task1 = new TaskItemImpl(job1, {}, [createResultItem('preview-1')])
const task2 = new TaskItemImpl(job2, {}, [createResultItem('preview-2')])
const loadedTask1 = new TaskItemImpl(job1, {}, [
createResultItem('full-1')
])
const loadedTask2 = new TaskItemImpl(job2, {}, [
createResultItem('full-2')
])
// Task1 loads slowly, task2 loads quickly
task1.loadFullOutputs = vi.fn().mockImplementation(
() =>
new Promise((resolve) => {
setTimeout(() => resolve(loadedTask1), 50)
})
)
task2.loadFullOutputs = vi.fn().mockResolvedValue(loadedTask2)
// Start task1, then immediately start task2
const promise1 = getOutputsForTask(task1)
const promise2 = getOutputsForTask(task2)
const [result1, result2] = await Promise.all([promise1, promise2])
// Task2 should succeed, task1 should return null (superseded)
expect(result1).toBeNull()
expect(result2).toEqual([createResultItem('full-2')])
})
})
describe('getJobDetail', () => {
it('fetches and caches job detail', async () => {
const { getJobDetail } = await import('@/services/jobOutputCache')
const { fetchJobDetail } =
await import('@/platform/remote/comfyui/jobs/fetchJobs')
const mockDetail: JobDetail = {
id: 'job-1',
status: 'completed',
create_time: Date.now(),
priority: 0,
outputs: {}
}
vi.mocked(fetchJobDetail).mockResolvedValue(mockDetail)
const result = await getJobDetail('job-1')
expect(result).toEqual(mockDetail)
expect(fetchJobDetail).toHaveBeenCalledWith(expect.any(Function), 'job-1')
})
it('returns cached job detail on subsequent calls', async () => {
const { getJobDetail } = await import('@/services/jobOutputCache')
const { fetchJobDetail } =
await import('@/platform/remote/comfyui/jobs/fetchJobs')
const mockDetail: JobDetail = {
id: 'job-2',
status: 'completed',
create_time: Date.now(),
priority: 0,
outputs: {}
}
vi.mocked(fetchJobDetail).mockResolvedValue(mockDetail)
// First call
await getJobDetail('job-2')
expect(fetchJobDetail).toHaveBeenCalledTimes(1)
// Second call should use cache
const result = await getJobDetail('job-2')
expect(result).toEqual(mockDetail)
expect(fetchJobDetail).toHaveBeenCalledTimes(1)
})
it('returns undefined on fetch error', async () => {
const { getJobDetail } = await import('@/services/jobOutputCache')
const { fetchJobDetail } =
await import('@/platform/remote/comfyui/jobs/fetchJobs')
vi.mocked(fetchJobDetail).mockRejectedValue(new Error('Network error'))
const result = await getJobDetail('job-error')
expect(result).toBeUndefined()
})
})
describe('getJobWorkflow', () => {
it('fetches job detail and extracts workflow', async () => {
const { getJobWorkflow } = await import('@/services/jobOutputCache')
const { fetchJobDetail, extractWorkflow } =
await import('@/platform/remote/comfyui/jobs/fetchJobs')
const mockDetail: JobDetail = {
id: 'job-wf',
status: 'completed',
create_time: Date.now(),
priority: 0,
outputs: {}
}
const mockWorkflow = { version: 1 }
vi.mocked(fetchJobDetail).mockResolvedValue(mockDetail)
vi.mocked(extractWorkflow).mockResolvedValue(mockWorkflow as any)
const result = await getJobWorkflow('job-wf')
expect(result).toEqual(mockWorkflow)
expect(extractWorkflow).toHaveBeenCalledWith(mockDetail)
})
it('returns undefined when job detail not found', async () => {
const { getJobWorkflow } = await import('@/services/jobOutputCache')
const { fetchJobDetail, extractWorkflow } =
await import('@/platform/remote/comfyui/jobs/fetchJobs')
vi.mocked(fetchJobDetail).mockResolvedValue(undefined)
vi.mocked(extractWorkflow).mockResolvedValue(undefined)
const result = await getJobWorkflow('missing')
expect(result).toBeUndefined()
})
})
})

View File

@@ -0,0 +1,103 @@
/**
* @fileoverview Job output cache for caching and managing job data
* @module services/jobOutputCache
*
* Centralizes job output and detail caching with LRU eviction.
* Provides helpers for working with previewable outputs and workflows.
*/
import QuickLRU from '@alloc/quick-lru'
import type { JobDetail } from '@/platform/remote/comfyui/jobs/jobTypes'
import { extractWorkflow } from '@/platform/remote/comfyui/jobs/fetchJobs'
import type { ComfyWorkflowJSON } from '@/platform/workflow/validation/schemas/workflowSchema'
import { api } from '@/scripts/api'
import { ResultItemImpl } from '@/stores/queueStore'
import type { TaskItemImpl } from '@/stores/queueStore'
const MAX_TASK_CACHE_SIZE = 50
const MAX_JOB_DETAIL_CACHE_SIZE = 50
const taskCache = new QuickLRU<string, TaskItemImpl>({
maxSize: MAX_TASK_CACHE_SIZE
})
const jobDetailCache = new QuickLRU<string, JobDetail>({
maxSize: MAX_JOB_DETAIL_CACHE_SIZE
})
// Track latest request to dedupe stale responses
let latestTaskRequestId: string | null = null
// ===== Task Output Caching =====
export function findActiveIndex(
items: readonly ResultItemImpl[],
url?: string
): number {
return ResultItemImpl.findByUrl(items, url)
}
/**
* Gets previewable outputs for a task, with lazy loading, caching, and request deduping.
* Returns null if a newer request superseded this one while loading.
*/
export async function getOutputsForTask(
task: TaskItemImpl
): Promise<ResultItemImpl[] | null> {
const requestId = String(task.promptId)
latestTaskRequestId = requestId
const outputsCount = task.outputsCount ?? 0
const needsLazyLoad = outputsCount > 1
if (!needsLazyLoad) {
return [...task.previewableOutputs]
}
const cached = taskCache.get(requestId)
if (cached) {
return [...cached.previewableOutputs]
}
try {
const loadedTask = await task.loadFullOutputs()
// Check if request was superseded while loading
if (latestTaskRequestId !== requestId) {
return null
}
taskCache.set(requestId, loadedTask)
return [...loadedTask.previewableOutputs]
} catch (error) {
console.warn('Failed to load full outputs, using preview:', error)
return [...task.previewableOutputs]
}
}
// ===== Job Detail Caching =====
export async function getJobDetail(
jobId: string
): Promise<JobDetail | undefined> {
const cached = jobDetailCache.get(jobId)
if (cached) return cached
try {
const detail = await api.getJobDetail(jobId)
if (detail) {
jobDetailCache.set(jobId, detail)
}
return detail
} catch (error) {
console.warn('Failed to fetch job detail:', error)
return undefined
}
}
export async function getJobWorkflow(
jobId: string
): Promise<ComfyWorkflowJSON | undefined> {
const detail = await getJobDetail(jobId)
return await extractWorkflow(detail)
}
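
For orientation, a minimal consumer sketch of the helpers above. It is hedged: showGallery is an illustrative placeholder rather than a real frontend API, and only the import paths shown in this diff are assumed to exist.

import {
  findActiveIndex,
  getJobWorkflow,
  getOutputsForTask
} from '@/services/jobOutputCache'
import type { ResultItemImpl, TaskItemImpl } from '@/stores/queueStore'

// Placeholder for whatever component actually renders the gallery.
declare function showGallery(
  items: readonly ResultItemImpl[],
  activeIndex: number
): void

async function openResultGallery(task: TaskItemImpl, currentUrl?: string) {
  // Lazy-loads and caches full outputs when outputs_count > 1.
  const outputs = await getOutputsForTask(task)
  // null means a newer request superseded this one while loading; drop it silently.
  if (!outputs) return
  showGallery(outputs, findActiveIndex(outputs, currentUrl))
}

async function loadJobGraph(jobId: string) {
  // Repeat calls for the same job are answered from the LRU-backed detail cache.
  const workflow = await getJobWorkflow(jobId)
  return workflow // undefined when the job detail could not be fetched
}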

View File

@@ -3,12 +3,7 @@ import { beforeEach, describe, expect, it, vi } from 'vitest'
import { useAssetsStore } from '@/stores/assetsStore'
import { api } from '@/scripts/api'
import type {
HistoryTaskItem,
TaskPrompt,
TaskStatus,
TaskOutput
} from '@/schemas/apiSchema'
import type { JobListItem } from '@/platform/remote/comfyui/jobs/jobTypes'
// Mock the api module
vi.mock('@/scripts/api', () => ({
@@ -53,26 +48,25 @@ vi.mock('@/stores/queueStore', () => ({
url: string
}
| undefined
public promptId: string
constructor(
public taskType: string,
public prompt: TaskPrompt,
public status: TaskStatus | undefined,
public outputs: TaskOutput
) {
this.flatOutputs = this.outputs
? [
{
supportsPreview: true,
filename: 'test.png',
subfolder: '',
type: 'output',
url: 'http://test.com/test.png'
}
]
: []
constructor(public job: JobListItem) {
this.promptId = job.id
this.flatOutputs = [
{
supportsPreview: true,
filename: 'test.png',
subfolder: '',
type: 'output',
url: 'http://test.com/test.png'
}
]
this.previewOutput = this.flatOutputs[0]
}
get previewableOutputs() {
return this.flatOutputs.filter((o) => o.supportsPreview)
}
}
}))
@@ -82,17 +76,17 @@ vi.mock('@/platform/assets/composables/media/assetMappers', () => ({
id: `${type}-${index}`,
name,
size: 0,
created_at: new Date(Date.now() - index * 1000).toISOString(), // Unique timestamps
created_at: new Date(Date.now() - index * 1000).toISOString(),
tags: [type],
preview_url: `http://test.com/${name}`
})),
mapTaskOutputToAssetItem: vi.fn((task, output) => {
const index = parseInt(task.prompt[1].split('_')[1]) || 0
const index = parseInt(task.promptId.split('_')[1]) || 0
return {
id: task.prompt[1], // Use promptId as asset ID
id: task.promptId,
name: output.filename,
size: 0,
created_at: new Date(Date.now() - index * 1000).toISOString(), // Unique timestamps
created_at: new Date(Date.now() - index * 1000).toISOString(),
tags: ['output'],
preview_url: output.url,
user_metadata: {}
@@ -103,43 +97,20 @@ vi.mock('@/platform/assets/composables/media/assetMappers', () => ({
describe('assetsStore - Refactored (Option A)', () => {
let store: ReturnType<typeof useAssetsStore>
// Helper function to create mock history items
const createMockHistoryItem = (index: number): HistoryTaskItem => ({
taskType: 'History' as const,
prompt: [
1000 + index, // queueIndex
`prompt_${index}`, // promptId
{}, // promptInputs
{
extra_pnginfo: {
workflow: {
last_node_id: 1,
last_link_id: 1,
nodes: [],
links: [],
groups: [],
config: {},
version: 1
}
}
}, // extraData
[] // outputsToExecute
],
status: {
status_str: 'success' as const,
completed: true,
messages: []
},
outputs: {
'1': {
images: [
{
filename: `output_${index}.png`,
subfolder: '',
type: 'output' as const
}
]
}
// Helper function to create mock job items
const createMockJobItem = (index: number): JobListItem => ({
id: `prompt_${index}`,
status: 'completed',
create_time: 1000 + index,
update_time: 1000 + index,
last_state_update: 1000 + index,
priority: 1000 + index,
preview_output: {
filename: `output_${index}.png`,
subfolder: '',
type: 'output',
nodeId: 'node_1',
mediaType: 'images'
}
})
@@ -152,11 +123,9 @@ describe('assetsStore - Refactored (Option A)', () => {
describe('Initial Load', () => {
it('should load initial history items', async () => {
const mockHistory = Array.from({ length: 10 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValue({
History: mockHistory
})
vi.mocked(api.getHistory).mockResolvedValue(mockHistory)
await store.updateHistory()
@@ -169,11 +138,9 @@ describe('assetsStore - Refactored (Option A)', () => {
it('should set hasMoreHistory to true when batch is full', async () => {
const mockHistory = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValue({
History: mockHistory
})
vi.mocked(api.getHistory).mockResolvedValue(mockHistory)
await store.updateHistory()
@@ -197,11 +164,9 @@ describe('assetsStore - Refactored (Option A)', () => {
it('should accumulate items when loading more', async () => {
// First batch - full BATCH_SIZE
const firstBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: firstBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(firstBatch)
await store.updateHistory()
expect(store.historyAssets).toHaveLength(200)
@@ -209,11 +174,9 @@ describe('assetsStore - Refactored (Option A)', () => {
// Second batch - different items
const secondBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(200 + i)
createMockJobItem(200 + i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: secondBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(secondBatch)
await store.loadMoreHistory()
@@ -225,24 +188,20 @@ describe('assetsStore - Refactored (Option A)', () => {
it('should prevent duplicate items during pagination', async () => {
// First batch - full BATCH_SIZE
const firstBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: firstBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(firstBatch)
await store.updateHistory()
expect(store.historyAssets).toHaveLength(200)
// Second batch with some duplicates
const secondBatch = [
createMockHistoryItem(2), // Duplicate
createMockHistoryItem(5), // Duplicate
...Array.from({ length: 198 }, (_, i) => createMockHistoryItem(200 + i)) // New
createMockJobItem(2), // Duplicate
createMockJobItem(5), // Duplicate
...Array.from({ length: 198 }, (_, i) => createMockJobItem(200 + i)) // New
]
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: secondBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(secondBatch)
await store.loadMoreHistory()
@@ -258,11 +217,9 @@ describe('assetsStore - Refactored (Option A)', () => {
it('should stop loading when no more items', async () => {
// First batch - less than BATCH_SIZE
const firstBatch = Array.from({ length: 50 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: firstBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(firstBatch)
await store.updateHistory()
expect(store.hasMoreHistory).toBe(false)
@@ -277,11 +234,9 @@ describe('assetsStore - Refactored (Option A)', () => {
it('should handle race conditions with concurrent loads', async () => {
// Setup initial state with full batch
const initialBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: initialBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(initialBatch)
await store.updateHistory()
expect(store.hasMoreHistory).toBe(true)
@@ -289,12 +244,10 @@ describe('assetsStore - Refactored (Option A)', () => {
vi.mocked(api.getHistory).mockClear()
// Setup slow API response
let resolveLoadMore: (value: { History: HistoryTaskItem[] }) => void
const loadMorePromise = new Promise<{ History: HistoryTaskItem[] }>(
(resolve) => {
resolveLoadMore = resolve
}
)
let resolveLoadMore: (value: JobListItem[]) => void
const loadMorePromise = new Promise<JobListItem[]>((resolve) => {
resolveLoadMore = resolve
})
vi.mocked(api.getHistory).mockReturnValueOnce(loadMorePromise)
// Start first loadMore
@@ -305,9 +258,9 @@ describe('assetsStore - Refactored (Option A)', () => {
// Resolve
const secondBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(200 + i)
createMockJobItem(200 + i)
)
resolveLoadMore!({ History: secondBatch })
resolveLoadMore!(secondBatch)
await Promise.all([firstLoad, secondLoad])
@@ -320,21 +273,17 @@ describe('assetsStore - Refactored (Option A)', () => {
// Initial load
const firstBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: firstBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(firstBatch)
await store.updateHistory()
// Load additional batches
for (let batch = 1; batch < BATCH_COUNT; batch++) {
const items = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(batch * 200 + i)
createMockJobItem(batch * 200 + i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: items
})
vi.mocked(api.getHistory).mockResolvedValueOnce(items)
await store.loadMoreHistory()
}
@@ -347,21 +296,17 @@ describe('assetsStore - Refactored (Option A)', () => {
it('should maintain date sorting after pagination', async () => {
// First batch
const firstBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: firstBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(firstBatch)
await store.updateHistory()
// Second batch
const secondBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(200 + i)
createMockJobItem(200 + i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: secondBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(secondBatch)
await store.loadMoreHistory()
@@ -378,11 +323,9 @@ describe('assetsStore - Refactored (Option A)', () => {
it('should preserve existing data when loadMore fails', async () => {
// First successful load - full batch
const firstBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: firstBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(firstBatch)
await store.updateHistory()
expect(store.historyAssets).toHaveLength(200)
@@ -402,11 +345,9 @@ describe('assetsStore - Refactored (Option A)', () => {
it('should clear error state on successful retry', async () => {
// First load succeeds
const firstBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: firstBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(firstBatch)
await store.updateHistory()
@@ -419,11 +360,9 @@ describe('assetsStore - Refactored (Option A)', () => {
// Third load succeeds
const thirdBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(200 + i)
createMockJobItem(200 + i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: thirdBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(thirdBatch)
await store.loadMoreHistory()
@@ -450,11 +389,9 @@ describe('assetsStore - Refactored (Option A)', () => {
for (let batch = 0; batch < batches; batch++) {
const items = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(batch * 200 + i)
createMockJobItem(batch * 200 + i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: items
})
vi.mocked(api.getHistory).mockResolvedValueOnce(items)
if (batch === 0) {
await store.updateHistory()
@@ -476,11 +413,9 @@ describe('assetsStore - Refactored (Option A)', () => {
// Load items beyond limit
for (let batch = 0; batch < 6; batch++) {
const items = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(batch * 200 + i)
createMockJobItem(batch * 200 + i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: items
})
vi.mocked(api.getHistory).mockResolvedValueOnce(items)
if (batch === 0) {
await store.updateHistory()
@@ -503,11 +438,9 @@ describe('assetsStore - Refactored (Option A)', () => {
describe('jobDetailView Support', () => {
it('should include outputCount and allOutputs in user_metadata', async () => {
const mockHistory = Array.from({ length: 5 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValue({
History: mockHistory
})
vi.mocked(api.getHistory).mockResolvedValue(mockHistory)
await store.updateHistory()

View File

@@ -9,7 +9,7 @@ import {
import type { AssetItem } from '@/platform/assets/schemas/assetSchema'
import { assetService } from '@/platform/assets/services/assetService'
import { isCloud } from '@/platform/distribution/types'
import type { TaskItem } from '@/schemas/apiSchema'
import type { JobListItem } from '@/platform/remote/comfyui/jobs/jobTypes'
import { api } from '@/scripts/api'
import { TaskItemImpl } from './queueStore'
@@ -48,27 +48,18 @@ async function fetchInputFilesFromCloud(): Promise<AssetItem[]> {
}
/**
* Convert history task items to asset items
* Convert history job items to asset items
*/
function mapHistoryToAssets(historyItems: TaskItem[]): AssetItem[] {
function mapHistoryToAssets(historyItems: JobListItem[]): AssetItem[] {
const assetItems: AssetItem[] = []
for (const item of historyItems) {
// Type guard for HistoryTaskItem which has status and outputs
if (item.taskType !== 'History') {
for (const job of historyItems) {
// Only process completed jobs with preview output
if (job.status !== 'completed' || !job.preview_output) {
continue
}
if (!item.outputs || !item.status || item.status?.status_str === 'error') {
continue
}
const task = new TaskItemImpl(
'History',
item.prompt,
item.status,
item.outputs
)
const task = new TaskItemImpl(job)
if (!task.previewOutput) {
continue
@@ -76,11 +67,10 @@ function mapHistoryToAssets(historyItems: TaskItem[]): AssetItem[] {
const assetItem = mapTaskOutputToAssetItem(task, task.previewOutput)
const supportedOutputs = task.flatOutputs.filter((o) => o.supportsPreview)
assetItem.user_metadata = {
...assetItem.user_metadata,
outputCount: supportedOutputs.length,
allOutputs: supportedOutputs
outputCount: job.outputs_count,
allOutputs: task.previewableOutputs
}
assetItems.push(assetItem)
@@ -143,8 +133,8 @@ export const useAssetsStore = defineStore('assets', () => {
offset: historyOffset.value
})
// Convert TaskItems to AssetItems
const newAssets = mapHistoryToAssets(history.History)
// Convert JobListItems to AssetItems
const newAssets = mapHistoryToAssets(history)
if (loadMore) {
// Filter out duplicates and insert in sorted order
@@ -176,7 +166,7 @@ export const useAssetsStore = defineStore('assets', () => {
// Update pagination state
historyOffset.value += BATCH_SIZE
hasMoreHistory.value = history.History.length === BATCH_SIZE
hasMoreHistory.value = history.length === BATCH_SIZE
if (allHistoryItems.value.length > MAX_HISTORY_ITEMS) {
const removed = allHistoryItems.value.slice(MAX_HISTORY_ITEMS)

View File

@@ -396,10 +396,8 @@ export const useExecutionStore = defineStore('execution', () => {
error: e.detail.exception_message
})
}
const pid = e.detail?.prompt_id
// Clear initialization for errored prompt if present
if (e.detail?.prompt_id) clearInitializationByPromptId(e.detail.prompt_id)
resetExecutionState(pid)
clearInitializationByPromptId(e.detail.prompt_id)
resetExecutionState(e.detail.prompt_id)
}
/**

View File

@@ -1,14 +1,14 @@
import { createPinia, setActivePinia } from 'pinia'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import type { ComfyApp } from '@/scripts/app'
import type {
JobDetail,
JobListItem
} from '@/platform/remote/comfyui/jobs/jobTypes'
import type { ComfyWorkflowJSON } from '@/platform/workflow/validation/schemas/workflowSchema'
import type { ComfyApp } from '@/scripts/app'
import { TaskItemImpl } from '@/stores/queueStore'
import * as getWorkflowModule from '@/platform/workflow/cloud'
vi.mock('@/platform/distribution/types', () => ({
isCloud: true
}))
import * as jobOutputCache from '@/services/jobOutputCache'
vi.mock('@/services/extensionService', () => ({
useExtensionService: vi.fn(() => ({
@@ -17,8 +17,6 @@ vi.mock('@/services/extensionService', () => ({
}))
const mockWorkflow: ComfyWorkflowJSON = {
id: 'test-workflow-id',
revision: 0,
last_node_id: 5,
last_link_id: 3,
nodes: [],
@@ -29,53 +27,46 @@ const mockWorkflow: ComfyWorkflowJSON = {
version: 0.4
}
const createHistoryTaskWithWorkflow = (): TaskItemImpl => {
return new TaskItemImpl(
'History',
[
0, // queueIndex
'test-prompt-id', // promptId
{}, // promptInputs
{
client_id: 'test-client',
extra_pnginfo: {
workflow: mockWorkflow
}
},
[] // outputsToExecute
],
{
status_str: 'success',
completed: true,
messages: []
},
{} // outputs
)
// Mock job detail response (matches the actual /jobs/{id} API response structure).
// The workflow JSON is nested at workflow.extra_data.extra_pnginfo.workflow.
const mockJobDetail = {
id: 'test-prompt-id',
status: 'completed' as const,
create_time: Date.now(),
update_time: Date.now(),
workflow: {
extra_data: {
extra_pnginfo: {
workflow: mockWorkflow
}
}
},
outputs: {
'1': { images: [{ filename: 'test.png', subfolder: '', type: 'output' }] }
}
}
const createHistoryTaskWithoutWorkflow = (): TaskItemImpl => {
return new TaskItemImpl(
'History',
[
0,
'test-prompt-id',
{},
{
client_id: 'test-client'
// No extra_pnginfo.workflow
},
[]
],
{
status_str: 'success',
completed: true,
messages: []
},
{}
)
function createHistoryJob(id: string): JobListItem {
const now = Date.now()
return {
id,
status: 'completed',
create_time: now,
priority: now
}
}
describe('TaskItemImpl.loadWorkflow - cloud history workflow fetching', () => {
function createRunningJob(id: string): JobListItem {
const now = Date.now()
return {
id,
status: 'in_progress',
create_time: now,
priority: now
}
}
describe('TaskItemImpl.loadWorkflow - workflow fetching', () => {
let mockApp: ComfyApp
let mockFetchApi: ReturnType<typeof vi.fn>
@@ -91,85 +82,57 @@ describe('TaskItemImpl.loadWorkflow - cloud history workflow fetching', () => {
fetchApi: mockFetchApi
}
} as unknown as ComfyApp
vi.spyOn(getWorkflowModule, 'getWorkflowFromHistory')
})
it('should load workflow directly when workflow is in extra_pnginfo', async () => {
const task = createHistoryTaskWithWorkflow()
it('should fetch workflow from API for history tasks', async () => {
const job = createHistoryJob('test-prompt-id')
const task = new TaskItemImpl(job)
await task.loadWorkflow(mockApp)
expect(mockApp.loadGraphData).toHaveBeenCalledWith(mockWorkflow)
expect(mockFetchApi).not.toHaveBeenCalled()
})
it('should fetch workflow from cloud when workflow is missing from history task', async () => {
const task = createHistoryTaskWithoutWorkflow()
// Mock getWorkflowFromHistory to return workflow
vi.spyOn(getWorkflowModule, 'getWorkflowFromHistory').mockResolvedValue(
mockWorkflow
vi.spyOn(jobOutputCache, 'getJobDetail').mockResolvedValue(
mockJobDetail as JobDetail
)
await task.loadWorkflow(mockApp)
expect(getWorkflowModule.getWorkflowFromHistory).toHaveBeenCalledWith(
expect.any(Function),
'test-prompt-id'
)
expect(jobOutputCache.getJobDetail).toHaveBeenCalledWith('test-prompt-id')
expect(mockApp.loadGraphData).toHaveBeenCalledWith(mockWorkflow)
})
it('should not load workflow when fetch returns undefined', async () => {
const task = createHistoryTaskWithoutWorkflow()
const job = createHistoryJob('test-prompt-id')
const task = new TaskItemImpl(job)
vi.spyOn(getWorkflowModule, 'getWorkflowFromHistory').mockResolvedValue(
undefined
)
vi.spyOn(jobOutputCache, 'getJobDetail').mockResolvedValue(undefined)
await task.loadWorkflow(mockApp)
expect(getWorkflowModule.getWorkflowFromHistory).toHaveBeenCalled()
expect(jobOutputCache.getJobDetail).toHaveBeenCalled()
expect(mockApp.loadGraphData).not.toHaveBeenCalled()
})
it('should only fetch for history tasks, not running tasks', async () => {
const runningTask = new TaskItemImpl(
'Running',
[
0,
'test-prompt-id',
{},
{
client_id: 'test-client'
},
[]
],
undefined,
{}
)
const job = createRunningJob('test-prompt-id')
const runningTask = new TaskItemImpl(job)
vi.spyOn(getWorkflowModule, 'getWorkflowFromHistory').mockResolvedValue(
mockWorkflow
vi.spyOn(jobOutputCache, 'getJobDetail').mockResolvedValue(
mockJobDetail as JobDetail
)
await runningTask.loadWorkflow(mockApp)
expect(getWorkflowModule.getWorkflowFromHistory).not.toHaveBeenCalled()
expect(jobOutputCache.getJobDetail).not.toHaveBeenCalled()
expect(mockApp.loadGraphData).not.toHaveBeenCalled()
})
it('should handle fetch errors gracefully by returning undefined', async () => {
const task = createHistoryTaskWithoutWorkflow()
const job = createHistoryJob('test-prompt-id')
const task = new TaskItemImpl(job)
vi.spyOn(getWorkflowModule, 'getWorkflowFromHistory').mockResolvedValue(
undefined
)
vi.spyOn(jobOutputCache, 'getJobDetail').mockResolvedValue(undefined)
await task.loadWorkflow(mockApp)
expect(getWorkflowModule.getWorkflowFromHistory).toHaveBeenCalled()
expect(jobOutputCache.getJobDetail).toHaveBeenCalled()
expect(mockApp.loadGraphData).not.toHaveBeenCalled()
})
})

View File

@@ -1,34 +1,39 @@
import { createPinia, setActivePinia } from 'pinia'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import type {
HistoryTaskItem,
PendingTaskItem,
RunningTaskItem,
TaskOutput,
TaskPrompt,
TaskStatus
} from '@/schemas/apiSchema'
import type { JobListItem } from '@/platform/remote/comfyui/jobs/jobTypes'
import type { TaskOutput } from '@/schemas/apiSchema'
import { api } from '@/scripts/api'
import { TaskItemImpl, useQueueStore } from '@/stores/queueStore'
// Fixture factories
const createTaskPrompt = (
queueIndex: number,
promptId: string,
inputs: Record<string, any> = {},
extraData: Record<string, any> = {},
outputsToExecute: any[] = []
): TaskPrompt => [queueIndex, promptId, inputs, extraData, outputsToExecute]
// Fixture factory for JobListItem
function createJob(
id: string,
status: JobListItem['status'],
createTime: number = Date.now(),
priority?: number
): JobListItem {
return {
id,
status,
create_time: createTime,
update_time: createTime,
last_state_update: createTime,
priority: priority ?? createTime
}
}
const createTaskStatus = (
statusStr: 'success' | 'error' = 'success',
messages: any[] = []
): TaskStatus => ({
status_str: statusStr,
completed: true,
messages
})
function createRunningJob(createTime: number, id: string): JobListItem {
return createJob(id, 'in_progress', createTime)
}
function createPendingJob(createTime: number, id: string): JobListItem {
return createJob(id, 'pending', createTime)
}
function createHistoryJob(createTime: number, id: string): JobListItem {
return createJob(id, 'completed', createTime)
}
const createTaskOutput = (
nodeId: string = 'node-1',
@@ -39,35 +44,6 @@ const createTaskOutput = (
}
})
const createRunningTask = (
queueIndex: number,
promptId: string
): RunningTaskItem => ({
taskType: 'Running',
prompt: createTaskPrompt(queueIndex, promptId),
remove: { name: 'Cancel', cb: () => {} }
})
const createPendingTask = (
queueIndex: number,
promptId: string
): PendingTaskItem => ({
taskType: 'Pending',
prompt: createTaskPrompt(queueIndex, promptId)
})
const createHistoryTask = (
queueIndex: number,
promptId: string,
outputs: TaskOutput = createTaskOutput(),
status: TaskStatus = createTaskStatus()
): HistoryTaskItem => ({
taskType: 'History',
prompt: createTaskPrompt(queueIndex, promptId),
status,
outputs
})
// Mock API
vi.mock('@/scripts/api', () => ({
api: {
@@ -83,17 +59,13 @@ vi.mock('@/scripts/api', () => ({
describe('TaskItemImpl', () => {
it('should remove animated property from outputs during construction', () => {
const taskItem = new TaskItemImpl(
'History',
[0, 'prompt-id', {}, { client_id: 'client-id' }, []],
{ status_str: 'success', messages: [], completed: true },
{
'node-1': {
images: [{ filename: 'test.png', type: 'output', subfolder: '' }],
animated: [false]
}
const job = createHistoryJob(0, 'prompt-id')
const taskItem = new TaskItemImpl(job, {
'node-1': {
images: [{ filename: 'test.png', type: 'output', subfolder: '' }],
animated: [false]
}
)
})
// Check that animated property was removed
expect('animated' in taskItem.outputs['node-1']).toBe(false)
@@ -103,90 +75,72 @@ describe('TaskItemImpl', () => {
})
it('should handle outputs without animated property', () => {
const taskItem = new TaskItemImpl(
'History',
[0, 'prompt-id', {}, { client_id: 'client-id' }, []],
{ status_str: 'success', messages: [], completed: true },
{
'node-1': {
images: [{ filename: 'test.png', type: 'output', subfolder: '' }]
}
const job = createHistoryJob(0, 'prompt-id')
const taskItem = new TaskItemImpl(job, {
'node-1': {
images: [{ filename: 'test.png', type: 'output', subfolder: '' }]
}
)
})
expect(taskItem.outputs['node-1'].images).toBeDefined()
expect(taskItem.outputs['node-1'].images?.[0]?.filename).toBe('test.png')
})
it('should recognize webm video from core', () => {
const taskItem = new TaskItemImpl(
'History',
[0, 'prompt-id', {}, { client_id: 'client-id' }, []],
{ status_str: 'success', messages: [], completed: true },
{
'node-1': {
video: [{ filename: 'test.webm', type: 'output', subfolder: '' }]
}
const job = createHistoryJob(0, 'prompt-id')
const taskItem = new TaskItemImpl(job, {
'node-1': {
video: [{ filename: 'test.webm', type: 'output', subfolder: '' }]
}
)
})
const output = taskItem.flatOutputs[0]
expect(output.htmlVideoType).toBe('video/webm')
expect(output.isVideo).toBe(true)
expect(output.isWebm).toBe(true)
expect(output.isVhsFormat).toBe(false)
expect(output.isImage).toBe(false)
})
// https://github.com/Kosinkadink/ComfyUI-VideoHelperSuite/blob/0a75c7958fe320efcb052f1d9f8451fd20c730a8/videohelpersuite/nodes.py#L578-L590
it('should recognize webm video from VHS', () => {
const taskItem = new TaskItemImpl(
'History',
[0, 'prompt-id', {}, { client_id: 'client-id' }, []],
{ status_str: 'success', messages: [], completed: true },
{
'node-1': {
gifs: [
{
filename: 'test.webm',
type: 'output',
subfolder: '',
format: 'video/webm',
frame_rate: 30
}
]
}
const job = createHistoryJob(0, 'prompt-id')
const taskItem = new TaskItemImpl(job, {
'node-1': {
gifs: [
{
filename: 'test.webm',
type: 'output',
subfolder: '',
format: 'video/webm',
frame_rate: 30
}
]
}
)
})
const output = taskItem.flatOutputs[0]
expect(output.htmlVideoType).toBe('video/webm')
expect(output.isVideo).toBe(true)
expect(output.isWebm).toBe(true)
expect(output.isVhsFormat).toBe(true)
expect(output.isImage).toBe(false)
})
it('should recognize mp4 video from core', () => {
const taskItem = new TaskItemImpl(
'History',
[0, 'prompt-id', {}, { client_id: 'client-id' }, []],
{ status_str: 'success', messages: [], completed: true },
{
'node-1': {
images: [
{
filename: 'test.mp4',
type: 'output',
subfolder: ''
}
],
animated: [true]
}
const job = createHistoryJob(0, 'prompt-id')
const taskItem = new TaskItemImpl(job, {
'node-1': {
images: [
{
filename: 'test.mp4',
type: 'output',
subfolder: ''
}
],
animated: [true]
}
)
})
const output = taskItem.flatOutputs[0]
@@ -205,22 +159,18 @@ describe('TaskItemImpl', () => {
audioFormats.forEach(({ extension, mimeType }) => {
it(`should recognize ${extension} audio`, () => {
const taskItem = new TaskItemImpl(
'History',
[0, 'prompt-id', {}, { client_id: 'client-id' }, []],
{ status_str: 'success', messages: [], completed: true },
{
'node-1': {
audio: [
{
filename: `test.${extension}`,
type: 'output',
subfolder: ''
}
]
}
const job = createHistoryJob(0, 'prompt-id')
const taskItem = new TaskItemImpl(job, {
'node-1': {
audio: [
{
filename: `test.${extension}`,
type: 'output',
subfolder: ''
}
]
}
)
})
const output = taskItem.flatOutputs[0]
@@ -232,6 +182,58 @@ describe('TaskItemImpl', () => {
})
})
})
describe('error extraction getters', () => {
it('errorMessage returns undefined when no execution_error', () => {
const job = createHistoryJob(0, 'prompt-id')
const taskItem = new TaskItemImpl(job)
expect(taskItem.errorMessage).toBeUndefined()
})
it('errorMessage returns the exception_message from execution_error', () => {
const job: JobListItem = {
...createHistoryJob(0, 'prompt-id'),
status: 'failed',
execution_error: {
node_id: 'node-1',
node_type: 'KSampler',
exception_message: 'GPU out of memory',
exception_type: 'RuntimeError',
traceback: ['line 1', 'line 2'],
current_inputs: {},
current_outputs: {}
}
}
const taskItem = new TaskItemImpl(job)
expect(taskItem.errorMessage).toBe('GPU out of memory')
})
it('executionError returns undefined when no execution_error', () => {
const job = createHistoryJob(0, 'prompt-id')
const taskItem = new TaskItemImpl(job)
expect(taskItem.executionError).toBeUndefined()
})
it('executionError returns the full error object from execution_error', () => {
const errorDetail = {
node_id: 'node-1',
node_type: 'KSampler',
executed: ['node-0'],
exception_message: 'Invalid dimensions',
exception_type: 'ValueError',
traceback: ['traceback line'],
current_inputs: { input1: 'value' },
current_outputs: {}
}
const job: JobListItem = {
...createHistoryJob(0, 'prompt-id'),
status: 'failed',
execution_error: errorDetail
}
const taskItem = new TaskItemImpl(job)
expect(taskItem.executionError).toEqual(errorDetail)
})
})
})
describe('useQueueStore', () => {
@@ -267,15 +269,16 @@ describe('useQueueStore', () => {
describe('update() - basic functionality', () => {
it('should load running and pending tasks from API', async () => {
const runningTask = createRunningTask(1, 'run-1')
const pendingTask1 = createPendingTask(2, 'pend-1')
const pendingTask2 = createPendingTask(3, 'pend-2')
const runningJob = createRunningJob(1, 'run-1')
const pendingJob1 = createPendingJob(2, 'pend-1')
const pendingJob2 = createPendingJob(3, 'pend-2')
// API returns pre-sorted data (newest first)
mockGetQueue.mockResolvedValue({
Running: [runningTask],
Pending: [pendingTask1, pendingTask2]
Running: [runningJob],
Pending: [pendingJob2, pendingJob1] // Pre-sorted by create_time desc
})
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.update()
@@ -287,13 +290,11 @@ describe('useQueueStore', () => {
})
it('should load history tasks from API', async () => {
const historyTask1 = createHistoryTask(5, 'hist-1')
const historyTask2 = createHistoryTask(4, 'hist-2')
const historyJob1 = createHistoryJob(5, 'hist-1')
const historyJob2 = createHistoryJob(4, 'hist-2')
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({
History: [historyTask1, historyTask2]
})
mockGetHistory.mockResolvedValue([historyJob1, historyJob2])
await store.update()
@@ -304,7 +305,7 @@ describe('useQueueStore', () => {
it('should set loading state correctly', async () => {
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
expect(store.isLoading).toBe(false)
@@ -317,7 +318,7 @@ describe('useQueueStore', () => {
it('should clear loading state even if API fails', async () => {
mockGetQueue.mockRejectedValue(new Error('API error'))
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await expect(store.update()).rejects.toThrow('API error')
expect(store.isLoading).toBe(false)
@@ -326,14 +327,12 @@ describe('useQueueStore', () => {
describe('update() - sorting', () => {
it('should sort tasks by queueIndex descending', async () => {
const task1 = createHistoryTask(1, 'hist-1')
const task2 = createHistoryTask(5, 'hist-2')
const task3 = createHistoryTask(3, 'hist-3')
const job1 = createHistoryJob(1, 'hist-1')
const job2 = createHistoryJob(5, 'hist-2')
const job3 = createHistoryJob(3, 'hist-3')
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({
History: [task1, task2, task3]
})
mockGetHistory.mockResolvedValue([job1, job2, job3])
await store.update()
@@ -342,16 +341,17 @@ describe('useQueueStore', () => {
expect(store.historyTasks[2].queueIndex).toBe(1)
})
it('should sort pending tasks by queueIndex descending', async () => {
const pend1 = createPendingTask(10, 'pend-1')
const pend2 = createPendingTask(15, 'pend-2')
const pend3 = createPendingTask(12, 'pend-3')
it('should preserve API sort order for pending tasks', async () => {
const pend1 = createPendingJob(10, 'pend-1')
const pend2 = createPendingJob(15, 'pend-2')
const pend3 = createPendingJob(12, 'pend-3')
// API returns pre-sorted data (newest first)
mockGetQueue.mockResolvedValue({
Running: [],
Pending: [pend1, pend2, pend3]
Pending: [pend2, pend3, pend1] // Pre-sorted by create_time desc
})
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.update()
@@ -363,19 +363,17 @@ describe('useQueueStore', () => {
describe('update() - queue index collision (THE BUG FIX)', () => {
it('should NOT confuse different prompts with same queueIndex', async () => {
const hist1 = createHistoryTask(50, 'prompt-uuid-aaa')
const hist1 = createHistoryJob(50, 'prompt-uuid-aaa')
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [hist1] })
mockGetHistory.mockResolvedValue([hist1])
await store.update()
expect(store.historyTasks).toHaveLength(1)
expect(store.historyTasks[0].promptId).toBe('prompt-uuid-aaa')
const hist2 = createHistoryTask(51, 'prompt-uuid-bbb')
mockGetHistory.mockResolvedValue({
History: [hist2]
})
const hist2 = createHistoryJob(51, 'prompt-uuid-bbb')
mockGetHistory.mockResolvedValue([hist2])
await store.update()
@@ -385,19 +383,17 @@ describe('useQueueStore', () => {
})
it('should correctly reconcile when queueIndex is reused', async () => {
const hist1 = createHistoryTask(100, 'first-prompt-at-100')
const hist2 = createHistoryTask(99, 'prompt-at-99')
const hist1 = createHistoryJob(100, 'first-prompt-at-100')
const hist2 = createHistoryJob(99, 'prompt-at-99')
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [hist1, hist2] })
mockGetHistory.mockResolvedValue([hist1, hist2])
await store.update()
expect(store.historyTasks).toHaveLength(2)
const hist3 = createHistoryTask(101, 'second-prompt-at-101')
mockGetHistory.mockResolvedValue({
History: [hist3, hist2]
})
const hist3 = createHistoryJob(101, 'second-prompt-at-101')
mockGetHistory.mockResolvedValue([hist3, hist2])
await store.update()
@@ -409,23 +405,19 @@ describe('useQueueStore', () => {
})
it('should handle multiple queueIndex collisions simultaneously', async () => {
const hist1 = createHistoryTask(10, 'old-at-10')
const hist2 = createHistoryTask(20, 'old-at-20')
const hist3 = createHistoryTask(30, 'keep-at-30')
const hist1 = createHistoryJob(10, 'old-at-10')
const hist2 = createHistoryJob(20, 'old-at-20')
const hist3 = createHistoryJob(30, 'keep-at-30')
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({
History: [hist3, hist2, hist1]
})
mockGetHistory.mockResolvedValue([hist3, hist2, hist1])
await store.update()
expect(store.historyTasks).toHaveLength(3)
const newHist1 = createHistoryTask(31, 'new-at-31')
const newHist2 = createHistoryTask(32, 'new-at-32')
mockGetHistory.mockResolvedValue({
History: [newHist2, newHist1, hist3]
})
const newHist1 = createHistoryJob(31, 'new-at-31')
const newHist2 = createHistoryJob(32, 'new-at-32')
mockGetHistory.mockResolvedValue([newHist2, newHist1, hist3])
await store.update()
@@ -437,19 +429,17 @@ describe('useQueueStore', () => {
describe('update() - history reconciliation', () => {
it('should keep existing items still on server (by promptId)', async () => {
const hist1 = createHistoryTask(10, 'existing-1')
const hist2 = createHistoryTask(9, 'existing-2')
const hist1 = createHistoryJob(10, 'existing-1')
const hist2 = createHistoryJob(9, 'existing-2')
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [hist1, hist2] })
mockGetHistory.mockResolvedValue([hist1, hist2])
await store.update()
expect(store.historyTasks).toHaveLength(2)
const hist3 = createHistoryTask(11, 'new-1')
mockGetHistory.mockResolvedValue({
History: [hist3, hist1, hist2]
})
const hist3 = createHistoryJob(11, 'new-1')
mockGetHistory.mockResolvedValue([hist3, hist1, hist2])
await store.update()
@@ -460,16 +450,16 @@ describe('useQueueStore', () => {
})
it('should remove items no longer on server', async () => {
const hist1 = createHistoryTask(10, 'remove-me')
const hist2 = createHistoryTask(9, 'keep-me')
const hist1 = createHistoryJob(10, 'remove-me')
const hist2 = createHistoryJob(9, 'keep-me')
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [hist1, hist2] })
mockGetHistory.mockResolvedValue([hist1, hist2])
await store.update()
expect(store.historyTasks).toHaveLength(2)
mockGetHistory.mockResolvedValue({ History: [hist2] })
mockGetHistory.mockResolvedValue([hist2])
await store.update()
@@ -478,18 +468,16 @@ describe('useQueueStore', () => {
})
it('should add new items from server', async () => {
const hist1 = createHistoryTask(5, 'old-1')
const hist1 = createHistoryJob(5, 'old-1')
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [hist1] })
mockGetHistory.mockResolvedValue([hist1])
await store.update()
const hist2 = createHistoryTask(6, 'new-1')
const hist3 = createHistoryTask(7, 'new-2')
mockGetHistory.mockResolvedValue({
History: [hist3, hist2, hist1]
})
const hist2 = createHistoryJob(6, 'new-1')
const hist3 = createHistoryJob(7, 'new-2')
mockGetHistory.mockResolvedValue([hist3, hist2, hist1])
await store.update()
@@ -497,18 +485,69 @@ describe('useQueueStore', () => {
expect(store.historyTasks.map((t) => t.promptId)).toContain('new-1')
expect(store.historyTasks.map((t) => t.promptId)).toContain('new-2')
})
it('should recreate TaskItemImpl when outputs_count changes', async () => {
// Initial load without outputs_count
const jobWithoutOutputsCount = createHistoryJob(10, 'job-1')
delete (jobWithoutOutputsCount as any).outputs_count
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue([jobWithoutOutputsCount])
await store.update()
expect(store.historyTasks).toHaveLength(1)
const initialTask = store.historyTasks[0]
expect(initialTask.outputsCount).toBeUndefined()
// Second load with outputs_count now populated
const jobWithOutputsCount = {
...createHistoryJob(10, 'job-1'),
outputs_count: 2
}
mockGetHistory.mockResolvedValue([jobWithOutputsCount])
await store.update()
// Should have recreated the TaskItemImpl with new outputs_count
expect(store.historyTasks).toHaveLength(1)
const updatedTask = store.historyTasks[0]
expect(updatedTask.outputsCount).toBe(2)
// Should be a different instance
expect(updatedTask).not.toBe(initialTask)
})
it('should reuse TaskItemImpl when outputs_count unchanged', async () => {
const job = {
...createHistoryJob(10, 'job-1'),
outputs_count: 2
}
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue([job])
await store.update()
const initialTask = store.historyTasks[0]
// Same job with same outputs_count
mockGetHistory.mockResolvedValue([{ ...job }])
await store.update()
// Should reuse the same instance
expect(store.historyTasks[0]).toBe(initialTask)
})
})
describe('update() - maxHistoryItems limit', () => {
it('should enforce maxHistoryItems limit', async () => {
store.maxHistoryItems = 3
const tasks = Array.from({ length: 5 }, (_, i) =>
createHistoryTask(10 - i, `hist-${i}`)
const jobs = Array.from({ length: 5 }, (_, i) =>
createHistoryJob(10 - i, `hist-${i}`)
)
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: tasks })
mockGetHistory.mockResolvedValue(jobs)
await store.update()
@@ -522,21 +561,19 @@ describe('useQueueStore', () => {
store.maxHistoryItems = 5
const initial = Array.from({ length: 3 }, (_, i) =>
createHistoryTask(10 + i, `existing-${i}`)
createHistoryJob(10 + i, `existing-${i}`)
)
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: initial })
mockGetHistory.mockResolvedValue(initial)
await store.update()
expect(store.historyTasks).toHaveLength(3)
const newTasks = Array.from({ length: 4 }, (_, i) =>
createHistoryTask(20 + i, `new-${i}`)
const newJobs = Array.from({ length: 4 }, (_, i) =>
createHistoryJob(20 + i, `new-${i}`)
)
mockGetHistory.mockResolvedValue({
History: [...newTasks, ...initial]
})
mockGetHistory.mockResolvedValue([...newJobs, ...initial])
await store.update()
@@ -547,10 +584,10 @@ describe('useQueueStore', () => {
it('should handle maxHistoryItems = 0', async () => {
store.maxHistoryItems = 0
const tasks = [createHistoryTask(10, 'hist-1')]
const jobs = [createHistoryJob(10, 'hist-1')]
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: tasks })
mockGetHistory.mockResolvedValue(jobs)
await store.update()
@@ -560,13 +597,13 @@ describe('useQueueStore', () => {
it('should handle maxHistoryItems = 1', async () => {
store.maxHistoryItems = 1
const tasks = [
createHistoryTask(10, 'hist-1'),
createHistoryTask(9, 'hist-2')
const jobs = [
createHistoryJob(10, 'hist-1'),
createHistoryJob(9, 'hist-2')
]
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: tasks })
mockGetHistory.mockResolvedValue(jobs)
await store.update()
@@ -577,18 +614,18 @@ describe('useQueueStore', () => {
it('should dynamically adjust when maxHistoryItems changes', async () => {
store.maxHistoryItems = 10
const tasks = Array.from({ length: 15 }, (_, i) =>
createHistoryTask(20 - i, `hist-${i}`)
const jobs = Array.from({ length: 15 }, (_, i) =>
createHistoryJob(20 - i, `hist-${i}`)
)
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: tasks })
mockGetHistory.mockResolvedValue(jobs)
await store.update()
expect(store.historyTasks).toHaveLength(10)
store.maxHistoryItems = 5
mockGetHistory.mockResolvedValue({ History: tasks })
mockGetHistory.mockResolvedValue(jobs)
await store.update()
expect(store.historyTasks).toHaveLength(5)
@@ -597,19 +634,17 @@ describe('useQueueStore', () => {
describe('computed properties', () => {
it('tasks should combine pending, running, and history in correct order', async () => {
const running = createRunningTask(5, 'run-1')
const pending1 = createPendingTask(6, 'pend-1')
const pending2 = createPendingTask(7, 'pend-2')
const hist1 = createHistoryTask(3, 'hist-1')
const hist2 = createHistoryTask(4, 'hist-2')
const running = createRunningJob(5, 'run-1')
const pending1 = createPendingJob(6, 'pend-1')
const pending2 = createPendingJob(7, 'pend-2')
const hist1 = createHistoryJob(3, 'hist-1')
const hist2 = createHistoryJob(4, 'hist-2')
mockGetQueue.mockResolvedValue({
Running: [running],
Pending: [pending1, pending2]
})
mockGetHistory.mockResolvedValue({
History: [hist2, hist1]
})
mockGetHistory.mockResolvedValue([hist2, hist1])
await store.update()
@@ -624,9 +659,9 @@ describe('useQueueStore', () => {
it('hasPendingTasks should be true when pending tasks exist', async () => {
mockGetQueue.mockResolvedValue({
Running: [],
Pending: [createPendingTask(1, 'pend-1')]
Pending: [createPendingJob(1, 'pend-1')]
})
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.update()
expect(store.hasPendingTasks).toBe(true)
@@ -634,21 +669,19 @@ describe('useQueueStore', () => {
it('hasPendingTasks should be false when no pending tasks', async () => {
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.update()
expect(store.hasPendingTasks).toBe(false)
})
it('lastHistoryQueueIndex should return highest queue index', async () => {
const hist1 = createHistoryTask(10, 'hist-1')
const hist2 = createHistoryTask(25, 'hist-2')
const hist3 = createHistoryTask(15, 'hist-3')
const hist1 = createHistoryJob(10, 'hist-1')
const hist2 = createHistoryJob(25, 'hist-2')
const hist3 = createHistoryJob(15, 'hist-3')
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({
History: [hist1, hist2, hist3]
})
mockGetHistory.mockResolvedValue([hist1, hist2, hist3])
await store.update()
expect(store.lastHistoryQueueIndex).toBe(25)
@@ -656,7 +689,7 @@ describe('useQueueStore', () => {
it('lastHistoryQueueIndex should be -1 when no history', async () => {
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.update()
expect(store.lastHistoryQueueIndex).toBe(-1)
@@ -666,19 +699,17 @@ describe('useQueueStore', () => {
describe('clear()', () => {
beforeEach(async () => {
mockGetQueue.mockResolvedValue({
Running: [createRunningTask(1, 'run-1')],
Pending: [createPendingTask(2, 'pend-1')]
})
mockGetHistory.mockResolvedValue({
History: [createHistoryTask(3, 'hist-1')]
Running: [createRunningJob(1, 'run-1')],
Pending: [createPendingJob(2, 'pend-1')]
})
mockGetHistory.mockResolvedValue([createHistoryJob(3, 'hist-1')])
await store.update()
})
it('should clear both queue and history by default', async () => {
mockClearItems.mockResolvedValue(undefined)
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.clear()
@@ -693,9 +724,7 @@ describe('useQueueStore', () => {
it('should clear only queue when specified', async () => {
mockClearItems.mockResolvedValue(undefined)
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({
History: [createHistoryTask(3, 'hist-1')]
})
mockGetHistory.mockResolvedValue([createHistoryJob(3, 'hist-1')])
await store.clear(['queue'])
@@ -707,10 +736,10 @@ describe('useQueueStore', () => {
it('should clear only history when specified', async () => {
mockClearItems.mockResolvedValue(undefined)
mockGetQueue.mockResolvedValue({
Running: [createRunningTask(1, 'run-1')],
Pending: [createPendingTask(2, 'pend-1')]
Running: [createRunningJob(1, 'run-1')],
Pending: [createPendingJob(2, 'pend-1')]
})
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.clear(['history'])
@@ -729,11 +758,12 @@ describe('useQueueStore', () => {
describe('delete()', () => {
it('should delete task from queue', async () => {
const task = new TaskItemImpl('Pending', createTaskPrompt(1, 'pend-1'))
const job = createPendingJob(1, 'pend-1')
const task = new TaskItemImpl(job)
mockDeleteItem.mockResolvedValue(undefined)
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.delete(task)
@@ -741,16 +771,12 @@ describe('useQueueStore', () => {
})
it('should delete task from history', async () => {
const task = new TaskItemImpl(
'History',
createTaskPrompt(1, 'hist-1'),
createTaskStatus(),
createTaskOutput()
)
const job = createHistoryJob(1, 'hist-1')
const task = new TaskItemImpl(job, createTaskOutput())
mockDeleteItem.mockResolvedValue(undefined)
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.delete(task)
@@ -758,11 +784,12 @@ describe('useQueueStore', () => {
})
it('should refresh store after deletion', async () => {
const task = new TaskItemImpl('Pending', createTaskPrompt(1, 'pend-1'))
const job = createPendingJob(1, 'pend-1')
const task = new TaskItemImpl(job)
mockDeleteItem.mockResolvedValue(undefined)
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.delete(task)

View File

@@ -2,34 +2,27 @@ import _ from 'es-toolkit/compat'
import { defineStore } from 'pinia'
import { computed, ref, shallowRef, toRaw, toValue } from 'vue'
import { isCloud } from '@/platform/distribution/types'
import { reconcileHistory } from '@/platform/remote/comfyui/history/reconciliation'
import { useSettingStore } from '@/platform/settings/settingStore'
import { getWorkflowFromHistory } from '@/platform/workflow/cloud'
import { extractWorkflow } from '@/platform/remote/comfyui/jobs/fetchJobs'
import type {
ComfyWorkflowJSON,
NodeId
} from '@/platform/workflow/validation/schemas/workflowSchema'
APITaskType,
JobListItem,
TaskType
} from '@/platform/remote/comfyui/jobs/jobTypes'
import type { NodeId } from '@/platform/workflow/validation/schemas/workflowSchema'
import type {
HistoryTaskItem,
ResultItem,
StatusWsMessageStatus,
TaskItem,
TaskOutput,
TaskPrompt,
TaskStatus,
TaskType
TaskOutput
} from '@/schemas/apiSchema'
import { api } from '@/scripts/api'
import type { ComfyApp } from '@/scripts/app'
import { useExtensionService } from '@/services/extensionService'
import { getJobDetail } from '@/services/jobOutputCache'
import { useNodeOutputStore } from '@/stores/imagePreviewStore'
import { useExecutionStore } from '@/stores/executionStore'
import { useSettingStore } from '@/platform/settings/settingStore'
import { getMediaTypeFromFilename } from '@/utils/formatUtil'
// Task type used in the API.
type APITaskType = 'queue' | 'history'
enum TaskItemDisplayStatus {
Running = 'Running',
Pending = 'Pending',
@@ -212,32 +205,44 @@ export class ResultItemImpl {
get supportsPreview(): boolean {
return this.isImage || this.isVideo || this.isAudio || this.is3D
}
/** Outputs that can be previewed (images, videos, audio, 3D). */
static filterPreviewable(
outputs: readonly ResultItemImpl[]
): ResultItemImpl[] {
return outputs.filter((o) => o.supportsPreview)
}
/** Index of the item whose url matches; falls back to 0 when url is missing or unmatched. */
static findByUrl(items: readonly ResultItemImpl[], url?: string): number {
if (!url) return 0
const idx = items.findIndex((o) => o.url === url)
return idx >= 0 ? idx : 0
}
}
export class TaskItemImpl {
readonly taskType: TaskType
readonly prompt: TaskPrompt
readonly status?: TaskStatus
readonly job: JobListItem
readonly outputs: TaskOutput
readonly flatOutputs: ReadonlyArray<ResultItemImpl>
constructor(
taskType: TaskType,
prompt: TaskPrompt,
status?: TaskStatus,
job: JobListItem,
outputs?: TaskOutput,
flatOutputs?: ReadonlyArray<ResultItemImpl>
) {
this.taskType = taskType
this.prompt = prompt
this.status = status
this.job = job
// If no outputs provided but job has preview_output, create synthetic outputs
// using the real nodeId and mediaType from the backend response
const effectiveOutputs =
outputs ??
(job.preview_output
? {
[job.preview_output.nodeId]: {
[job.preview_output.mediaType]: [job.preview_output]
}
}
: {})
// Remove animated outputs from the outputs object
// outputs.animated is an array of booleans indicating whether the corresponding
// images in the result are animated.
// The queueStore does not use this information.
// It is part of the legacy API response. We should redesign the backend API.
// https://github.com/Comfy-Org/ComfyUI_frontend/issues/2739
this.outputs = _.mapValues(outputs ?? {}, (nodeOutputs) =>
this.outputs = _.mapValues(effectiveOutputs, (nodeOutputs) =>
_.omit(nodeOutputs, 'animated')
)
this.flatOutputs = flatOutputs ?? this.calculateFlatOutputs()
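
To make the synthetic-outputs branch above concrete, a hedged sketch with made-up field values showing what a job that only carries a preview_output expands into:

import type { JobListItem } from '@/platform/remote/comfyui/jobs/jobTypes'
import { TaskItemImpl } from '@/stores/queueStore'

// Illustrative job: all field values are made up.
const job: JobListItem = {
  id: 'job-123',
  status: 'completed',
  create_time: Date.now(),
  priority: 1,
  preview_output: {
    nodeId: '9',
    mediaType: 'images',
    filename: 'ComfyUI_0001.png',
    subfolder: '',
    type: 'output'
  }
}

// The constructor expands preview_output into { '9': { images: [preview_output] } },
// so exactly one previewable flat output is produced.
const task = new TaskItemImpl(job)
console.assert(task.flatOutputs.length === 1)
console.assert(task.previewOutput?.filename === 'ComfyUI_0001.png')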
@@ -261,15 +266,31 @@ export class TaskItemImpl {
)
}
/** All outputs that support preview (images, videos, audio, 3D) */
get previewableOutputs(): readonly ResultItemImpl[] {
return ResultItemImpl.filterPreviewable(this.flatOutputs)
}
get previewOutput(): ResultItemImpl | undefined {
const previewable = this.previewableOutputs
// Prefer saved media files over the temp previews
return (
this.flatOutputs.find(
// Prefer saved media files over the temp previews
(output) => output.type === 'output' && output.supportsPreview
) ?? this.flatOutputs.find((output) => output.supportsPreview)
previewable.find((output) => output.type === 'output') ?? previewable[0]
)
}
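// Illustrative sketch, not part of this diff: the same "saved before temp"
// preference shown over plain objects. `pickPreview` is a hypothetical
// stand-in for the getter above.
//   pickPreview = (items) => items.find((o) => o.type === 'output') ?? items[0]
//   pickPreview([{ type: 'temp' }, { type: 'output' }]) // -> the 'output' item
//   pickPreview([{ type: 'temp' }])                     // -> the temp preview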
// Derive taskType from job status
get taskType(): TaskType {
switch (this.job.status) {
case 'in_progress':
return 'Running'
case 'pending':
return 'Pending'
default:
return 'History'
}
}
get apiTaskType(): APITaskType {
switch (this.taskType) {
case 'Running':
@@ -285,61 +306,42 @@ export class TaskItemImpl {
}
get queueIndex() {
return this.prompt[0]
return this.job.priority
}
get promptId() {
return this.prompt[1]
return this.job.id
}
get promptInputs() {
return this.prompt[2]
get outputsCount(): number | undefined {
return this.job.outputs_count ?? undefined
}
get extraData() {
return this.prompt[3]
get status() {
return this.job.status
}
get outputsToExecute() {
return this.prompt[4]
get errorMessage(): string | undefined {
return this.job.execution_error?.exception_message ?? undefined
}
get extraPngInfo() {
return this.extraData.extra_pnginfo
get executionError() {
return this.job.execution_error ?? undefined
}
get clientId() {
return this.extraData.client_id
get workflowId(): string | undefined {
return this.job.workflow_id ?? undefined
}
get workflow(): ComfyWorkflowJSON | undefined {
return this.extraPngInfo?.workflow
get createTime(): number {
return this.job.create_time
}
get messages() {
return this.status?.messages || []
}
/**
* Server-provided creation time in milliseconds, when available.
*
* Sources:
* - Queue: 5th tuple element may be a metadata object with { create_time }.
* - History (Cloud V2): Adapter injects create_time into prompt[3].extra_data.
*/
get createTime(): number | undefined {
const extra = (this.extraData as any) || {}
const fromExtra =
typeof extra.create_time === 'number' ? extra.create_time : undefined
if (typeof fromExtra === 'number') return fromExtra
return undefined
}
get interrupted() {
return _.some(
this.messages,
(message) => message[0] === 'execution_interrupted'
get interrupted(): boolean {
return (
this.job.status === 'failed' &&
this.job.execution_error?.exception_type ===
'InterruptProcessingException'
)
}
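// Illustrative sketch, not part of this diff: what the new error getters
// yield for a failed job. The execution_error payload is made up; only the
// fields read above (status, exception_type, exception_message) are real.
//
//   job = {
//     status: 'failed',
//     execution_error: {
//       exception_type: 'InterruptProcessingException',
//       exception_message: 'Processing interrupted'
//     }
//   }
//
//   task.errorMessage    // -> 'Processing interrupted'
//   task.executionError  // -> job.execution_error
//   task.interrupted     // -> true (failed + InterruptProcessingException)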
@@ -352,42 +354,26 @@ export class TaskItemImpl {
}
get displayStatus(): TaskItemDisplayStatus {
switch (this.taskType) {
case 'Running':
switch (this.job.status) {
case 'in_progress':
return TaskItemDisplayStatus.Running
case 'Pending':
case 'pending':
return TaskItemDisplayStatus.Pending
case 'History':
if (this.interrupted) return TaskItemDisplayStatus.Cancelled
switch (this.status!.status_str) {
case 'success':
return TaskItemDisplayStatus.Completed
case 'error':
return TaskItemDisplayStatus.Failed
}
case 'completed':
return TaskItemDisplayStatus.Completed
case 'failed':
return TaskItemDisplayStatus.Failed
case 'cancelled':
return TaskItemDisplayStatus.Cancelled
}
}
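// Illustrative sketch, not part of this diff: end-to-end status mapping for a
// cancelled job, tying the two getters above together.
//   job.status = 'cancelled'
//     -> taskType      = 'History'   (neither in_progress nor pending)
//     -> displayStatus = TaskItemDisplayStatus.Cancelled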
get executionStartTimestamp() {
const message = this.messages.find(
(message) => message[0] === 'execution_start'
)
return message ? message[1].timestamp : undefined
return this.job.execution_start_time ?? undefined
}
get executionEndTimestamp() {
const messages = this.messages.filter((message) =>
[
'execution_success',
'execution_interrupted',
'execution_error'
].includes(message[0])
)
if (!messages.length) {
return undefined
}
return _.max(messages.map((message) => message[1].timestamp))
return this.job.execution_end_time ?? undefined
}
get executionTime() {
@@ -403,28 +389,48 @@ export class TaskItemImpl {
: undefined
}
public async loadWorkflow(app: ComfyApp) {
let workflowData = this.workflow
/**
 * Loads full outputs for history tasks that only carry preview data.
 * Returns a new TaskItemImpl with the full outputs, or this task unchanged
 * if there is nothing to load.
 */
public async loadFullOutputs(): Promise<TaskItemImpl> {
// Only load for history tasks (caller checks outputsCount > 1)
if (!this.isHistory) {
return this
}
const jobDetail = await getJobDetail(this.promptId)
if (isCloud && !workflowData && this.isHistory) {
workflowData = await getWorkflowFromHistory(
(url) => app.api.fetchApi(url),
this.promptId
)
if (!jobDetail?.outputs) {
return this
}
// Create new TaskItemImpl with full outputs
return new TaskItemImpl(this.job, jobDetail.outputs)
}
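// Illustrative usage sketch, not part of this diff: the lazy-loading pattern
// the comment above refers to. `showTask` is a hypothetical caller that would
// live in a component, not in this class; only isHistory, outputsCount and
// loadFullOutputs come from TaskItemImpl.
async function showTask(task: TaskItemImpl): Promise<TaskItemImpl> {
  // List views render the single preview output; fetch the rest on demand.
  if (task.isHistory && (task.outputsCount ?? 0) > 1) {
    return await task.loadFullOutputs()
  }
  return task
}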
public async loadWorkflow(app: ComfyApp) {
if (!this.isHistory) {
return
}
// Single fetch for both workflow and outputs (with caching)
const jobDetail = await getJobDetail(this.promptId)
const workflowData = await extractWorkflow(jobDetail)
if (!workflowData) {
return
}
await app.loadGraphData(toRaw(workflowData))
if (!this.outputs) {
// Use full outputs from job detail, or fall back to existing outputs
const outputsToLoad = jobDetail?.outputs ?? this.outputs
if (!outputsToLoad) {
return
}
const nodeOutputsStore = useNodeOutputStore()
const rawOutputs = toRaw(this.outputs)
const rawOutputs = toRaw(outputsToLoad)
for (const nodeExecutionId in rawOutputs) {
nodeOutputsStore.setNodeOutputsByExecutionId(
nodeExecutionId,
@@ -445,15 +451,10 @@ export class TaskItemImpl {
return this.flatOutputs.map(
(output: ResultItemImpl, i: number) =>
new TaskItemImpl(
this.taskType,
[
this.queueIndex,
`${this.promptId}-${i}`,
this.promptInputs,
this.extraData,
this.outputsToExecute
],
this.status,
{
...this.job,
id: `${this.promptId}-${i}`
},
{
[output.nodeId]: {
[output.mediaType]: [output]
@@ -463,32 +464,8 @@ export class TaskItemImpl {
)
)
}
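// Illustrative sketch, not part of this diff: the mapping above splits a task
// with N previewable outputs into N single-output TaskItemImpls. With made-up
// values, a history task whose promptId is 'abc123' and which has two flat
// outputs yields items with promptIds 'abc123-0' and 'abc123-1', each carrying
// a copy of the parent job and a one-entry outputs map.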
public toTaskItem(): TaskItem {
const item: HistoryTaskItem = {
taskType: 'History',
prompt: this.prompt,
status: this.status!,
outputs: this.outputs
}
return item
}
}
const sortNewestFirst = (a: TaskItemImpl, b: TaskItemImpl) =>
b.queueIndex - a.queueIndex
const toTaskItemImpls = (tasks: TaskItem[]): TaskItemImpl[] =>
tasks.map(
(task) =>
new TaskItemImpl(
task.taskType,
task.prompt,
'status' in task ? task.status : undefined,
'outputs' in task ? task.outputs : undefined
)
)
export const useQueueStore = defineStore('queue', () => {
// Use shallowRef because TaskItemImpl instances are immutable and arrays are
// replaced entirely (not mutated), so deep reactivity would only add overhead
@@ -525,8 +502,9 @@ export const useQueueStore = defineStore('queue', () => {
api.getHistory(maxHistoryItems.value)
])
runningTasks.value = toTaskItemImpls(queue.Running).sort(sortNewestFirst)
pendingTasks.value = toTaskItemImpls(queue.Pending).sort(sortNewestFirst)
// API returns pre-sorted data (sort_by=create_time&order=desc)
runningTasks.value = queue.Running.map((job) => new TaskItemImpl(job))
pendingTasks.value = queue.Pending.map((job) => new TaskItemImpl(job))
const currentHistory = toValue(historyTasks)
@@ -534,7 +512,7 @@ export const useQueueStore = defineStore('queue', () => {
const executionStore = useExecutionStore()
appearedTasks.forEach((task) => {
const promptIdString = String(task.promptId)
const workflowId = task.workflow?.id
const workflowId = task.workflowId
if (workflowId && promptIdString) {
executionStore.registerPromptWorkflowIdMapping(
promptIdString,
@@ -543,22 +521,26 @@ export const useQueueStore = defineStore('queue', () => {
}
})
const items = reconcileHistory(
history.History,
currentHistory.map((impl) => impl.toTaskItem()),
toValue(maxHistoryItems),
toValue(lastHistoryQueueIndex)
)
// Sort by create_time descending and limit to maxHistoryItems
const sortedHistory = [...history]
.sort((a, b) => b.create_time - a.create_time)
.slice(0, toValue(maxHistoryItems))
// Reuse existing TaskItemImpl instances or create new ones
// Must recreate if outputs_count changed (e.g., API started returning it)
const existingByPromptId = new Map(
currentHistory.map((impl) => [impl.promptId, impl])
)
historyTasks.value = items.map(
(item) =>
existingByPromptId.get(item.prompt[1]) ?? toTaskItemImpls([item])[0]
)
historyTasks.value = sortedHistory.map((job) => {
const existing = existingByPromptId.get(job.id)
if (!existing) return new TaskItemImpl(job)
// Recreate if outputs_count changed to ensure lazy loading works
if (existing.outputsCount !== (job.outputs_count ?? undefined)) {
return new TaskItemImpl(job)
}
return existing
})
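// Illustrative sketch, not part of this diff: the reuse-or-recreate rule above
// pulled out as a standalone helper. Instances are kept stable unless
// outputs_count changed, which would otherwise break the lazy-loading check.
const reuseOrCreate = (
  existing: TaskItemImpl | undefined,
  job: JobListItem
): TaskItemImpl => {
  if (!existing) return new TaskItemImpl(job)
  // A changed outputs_count means the cached instance no longer reflects the job
  if (existing.outputsCount !== (job.outputs_count ?? undefined)) {
    return new TaskItemImpl(job)
  }
  return existing
}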
} finally {
isLoading.value = false
}