[feat] Add Jobs API integration with memory optimization and lazy loading

Implements Jobs API endpoints (/jobs) for the cloud distribution,
replacing the history_v2 API and reducing per-item memory use by more
than 99.9%.

Key changes:
- Jobs API types, schemas, and fetchers for list and detail endpoints
- Adapter to convert Jobs API format to TaskItem format
- Lazy loading for full outputs when loading workflows
- hasOnlyPreviewOutputs() detection for preview-only tasks
- Feature flag to toggle between Jobs API and history_v2

Implementation details:
- List endpoint: Returns preview_output only (100-200 bytes per job)
- Detail endpoint: Returns full workflow and outputs on demand (see the sketch after this list)
- Cloud builds use /jobs?status=completed for history view
- Desktop builds unchanged (still use history_v1)
- 21 unit and integration tests (all passing)
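
A minimal sketch of how the list/detail split is intended to be consumed, using the fetcher signatures added in this commit (fetchHistory, fetchJobDetail, extractWorkflow from '@/platform/remote/comfyui/jobs'). The wrapper functions and their names are illustrative only, not part of the changeset.

```ts
import { extractWorkflow, fetchHistory, fetchJobDetail } from '@/platform/remote/comfyui/jobs'
import { api } from '@/scripts/api'

const fetchApi = (url: string) => api.fetchApi(url)

// List view: lightweight JobListItems only (status, create_time, preview_output).
async function loadHistoryPreviews() {
  return fetchHistory(fetchApi, 200)
}

// Detail view: fetch the full job record lazily, e.g. when the user re-opens a workflow.
async function loadWorkflowForJob(jobId: string) {
  const detail = await fetchJobDetail(fetchApi, jobId) // GET /jobs/{job_id}
  return extractWorkflow(detail) // undefined if the job or its workflow is missing
}
```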

Memory optimization:
- Old: 300-600 KB per history item (full outputs)
- New: 100-200 bytes per history item (preview only)
- Reduction: ~99.95% (three to four orders of magnitude)

Co-Authored-By: Claude <noreply@anthropic.com>
Author: Richard Yu
Date: 2025-11-15 17:25:19 -08:00
Parent: e9d5ce7f3f
Commit: b733b88628
38 changed files with 1335 additions and 3047 deletions

View File

@@ -21,7 +21,6 @@ import {
import { Topbar } from './components/Topbar'
import type { Position, Size } from './types'
import { NodeReference, SubgraphSlotReference } from './utils/litegraphUtils'
import TaskHistory from './utils/taskHistory'
dotenv.config()
@@ -116,8 +115,6 @@ class ConfirmDialog {
}
export class ComfyPage {
private _history: TaskHistory | null = null
public readonly url: string
// All canvas position operations are based on default view of canvas.
public readonly canvas: Locator
@@ -268,11 +265,6 @@ export class ComfyPage {
}
}
setupHistory(): TaskHistory {
this._history ??= new TaskHistory(this)
return this._history
}
async setup({
clearStorage = true,
mockReleases = true

View File

@@ -1,164 +0,0 @@
import type { Request, Route } from '@playwright/test'
import _ from 'es-toolkit/compat'
import fs from 'fs'
import path from 'path'
import { v4 as uuidv4 } from 'uuid'
import type {
HistoryTaskItem,
TaskItem,
TaskOutput
} from '../../../src/schemas/apiSchema'
import type { ComfyPage } from '../ComfyPage'
/** keyof TaskOutput[string] */
type OutputFileType = 'images' | 'audio' | 'animated'
const DEFAULT_IMAGE = 'example.webp'
const getFilenameParam = (request: Request) => {
const url = new URL(request.url())
return url.searchParams.get('filename') || DEFAULT_IMAGE
}
const getContentType = (filename: string, fileType: OutputFileType) => {
const subtype = path.extname(filename).slice(1)
switch (fileType) {
case 'images':
return `image/${subtype}`
case 'audio':
return `audio/${subtype}`
case 'animated':
return `video/${subtype}`
}
}
const setQueueIndex = (task: TaskItem) => {
task.prompt[0] = TaskHistory.queueIndex++
}
const setPromptId = (task: TaskItem) => {
task.prompt[1] = uuidv4()
}
export default class TaskHistory {
static queueIndex = 0
static readonly defaultTask: Readonly<HistoryTaskItem> = {
prompt: [0, 'prompt-id', {}, { client_id: uuidv4() }, []],
outputs: {},
status: {
status_str: 'success',
completed: true,
messages: []
},
taskType: 'History'
}
private tasks: HistoryTaskItem[] = []
private outputContentTypes: Map<string, string> = new Map()
constructor(readonly comfyPage: ComfyPage) {}
private loadAsset: (filename: string) => Buffer = _.memoize(
(filename: string) => {
const filePath = this.comfyPage.assetPath(filename)
return fs.readFileSync(filePath)
}
)
private async handleGetHistory(route: Route) {
return route.fulfill({
status: 200,
contentType: 'application/json',
body: JSON.stringify(this.tasks)
})
}
private async handleGetView(route: Route) {
const fileName = getFilenameParam(route.request())
if (!this.outputContentTypes.has(fileName)) {
return route.continue()
}
const asset = this.loadAsset(fileName)
return route.fulfill({
status: 200,
contentType: this.outputContentTypes.get(fileName),
body: asset,
headers: {
'Cache-Control': 'public, max-age=31536000',
'Content-Length': asset.byteLength.toString()
}
})
}
async setupRoutes() {
return this.comfyPage.page.route(
/.*\/api\/(view|history)(\?.*)?$/,
async (route) => {
const request = route.request()
const method = request.method()
const isViewReq = request.url().includes('view') && method === 'GET'
if (isViewReq) return this.handleGetView(route)
const isHistoryPath = request.url().includes('history')
const isGetHistoryReq = isHistoryPath && method === 'GET'
if (isGetHistoryReq) return this.handleGetHistory(route)
const isClearReq =
method === 'POST' &&
isHistoryPath &&
request.postDataJSON()?.clear === true
if (isClearReq) return this.clearTasks()
return route.continue()
}
)
}
private createOutputs(
filenames: string[],
filetype: OutputFileType
): TaskOutput {
return filenames.reduce((outputs, filename, i) => {
const nodeId = `${i + 1}`
outputs[nodeId] = {
[filetype]: [{ filename, subfolder: '', type: 'output' }]
}
const contentType = getContentType(filename, filetype)
this.outputContentTypes.set(filename, contentType)
return outputs
}, {})
}
private addTask(task: HistoryTaskItem) {
setPromptId(task)
setQueueIndex(task)
this.tasks.unshift(task) // Tasks are added to the front of the queue
}
clearTasks(): this {
this.tasks = []
return this
}
withTask(
outputFilenames: string[],
outputFiletype: OutputFileType = 'images',
overrides: Partial<HistoryTaskItem> = {}
): this {
this.addTask({
...TaskHistory.defaultTask,
outputs: this.createOutputs(outputFilenames, outputFiletype),
...overrides
})
return this
}
/** Repeats the last task in the task history a specified number of times. */
repeat(n: number): this {
for (let i = 0; i < n; i++)
this.addTask(structuredClone(this.tasks.at(0)) as HistoryTaskItem)
return this
}
}

View File

@@ -208,7 +208,11 @@ const {
galleryActiveIndex,
galleryItems,
onViewItem: openResultGallery
} = useResultGallery(() => filteredTasks.value)
} = useResultGallery(
() => filteredTasks.value,
// Lazy load full outputs for history items
(url) => api.fetchApi(url)
)
const setExpanded = (expanded: boolean) => {
isExpanded.value = expanded
@@ -252,7 +256,7 @@ const focusAssetInSidebar = async (item: JobListItem) => {
const inspectJobAsset = wrapWithErrorHandlingAsync(
async (item: JobListItem) => {
openResultGallery(item)
await openResultGallery(item)
await focusAssetInSidebar(item)
}
)

View File

@@ -1,6 +1,6 @@
import type { Meta, StoryObj } from '@storybook/vue3-vite'
import type { TaskStatus } from '@/schemas/apiSchema'
import type { JobListItem } from '@/platform/remote/comfyui/jobs/types/jobTypes'
import { useExecutionStore } from '@/stores/executionStore'
import { TaskItemImpl, useQueueStore } from '@/stores/queueStore'
@@ -37,91 +37,72 @@ function resetStores() {
exec.nodeProgressStatesByPrompt = {}
}
function makeTask(
id: string,
priority: number,
overrides: Omit<Partial<JobListItem>, 'id' | 'priority'> &
Pick<JobListItem, 'status' | 'create_time' | 'update_time'>
): TaskItemImpl {
const job: JobListItem = {
id,
priority,
last_state_update: null,
...overrides
}
return new TaskItemImpl(job)
}
function makePendingTask(
id: string,
index: number,
createTimeMs?: number
priority: number,
createTimeMs: number
): TaskItemImpl {
const extraData = {
client_id: 'c1',
...(typeof createTimeMs === 'number' ? { create_time: createTimeMs } : {})
}
return new TaskItemImpl('Pending', [index, id, {}, extraData, []])
return makeTask(id, priority, {
status: 'pending',
create_time: createTimeMs,
update_time: createTimeMs
})
}
function makeRunningTask(
id: string,
index: number,
createTimeMs?: number
priority: number,
createTimeMs: number
): TaskItemImpl {
const extraData = {
client_id: 'c1',
...(typeof createTimeMs === 'number' ? { create_time: createTimeMs } : {})
}
return new TaskItemImpl('Running', [index, id, {}, extraData, []])
return makeTask(id, priority, {
status: 'in_progress',
create_time: createTimeMs,
update_time: createTimeMs
})
}
function makeRunningTaskWithStart(
id: string,
index: number,
priority: number,
startedSecondsAgo: number
): TaskItemImpl {
const start = Date.now() - startedSecondsAgo * 1000
const status: TaskStatus = {
status_str: 'success',
completed: false,
messages: [['execution_start', { prompt_id: id, timestamp: start } as any]]
}
return new TaskItemImpl(
'Running',
[index, id, {}, { client_id: 'c1', create_time: start - 5000 }, []],
status
)
return makeTask(id, priority, {
status: 'in_progress',
create_time: start - 5000,
update_time: start
})
}
function makeHistoryTask(
id: string,
index: number,
durationSec: number,
priority: number,
_durationSec: number,
ok: boolean,
errorMessage?: string
): TaskItemImpl {
const start = Date.now() - durationSec * 1000 - 1000
const end = start + durationSec * 1000
const messages: TaskStatus['messages'] = ok
? [
['execution_start', { prompt_id: id, timestamp: start } as any],
['execution_success', { prompt_id: id, timestamp: end } as any]
]
: [
['execution_start', { prompt_id: id, timestamp: start } as any],
[
'execution_error',
{
prompt_id: id,
timestamp: end,
node_id: '1',
node_type: 'Node',
executed: [],
exception_message:
errorMessage || 'Demo error: Node failed during execution',
exception_type: 'RuntimeError',
traceback: [],
current_inputs: {},
current_outputs: {}
} as any
]
]
const status: TaskStatus = {
status_str: ok ? 'success' : 'error',
completed: true,
messages
}
return new TaskItemImpl(
'History',
[index, id, {}, { client_id: 'c1', create_time: start }, []],
status
)
const now = Date.now()
return makeTask(id, priority, {
status: ok ? 'completed' : 'failed',
create_time: now,
update_time: now,
error_message: errorMessage
})
}
export const Queued: Story = {
@@ -140,8 +121,12 @@ export const Queued: Story = {
makePendingTask(jobId, queueIndex, Date.now() - 90_000)
]
// Add some other pending jobs to give context
queue.pendingTasks.push(makePendingTask('job-older-1', 100))
queue.pendingTasks.push(makePendingTask('job-older-2', 101))
queue.pendingTasks.push(
makePendingTask('job-older-1', 100, Date.now() - 60_000)
)
queue.pendingTasks.push(
makePendingTask('job-older-2', 101, Date.now() - 30_000)
)
// Queued at (in metadata on prompt[4])

View File

@@ -1,13 +1,11 @@
import { computed } from 'vue'
import type { ComputedRef } from 'vue'
import type { ExecutionErrorWsMessage } from '@/schemas/apiSchema'
import type { TaskItemImpl } from '@/stores/queueStore'
type CopyHandler = (value: string) => void | Promise<void>
export type JobErrorDialogService = {
showExecutionErrorDialog: (error: ExecutionErrorWsMessage) => void
showErrorDialog: (
error: Error,
options?: {
@@ -17,28 +15,16 @@ export type JobErrorDialogService = {
) => void
}
type JobExecutionError = {
detail?: ExecutionErrorWsMessage
message: string
}
export const extractExecutionError = (
task: TaskItemImpl | null
): JobExecutionError | null => {
const status = (task as TaskItemImpl | null)?.status
const messages = (status as { messages?: unknown[] } | undefined)?.messages
if (!Array.isArray(messages) || !messages.length) return null
const record = messages.find((entry: unknown) => {
return Array.isArray(entry) && entry[0] === 'execution_error'
}) as [string, ExecutionErrorWsMessage?] | undefined
if (!record) return null
const detail = record[1]
const message = String(detail?.exception_message ?? '')
return {
detail,
message
}
}
/**
* Extracts error message from a task.
* Returns the simple error_message string from the jobs API.
*
* Note: Detailed execution errors (with traceback, node info, etc.) are only
* available via WebSocket during live execution. Historical job errors only
* have the simple error_message string.
*/
export const extractErrorMessage = (task: TaskItemImpl | null): string | null =>
task?.errorMessage ?? null
type UseJobErrorReportingOptions = {
taskForJob: ComputedRef<TaskItemImpl | null>
@@ -51,10 +37,9 @@ export const useJobErrorReporting = ({
copyToClipboard,
dialog
}: UseJobErrorReportingOptions) => {
const errorMessageValue = computed(() => {
const error = extractExecutionError(taskForJob.value)
return error?.message ?? ''
})
const errorMessageValue = computed(
() => extractErrorMessage(taskForJob.value) ?? ''
)
const copyErrorMessage = () => {
if (errorMessageValue.value) {
@@ -63,11 +48,6 @@ export const useJobErrorReporting = ({
}
const reportJobError = () => {
const error = extractExecutionError(taskForJob.value)
if (error?.detail) {
dialog.showExecutionErrorDialog(error.detail)
return
}
if (errorMessageValue.value) {
dialog.showErrorDialog(new Error(errorMessageValue.value), {
reportType: 'queueJobError'

View File

@@ -6,14 +6,12 @@ import { useCopyToClipboard } from '@/composables/useCopyToClipboard'
import { st, t } from '@/i18n'
import { mapTaskOutputToAssetItem } from '@/platform/assets/composables/media/assetMappers'
import { useMediaAssetActions } from '@/platform/assets/composables/useMediaAssetActions'
import { extractWorkflow, fetchJobDetail } from '@/platform/remote/comfyui/jobs'
import { useSettingStore } from '@/platform/settings/settingStore'
import type { ComfyWorkflowJSON } from '@/platform/workflow/validation/schemas/workflowSchema'
import { useWorkflowService } from '@/platform/workflow/core/services/workflowService'
import { useWorkflowStore } from '@/platform/workflow/management/stores/workflowStore'
import type {
ExecutionErrorWsMessage,
ResultItem,
ResultItemType
} from '@/schemas/apiSchema'
import type { ResultItem, ResultItemType } from '@/schemas/apiSchema'
import { api } from '@/scripts/api'
import { downloadBlob } from '@/scripts/utils'
import { useDialogService } from '@/services/dialogService'
@@ -52,10 +50,20 @@ export function useJobMenu(
const nodeDefStore = useNodeDefStore()
const mediaAssetActions = useMediaAssetActions()
/**
* Fetches workflow data for a job, lazy loading from API if needed.
*/
const getJobWorkflow = async (
jobId: string
): Promise<ComfyWorkflowJSON | undefined> => {
const jobDetail = await fetchJobDetail((url) => api.fetchApi(url), jobId)
return extractWorkflow(jobDetail)
}
const openJobWorkflow = async () => {
const item = currentMenuItem()
if (!item) return
const data = item.taskRef?.workflow
const data = await getJobWorkflow(item.id)
if (!data) return
const filename = `Job ${item.id}.json`
const temp = workflowStore.createTemporary(filename, data)
@@ -81,23 +89,18 @@ export function useJobMenu(
const copyErrorMessage = async () => {
const item = currentMenuItem()
if (!item) return
const msgs = item.taskRef?.status?.messages as any[] | undefined
const err = msgs?.find((m: any) => m?.[0] === 'execution_error')?.[1] as
| ExecutionErrorWsMessage
| undefined
const message = err?.exception_message
if (message) await copyToClipboard(String(message))
const message = item?.taskRef?.errorMessage
if (message) await copyToClipboard(message)
}
const reportError = () => {
const item = currentMenuItem()
if (!item) return
const msgs = item.taskRef?.status?.messages as any[] | undefined
const err = msgs?.find((m: any) => m?.[0] === 'execution_error')?.[1] as
| ExecutionErrorWsMessage
| undefined
if (err) useDialogService().showExecutionErrorDialog(err)
const message = item?.taskRef?.errorMessage
if (message) {
useDialogService().showErrorDialog(new Error(message), {
reportType: 'queueJobError'
})
}
}
// This is very magical only because it matches the respective backend implementation
@@ -167,7 +170,7 @@ export function useJobMenu(
const exportJobWorkflow = async () => {
const item = currentMenuItem()
if (!item) return
const data = item.taskRef?.workflow
const data = await getJobWorkflow(item.id)
if (!data) return
const settingStore = useSettingStore()

View File

@@ -3,25 +3,96 @@ import { ref, shallowRef } from 'vue'
import type { JobListItem } from '@/composables/queue/useJobList'
import type { ResultItemImpl } from '@/stores/queueStore'
type FetchApi = (url: string) => Promise<Response>
/**
* Minimal interface for tasks used by the result gallery.
* This allows the gallery to work with any object that provides these properties,
* without coupling to the full TaskItemImpl class.
*/
interface GalleryTask {
readonly promptId: string
readonly outputsCount?: number
readonly flatOutputs: readonly ResultItemImpl[]
readonly previewOutput?: ResultItemImpl
loadFullOutputs(fetchApi: FetchApi): Promise<GalleryTask>
}
const getPreviewableOutputs = (outputs?: readonly ResultItemImpl[]) =>
outputs?.filter((o) => o.supportsPreview) ?? []
const findActiveIndex = (items: ResultItemImpl[], url?: string): number => {
if (!url) return 0
const idx = items.findIndex((o) => o.url === url)
return idx >= 0 ? idx : 0
}
/**
* Manages result gallery state and activation for queue items.
*/
export function useResultGallery(getFilteredTasks: () => any[]) {
export function useResultGallery(
getFilteredTasks: () => GalleryTask[],
fetchApi?: FetchApi
) {
const galleryActiveIndex = ref(-1)
const galleryItems = shallowRef<ResultItemImpl[]>([])
const onViewItem = (item: JobListItem) => {
const items: ResultItemImpl[] = getFilteredTasks().flatMap((t: any) => {
const preview = t.previewOutput
return preview && preview.supportsPreview ? [preview] : []
})
const loadedTasksCache = new Map<string, GalleryTask>()
let currentRequestId = 0
if (!items.length) return
const getOutputsForTask = async (
task: GalleryTask
): Promise<ResultItemImpl[]> => {
const outputsCount = task.outputsCount ?? 0
const needsLazyLoad = outputsCount > 1 && fetchApi
galleryItems.value = items
const activeUrl: string | undefined = item.taskRef?.previewOutput?.url
const idx = activeUrl ? items.findIndex((o) => o.url === activeUrl) : 0
galleryActiveIndex.value = idx >= 0 ? idx : 0
if (!needsLazyLoad) {
return getPreviewableOutputs(task.flatOutputs)
}
const cacheKey = String(task.promptId)
const cached = loadedTasksCache.get(cacheKey)
if (cached) {
return getPreviewableOutputs(cached.flatOutputs)
}
const loadedTask = await task.loadFullOutputs(fetchApi)
loadedTasksCache.set(cacheKey, loadedTask)
return getPreviewableOutputs(loadedTask.flatOutputs)
}
const onViewItem = async (item: JobListItem) => {
const tasks = getFilteredTasks()
if (!tasks.length) return
const requestId = ++currentRequestId
const targetTask = item.taskRef as GalleryTask | undefined
let targetOutputs: ResultItemImpl[] = []
if (targetTask) {
targetOutputs = await getOutputsForTask(targetTask)
}
// Abort if a newer request was made while loading
if (requestId !== currentRequestId) return
const activeUrl = item.taskRef?.previewOutput?.url
if (targetOutputs.length > 0) {
galleryItems.value = targetOutputs
galleryActiveIndex.value = findActiveIndex(targetOutputs, activeUrl)
} else {
const items = tasks.flatMap((t) => {
const preview = t.previewOutput
return preview?.supportsPreview ? [preview] : []
})
if (!items.length) return
galleryItems.value = items
galleryActiveIndex.value = findActiveIndex(items, activeUrl)
}
}
return {

View File

@@ -1,74 +0,0 @@
/**
* @fileoverview Adapter to convert V2 history format to V1 format
* @module platform/remote/comfyui/history/adapters/v2ToV1Adapter
*/
import type { HistoryTaskItem, TaskPrompt } from '../types/historyV1Types'
import type {
HistoryResponseV2,
RawHistoryItemV2,
TaskOutput,
TaskPromptV2
} from '../types/historyV2Types'
function mapPromptV2toV1(
promptV2: TaskPromptV2,
outputs: TaskOutput,
syntheticPriority: number,
createTime?: number
): TaskPrompt {
const extraData = {
...(promptV2.extra_data ?? {}),
...(typeof createTime === 'number' ? { create_time: createTime } : {})
}
return [
syntheticPriority,
promptV2.prompt_id,
{},
extraData,
Object.keys(outputs)
]
}
function getExecutionSuccessTimestamp(item: RawHistoryItemV2): number {
return (
item.status?.messages?.find((m) => m[0] === 'execution_success')?.[1]
?.timestamp ?? 0
)
}
export function mapHistoryV2toHistory(
historyV2Response: HistoryResponseV2
): HistoryTaskItem[] {
const { history } = historyV2Response
// Sort by execution_success timestamp, descending (newest first)
history.sort((a, b) => {
return getExecutionSuccessTimestamp(b) - getExecutionSuccessTimestamp(a)
})
// Count items with valid timestamps for synthetic priority calculation
const countWithTimestamps = history.filter(
(item) => getExecutionSuccessTimestamp(item) > 0
).length
return history.map((item, index): HistoryTaskItem => {
const { prompt, outputs, status, meta } = item
const timestamp = getExecutionSuccessTimestamp(item)
// Items with timestamps get priority based on sorted position (highest first)
const syntheticPriority = timestamp > 0 ? countWithTimestamps - index : 0
return {
taskType: 'History' as const,
prompt: mapPromptV2toV1(
prompt,
outputs,
syntheticPriority,
item.create_time
),
status,
outputs,
meta
}
})
}

View File

@@ -1,51 +0,0 @@
/**
* @fileoverview V1 History Fetcher - Desktop/localhost API
* @module platform/remote/comfyui/history/fetchers/fetchHistoryV1
*
* Fetches history directly from V1 API endpoint.
* Used by desktop and localhost distributions.
*/
import type {
HistoryTaskItem,
HistoryV1Response
} from '../types/historyV1Types'
/**
* Fetches history from V1 API endpoint
* @param api - API instance with fetchApi method
* @param maxItems - Maximum number of history items to fetch
* @param offset - Offset for pagination (must be non-negative integer)
* @returns Promise resolving to V1 history response
* @throws Error if offset is invalid (negative or non-integer)
*/
export async function fetchHistoryV1(
fetchApi: (url: string) => Promise<Response>,
maxItems: number = 200,
offset?: number
): Promise<HistoryV1Response> {
// Validate offset parameter
if (offset !== undefined && (offset < 0 || !Number.isInteger(offset))) {
throw new Error(
`Invalid offset parameter: ${offset}. Must be a non-negative integer.`
)
}
const params = new URLSearchParams({ max_items: maxItems.toString() })
if (offset !== undefined) {
params.set('offset', offset.toString())
}
const url = `/history?${params.toString()}`
const res = await fetchApi(url)
const json: Record<
string,
Omit<HistoryTaskItem, 'taskType'>
> = await res.json()
return {
History: Object.values(json).map((item) => ({
...item,
taskType: 'History'
}))
}
}

View File

@@ -1,42 +0,0 @@
/**
* @fileoverview V2 History Fetcher - Cloud API with adapter
* @module platform/remote/comfyui/history/fetchers/fetchHistoryV2
*
* Fetches history from V2 API endpoint and converts to V1 format.
* Used exclusively by cloud distribution.
*/
import { mapHistoryV2toHistory } from '../adapters/v2ToV1Adapter'
import type { HistoryV1Response } from '../types/historyV1Types'
import type { HistoryResponseV2 } from '../types/historyV2Types'
/**
* Fetches history from V2 API endpoint and adapts to V1 format
* @param fetchApi - API instance with fetchApi method
* @param maxItems - Maximum number of history items to fetch
* @param offset - Offset for pagination (must be non-negative integer)
* @returns Promise resolving to V1 history response (adapted from V2)
* @throws Error if offset is invalid (negative or non-integer)
*/
export async function fetchHistoryV2(
fetchApi: (url: string) => Promise<Response>,
maxItems: number = 200,
offset?: number
): Promise<HistoryV1Response> {
// Validate offset parameter
if (offset !== undefined && (offset < 0 || !Number.isInteger(offset))) {
throw new Error(
`Invalid offset parameter: ${offset}. Must be a non-negative integer.`
)
}
const params = new URLSearchParams({ max_items: maxItems.toString() })
if (offset !== undefined) {
params.set('offset', offset.toString())
}
const url = `/history_v2?${params.toString()}`
const res = await fetchApi(url)
const rawData: HistoryResponseV2 = await res.json()
const adaptedHistory = mapHistoryV2toHistory(rawData)
return { History: adaptedHistory }
}

View File

@@ -1,29 +0,0 @@
/**
* @fileoverview History API module - Distribution-aware exports
* @module platform/remote/comfyui/history
*
* This module provides a unified history fetching interface that automatically
* uses the correct implementation based on build-time distribution constant.
*
* - Cloud builds: Uses V2 API with adapter (tree-shakes V1 fetcher)
* - Desktop/localhost builds: Uses V1 API directly (tree-shakes V2 fetcher + adapter)
*
* The rest of the application only needs to import from this module and use
* V1 types - all distribution-specific details are encapsulated here.
*/
import { isCloud } from '@/platform/distribution/types'
import { fetchHistoryV1 } from './fetchers/fetchHistoryV1'
import { fetchHistoryV2 } from './fetchers/fetchHistoryV2'
/**
* Fetches history using the appropriate API for the current distribution.
* Build-time constant enables dead code elimination - only one implementation
* will be included in the final bundle.
*/
export const fetchHistory = isCloud ? fetchHistoryV2 : fetchHistoryV1
/**
* Export only V1 types publicly - consumers don't need to know about V2
*/
export type * from './types'

View File

@@ -1,122 +1,40 @@
/**
* @fileoverview History reconciliation for V1 and V2 APIs
* @fileoverview Job list reconciliation
* @module platform/remote/comfyui/history/reconciliation
*
* Returns list of items that should be displayed, sorted by queueIndex (newest first).
* Caller is responsible for mapping to their own class instances.
*
* V1: QueueIndex-based filtering for stable monotonic indices
* V2: PromptId-based merging for synthetic priorities (V2 assigns synthetic
* priorities after timestamp sorting, so new items may have lower priority
* than existing items)
* Reconciles server jobs with client-cached jobs for efficient updates.
* Uses job ID-based merging with create_time for sort order.
*/
import { isCloud } from '@/platform/distribution/types'
import type { TaskItem } from '@/schemas/apiSchema'
import type { JobListItem } from '../jobs/types/jobTypes'
/**
* V1 reconciliation: QueueIndex-based filtering works because V1 has stable,
* monotonically increasing queue indices.
* Reconciles server jobs with client-cached jobs.
* Uses job ID-based merging - jobs are identified by their unique ID,
* and create_time determines sort order.
*
* Sort order: Sorts serverHistory by queueIndex descending (newest first) to ensure
* consistent ordering. JavaScript .filter() maintains iteration order, so filtered
* results remain sorted. clientHistory is assumed already sorted from previous update.
*
* @returns All items to display, sorted by queueIndex descending (newest first)
*/
function reconcileHistoryV1(
serverHistory: TaskItem[],
clientHistory: TaskItem[],
maxItems: number,
lastKnownQueueIndex: number | undefined
): TaskItem[] {
const sortedServerHistory = serverHistory.sort(
(a, b) => b.prompt[0] - a.prompt[0]
)
const serverPromptIds = new Set(
sortedServerHistory.map((item) => item.prompt[1])
)
// If undefined, treat as initial sync (all items are new)
const itemsAddedSinceLastSync =
lastKnownQueueIndex === undefined
? sortedServerHistory
: sortedServerHistory.filter(
(item) => item.prompt[0] > lastKnownQueueIndex
)
const clientItemsStillOnServer = clientHistory.filter((item) =>
serverPromptIds.has(item.prompt[1])
)
// Merge new and reused items, sort by queueIndex descending, limit to maxItems
return [...itemsAddedSinceLastSync, ...clientItemsStillOnServer]
.sort((a, b) => b.prompt[0] - a.prompt[0])
.slice(0, maxItems)
}
/**
* V2 reconciliation: PromptId-based merging because V2 assigns synthetic
* priorities after sorting by timestamp.
*
* Sort order: Sorts serverHistory by queueIndex descending (newest first) to ensure
* consistent ordering. JavaScript .filter() maintains iteration order, so filtered
* results remain sorted. clientHistory is assumed already sorted from previous update.
*
* @returns All items to display, sorted by queueIndex descending (newest first)
*/
function reconcileHistoryV2(
serverHistory: TaskItem[],
clientHistory: TaskItem[],
maxItems: number
): TaskItem[] {
const sortedServerHistory = serverHistory.sort(
(a, b) => b.prompt[0] - a.prompt[0]
)
const serverPromptIds = new Set(
sortedServerHistory.map((item) => item.prompt[1])
)
const clientPromptIds = new Set(clientHistory.map((item) => item.prompt[1]))
const newItems = sortedServerHistory.filter(
(item) => !clientPromptIds.has(item.prompt[1])
)
const clientItemsStillOnServer = clientHistory.filter((item) =>
serverPromptIds.has(item.prompt[1])
)
// Merge new and reused items, sort by queueIndex descending, limit to maxItems
return [...newItems, ...clientItemsStillOnServer]
.sort((a, b) => b.prompt[0] - a.prompt[0])
.slice(0, maxItems)
}
/**
* Reconciles server history with client history.
* Automatically uses V1 (queueIndex-based) or V2 (promptId-based) algorithm based on
* distribution type.
*
* @param serverHistory - Server's current history items
* @param clientHistory - Client's existing history items
* @param serverJobs - Server's current job items (pre-sorted by API)
* @param clientJobs - Client's cached job items
* @param maxItems - Maximum number of items to return
* @param lastKnownQueueIndex - Last queue index seen (V1 only, optional for V2)
* @returns All items that should be displayed, sorted by queueIndex descending
* @returns All items that should be displayed, sorted by create_time descending
*/
export function reconcileHistory(
serverHistory: TaskItem[],
clientHistory: TaskItem[],
maxItems: number,
lastKnownQueueIndex?: number
): TaskItem[] {
if (isCloud) {
return reconcileHistoryV2(serverHistory, clientHistory, maxItems)
}
return reconcileHistoryV1(
serverHistory,
clientHistory,
maxItems,
lastKnownQueueIndex
export function reconcileJobs(
serverJobs: JobListItem[],
clientJobs: JobListItem[],
maxItems: number
): JobListItem[] {
const serverIds = new Set(serverJobs.map((item) => item.id))
const clientIds = new Set(clientJobs.map((item) => item.id))
// Items from server not yet in client cache
const newItems = serverJobs.filter((item) => !clientIds.has(item.id))
// Retain client items that still exist on server
const clientItemsStillOnServer = clientJobs.filter((item) =>
serverIds.has(item.id)
)
// Merge and sort (needed because we're combining two sources)
return [...newItems, ...clientItemsStillOnServer]
.sort((a, b) => b.create_time - a.create_time)
.slice(0, maxItems)
}
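
For illustration, a hypothetical call to the reconcileJobs function above, showing the intended merge semantics: dedupe by id, keep only cached jobs still present on the server, sort by create_time descending, cap at maxItems. The `job` helper and its values are invented stand-ins, not real API payloads.

```ts
import { reconcileJobs } from '@/platform/remote/comfyui/history/reconciliation'
import type { JobListItem } from '@/platform/remote/comfyui/jobs'

// Hypothetical helper: minimal completed jobs (real items also carry
// preview_output, outputs_count, etc.).
const job = (id: string, create_time: number): JobListItem => ({
  id,
  status: 'completed',
  create_time,
  priority: 0
})

const serverJobs = [job('job-3', 300), job('job-2', 200)]
const clientJobs = [job('job-2', 200), job('job-1', 100)] // job-1 no longer on server

// job-3 is new, job-2 is reused from the client cache, job-1 is dropped;
// the result is sorted newest-first and capped at maxItems.
const visible = reconcileJobs(serverJobs, clientJobs, 50)
// => [job-3, job-2]
```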

View File

@@ -1,15 +0,0 @@
/**
* @fileoverview History V1 types - Public interface used throughout the app
* @module platform/remote/comfyui/history/types/historyV1Types
*
* These types represent the V1 history format that the application expects.
* Both desktop (direct V1 API) and cloud (V2 API + adapter) return data in this format.
*/
import type { HistoryTaskItem, TaskPrompt } from '@/schemas/apiSchema'
export interface HistoryV1Response {
History: HistoryTaskItem[]
}
export type { HistoryTaskItem, TaskPrompt }

View File

@@ -1,46 +0,0 @@
/**
* @fileoverview History V2 types and schemas - Internal cloud API format
* @module platform/remote/comfyui/history/types/historyV2Types
*
* These types and schemas represent the V2 history format returned by the cloud API.
* They are only used internally and are converted to V1 format via adapter.
*
* IMPORTANT: These types should NOT be used outside this history module.
*/
import { z } from 'zod'
import {
zExtraData,
zPromptId,
zQueueIndex,
zStatus,
zTaskMeta,
zTaskOutput
} from '@/schemas/apiSchema'
const zTaskPromptV2 = z.object({
priority: zQueueIndex,
prompt_id: zPromptId,
extra_data: zExtraData
})
const zRawHistoryItemV2 = z.object({
prompt_id: zPromptId,
prompt: zTaskPromptV2,
status: zStatus.optional(),
outputs: zTaskOutput,
meta: zTaskMeta.optional(),
create_time: z.number().int().optional()
})
const zHistoryResponseV2 = z.object({
history: z.array(zRawHistoryItemV2)
})
export type TaskPromptV2 = z.infer<typeof zTaskPromptV2>
export type RawHistoryItemV2 = z.infer<typeof zRawHistoryItemV2>
export type HistoryResponseV2 = z.infer<typeof zHistoryResponseV2>
export type TaskOutput = z.infer<typeof zTaskOutput>
export { zRawHistoryItemV2 }

View File

@@ -1,9 +0,0 @@
/**
* @fileoverview Public history types export
* @module platform/remote/comfyui/history/types
*
* Only V1 types are exported publicly - the rest of the app
* should never need to know about V2 types or implementation details.
*/
export type * from './historyV1Types'

View File

@@ -0,0 +1,148 @@
/**
* @fileoverview Jobs API Fetchers
* @module platform/remote/comfyui/jobs/fetchers/fetchJobs
*
* Unified jobs API fetcher for history, queue, and job details.
* All distributions use the /jobs endpoint.
*/
import type { ComfyWorkflowJSON } from '@/platform/workflow/validation/schemas/workflowSchema'
import type { PromptId } from '@/schemas/apiSchema'
import type {
JobDetail,
JobListItem,
JobStatus,
RawJobListItem
} from '../types/jobTypes'
import { zJobDetail, zJobsListResponse } from '../types/jobTypes'
// ============================================================================
// Job List Fetchers
// ============================================================================
interface FetchJobsRawResult {
jobs: RawJobListItem[]
total: number
offset: number
}
/**
* Fetches raw jobs from /jobs endpoint
* @internal
*/
async function fetchJobsRaw(
fetchApi: (url: string) => Promise<Response>,
statuses: JobStatus[],
maxItems: number = 200,
offset: number = 0
): Promise<FetchJobsRawResult> {
const statusParam = statuses.join(',')
const url = `/jobs?status=${statusParam}&limit=${maxItems}&offset=${offset}&sort_by=create_time&order=desc`
try {
const res = await fetchApi(url)
if (!res.ok) {
console.error(`[Jobs API] Failed to fetch jobs: ${res.status}`)
return { jobs: [], total: 0, offset: 0 }
}
const data = zJobsListResponse.parse(await res.json())
return { jobs: data.jobs, total: data.total, offset }
} catch (error) {
console.error('[Jobs API] Error fetching jobs:', error)
return { jobs: [], total: 0, offset: 0 }
}
}
/**
* Assigns synthetic priority to jobs based on sorted position.
* Priority = total - offset - index (highest = newest)
* Only assigns if job doesn't already have a server-provided priority.
*/
function assignSyntheticPriority(
jobs: RawJobListItem[],
total: number,
offset: number
): JobListItem[] {
return jobs.map((job, index) => ({
...job,
priority: job.priority ?? total - offset - index
}))
}
/**
* Fetches history (completed jobs)
* Returns jobs sorted by create_time descending (newest first) via API
* Assigns synthetic priority based on position if not provided by server.
*/
export async function fetchHistory(
fetchApi: (url: string) => Promise<Response>,
maxItems: number = 200,
offset: number = 0
): Promise<JobListItem[]> {
const {
jobs,
total,
offset: responseOffset
} = await fetchJobsRaw(fetchApi, ['completed'], maxItems, offset)
return assignSyntheticPriority(jobs, total, responseOffset)
}
/**
* Fetches queue (in_progress + pending jobs)
* Assigns synthetic priority based on position if not provided by server.
*/
export async function fetchQueue(
fetchApi: (url: string) => Promise<Response>
): Promise<{ Running: JobListItem[]; Pending: JobListItem[] }> {
const { jobs, total, offset } = await fetchJobsRaw(
fetchApi,
['in_progress', 'pending'],
200,
0
)
const jobsWithPriority = assignSyntheticPriority(jobs, total, offset)
return {
Running: jobsWithPriority.filter((j) => j.status === 'in_progress'),
Pending: jobsWithPriority.filter((j) => j.status === 'pending')
}
}
// ============================================================================
// Job Detail Fetcher
// ============================================================================
/**
* Fetches full job details from /jobs/{job_id}
*/
export async function fetchJobDetail(
fetchApi: (url: string) => Promise<Response>,
promptId: PromptId
): Promise<JobDetail | undefined> {
try {
const res = await fetchApi(`/jobs/${promptId}`)
if (!res.ok) {
console.warn(`Job not found for prompt ${promptId}`)
return undefined
}
return zJobDetail.parse(await res.json())
} catch (error) {
console.error(`Failed to fetch job detail for prompt ${promptId}:`, error)
return undefined
}
}
/**
* Extracts workflow from job detail response
*/
export function extractWorkflow(
job: JobDetail | undefined
): ComfyWorkflowJSON | undefined {
// Cast is safe - workflow will be validated by loadGraphData -> validateComfyWorkflow
// Path: extra_data.extra_pnginfo.workflow (at top level of job detail)
return job?.extra_data?.extra_pnginfo?.workflow as
| ComfyWorkflowJSON
| undefined
}

View File

@@ -0,0 +1,14 @@
/**
* @fileoverview Jobs API module
* @module platform/remote/comfyui/jobs
*
* Unified jobs API for history, queue, and job details.
*/
export {
extractWorkflow,
fetchHistory,
fetchJobDetail,
fetchQueue
} from './fetchers/fetchJobs'
export type { JobDetail, JobListItem } from './types/jobTypes'

View File

@@ -0,0 +1,127 @@
/**
* @fileoverview Jobs API types - Backend job API format
* @module platform/remote/comfyui/jobs/types/jobTypes
*
* These types represent the jobs API format returned by the backend.
* Jobs API provides a memory-optimized alternative to history API.
*/
import { z } from 'zod'
import { zNodeId } from '@/platform/workflow/validation/schemas/workflowSchema'
import { resultItemType, zTaskOutput } from '@/schemas/apiSchema'
// ============================================================================
// Zod Schemas
// ============================================================================
const zJobStatus = z.enum([
'pending',
'in_progress',
'completed',
'failed',
'cancelled'
])
const zPreviewOutput = z
.object({
filename: z.string(),
subfolder: z.string(),
type: resultItemType
})
.passthrough() // Allow extra fields like nodeId, mediaType
/**
* Raw job from API - uses passthrough to allow extra fields
*/
const zRawJobListItem = z
.object({
id: z.string(),
status: zJobStatus,
create_time: z.number(),
preview_output: zPreviewOutput.nullable().optional(),
outputs_count: z.number().optional(),
error_message: z.string().nullable().optional(),
priority: z.number().optional()
})
.passthrough() // Allow extra fields like execution_time, workflow_id, update_time
/**
* Job list item with priority always set (either from server or synthetic)
*/
const zJobListItem = zRawJobListItem.extend({
priority: z.number() // Always set: server-provided or synthetic (total - offset - index)
})
/**
* Extra data structure containing workflow
* Note: workflow is z.unknown() because it goes through validateComfyWorkflow separately
*/
const zExtraData = z
.object({
extra_pnginfo: z
.object({
workflow: z.unknown()
})
.optional()
})
.passthrough()
/**
* Execution status information
*/
const zExecutionStatus = z
.object({
completed: z.boolean(),
messages: z.array(z.tuple([z.string(), z.unknown()])),
status_str: z.string()
})
.passthrough()
/**
* Execution metadata for a node
*/
const zExecutionNodeMeta = z
.object({
node_id: zNodeId,
display_node: zNodeId,
parent_node: zNodeId.nullable(),
real_node_id: zNodeId
})
.passthrough()
/**
* Job detail - returned by GET /api/jobs/{job_id} (detail endpoint)
* Includes full workflow and outputs for re-execution and downloads
*
* Note: workflow is at extra_data.extra_pnginfo.workflow (not in a separate workflow object)
*/
export const zJobDetail = zRawJobListItem
.extend({
extra_data: zExtraData.optional(),
prompt: z.record(z.string(), z.unknown()).optional(),
outputs: zTaskOutput.optional(),
execution_status: zExecutionStatus.optional(),
execution_meta: z.record(z.string(), zExecutionNodeMeta).optional()
})
.passthrough()
/**
* Jobs list response structure - raw from API (before synthetic priority)
*/
export const zJobsListResponse = z
.object({
jobs: z.array(zRawJobListItem),
total: z.number()
})
.passthrough() // Allow extra fields like has_more, offset, limit
// ============================================================================
// TypeScript Types (derived from Zod schemas)
// ============================================================================
export type JobStatus = z.infer<typeof zJobStatus>
export type RawJobListItem = z.infer<typeof zRawJobListItem>
export type JobListItem = z.infer<typeof zJobListItem>
export type JobDetail = z.infer<typeof zJobDetail>
export type ExecutionStatus = z.infer<typeof zExecutionStatus>
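
To make the per-item size claim in the commit message concrete, a hypothetical list-endpoint payload parsed with the zJobsListResponse schema above; all field values are invented for illustration.

```ts
import { zJobsListResponse } from '@/platform/remote/comfyui/jobs/types/jobTypes'

// Hypothetical /jobs?status=completed response body - roughly 100-200 bytes
// per job, since the list endpoint returns preview metadata, not full outputs.
const sample = {
  jobs: [
    {
      id: 'job-abc123',
      status: 'completed',
      create_time: 1731715519000,
      preview_output: { filename: 'ComfyUI_0001.png', subfolder: '', type: 'output' },
      outputs_count: 3
    }
  ],
  total: 1
}

const parsed = zJobsListResponse.parse(sample) // throws if the shape is invalid
```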

View File

@@ -1,21 +0,0 @@
import type { ComfyWorkflowJSON } from '@/platform/workflow/validation/schemas/workflowSchema'
import type { PromptId } from '@/schemas/apiSchema'
export async function getWorkflowFromHistory(
fetchApi: (url: string) => Promise<Response>,
promptId: PromptId
): Promise<ComfyWorkflowJSON | undefined> {
try {
const res = await fetchApi(`/history_v2/${promptId}`)
const json = await res.json()
const historyItem = json[promptId]
if (!historyItem) return undefined
const workflow = historyItem.prompt?.extra_data?.extra_pnginfo?.workflow
return workflow ?? undefined
} catch (error) {
console.error(`Failed to fetch workflow for prompt ${promptId}:`, error)
return undefined
}
}

View File

@@ -1,10 +0,0 @@
/**
* Cloud: Fetches workflow by prompt_id. Desktop: Returns undefined (workflows already in history).
*/
import { isCloud } from '@/platform/distribution/types'
import { getWorkflowFromHistory as cloudImpl } from './getWorkflowFromHistory'
export const getWorkflowFromHistory = isCloud
? cloudImpl
: async () => undefined

View File

@@ -1,18 +1,14 @@
import { z } from 'zod'
import { LinkMarkerShape } from '@/lib/litegraph/src/litegraph'
import {
zComfyWorkflow,
zNodeId
} from '@/platform/workflow/validation/schemas/workflowSchema'
import { zNodeId } from '@/platform/workflow/validation/schemas/workflowSchema'
import { colorPalettesSchema } from '@/schemas/colorPaletteSchema'
import { zKeybinding } from '@/schemas/keyBindingSchema'
import { NodeBadgeMode } from '@/types/nodeSource'
import { LinkReleaseTriggerAction } from '@/types/searchBoxTypes'
const zNodeType = z.string()
export const zQueueIndex = z.number()
export const zPromptId = z.string()
const zPromptId = z.string()
export type PromptId = z.infer<typeof zPromptId>
export const resultItemType = z.enum(['input', 'output', 'temp'])
export type ResultItemType = z.infer<typeof resultItemType>
@@ -155,136 +151,9 @@ export type FeatureFlagsWsMessage = z.infer<typeof zFeatureFlagsWsMessage>
export type NotificationWsMessage = z.infer<typeof zNotificationWsMessage>
const zPromptInputItem = z.object({
inputs: z.record(z.string(), z.any()),
class_type: zNodeType
})
const zPromptInputs = z.record(zPromptInputItem)
const zExtraPngInfo = z
.object({
workflow: zComfyWorkflow
})
.passthrough()
export const zExtraData = z
.object({
/** extra_pnginfo can be missing if backend execution gets a validation error. */
extra_pnginfo: zExtraPngInfo.optional(),
client_id: z.string().optional(),
// Cloud/Adapters: creation time in milliseconds when available
create_time: z.number().int().optional()
})
// Allow backend/adapters/extensions to add arbitrary metadata
.passthrough()
const zOutputsToExecute = z.array(zNodeId)
const zExecutionStartMessage = z.tuple([
z.literal('execution_start'),
zExecutionStartWsMessage
])
const zExecutionSuccessMessage = z.tuple([
z.literal('execution_success'),
zExecutionSuccessWsMessage
])
const zExecutionCachedMessage = z.tuple([
z.literal('execution_cached'),
zExecutionCachedWsMessage
])
const zExecutionInterruptedMessage = z.tuple([
z.literal('execution_interrupted'),
zExecutionInterruptedWsMessage
])
const zExecutionErrorMessage = z.tuple([
z.literal('execution_error'),
zExecutionErrorWsMessage
])
const zStatusMessage = z.union([
zExecutionStartMessage,
zExecutionSuccessMessage,
zExecutionCachedMessage,
zExecutionInterruptedMessage,
zExecutionErrorMessage
])
export const zStatus = z.object({
status_str: z.enum(['success', 'error']),
completed: z.boolean(),
messages: z.array(zStatusMessage)
})
const zTaskPrompt = z.tuple([
zQueueIndex,
zPromptId,
zPromptInputs,
zExtraData,
zOutputsToExecute
])
const zRunningTaskItem = z.object({
taskType: z.literal('Running'),
prompt: zTaskPrompt,
// @Deprecated
remove: z.object({
name: z.literal('Cancel'),
cb: z.function()
})
})
const zPendingTaskItem = z.object({
taskType: z.literal('Pending'),
prompt: zTaskPrompt
})
export const zTaskOutput = z.record(zNodeId, zOutputs)
const zNodeOutputsMeta = z.object({
node_id: zNodeId,
display_node: zNodeId,
prompt_id: zPromptId.optional(),
read_node_id: zNodeId.optional()
})
export const zTaskMeta = z.record(zNodeId, zNodeOutputsMeta)
const zHistoryTaskItem = z.object({
taskType: z.literal('History'),
prompt: zTaskPrompt,
status: zStatus.optional(),
outputs: zTaskOutput,
meta: zTaskMeta.optional()
})
const zTaskItem = z.union([
zRunningTaskItem,
zPendingTaskItem,
zHistoryTaskItem
])
const zTaskType = z.union([
z.literal('Running'),
z.literal('Pending'),
z.literal('History')
])
export type TaskType = z.infer<typeof zTaskType>
export type TaskPrompt = z.infer<typeof zTaskPrompt>
export type TaskStatus = z.infer<typeof zStatus>
export type TaskOutput = z.infer<typeof zTaskOutput>
// `/queue`
export type RunningTaskItem = z.infer<typeof zRunningTaskItem>
export type PendingTaskItem = z.infer<typeof zPendingTaskItem>
// `/history`
export type HistoryTaskItem = z.infer<typeof zHistoryTaskItem>
export type TaskItem = z.infer<typeof zTaskItem>
const zEmbeddingsResponse = z.array(z.string())
const zExtensionsResponse = z.array(z.string())
const zError = z.object({

View File

@@ -26,16 +26,13 @@ import type {
ExecutionSuccessWsMessage,
ExtensionsResponse,
FeatureFlagsWsMessage,
HistoryTaskItem,
LogsRawResponse,
LogsWsMessage,
NotificationWsMessage,
PendingTaskItem,
ProgressStateWsMessage,
ProgressTextWsMessage,
ProgressWsMessage,
PromptResponse,
RunningTaskItem,
Settings,
StatusWsMessage,
StatusWsMessageStatus,
@@ -43,11 +40,12 @@ import type {
User,
UserDataFullInfo
} from '@/schemas/apiSchema'
import type { JobListItem } from '@/platform/remote/comfyui/jobs'
import type { ComfyNodeDef } from '@/schemas/nodeDefSchema'
import type { useFirebaseAuthStore } from '@/stores/firebaseAuthStore'
import type { AuthHeader } from '@/types/authTypes'
import type { NodeExecutionId } from '@/types/nodeIdentification'
import { fetchHistory } from '@/platform/remote/comfyui/history'
import { fetchHistory, fetchQueue } from '@/platform/remote/comfyui/jobs'
interface QueuePromptRequestBody {
client_id: string
@@ -870,53 +868,13 @@ export class ComfyApi extends EventTarget {
* @returns The currently running and queued items
*/
async getQueue(): Promise<{
Running: RunningTaskItem[]
Pending: PendingTaskItem[]
Running: JobListItem[]
Pending: JobListItem[]
}> {
try {
const res = await this.fetchApi('/queue')
const data = await res.json()
// Normalize queue tuple shape across backends:
// - Backend (V1): [idx, prompt_id, inputs, extra_data(object), outputs_to_execute(array)]
// - Cloud: [idx, prompt_id, inputs, outputs_to_execute(array), metadata(object{create_time})]
const normalizeQueuePrompt = (prompt: any): any => {
if (!Array.isArray(prompt)) return prompt
// Ensure 5-tuple
const p = prompt.slice(0, 5)
const fourth = p[3]
const fifth = p[4]
// Cloud shape: 4th is array, 5th is metadata object
if (
Array.isArray(fourth) &&
fifth &&
typeof fifth === 'object' &&
!Array.isArray(fifth)
) {
const meta: any = fifth
const extraData = { ...meta }
return [p[0], p[1], p[2], extraData, fourth]
}
// V1 shape already: return as-is
return p
}
return {
// Running action uses a different endpoint for cancelling
Running: data.queue_running.map((prompt: any) => {
const np = normalizeQueuePrompt(prompt)
return {
taskType: 'Running',
prompt: np,
// prompt[1] is the prompt id
remove: { name: 'Cancel', cb: () => api.interrupt(np[1]) }
}
}),
Pending: data.queue_pending.map((prompt: any) => ({
taskType: 'Pending',
prompt: normalizeQueuePrompt(prompt)
}))
}
return await fetchQueue(this.fetchApi.bind(this))
} catch (error) {
console.error(error)
console.error('Failed to fetch queue:', error)
return { Running: [], Pending: [] }
}
}
@@ -928,7 +886,7 @@ export class ComfyApi extends EventTarget {
async getHistory(
max_items: number = 200,
options?: { offset?: number }
): Promise<{ History: HistoryTaskItem[] }> {
): Promise<JobListItem[]> {
try {
return await fetchHistory(
this.fetchApi.bind(this),
@@ -937,7 +895,7 @@ export class ComfyApi extends EventTarget {
)
} catch (error) {
console.error(error)
return { History: [] }
return []
}
}

View File

@@ -1,6 +1,6 @@
import { useSettingStore } from '@/platform/settings/settingStore'
import { WORKFLOW_ACCEPT_STRING } from '@/platform/workflow/core/types/formats'
import { type StatusWsMessageStatus, type TaskItem } from '@/schemas/apiSchema'
import { type StatusWsMessageStatus } from '@/schemas/apiSchema'
import { useDialogService } from '@/services/dialogService'
import { isCloud } from '@/platform/distribution/types'
import { useTelemetry } from '@/platform/telemetry'
@@ -259,7 +259,7 @@ class ComfyList {
$el('div.comfy-list-items', [
// @ts-expect-error fixme ts strict error
...(this.#reverse ? items[section].reverse() : items[section]).map(
(item: TaskItem) => {
(item: any) => {
// Allow items to specify a custom remove action (e.g. for interrupt current prompt)
const removeAction =
'remove' in item
@@ -273,7 +273,6 @@ class ComfyList {
textContent: 'Load',
onclick: async () => {
await app.loadGraphData(
// @ts-expect-error fixme ts strict error
item.prompt[3].extra_pnginfo.workflow,
true,
false

View File

@@ -8,7 +8,7 @@ import {
import type { AssetItem } from '@/platform/assets/schemas/assetSchema'
import { assetService } from '@/platform/assets/services/assetService'
import { isCloud } from '@/platform/distribution/types'
import type { TaskItem } from '@/schemas/apiSchema'
import type { JobListItem } from '@/platform/remote/comfyui/jobs'
import { api } from '@/scripts/api'
import { TaskItemImpl } from './queueStore'
@@ -45,27 +45,18 @@ async function fetchInputFilesFromCloud(): Promise<AssetItem[]> {
}
/**
* Convert history task items to asset items
* Convert history job items to asset items
*/
function mapHistoryToAssets(historyItems: TaskItem[]): AssetItem[] {
function mapHistoryToAssets(historyItems: JobListItem[]): AssetItem[] {
const assetItems: AssetItem[] = []
for (const item of historyItems) {
// Type guard for HistoryTaskItem which has status and outputs
if (item.taskType !== 'History') {
for (const job of historyItems) {
// Only process completed jobs with preview output
if (job.status !== 'completed' || !job.preview_output) {
continue
}
if (!item.outputs || !item.status || item.status?.status_str === 'error') {
continue
}
const task = new TaskItemImpl(
'History',
item.prompt,
item.status,
item.outputs
)
const task = new TaskItemImpl(job)
if (!task.previewOutput) {
continue
@@ -137,8 +128,8 @@ export const useAssetsStore = defineStore('assets', () => {
offset: historyOffset.value
})
// Convert TaskItems to AssetItems
const newAssets = mapHistoryToAssets(history.History)
// Convert JobListItems to AssetItems
const newAssets = mapHistoryToAssets(history)
if (loadMore) {
// Filter out duplicates and insert in sorted order
@@ -170,7 +161,7 @@ export const useAssetsStore = defineStore('assets', () => {
// Update pagination state
historyOffset.value += BATCH_SIZE
hasMoreHistory.value = history.History.length === BATCH_SIZE
hasMoreHistory.value = history.length === BATCH_SIZE
if (allHistoryItems.value.length > MAX_HISTORY_ITEMS) {
const removed = allHistoryItems.value.slice(MAX_HISTORY_ITEMS)

View File

@@ -2,22 +2,17 @@ import _ from 'es-toolkit/compat'
import { defineStore } from 'pinia'
import { computed, ref, shallowRef, toRaw, toValue } from 'vue'
import { isCloud } from '@/platform/distribution/types'
import { reconcileHistory } from '@/platform/remote/comfyui/history/reconciliation'
import { getWorkflowFromHistory } from '@/platform/workflow/cloud'
import { reconcileJobs } from '@/platform/remote/comfyui/history/reconciliation'
import { extractWorkflow, fetchJobDetail } from '@/platform/remote/comfyui/jobs'
import type { JobListItem } from '@/platform/remote/comfyui/jobs'
import type {
ComfyWorkflowJSON,
NodeId
} from '@/platform/workflow/validation/schemas/workflowSchema'
import type {
HistoryTaskItem,
ResultItem,
StatusWsMessageStatus,
TaskItem,
TaskOutput,
TaskPrompt,
TaskStatus,
TaskType
TaskOutput
} from '@/schemas/apiSchema'
import { api } from '@/scripts/api'
import type { ComfyApp } from '@/scripts/app'
@@ -29,6 +24,9 @@ import { getMediaTypeFromFilename } from '@/utils/formatUtil'
// Task type used in the API.
type APITaskType = 'queue' | 'history'
// Internal task type derived from job status
type TaskType = 'Running' | 'Pending' | 'History'
enum TaskItemDisplayStatus {
Running = 'Running',
Pending = 'Pending',
@@ -98,91 +96,27 @@ export class ResultItemImpl {
return !!this.format && !!this.frame_rate
}
get htmlVideoType(): string | undefined {
if (this.isWebm) {
return 'video/webm'
}
if (this.isMp4) {
return 'video/mp4'
}
if (this.isVhsFormat) {
if (this.format?.endsWith('webm')) {
return 'video/webm'
}
if (this.format?.endsWith('mp4')) {
return 'video/mp4'
}
}
return undefined
}
get htmlAudioType(): string | undefined {
if (this.isMp3) {
return 'audio/mpeg'
}
if (this.isWav) {
return 'audio/wav'
}
if (this.isOgg) {
return 'audio/ogg'
}
if (this.isFlac) {
return 'audio/flac'
}
return undefined
}
get isGif(): boolean {
return this.filename.endsWith('.gif')
}
get isWebp(): boolean {
return this.filename.endsWith('.webp')
}
get isWebm(): boolean {
return this.filename.endsWith('.webm')
}
get isMp4(): boolean {
return this.filename.endsWith('.mp4')
}
get isVideoBySuffix(): boolean {
return this.isWebm || this.isMp4
}
get isImageBySuffix(): boolean {
return this.isGif || this.isWebp
}
get isMp3(): boolean {
return this.filename.endsWith('.mp3')
}
get isWav(): boolean {
return this.filename.endsWith('.wav')
}
get isOgg(): boolean {
return this.filename.endsWith('.ogg')
}
get isFlac(): boolean {
return this.filename.endsWith('.flac')
return getMediaTypeFromFilename(this.filename) === 'video'
}
get isAudioBySuffix(): boolean {
return this.isMp3 || this.isWav || this.isOgg || this.isFlac
return getMediaTypeFromFilename(this.filename) === 'audio'
}
get isImageBySuffix(): boolean {
return getMediaTypeFromFilename(this.filename) === 'image'
}
get isVideo(): boolean {
const isVideoByType =
this.mediaType === 'video' || !!this.format?.startsWith('video/')
return (
this.isVideoBySuffix ||
(isVideoByType && !this.isImageBySuffix && !this.isAudioBySuffix)
this.isVhsFormat ||
// Note: VHS gifs are always previewed in the video player even if they
// are not VHS format. We could consider previewing them as images instead.
(this.mediaType === 'gifs' &&
!this.isImageBySuffix &&
!this.isAudioBySuffix)
)
}
@@ -211,32 +145,64 @@ export class ResultItemImpl {
get supportsPreview(): boolean {
return this.isImage || this.isVideo || this.isAudio || this.is3D
}
/**
* HTML video MIME type for video elements
*/
get htmlVideoType(): string {
if (this.isVhsFormat) {
return 'video/webm'
}
const extension = this.filename.split('.').pop()?.toLowerCase()
const mimeTypes: Record<string, string> = {
mp4: 'video/mp4',
webm: 'video/webm',
ogg: 'video/ogg',
mov: 'video/quicktime',
avi: 'video/x-msvideo',
mkv: 'video/x-matroska',
gif: 'image/gif'
}
return mimeTypes[extension ?? ''] ?? 'video/mp4'
}
/**
* HTML audio MIME type for audio elements
*/
get htmlAudioType(): string {
const extension = this.filename.split('.').pop()?.toLowerCase()
const mimeTypes: Record<string, string> = {
mp3: 'audio/mpeg',
wav: 'audio/wav',
ogg: 'audio/ogg',
flac: 'audio/flac',
aac: 'audio/aac',
m4a: 'audio/mp4',
webm: 'audio/webm'
}
return mimeTypes[extension ?? ''] ?? 'audio/mpeg'
}
}
export class TaskItemImpl {
readonly taskType: TaskType
readonly prompt: TaskPrompt
readonly status?: TaskStatus
readonly job: JobListItem
readonly outputs: TaskOutput
readonly flatOutputs: ReadonlyArray<ResultItemImpl>
constructor(
taskType: TaskType,
prompt: TaskPrompt,
status?: TaskStatus,
job: JobListItem,
outputs?: TaskOutput,
flatOutputs?: ReadonlyArray<ResultItemImpl>
) {
this.taskType = taskType
this.prompt = prompt
this.status = status
this.job = job
// If no outputs provided but job has preview_output, create synthetic outputs
const effectiveOutputs =
outputs ??
(job.preview_output
? { preview_node: { images: [job.preview_output] } }
: {})
// Remove animated outputs from the outputs object
// outputs.animated is an array of boolean values indicating whether the
// images in the result array are animated.
// The queueStore does not use this information.
// It is part of the legacy API response. We should redesign the backend API.
// https://github.com/Comfy-Org/ComfyUI_frontend/issues/2739
this.outputs = _.mapValues(outputs ?? {}, (nodeOutputs) =>
this.outputs = _.mapValues(effectiveOutputs, (nodeOutputs) =>
_.omit(nodeOutputs, 'animated')
)
this.flatOutputs = flatOutputs ?? this.calculateFlatOutputs()
@@ -269,6 +235,18 @@ export class TaskItemImpl {
)
}
// Derive taskType from job status
get taskType(): TaskType {
switch (this.job.status) {
case 'in_progress':
return 'Running'
case 'pending':
return 'Pending'
default:
return 'History'
}
}
get apiTaskType(): APITaskType {
switch (this.taskType) {
case 'Running':
@@ -284,62 +262,88 @@ export class TaskItemImpl {
}
get queueIndex() {
return this.prompt[0]
return this.job.priority
}
get promptId() {
return this.prompt[1]
return this.job.id
}
get promptInputs() {
return this.prompt[2]
}
get extraData() {
return this.prompt[3]
}
get outputsToExecute() {
return this.prompt[4]
}
get extraPngInfo() {
return this.extraData.extra_pnginfo
}
get clientId() {
return this.extraData.client_id
}
get workflow(): ComfyWorkflowJSON | undefined {
return this.extraPngInfo?.workflow
}
get messages() {
return this.status?.messages || []
get outputsCount(): number | undefined {
return this.job.outputs_count
}
/**
* Server-provided creation time in milliseconds, when available.
*
* Sources:
* - Queue: 5th tuple element may be a metadata object with { create_time }.
* - History (Cloud V2): Adapter injects create_time into prompt[3].extra_data.
*/
get createTime(): number | undefined {
const extra = (this.extraData as any) || {}
const fromExtra =
typeof extra.create_time === 'number' ? extra.create_time : undefined
if (typeof fromExtra === 'number') return fromExtra
}
/**
* The job status from the API
*/
get status() {
return this.job.status
}
/**
* Error message if job failed
*/
get errorMessage(): string | undefined {
return this.job.error_message ?? undefined
}
get workflow(): ComfyWorkflowJSON | undefined {
// Workflow is only available after lazy loading via fetchJobDetail in loadWorkflow
return undefined
}
get interrupted() {
return _.some(
this.messages,
(message) => message[0] === 'execution_interrupted'
)
}
/**
* Server-provided creation time in milliseconds
*/
get createTime(): number {
return this.job.create_time
}
/**
* Execution messages - not available in JobListItem, would need JobDetail
* @deprecated Use job status instead
*/
get messages(): Array<[string, any]> {
return []
}
/**
* Whether the job was interrupted/cancelled
*/
get interrupted(): boolean {
return this.job.status === 'cancelled'
}
/**
* Execution start timestamp - not available in JobListItem
* @deprecated Not available in jobs list API
*/
get executionStartTimestamp(): number | undefined {
return undefined
}
/**
* Execution end timestamp - not available in JobListItem
* @deprecated Not available in jobs list API
*/
get executionEndTimestamp(): number | undefined {
return undefined
}
/**
* Execution time in ms - not available in JobListItem
* @deprecated Not available in jobs list API
*/
get executionTime(): number | undefined {
return undefined
}
/**
* Execution time in seconds - not available in JobListItem
* @deprecated Not available in jobs list API
*/
get executionTimeInSeconds(): number | undefined {
return undefined
}
get isHistory() {
@@ -351,79 +355,67 @@ export class TaskItemImpl {
}
get displayStatus(): TaskItemDisplayStatus {
switch (this.taskType) {
case 'Running':
switch (this.job.status) {
case 'in_progress':
return TaskItemDisplayStatus.Running
case 'Pending':
case 'pending':
return TaskItemDisplayStatus.Pending
case 'History':
if (this.interrupted) return TaskItemDisplayStatus.Cancelled
switch (this.status!.status_str) {
case 'success':
return TaskItemDisplayStatus.Completed
case 'error':
return TaskItemDisplayStatus.Failed
}
case 'completed':
return TaskItemDisplayStatus.Completed
case 'failed':
return TaskItemDisplayStatus.Failed
case 'cancelled':
return TaskItemDisplayStatus.Cancelled
}
}
get executionStartTimestamp() {
const message = this.messages.find(
(message) => message[0] === 'execution_start'
)
return message ? message[1].timestamp : undefined
}
get executionEndTimestamp() {
const messages = this.messages.filter((message) =>
[
'execution_success',
'execution_interrupted',
'execution_error'
].includes(message[0])
)
if (!messages.length) {
return undefined
}
return _.max(messages.map((message) => message[1].timestamp))
}
get executionTime() {
if (!this.executionStartTimestamp || !this.executionEndTimestamp) {
return undefined
}
return this.executionEndTimestamp - this.executionStartTimestamp
}
get executionTimeInSeconds() {
return this.executionTime !== undefined
? this.executionTime / 1000
: undefined
}
/**
* Loads full outputs for tasks that only have preview data.
* Returns a new TaskItemImpl with full outputs and execution status.
*/
public async loadFullOutputs(
fetchApi: (url: string) => Promise<Response>
): Promise<TaskItemImpl> {
// Only load for history tasks (caller checks outputsCount > 1)
if (!this.isHistory) {
return this
}
const jobDetail = await fetchJobDetail(fetchApi, this.promptId)
if (!jobDetail?.outputs) {
return this
}
// Create new TaskItemImpl with full outputs
return new TaskItemImpl(this.job, jobDetail.outputs)
}
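// Illustrative usage of loadFullOutputs (a sketch; `historyTasks` and the
// outputsCount > 1 guard are assumptions drawn from the comment above):
//
//   let task = historyTasks.value[0]
//   if (task.isHistory && (task.outputsCount ?? 0) > 1) {
//     task = await task.loadFullOutputs((url) => api.fetchApi(url))
//   }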
public async loadWorkflow(app: ComfyApp) {
let workflowData = this.workflow
if (isCloud && !workflowData && this.isHistory) {
workflowData = await getWorkflowFromHistory(
(url) => app.api.fetchApi(url),
this.promptId
)
}
if (!this.isHistory) {
return
}
// Single fetch for both workflow and outputs
const jobDetail = await fetchJobDetail(
(url) => app.api.fetchApi(url),
this.promptId
)
const workflowData = extractWorkflow(jobDetail)
if (!workflowData) {
return
}
await app.loadGraphData(toRaw(workflowData))
if (!this.outputs) {
// Use full outputs from job detail, or fall back to existing outputs
const outputsToLoad = jobDetail?.outputs ?? this.outputs
if (!outputsToLoad) {
return
}
const nodeOutputsStore = useNodeOutputStore()
const rawOutputs = toRaw(this.outputs)
const rawOutputs = toRaw(outputsToLoad)
for (const nodeExecutionId in rawOutputs) {
nodeOutputsStore.setNodeOutputsByExecutionId(
nodeExecutionId,
@@ -444,15 +436,10 @@ export class TaskItemImpl {
return this.flatOutputs.map(
(output: ResultItemImpl, i: number) =>
new TaskItemImpl(
this.taskType,
[
this.queueIndex,
`${this.promptId}-${i}`,
this.promptInputs,
this.extraData,
this.outputsToExecute
],
this.status,
{
...this.job,
id: `${this.promptId}-${i}`
},
{
[output.nodeId]: {
[output.mediaType]: [output]
@@ -463,31 +450,23 @@ export class TaskItemImpl {
)
}
public toTaskItem(): TaskItem {
const item: HistoryTaskItem = {
taskType: 'History',
prompt: this.prompt,
status: this.status!,
outputs: this.outputs
}
return item
}
/**
* Returns the underlying job data
*/
public toJob(): JobListItem {
return this.job
}
/**
* Cancel this task (for queue items)
*/
public async cancel(): Promise<void> {
if (this.taskType === 'Running' || this.taskType === 'Pending') {
await api.interrupt(this.job.id)
}
}
}
const sortNewestFirst = (a: TaskItemImpl, b: TaskItemImpl) =>
b.queueIndex - a.queueIndex
const toTaskItemImpls = (tasks: TaskItem[]): TaskItemImpl[] =>
tasks.map(
(task) =>
new TaskItemImpl(
task.taskType,
task.prompt,
'status' in task ? task.status : undefined,
'outputs' in task ? task.outputs : undefined
)
)
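// Minimal sketch (assumptions only): what the constructor above produces for a
// completed job from the list endpoint that carries just a preview image. The
// exact JobListItem field types are defined in the Jobs API schemas.
const examplePreviewJob = {
  id: 'job-123',
  status: 'completed',
  priority: 1,
  create_time: 1700000000000,
  outputs_count: 3,
  preview_output: { filename: 'preview.webp', subfolder: '', type: 'output' }
} as unknown as JobListItem
const examplePreviewTask = new TaskItemImpl(examplePreviewJob)
// examplePreviewTask.outputs is { preview_node: { images: [preview_output] } },
// so the queue sidebar can render a thumbnail without the full outputs payload.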
export const useQueueStore = defineStore('queue', () => {
// Use shallowRef because TaskItemImpl instances are immutable and arrays are
// replaced entirely (not mutated), so deep reactivity would waste performance
@@ -524,8 +503,9 @@ export const useQueueStore = defineStore('queue', () => {
api.getHistory(maxHistoryItems.value)
])
runningTasks.value = toTaskItemImpls(queue.Running).sort(sortNewestFirst)
pendingTasks.value = toTaskItemImpls(queue.Pending).sort(sortNewestFirst)
// API returns pre-sorted data (sort_by=create_time&order=desc)
runningTasks.value = queue.Running.map((job) => new TaskItemImpl(job))
pendingTasks.value = queue.Pending.map((job) => new TaskItemImpl(job))
const currentHistory = toValue(historyTasks)
@@ -542,11 +522,10 @@ export const useQueueStore = defineStore('queue', () => {
}
})
const items = reconcileHistory(
history.History,
currentHistory.map((impl) => impl.toTaskItem()),
toValue(maxHistoryItems),
toValue(lastHistoryQueueIndex)
const reconciledJobs = reconcileJobs(
history,
currentHistory.map((impl) => impl.toJob()),
toValue(maxHistoryItems)
)
// Reuse existing TaskItemImpl instances or create new
@@ -554,9 +533,8 @@ export const useQueueStore = defineStore('queue', () => {
currentHistory.map((impl) => [impl.promptId, impl])
)
historyTasks.value = items.map(
(item) =>
existingByPromptId.get(item.prompt[1]) ?? toTaskItemImpls([item])[0]
historyTasks.value = reconciledJobs.map(
(job) => existingByPromptId.get(job.id) ?? new TaskItemImpl(job)
)
} finally {
isLoading.value = false

View File

@@ -1,380 +0,0 @@
/**
* @fileoverview Test fixtures for history tests.
*/
import type { HistoryResponseV2 } from '@/platform/remote/comfyui/history/types/historyV2Types'
import type { HistoryTaskItem } from '@/schemas/apiSchema'
/**
* V1 API raw response format (object with prompt IDs as keys)
*/
export const historyV1RawResponse: Record<
string,
Omit<HistoryTaskItem, 'taskType'>
> = {
'complete-item-id': {
prompt: [
24,
'complete-item-id',
{},
{
client_id: 'test-client',
extra_pnginfo: {
workflow: {
id: '44f0c9f9-b5a7-48de-99fc-7e80c1570241',
revision: 0,
last_node_id: 9,
last_link_id: 9,
nodes: [],
links: [],
groups: [],
config: {},
extra: {},
version: 0.4
}
}
},
['9']
],
outputs: {
'9': {
images: [
{
filename: 'test.png',
subfolder: '',
type: 'output'
}
]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_start',
{ prompt_id: 'complete-item-id', timestamp: 1234567890 }
],
[
'execution_success',
{ prompt_id: 'complete-item-id', timestamp: 1234567900 }
]
]
},
meta: {
'9': {
node_id: '9',
display_node: '9'
}
}
},
'no-status-id': {
prompt: [
23,
'no-status-id',
{},
{
client_id: 'inference'
},
['10']
],
outputs: {
'10': {
images: []
}
},
status: undefined,
meta: {
'10': {
node_id: '10',
display_node: '10'
}
}
}
}
/**
* V2 response with multiple edge cases:
* - Item 0: Complete with all fields
* - Item 1: Missing optional status field
* - Item 2: Missing optional meta field
* - Item 3: Multiple output nodes
*/
export const historyV2Fixture: HistoryResponseV2 = {
history: [
{
prompt_id: 'complete-item-id',
prompt: {
priority: 24,
prompt_id: 'complete-item-id',
extra_data: {
client_id: 'test-client',
extra_pnginfo: {
workflow: {
id: '44f0c9f9-b5a7-48de-99fc-7e80c1570241',
revision: 0,
last_node_id: 9,
last_link_id: 9,
nodes: [],
links: [],
groups: [],
config: {},
extra: {},
version: 0.4
}
}
}
},
outputs: {
'9': {
images: [
{
filename: 'test.png',
subfolder: '',
type: 'output'
}
]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_start',
{ prompt_id: 'complete-item-id', timestamp: 1234567890 }
],
[
'execution_success',
{ prompt_id: 'complete-item-id', timestamp: 1234567900 }
]
]
},
meta: {
'9': {
node_id: '9',
display_node: '9'
}
}
},
{
prompt_id: 'no-status-id',
prompt: {
priority: 23,
prompt_id: 'no-status-id',
extra_data: {
client_id: 'inference'
}
},
outputs: {
'10': {
images: []
}
},
meta: {
'10': {
node_id: '10',
display_node: '10'
}
}
},
{
prompt_id: 'no-meta-id',
prompt: {
priority: 22,
prompt_id: 'no-meta-id',
extra_data: {
client_id: 'web-ui'
}
},
outputs: {
'11': {
audio: []
}
},
status: {
status_str: 'error',
completed: false,
messages: []
}
},
{
prompt_id: 'multi-output-id',
prompt: {
priority: 21,
prompt_id: 'multi-output-id',
extra_data: {
client_id: 'batch-processor'
}
},
outputs: {
'3': {
images: [{ filename: 'img1.png', type: 'output', subfolder: '' }]
},
'9': {
images: [{ filename: 'img2.png', type: 'output', subfolder: '' }]
},
'12': {
video: [{ filename: 'video.mp4', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: []
},
meta: {
'3': { node_id: '3', display_node: '3' },
'9': { node_id: '9', display_node: '9' },
'12': { node_id: '12', display_node: '12' }
}
}
]
}
/**
* Expected V1 transformation of historyV2Fixture
* Priority is now synthetic based on execution_success timestamp:
* - complete-item-id: has timestamp → priority 1 (only one with timestamp)
* - no-status-id: no status → priority 0
* - no-meta-id: empty messages → priority 0
* - multi-output-id: empty messages → priority 0
*/
export const expectedV1Fixture: HistoryTaskItem[] = [
{
taskType: 'History',
prompt: [
1,
'complete-item-id',
{},
{
client_id: 'test-client',
extra_pnginfo: {
workflow: {
id: '44f0c9f9-b5a7-48de-99fc-7e80c1570241',
revision: 0,
last_node_id: 9,
last_link_id: 9,
nodes: [],
links: [],
groups: [],
config: {},
extra: {},
version: 0.4
}
}
},
['9']
],
outputs: {
'9': {
images: [
{
filename: 'test.png',
subfolder: '',
type: 'output'
}
]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_start',
{ prompt_id: 'complete-item-id', timestamp: 1234567890 }
],
[
'execution_success',
{ prompt_id: 'complete-item-id', timestamp: 1234567900 }
]
]
},
meta: {
'9': {
node_id: '9',
display_node: '9'
}
}
},
{
taskType: 'History',
prompt: [
0,
'no-status-id',
{},
{
client_id: 'inference'
},
['10']
],
outputs: {
'10': {
images: []
}
},
status: undefined,
meta: {
'10': {
node_id: '10',
display_node: '10'
}
}
},
{
taskType: 'History',
prompt: [
0,
'no-meta-id',
{},
{
client_id: 'web-ui'
},
['11']
],
outputs: {
'11': {
audio: []
}
},
status: {
status_str: 'error',
completed: false,
messages: []
},
meta: undefined
},
{
taskType: 'History',
prompt: [
0,
'multi-output-id',
{},
{
client_id: 'batch-processor'
},
['3', '9', '12']
],
outputs: {
'3': {
images: [{ filename: 'img1.png', type: 'output', subfolder: '' }]
},
'9': {
images: [{ filename: 'img2.png', type: 'output', subfolder: '' }]
},
'12': {
video: [{ filename: 'video.mp4', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: []
},
meta: {
'3': { node_id: '3', display_node: '3' },
'9': { node_id: '9', display_node: '9' },
'12': { node_id: '12', display_node: '12' }
}
}
]

View File

@@ -1,258 +0,0 @@
/**
* @fileoverview Test fixtures for history V2 timestamp-based sorting
*/
import type { HistoryResponseV2 } from '@/platform/remote/comfyui/history/types/historyV2Types'
export const historyV2WithMissingTimestamp: HistoryResponseV2 = {
history: [
{
prompt_id: 'item-timestamp-1000',
prompt: {
priority: 0,
prompt_id: 'item-timestamp-1000',
extra_data: {
client_id: 'test-client'
}
},
outputs: {
'1': {
images: [{ filename: 'test1.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_success',
{ prompt_id: 'item-timestamp-1000', timestamp: 1000 }
]
]
}
},
{
prompt_id: 'item-timestamp-2000',
prompt: {
priority: 0,
prompt_id: 'item-timestamp-2000',
extra_data: {
client_id: 'test-client'
}
},
outputs: {
'2': {
images: [{ filename: 'test2.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_success',
{ prompt_id: 'item-timestamp-2000', timestamp: 2000 }
]
]
}
},
{
prompt_id: 'item-no-timestamp',
prompt: {
priority: 0,
prompt_id: 'item-no-timestamp',
extra_data: {
client_id: 'test-client'
}
},
outputs: {
'3': {
images: [{ filename: 'test3.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: []
}
}
]
}
export const historyV2FiveItemsSorting: HistoryResponseV2 = {
history: [
{
prompt_id: 'item-timestamp-3000',
prompt: {
priority: 0,
prompt_id: 'item-timestamp-3000',
extra_data: { client_id: 'test-client' }
},
outputs: {
'1': {
images: [{ filename: 'test1.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_success',
{ prompt_id: 'item-timestamp-3000', timestamp: 3000 }
]
]
}
},
{
prompt_id: 'item-timestamp-1000',
prompt: {
priority: 0,
prompt_id: 'item-timestamp-1000',
extra_data: { client_id: 'test-client' }
},
outputs: {
'2': {
images: [{ filename: 'test2.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_success',
{ prompt_id: 'item-timestamp-1000', timestamp: 1000 }
]
]
}
},
{
prompt_id: 'item-timestamp-5000',
prompt: {
priority: 0,
prompt_id: 'item-timestamp-5000',
extra_data: { client_id: 'test-client' }
},
outputs: {
'3': {
images: [{ filename: 'test3.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_success',
{ prompt_id: 'item-timestamp-5000', timestamp: 5000 }
]
]
}
},
{
prompt_id: 'item-timestamp-2000',
prompt: {
priority: 0,
prompt_id: 'item-timestamp-2000',
extra_data: { client_id: 'test-client' }
},
outputs: {
'4': {
images: [{ filename: 'test4.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_success',
{ prompt_id: 'item-timestamp-2000', timestamp: 2000 }
]
]
}
},
{
prompt_id: 'item-timestamp-4000',
prompt: {
priority: 0,
prompt_id: 'item-timestamp-4000',
extra_data: { client_id: 'test-client' }
},
outputs: {
'5': {
images: [{ filename: 'test5.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_success',
{ prompt_id: 'item-timestamp-4000', timestamp: 4000 }
]
]
}
}
]
}
export const historyV2MultipleNoTimestamp: HistoryResponseV2 = {
history: [
{
prompt_id: 'item-no-timestamp-1',
prompt: {
priority: 0,
prompt_id: 'item-no-timestamp-1',
extra_data: { client_id: 'test-client' }
},
outputs: {
'1': {
images: [{ filename: 'test1.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: []
}
},
{
prompt_id: 'item-no-timestamp-2',
prompt: {
priority: 0,
prompt_id: 'item-no-timestamp-2',
extra_data: { client_id: 'test-client' }
},
outputs: {
'2': {
images: [{ filename: 'test2.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: []
}
},
{
prompt_id: 'item-no-timestamp-3',
prompt: {
priority: 0,
prompt_id: 'item-no-timestamp-3',
extra_data: { client_id: 'test-client' }
},
outputs: {
'3': {
images: [{ filename: 'test3.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: []
}
}
]
}

View File

@@ -2,88 +2,33 @@ import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { computed, ref } from 'vue'
import type { ComputedRef } from 'vue'
import type { ExecutionErrorWsMessage } from '@/schemas/apiSchema'
import type { TaskItemImpl } from '@/stores/queueStore'
import type { JobErrorDialogService } from '@/components/queue/job/useJobErrorReporting'
import * as jobErrorReporting from '@/components/queue/job/useJobErrorReporting'
const createExecutionErrorMessage = (
overrides: Partial<ExecutionErrorWsMessage> = {}
): ExecutionErrorWsMessage => ({
prompt_id: 'prompt',
timestamp: 100,
node_id: 'node-1',
node_type: 'KSampler',
executed: [],
exception_message: 'default failure',
exception_type: 'RuntimeError',
traceback: ['Trace line'],
current_inputs: {},
current_outputs: {},
...overrides
})
/**
* Creates a mock TaskItemImpl with an error message.
*/
const createTaskWithError = (errorMessage?: string): TaskItemImpl =>
({ errorMessage }) as unknown as TaskItemImpl
const createTaskWithMessages = (
messages: Array<[string, unknown]> | undefined = []
): TaskItemImpl =>
({
status: {
status_str: 'error',
completed: false,
messages
}
}) as TaskItemImpl
describe('extractErrorMessage', () => {
it('returns null when task is null', () => {
expect(jobErrorReporting.extractErrorMessage(null)).toBeNull()
})
it('returns null when errorMessage is undefined', () => {
expect(
jobErrorReporting.extractErrorMessage(createTaskWithError())
).toBeNull()
})
it('returns the error message when present', () => {
expect(
jobErrorReporting.extractErrorMessage(
createTaskWithError('Something failed')
)
).toBe('Something failed')
})
})
describe('extractExecutionError', () => {
it('returns null when task has no execution error messages', () => {
expect(jobErrorReporting.extractExecutionError(null)).toBeNull()
expect(
jobErrorReporting.extractExecutionError({
status: undefined
} as TaskItemImpl)
).toBeNull()
expect(
jobErrorReporting.extractExecutionError({
status: {
status_str: 'error',
completed: false,
messages: {} as unknown as Array<[string, unknown]>
}
} as TaskItemImpl)
).toBeNull()
expect(
jobErrorReporting.extractExecutionError(createTaskWithMessages([]))
).toBeNull()
expect(
jobErrorReporting.extractExecutionError(
createTaskWithMessages([
['execution_start', { prompt_id: 'prompt', timestamp: 1 }]
] as Array<[string, unknown]>)
)
).toBeNull()
})
it('returns detail and message for execution_error entries', () => {
const detail = createExecutionErrorMessage({ exception_message: 'Kaboom' })
const result = jobErrorReporting.extractExecutionError(
createTaskWithMessages([
['execution_success', { prompt_id: 'prompt', timestamp: 2 }],
['execution_error', detail]
] as Array<[string, unknown]>)
)
expect(result).toEqual({
detail,
message: 'Kaboom'
})
})
it('falls back to an empty message when the tuple lacks detail', () => {
const result = jobErrorReporting.extractExecutionError(
createTaskWithMessages([
['execution_error'] as unknown as [string, ExecutionErrorWsMessage]
])
)
expect(result).toEqual({ detail: undefined, message: '' })
})
})
@@ -91,7 +36,6 @@ describe('useJobErrorReporting', () => {
let taskState = ref<TaskItemImpl | null>(null)
let taskForJob: ComputedRef<TaskItemImpl | null>
let copyToClipboard: ReturnType<typeof vi.fn>
let showExecutionErrorDialog: ReturnType<typeof vi.fn>
let showErrorDialog: ReturnType<typeof vi.fn>
let dialog: JobErrorDialogService
let composable: ReturnType<typeof jobErrorReporting.useJobErrorReporting>
@@ -100,12 +44,8 @@ describe('useJobErrorReporting', () => {
taskState = ref<TaskItemImpl | null>(null)
taskForJob = computed(() => taskState.value)
copyToClipboard = vi.fn()
showExecutionErrorDialog = vi.fn()
showErrorDialog = vi.fn()
dialog = {
showExecutionErrorDialog,
showErrorDialog
}
dialog = { showErrorDialog }
composable = jobErrorReporting.useJobErrorReporting({
taskForJob,
copyToClipboard,
@@ -118,73 +58,44 @@ describe('useJobErrorReporting', () => {
})
it('exposes a computed message that reflects the current task error', () => {
taskState.value = createTaskWithMessages([
[
'execution_error',
createExecutionErrorMessage({ exception_message: 'First failure' })
]
])
taskState.value = createTaskWithError('First failure')
expect(composable.errorMessageValue.value).toBe('First failure')
taskState.value = createTaskWithMessages([
[
'execution_error',
createExecutionErrorMessage({ exception_message: 'Second failure' })
]
])
taskState.value = createTaskWithError('Second failure')
expect(composable.errorMessageValue.value).toBe('Second failure')
})
it('returns empty string when no error message', () => {
taskState.value = createTaskWithError()
expect(composable.errorMessageValue.value).toBe('')
})
it('only calls the copy handler when a message exists', () => {
taskState.value = createTaskWithMessages([
[
'execution_error',
createExecutionErrorMessage({ exception_message: 'Clipboard failure' })
]
])
taskState.value = createTaskWithError('Clipboard failure')
composable.copyErrorMessage()
expect(copyToClipboard).toHaveBeenCalledTimes(1)
expect(copyToClipboard).toHaveBeenCalledWith('Clipboard failure')
copyToClipboard.mockClear()
taskState.value = createTaskWithMessages([])
taskState.value = createTaskWithError()
composable.copyErrorMessage()
expect(copyToClipboard).not.toHaveBeenCalled()
})
it('prefers the detailed execution dialog when detail is available', () => {
const detail = createExecutionErrorMessage({
exception_message: 'Detailed failure'
})
taskState.value = createTaskWithMessages([['execution_error', detail]])
composable.reportJobError()
expect(showExecutionErrorDialog).toHaveBeenCalledTimes(1)
expect(showExecutionErrorDialog).toHaveBeenCalledWith(detail)
expect(showErrorDialog).not.toHaveBeenCalled()
})
it('shows error dialog with the error message', () => {
taskState.value = createTaskWithError('Queue job error')
composable.reportJobError()
expect(showErrorDialog).toHaveBeenCalledTimes(1)
const [errorArg, optionsArg] = showErrorDialog.mock.calls[0]
expect(errorArg).toBeInstanceOf(Error)
expect(errorArg.message).toBe('Queue job error')
expect(optionsArg).toEqual({ reportType: 'queueJobError' })
})
it('shows a fallback dialog when only a message is available', () => {
const message = 'Queue job error'
taskState.value = createTaskWithMessages([])
const valueSpy = vi
.spyOn(composable.errorMessageValue, 'value', 'get')
.mockReturnValue(message)
expect(composable.errorMessageValue.value).toBe(message)
composable.reportJobError()
expect(showExecutionErrorDialog).not.toHaveBeenCalled()
expect(showErrorDialog).toHaveBeenCalledTimes(1)
const [errorArg, optionsArg] = showErrorDialog.mock.calls[0]
expect(errorArg).toBeInstanceOf(Error)
expect(errorArg.message).toBe(message)
expect(optionsArg).toEqual({ reportType: 'queueJobError' })
valueSpy.mockRestore()
})
it('does nothing when no error could be extracted', () => {
taskState.value = createTaskWithMessages([])
it('does nothing when no error message exists', () => {
taskState.value = createTaskWithError()
composable.reportJobError()
expect(showExecutionErrorDialog).not.toHaveBeenCalled()
expect(showErrorDialog).not.toHaveBeenCalled()
})
})
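// Sketch of the contract these tests assume (the real extractErrorMessage in
// useJobErrorReporting may differ in detail): a null task or a missing
// errorMessage yields null, otherwise the message is returned as-is.
const extractErrorMessageSketch = (task: TaskItemImpl | null): string | null =>
  task?.errorMessage ?? null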

View File

@@ -58,12 +58,21 @@ vi.mock('@/platform/workflow/management/stores/workflowStore', () => ({
useWorkflowStore: () => workflowStoreMock
}))
const fetchJobDetailMock = vi.fn()
const extractWorkflowMock = vi.fn()
vi.mock('@/platform/remote/comfyui/jobs', () => ({
fetchJobDetail: (...args: any[]) => fetchJobDetailMock(...args),
extractWorkflow: (...args: any[]) => extractWorkflowMock(...args)
}))
const interruptMock = vi.fn()
const deleteItemMock = vi.fn()
const fetchApiMock = vi.fn()
vi.mock('@/scripts/api', () => ({
api: {
interrupt: (...args: any[]) => interruptMock(...args),
deleteItem: (...args: any[]) => deleteItemMock(...args)
deleteItem: (...args: any[]) => deleteItemMock(...args),
fetchApi: (...args: any[]) => fetchApiMock(...args)
}
}))
@@ -73,7 +82,7 @@ vi.mock('@/scripts/utils', () => ({
}))
const dialogServiceMock = {
showExecutionErrorDialog: vi.fn(),
showErrorDialog: vi.fn(),
prompt: vi.fn()
}
vi.mock('@/services/dialogService', () => ({
@@ -170,6 +179,9 @@ describe('useJobMenu', () => {
LoadVideo: { id: 'LoadVideo' },
LoadAudio: { id: 'LoadAudio' }
}
// Default: no workflow available via lazy loading
fetchJobDetailMock.mockResolvedValue(undefined)
extractWorkflowMock.mockReturnValue(undefined)
})
const setCurrentItem = (item: JobListItem | null) => {
@@ -179,10 +191,14 @@ describe('useJobMenu', () => {
it('opens workflow when workflow data exists', async () => {
const { openJobWorkflow } = mountJobMenu()
const workflow = { nodes: [] }
setCurrentItem(createJobItem({ id: '55', taskRef: { workflow } }))
// Mock lazy loading via fetchJobDetail + extractWorkflow
fetchJobDetailMock.mockResolvedValue({ id: '55' })
extractWorkflowMock.mockReturnValue(workflow)
setCurrentItem(createJobItem({ id: '55' }))
await openJobWorkflow()
expect(fetchJobDetailMock).toHaveBeenCalledWith(expect.any(Function), '55')
expect(workflowStoreMock.createTemporary).toHaveBeenCalledWith(
'Job 55.json',
workflow
@@ -257,11 +273,10 @@ describe('useJobMenu', () => {
it('copies error message from failed job entry', async () => {
const { jobMenuEntries } = mountJobMenu()
const error = { exception_message: 'boom' }
setCurrentItem(
createJobItem({
state: 'failed',
taskRef: { status: { messages: [['execution_error', error]] } } as any
taskRef: { errorMessage: 'Something went wrong' } as any
})
)
@@ -269,16 +284,15 @@ describe('useJobMenu', () => {
const entry = findActionEntry(jobMenuEntries.value, 'copy-error')
await entry?.onClick?.()
expect(copyToClipboardMock).toHaveBeenCalledWith('boom')
expect(copyToClipboardMock).toHaveBeenCalledWith('Something went wrong')
})
it('reports error via dialog when entry triggered', async () => {
const { jobMenuEntries } = mountJobMenu()
const error = { exception_message: 'bad', extra: 1 }
setCurrentItem(
createJobItem({
state: 'failed',
taskRef: { status: { messages: [['execution_error', error]] } } as any
taskRef: { errorMessage: 'Job failed with error' } as any
})
)
@@ -286,14 +300,22 @@ describe('useJobMenu', () => {
const entry = findActionEntry(jobMenuEntries.value, 'report-error')
entry?.onClick?.()
expect(dialogServiceMock.showExecutionErrorDialog).toHaveBeenCalledWith(
error
)
expect(dialogServiceMock.showErrorDialog).toHaveBeenCalledTimes(1)
const [errorArg, optionsArg] =
dialogServiceMock.showErrorDialog.mock.calls[0]
expect(errorArg).toBeInstanceOf(Error)
expect(errorArg.message).toBe('Job failed with error')
expect(optionsArg).toEqual({ reportType: 'queueJobError' })
})
it('ignores error actions when message missing', async () => {
const { jobMenuEntries } = mountJobMenu()
setCurrentItem(createJobItem({ state: 'failed', taskRef: { status: {} } }))
setCurrentItem(
createJobItem({
state: 'failed',
taskRef: { errorMessage: undefined } as any
})
)
await nextTick()
const copyEntry = findActionEntry(jobMenuEntries.value, 'copy-error')
@@ -302,7 +324,7 @@ describe('useJobMenu', () => {
await reportEntry?.onClick?.()
expect(copyToClipboardMock).not.toHaveBeenCalled()
expect(dialogServiceMock.showExecutionErrorDialog).not.toHaveBeenCalled()
expect(dialogServiceMock.showErrorDialog).not.toHaveBeenCalled()
})
const previewCases = [
@@ -477,12 +499,14 @@ describe('useJobMenu', () => {
})
it('exports workflow with default filename when prompting disabled', async () => {
const workflow = { foo: 'bar' }
fetchJobDetailMock.mockResolvedValue({ id: '7' })
extractWorkflowMock.mockReturnValue(workflow)
const { jobMenuEntries } = mountJobMenu()
setCurrentItem(
createJobItem({
id: '7',
state: 'completed',
taskRef: { workflow: { foo: 'bar' } }
state: 'completed'
})
)
@@ -502,11 +526,12 @@ describe('useJobMenu', () => {
it('prompts for filename when setting enabled', async () => {
settingStoreMock.get.mockReturnValue(true)
dialogServiceMock.prompt.mockResolvedValue('custom-name')
fetchJobDetailMock.mockResolvedValue({ id: 'job-1' })
extractWorkflowMock.mockReturnValue({})
const { jobMenuEntries } = mountJobMenu()
setCurrentItem(
createJobItem({
state: 'completed',
taskRef: { workflow: {} }
state: 'completed'
})
)
@@ -526,12 +551,13 @@ describe('useJobMenu', () => {
it('keeps existing json extension when exporting workflow', async () => {
settingStoreMock.get.mockReturnValue(true)
dialogServiceMock.prompt.mockResolvedValue('existing.json')
fetchJobDetailMock.mockResolvedValue({ id: '42' })
extractWorkflowMock.mockReturnValue({ foo: 'bar' })
const { jobMenuEntries } = mountJobMenu()
setCurrentItem(
createJobItem({
id: '42',
state: 'completed',
taskRef: { workflow: { foo: 'bar' } }
state: 'completed'
})
)
@@ -547,11 +573,12 @@ describe('useJobMenu', () => {
it('abandons export when prompt cancelled', async () => {
settingStoreMock.get.mockReturnValue(true)
dialogServiceMock.prompt.mockResolvedValue('')
fetchJobDetailMock.mockResolvedValue({ id: 'job-1' })
extractWorkflowMock.mockReturnValue({})
const { jobMenuEntries } = mountJobMenu()
setCurrentItem(
createJobItem({
state: 'completed',
taskRef: { workflow: {} }
state: 'completed'
})
)
@@ -671,7 +698,12 @@ describe('useJobMenu', () => {
it('returns failed menu entries with error actions', async () => {
const { jobMenuEntries } = mountJobMenu()
setCurrentItem(createJobItem({ state: 'failed', taskRef: { status: {} } }))
setCurrentItem(
createJobItem({
state: 'failed',
taskRef: { errorMessage: 'Some error' } as any
})
)
await nextTick()
expect(jobMenuEntries.value.map((entry) => entry.key)).toEqual([

View File

@@ -2,102 +2,165 @@ import { describe, it, expect } from 'vitest'
import { useResultGallery } from '@/composables/queue/useResultGallery'
import type { JobListItem } from '@/composables/queue/useJobList'
import type { ResultItemImpl } from '@/stores/queueStore'
type PreviewLike = { url: string; supportsPreview: boolean }
type PreviewLike = Pick<ResultItemImpl, 'url' | 'supportsPreview'>
/**
* Mock task interface matching what useResultGallery expects.
* Uses structural typing - no need to import the internal GalleryTask type.
*/
interface MockTask {
readonly promptId: string
readonly outputsCount?: number
readonly flatOutputs: readonly ResultItemImpl[]
readonly previewOutput?: ResultItemImpl
loadFullOutputs(
fetchApi: (url: string) => Promise<Response>
): Promise<MockTask>
}
const createPreview = (url: string, supportsPreview = true): PreviewLike => ({
url,
supportsPreview
})
const createTask = (preview?: PreviewLike) => ({
previewOutput: preview
const createMockTask = (
preview?: PreviewLike,
allOutputs?: PreviewLike[]
): MockTask => ({
previewOutput: preview as ResultItemImpl | undefined,
flatOutputs: (allOutputs ?? (preview ? [preview] : [])) as ResultItemImpl[],
outputsCount: 1,
promptId: `task-${Math.random().toString(36).slice(2)}`,
loadFullOutputs: async () => createMockTask(preview, allOutputs)
})
const createJobItem = (id: string, preview?: PreviewLike): JobListItem =>
const createJobItem = (
id: string,
preview?: PreviewLike,
taskRef?: MockTask
): JobListItem =>
({
id,
title: `Job ${id}`,
meta: '',
state: 'completed',
showClear: false,
taskRef: preview ? { previewOutput: preview } : undefined
taskRef: taskRef ?? (preview ? { previewOutput: preview } : undefined)
}) as JobListItem
describe('useResultGallery', () => {
it('collects only previewable outputs and preserves their order', () => {
it('collects only previewable outputs and preserves their order', async () => {
const previewable = [createPreview('p-1'), createPreview('p-2')]
const nonPreviewable = { url: 'skip-me', supportsPreview: false }
const tasks = [
createTask(previewable[0]),
createTask({ url: 'skip-me', supportsPreview: false }),
createTask(previewable[1]),
createTask()
createMockTask(previewable[0]),
createMockTask(nonPreviewable),
createMockTask(previewable[1]),
createMockTask()
]
const { galleryItems, galleryActiveIndex, onViewItem } = useResultGallery(
() => tasks
)
onViewItem(createJobItem('job-1', previewable[0]))
await onViewItem(createJobItem('job-1', previewable[0], tasks[0]))
expect(galleryItems.value).toEqual(previewable)
expect(galleryItems.value).toEqual([previewable[0]])
expect(galleryActiveIndex.value).toBe(0)
})
it('does not change state when there are no previewable tasks', () => {
it('does not change state when there are no previewable tasks', async () => {
const { galleryItems, galleryActiveIndex, onViewItem } = useResultGallery(
() => []
)
onViewItem(createJobItem('job-missing'))
await onViewItem(createJobItem('job-missing'))
expect(galleryItems.value).toEqual([])
expect(galleryActiveIndex.value).toBe(-1)
})
it('activates the index that matches the viewed preview URL', () => {
it('activates the index that matches the viewed preview URL', async () => {
const previewable = [
createPreview('p-1'),
createPreview('p-2'),
createPreview('p-3')
]
const tasks = previewable.map((preview) => createTask(preview))
const tasks = previewable.map((preview) => createMockTask(preview))
const { galleryItems, galleryActiveIndex, onViewItem } = useResultGallery(
() => tasks
)
onViewItem(createJobItem('job-2', createPreview('p-2')))
const targetPreview = createPreview('p-2')
await onViewItem(createJobItem('job-2', targetPreview, tasks[1]))
expect(galleryItems.value).toEqual(previewable)
expect(galleryActiveIndex.value).toBe(1)
expect(galleryItems.value).toEqual([previewable[1]])
expect(galleryActiveIndex.value).toBe(0)
})
it('defaults to the first entry when the clicked job lacks a preview', () => {
it('defaults to the first entry when the clicked job lacks a preview', async () => {
const previewable = [createPreview('p-1'), createPreview('p-2')]
const tasks = previewable.map((preview) => createTask(preview))
const tasks = previewable.map((preview) => createMockTask(preview))
const { galleryItems, galleryActiveIndex, onViewItem } = useResultGallery(
() => tasks
)
onViewItem(createJobItem('job-no-preview'))
await onViewItem(createJobItem('job-no-preview'))
expect(galleryItems.value).toEqual(previewable)
expect(galleryActiveIndex.value).toBe(0)
})
it('defaults to the first entry when no gallery item matches the preview URL', () => {
it('defaults to the first entry when no gallery item matches the preview URL', async () => {
const previewable = [createPreview('p-1'), createPreview('p-2')]
const tasks = previewable.map((preview) => createTask(preview))
const tasks = previewable.map((preview) => createMockTask(preview))
const { galleryItems, galleryActiveIndex, onViewItem } = useResultGallery(
() => tasks
)
onViewItem(createJobItem('job-mismatch', createPreview('missing')))
await onViewItem(createJobItem('job-mismatch', createPreview('missing')))
expect(galleryItems.value).toEqual(previewable)
expect(galleryActiveIndex.value).toBe(0)
})
it('loads full outputs when task has only preview outputs', async () => {
const previewOutput = createPreview('preview-1')
const fullOutputs = [
createPreview('full-1'),
createPreview('full-2'),
createPreview('full-3')
] as ResultItemImpl[]
const mockTask: MockTask = {
promptId: 'task-1',
previewOutput: previewOutput as ResultItemImpl,
flatOutputs: [previewOutput] as ResultItemImpl[],
outputsCount: 3, // More than 1 triggers lazy loading
loadFullOutputs: async () => ({
promptId: 'task-1',
previewOutput: previewOutput as ResultItemImpl,
flatOutputs: fullOutputs,
outputsCount: 3,
loadFullOutputs: async () => mockTask
})
}
const mockFetchApi = async () => new Response()
const { galleryItems, galleryActiveIndex, onViewItem } = useResultGallery(
() => [mockTask],
mockFetchApi
)
await onViewItem(createJobItem('job-1', previewOutput, mockTask))
expect(galleryItems.value).toEqual(fullOutputs)
expect(galleryActiveIndex.value).toBe(0)
})
})
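// Sketch of the decision the last test exercises (names here are assumptions;
// the composable's internal types may differ): full outputs are only fetched
// when the job reports more results than the single preview already loaded.
async function ensureFullOutputsSketch(
  task: MockTask,
  fetchApi: (url: string) => Promise<Response>
): Promise<MockTask> {
  const needsFullOutputs =
    (task.outputsCount ?? 0) > 1 && task.flatOutputs.length <= 1
  return needsFullOutputs ? await task.loadFullOutputs(fetchApi) : task
}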

View File

@@ -1,185 +0,0 @@
/**
* @fileoverview Unit tests for V2 to V1 history adapter.
*/
import { describe, expect, it } from 'vitest'
import { mapHistoryV2toHistory } from '@/platform/remote/comfyui/history/adapters/v2ToV1Adapter'
import { zRawHistoryItemV2 } from '@/platform/remote/comfyui/history/types/historyV2Types'
import type { HistoryResponseV2 } from '@/platform/remote/comfyui/history/types/historyV2Types'
import {
expectedV1Fixture,
historyV2Fixture
} from '@tests-ui/fixtures/historyFixtures'
import {
historyV2FiveItemsSorting,
historyV2MultipleNoTimestamp,
historyV2WithMissingTimestamp
} from '@tests-ui/fixtures/historySortingFixtures'
import type { HistoryTaskItem } from '@/platform/remote/comfyui/history/types/historyV1Types'
function findResultByPromptId(
result: HistoryTaskItem[],
promptId: string
): HistoryTaskItem {
const item = result.find((item) => item.prompt[1] === promptId)
if (!item) {
throw new Error(`Expected item with promptId ${promptId} not found`)
}
return item
}
describe('mapHistoryV2toHistory', () => {
describe('fixture validation', () => {
it('should have valid fixture data', () => {
// Validate all items in the fixture to ensure test data is correct
historyV2Fixture.history.forEach((item: unknown) => {
expect(() => zRawHistoryItemV2.parse(item)).not.toThrow()
})
})
})
describe('given a complete V2 history response with edge cases', () => {
const history = mapHistoryV2toHistory(historyV2Fixture)
it('should transform all items to V1 format with correct structure', () => {
expect(history).toEqual(expectedV1Fixture)
})
it('should add taskType "History" to all items', () => {
history.forEach((item) => {
expect(item.taskType).toBe('History')
})
})
it('should transform prompt to V1 tuple [priority, id, {}, extra_data, outputNodeIds]', () => {
const firstItem = history[0]
expect(firstItem.prompt[0]).toBe(1) // Synthetic priority based on timestamp
expect(firstItem.prompt[1]).toBe('complete-item-id')
expect(firstItem.prompt[2]).toEqual({}) // history v2 does not return this data
expect(firstItem.prompt[3]).toMatchObject({ client_id: 'test-client' })
expect(firstItem.prompt[4]).toEqual(['9'])
})
it('should handle missing optional status field', () => {
expect(history[1].prompt[1]).toBe('no-status-id')
expect(history[1].status).toBeUndefined()
})
it('should handle missing optional meta field', () => {
expect(history[2].prompt[1]).toBe('no-meta-id')
expect(history[2].meta).toBeUndefined()
})
it('should derive output node IDs from outputs object keys', () => {
const multiOutputItem = history[3]
expect(multiOutputItem.prompt[4]).toEqual(
expect.arrayContaining(['3', '9', '12'])
)
expect(multiOutputItem.prompt[4]).toHaveLength(3)
})
})
describe('given empty history array', () => {
it('should return empty array', () => {
const emptyResponse: HistoryResponseV2 = { history: [] }
const history = mapHistoryV2toHistory(emptyResponse)
expect(history).toEqual([])
})
})
describe('given empty outputs object', () => {
it('should return empty array for output node IDs', () => {
const v2Response: HistoryResponseV2 = {
history: [
{
prompt_id: 'test-id',
prompt: {
priority: 0,
prompt_id: 'test-id',
extra_data: { client_id: 'test' }
},
outputs: {}
}
]
}
const history = mapHistoryV2toHistory(v2Response)
expect(history[0].prompt[4]).toEqual([])
})
})
describe('given missing client_id', () => {
it('should accept history items without client_id', () => {
const v2Response: HistoryResponseV2 = {
history: [
{
prompt_id: 'test-id',
prompt: {
priority: 0,
prompt_id: 'test-id',
extra_data: {}
},
outputs: {}
}
]
}
const history = mapHistoryV2toHistory(v2Response)
expect(history[0].prompt[3].client_id).toBeUndefined()
})
})
describe('timestamp-based priority assignment', () => {
it('assigns priority 0 to items without execution_success timestamp', () => {
const result = mapHistoryV2toHistory(historyV2WithMissingTimestamp)
expect(result).toHaveLength(3)
const item1000 = findResultByPromptId(result, 'item-timestamp-1000')
const item2000 = findResultByPromptId(result, 'item-timestamp-2000')
const itemNoTimestamp = findResultByPromptId(result, 'item-no-timestamp')
expect(item2000.prompt[0]).toBe(2)
expect(item1000.prompt[0]).toBe(1)
expect(itemNoTimestamp.prompt[0]).toBe(0)
})
it('correctly sorts and assigns priorities for multiple items', () => {
const result = mapHistoryV2toHistory(historyV2FiveItemsSorting)
expect(result).toHaveLength(5)
const item1000 = findResultByPromptId(result, 'item-timestamp-1000')
const item2000 = findResultByPromptId(result, 'item-timestamp-2000')
const item3000 = findResultByPromptId(result, 'item-timestamp-3000')
const item4000 = findResultByPromptId(result, 'item-timestamp-4000')
const item5000 = findResultByPromptId(result, 'item-timestamp-5000')
expect(item5000.prompt[0]).toBe(5)
expect(item4000.prompt[0]).toBe(4)
expect(item3000.prompt[0]).toBe(3)
expect(item2000.prompt[0]).toBe(2)
expect(item1000.prompt[0]).toBe(1)
})
it('assigns priority 0 to all items when multiple items lack timestamps', () => {
const result = mapHistoryV2toHistory(historyV2MultipleNoTimestamp)
expect(result).toHaveLength(3)
const item1 = findResultByPromptId(result, 'item-no-timestamp-1')
const item2 = findResultByPromptId(result, 'item-no-timestamp-2')
const item3 = findResultByPromptId(result, 'item-no-timestamp-3')
expect(item1.prompt[0]).toBe(0)
expect(item2.prompt[0]).toBe(0)
expect(item3.prompt[0]).toBe(0)
})
})
})

View File

@@ -1,52 +0,0 @@
/**
* @fileoverview Unit tests for V1 history fetcher.
*/
import { describe, expect, it, vi } from 'vitest'
import { fetchHistoryV1 } from '@/platform/remote/comfyui/history/fetchers/fetchHistoryV1'
import { historyV1RawResponse } from '@tests-ui/fixtures/historyFixtures'
describe('fetchHistoryV1', () => {
const mockFetchApi = vi.fn().mockResolvedValue({
json: async () => historyV1RawResponse
})
it('should fetch from /history endpoint with default max_items', async () => {
await fetchHistoryV1(mockFetchApi)
expect(mockFetchApi).toHaveBeenCalledWith('/history?max_items=200')
})
it('should fetch with custom max_items parameter', async () => {
await fetchHistoryV1(mockFetchApi, 50)
expect(mockFetchApi).toHaveBeenCalledWith('/history?max_items=50')
})
it('should transform object response to array with taskType and preserve fields', async () => {
const result = await fetchHistoryV1(mockFetchApi)
expect(result.History).toHaveLength(2)
result.History.forEach((item) => {
expect(item.taskType).toBe('History')
})
expect(result.History[0]).toMatchObject({
taskType: 'History',
prompt: [24, 'complete-item-id', {}, expect.any(Object), ['9']],
outputs: expect.any(Object),
status: expect.any(Object),
meta: expect.any(Object)
})
})
it('should handle empty response object', async () => {
const emptyMock = vi.fn().mockResolvedValue({
json: async () => ({})
})
const result = await fetchHistoryV1(emptyMock)
expect(result.History).toEqual([])
})
})

View File

@@ -1,41 +0,0 @@
/**
* @fileoverview Unit tests for V2 history fetcher.
*/
import { describe, expect, it, vi } from 'vitest'
import { fetchHistoryV2 } from '@/platform/remote/comfyui/history/fetchers/fetchHistoryV2'
import {
expectedV1Fixture,
historyV2Fixture
} from '@tests-ui/fixtures/historyFixtures'
describe('fetchHistoryV2', () => {
const mockFetchApi = vi.fn().mockResolvedValue({
json: async () => historyV2Fixture
})
it('should fetch from /history_v2 endpoint with default max_items', async () => {
await fetchHistoryV2(mockFetchApi)
expect(mockFetchApi).toHaveBeenCalledWith('/history_v2?max_items=200')
})
it('should fetch with custom max_items parameter', async () => {
await fetchHistoryV2(mockFetchApi, 50)
expect(mockFetchApi).toHaveBeenCalledWith('/history_v2?max_items=50')
})
it('should adapt V2 response to V1-compatible format', async () => {
const result = await fetchHistoryV2(mockFetchApi)
expect(result.History).toEqual(expectedV1Fixture)
expect(result).toHaveProperty('History')
expect(Array.isArray(result.History)).toBe(true)
result.History.forEach((item) => {
expect(item.taskType).toBe('History')
expect(item.prompt).toHaveLength(5)
})
})
})

View File

@@ -1,333 +1,129 @@
/**
* @fileoverview Tests for history reconciliation (V1 and V2)
* @fileoverview Tests for job list reconciliation
*/
import { beforeEach, describe, expect, it, vi } from 'vitest'
import { describe, expect, it } from 'vitest'
import { reconcileHistory } from '@/platform/remote/comfyui/history/reconciliation'
import type { TaskItem } from '@/schemas/apiSchema'
import { reconcileJobs } from '@/platform/remote/comfyui/history/reconciliation'
import type { JobListItem } from '@/platform/remote/comfyui/jobs/types/jobTypes'
// Mock distribution types
vi.mock('@/platform/distribution/types', () => ({
isCloud: false,
isDesktop: true
}))
function createHistoryItem(promptId: string, queueIndex = 0): TaskItem {
function createJob(id: string, createTime = 0, priority?: number): JobListItem {
return {
taskType: 'History',
prompt: [queueIndex, promptId, {}, {}, []],
status: { status_str: 'success', completed: true, messages: [] },
outputs: {}
id,
status: 'completed',
create_time: createTime,
priority: priority ?? createTime
}
}
function getAllPromptIds(result: TaskItem[]): string[] {
return result.map((item) => item.prompt[1])
function getAllIds(result: JobListItem[]): string[] {
return result.map((item) => item.id)
}
describe('reconcileHistory (V1)', () => {
beforeEach(async () => {
const distTypes = await import('@/platform/distribution/types')
vi.mocked(distTypes).isCloud = false
})
describe('when filtering by queueIndex', () => {
it('should retain items with queueIndex greater than lastKnownQueueIndex', () => {
const serverHistory = [
createHistoryItem('new-1', 11),
createHistoryItem('new-2', 10),
createHistoryItem('old', 5)
]
const clientHistory = [createHistoryItem('old', 5)]
const result = reconcileHistory(serverHistory, clientHistory, 10, 9)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(3)
expect(promptIds).toContain('new-1')
expect(promptIds).toContain('new-2')
expect(promptIds).toContain('old')
})
it('should evict items with queueIndex less than or equal to lastKnownQueueIndex', () => {
const serverHistory = [
createHistoryItem('new', 11),
createHistoryItem('existing', 10),
createHistoryItem('old-should-not-appear', 5)
]
const clientHistory = [createHistoryItem('existing', 10)]
const result = reconcileHistory(serverHistory, clientHistory, 10, 10)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(2)
expect(promptIds).toContain('new')
expect(promptIds).toContain('existing')
expect(promptIds).not.toContain('old-should-not-appear')
})
it('should retain all server items when lastKnownQueueIndex is undefined', () => {
const serverHistory = [
createHistoryItem('item-1', 5),
createHistoryItem('item-2', 4)
]
const result = reconcileHistory(serverHistory, [], 10, undefined)
expect(result).toHaveLength(2)
expect(result[0].prompt[1]).toBe('item-1')
expect(result[1].prompt[1]).toBe('item-2')
})
})
describe('when reconciling with existing client items', () => {
it('should retain client items that still exist on server', () => {
const serverHistory = [
createHistoryItem('new', 11),
createHistoryItem('existing-1', 9),
createHistoryItem('existing-2', 8)
]
const clientHistory = [
createHistoryItem('existing-1', 9),
createHistoryItem('existing-2', 8)
]
const result = reconcileHistory(serverHistory, clientHistory, 10, 10)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(3)
expect(promptIds).toContain('new')
expect(promptIds).toContain('existing-1')
expect(promptIds).toContain('existing-2')
})
it('should evict client items that no longer exist on server', () => {
const serverHistory = [
createHistoryItem('new', 11),
createHistoryItem('keep', 9)
]
const clientHistory = [
createHistoryItem('keep', 9),
createHistoryItem('removed-from-server', 8)
]
const result = reconcileHistory(serverHistory, clientHistory, 10, 10)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(2)
expect(promptIds).toContain('new')
expect(promptIds).toContain('keep')
expect(promptIds).not.toContain('removed-from-server')
})
})
describe('when limiting the result count', () => {
it('should respect the maxItems constraint', () => {
const serverHistory = Array.from({ length: 10 }, (_, i) =>
createHistoryItem(`item-${i}`, 20 + i)
)
const result = reconcileHistory(serverHistory, [], 5, 15)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(5)
})
it('should evict lowest priority items when exceeding capacity', () => {
const serverHistory = [
createHistoryItem('new-1', 13),
createHistoryItem('new-2', 12),
createHistoryItem('new-3', 11),
createHistoryItem('existing', 9)
]
const clientHistory = [createHistoryItem('existing', 9)]
const result = reconcileHistory(serverHistory, clientHistory, 2, 10)
expect(result).toHaveLength(2)
expect(result[0].prompt[1]).toBe('new-1')
expect(result[1].prompt[1]).toBe('new-2')
})
})
describe('when handling empty collections', () => {
it('should return all server items when client history is empty', () => {
const serverHistory = [
createHistoryItem('item-1', 10),
createHistoryItem('item-2', 9)
]
const result = reconcileHistory(serverHistory, [], 10, 8)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(2)
})
it('should return empty result when server history is empty', () => {
const clientHistory = [createHistoryItem('item-1', 5)]
const result = reconcileHistory([], clientHistory, 10, 5)
expect(result).toHaveLength(0)
})
it('should return empty result when both collections are empty', () => {
const result = reconcileHistory([], [], 10, undefined)
expect(result).toHaveLength(0)
})
})
})
describe('reconcileHistory (V2/Cloud)', () => {
beforeEach(async () => {
const distTypes = await import('@/platform/distribution/types')
vi.mocked(distTypes).isCloud = true
})
describe('reconcileJobs', () => {
describe('when adding new items from server', () => {
it('should retain items with promptIds not present in client history', () => {
it('should retain items with IDs not present in client history', () => {
const serverHistory = [
createHistoryItem('new-item'),
createHistoryItem('existing-item')
createJob('new-item', 10),
createJob('existing-item', 5)
]
const clientHistory = [createHistoryItem('existing-item')]
const clientHistory = [createJob('existing-item', 5)]
const result = reconcileHistory(serverHistory, clientHistory, 10)
const result = reconcileJobs(serverHistory, clientHistory, 10)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(2)
expect(promptIds).toContain('new-item')
expect(promptIds).toContain('existing-item')
const ids = getAllIds(result)
expect(ids).toHaveLength(2)
expect(ids).toContain('new-item')
expect(ids).toContain('existing-item')
})
it('should respect priority ordering when retaining multiple new items', () => {
it('should respect create_time ordering when adding multiple new items', () => {
const serverHistory = [
createHistoryItem('new-1'),
createHistoryItem('new-2'),
createHistoryItem('existing')
createJob('new-1', 20),
createJob('new-2', 15),
createJob('existing', 10)
]
const clientHistory = [createHistoryItem('existing')]
const clientHistory = [createJob('existing', 10)]
const result = reconcileHistory(serverHistory, clientHistory, 10)
const result = reconcileJobs(serverHistory, clientHistory, 10)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(3)
expect(promptIds).toContain('new-1')
expect(promptIds).toContain('new-2')
expect(promptIds).toContain('existing')
expect(result).toHaveLength(3)
expect(result[0].id).toBe('new-1')
expect(result[1].id).toBe('new-2')
expect(result[2].id).toBe('existing')
})
})
describe('when reconciling with existing client items', () => {
it('should retain client items that still exist on server', () => {
const serverHistory = [
createHistoryItem('item-1'),
createHistoryItem('item-2')
]
const clientHistory = [
createHistoryItem('item-1'),
createHistoryItem('item-2')
]
const serverHistory = [createJob('item-1', 10), createJob('item-2', 5)]
const clientHistory = [createJob('item-1', 10), createJob('item-2', 5)]
const result = reconcileHistory(serverHistory, clientHistory, 10)
const result = reconcileJobs(serverHistory, clientHistory, 10)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(2)
expect(promptIds).toContain('item-1')
expect(promptIds).toContain('item-2')
const ids = getAllIds(result)
expect(ids).toHaveLength(2)
expect(ids).toContain('item-1')
expect(ids).toContain('item-2')
})
it('should evict client items that no longer exist on server', () => {
const serverHistory = [createHistoryItem('item-1')]
const clientHistory = [
createHistoryItem('item-1'),
createHistoryItem('old-item')
]
const serverHistory = [createJob('item-1', 10)]
const clientHistory = [createJob('item-1', 10), createJob('old-item', 5)]
const result = reconcileHistory(serverHistory, clientHistory, 10)
const result = reconcileJobs(serverHistory, clientHistory, 10)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(1)
expect(promptIds).toContain('item-1')
expect(promptIds).not.toContain('old-item')
const ids = getAllIds(result)
expect(ids).toHaveLength(1)
expect(ids).toContain('item-1')
expect(ids).not.toContain('old-item')
})
})
describe('when detecting new items by promptId', () => {
it('should retain new items regardless of queueIndex values', () => {
const serverHistory = [
createHistoryItem('existing', 100),
createHistoryItem('new-item', 50)
]
const clientHistory = [createHistoryItem('existing', 100)]
const result = reconcileHistory(serverHistory, clientHistory, 10)
const promptIds = getAllPromptIds(result)
expect(promptIds).toContain('new-item')
expect(promptIds).toContain('existing')
})
})
describe('when limiting the result count', () => {
it('should respect the maxItems constraint', () => {
const serverHistory = Array.from({ length: 10 }, (_, i) =>
createHistoryItem(`server-${i}`)
)
const clientHistory = Array.from({ length: 5 }, (_, i) =>
createHistoryItem(`client-${i}`)
createJob(`item-${i}`, 100 - i)
)
const result = reconcileHistory(serverHistory, clientHistory, 5)
const result = reconcileJobs(serverHistory, [], 5)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(5)
expect(result).toHaveLength(5)
})
it('should evict lowest priority items when exceeding capacity', () => {
const serverHistory = [
createHistoryItem('new-1'),
createHistoryItem('new-2'),
createHistoryItem('existing')
createJob('high', 30),
createJob('medium', 20),
createJob('low', 10)
]
const clientHistory = [createHistoryItem('existing')]
const result = reconcileHistory(serverHistory, clientHistory, 2)
const result = reconcileJobs(serverHistory, [], 2)
expect(result).toHaveLength(2)
expect(result[0].prompt[1]).toBe('new-1')
expect(result[1].prompt[1]).toBe('new-2')
expect(result[0].id).toBe('high')
expect(result[1].id).toBe('medium')
})
})
describe('when handling empty collections', () => {
it('should return all server items when client history is empty', () => {
const serverHistory = [
createHistoryItem('item-1'),
createHistoryItem('item-2')
]
const serverHistory = [createJob('item-1', 10), createJob('item-2', 5)]
const result = reconcileHistory(serverHistory, [], 10)
const result = reconcileJobs(serverHistory, [], 10)
expect(result).toHaveLength(2)
expect(result[0].prompt[1]).toBe('item-1')
expect(result[1].prompt[1]).toBe('item-2')
})
it('should return empty result when server history is empty', () => {
const clientHistory = [
createHistoryItem('item-1'),
createHistoryItem('item-2')
]
const clientHistory = [createJob('item-1', 10)]
const result = reconcileHistory([], clientHistory, 10)
const result = reconcileJobs([], clientHistory, 10)
expect(result).toHaveLength(0)
})
it('should return empty result when both collections are empty', () => {
const result = reconcileHistory([], [], 10)
const result = reconcileJobs([], [], 10)
expect(result).toHaveLength(0)
})
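Taken together, these cases pin down a small contract for reconcileJobs: the server list is authoritative, a cached client copy of a surviving job is reused, and the result is capped at maxItems in descending priority order. A minimal sketch of that contract, assuming only the id and priority fields exercised above (not the shipped implementation):

interface JobLike {
  id: string
  priority: number
}

// Sketch of the behaviour the tests above describe; the real module may differ.
function reconcileJobs<T extends JobLike>(
  serverJobs: T[],
  clientJobs: T[],
  maxItems: number
): T[] {
  // The server is the source of truth: anything absent from it is evicted.
  const clientById = new Map<string, T>()
  for (const job of clientJobs) clientById.set(job.id, job)
  return (
    serverJobs
      // Reuse the client copy when one exists (it may already hold cached detail).
      .map((job) => clientById.get(job.id) ?? job)
      // Highest priority first, then cap to the configured history size.
      .sort((a, b) => b.priority - a.priority)
      .slice(0, Math.max(0, maxItems))
  )
}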

View File

@@ -1,7 +1,7 @@
import { describe, expect, it, vi } from 'vitest'
import type { ComfyWorkflowJSON } from '@/platform/workflow/validation/schemas/workflowSchema'
import { getWorkflowFromHistory } from '@/platform/workflow/cloud/getWorkflowFromHistory'
import { fetchJobDetail, extractWorkflow } from '@/platform/remote/comfyui/jobs'
const mockWorkflow: ComfyWorkflowJSON = {
id: 'test-workflow-id',
@@ -16,75 +16,61 @@ const mockWorkflow: ComfyWorkflowJSON = {
version: 0.4
}
const mockHistoryResponse = {
'test-prompt-id': {
prompt: {
priority: 1,
prompt_id: 'test-prompt-id',
extra_data: {
client_id: 'test-client',
extra_pnginfo: {
workflow: mockWorkflow
}
}
},
outputs: {},
status: {
status_str: 'success',
completed: true,
messages: []
// Jobs API detail response structure (matches actual /jobs/{id} response)
const mockJobDetailResponse = {
id: 'test-prompt-id',
status: 'completed',
create_time: 1234567890,
execution_time: 18.5,
extra_data: {
extra_pnginfo: {
workflow: mockWorkflow
}
},
prompt: {},
outputs: {
'20': {
images: [
{ filename: 'test.png', subfolder: '', type: 'output' },
{ filename: 'test2.png', subfolder: '', type: 'output' }
]
}
}
}
describe('getWorkflowFromHistory', () => {
it('should fetch workflow from /history_v2/{prompt_id} endpoint', async () => {
describe('fetchJobDetail', () => {
it('should fetch job detail from /jobs/{prompt_id} endpoint', async () => {
const mockFetchApi = vi.fn().mockResolvedValue({
json: async () => mockHistoryResponse
ok: true,
json: async () => mockJobDetailResponse
})
await getWorkflowFromHistory(mockFetchApi, 'test-prompt-id')
await fetchJobDetail(mockFetchApi, 'test-prompt-id')
expect(mockFetchApi).toHaveBeenCalledWith('/history_v2/test-prompt-id')
expect(mockFetchApi).toHaveBeenCalledWith('/jobs/test-prompt-id')
})
it('should extract and return workflow from response', async () => {
it('should return job detail with workflow and outputs', async () => {
const mockFetchApi = vi.fn().mockResolvedValue({
json: async () => mockHistoryResponse
ok: true,
json: async () => mockJobDetailResponse
})
const result = await getWorkflowFromHistory(mockFetchApi, 'test-prompt-id')
const result = await fetchJobDetail(mockFetchApi, 'test-prompt-id')
expect(result).toEqual(mockWorkflow)
expect(result).toBeDefined()
expect(result?.id).toBe('test-prompt-id')
expect(result?.outputs).toEqual(mockJobDetailResponse.outputs)
expect(result?.extra_data).toBeDefined()
})
it('should return undefined when prompt_id not found in response', async () => {
it('should return undefined when job not found (non-OK response)', async () => {
const mockFetchApi = vi.fn().mockResolvedValue({
json: async () => ({})
ok: false,
status: 404
})
const result = await getWorkflowFromHistory(mockFetchApi, 'nonexistent-id')
expect(result).toBeUndefined()
})
it('should return undefined when workflow is missing from extra_pnginfo', async () => {
const mockFetchApi = vi.fn().mockResolvedValue({
json: async () => ({
'test-prompt-id': {
prompt: {
priority: 1,
prompt_id: 'test-prompt-id',
extra_data: {
client_id: 'test-client'
}
},
outputs: {}
}
})
})
const result = await getWorkflowFromHistory(mockFetchApi, 'test-prompt-id')
const result = await fetchJobDetail(mockFetchApi, 'nonexistent-id')
expect(result).toBeUndefined()
})
@@ -92,19 +78,45 @@ describe('getWorkflowFromHistory', () => {
it('should handle fetch errors gracefully', async () => {
const mockFetchApi = vi.fn().mockRejectedValue(new Error('Network error'))
const result = await getWorkflowFromHistory(mockFetchApi, 'test-prompt-id')
const result = await fetchJobDetail(mockFetchApi, 'test-prompt-id')
expect(result).toBeUndefined()
})
it('should handle malformed JSON responses', async () => {
const mockFetchApi = vi.fn().mockResolvedValue({
ok: true,
json: async () => {
throw new Error('Invalid JSON')
}
})
const result = await getWorkflowFromHistory(mockFetchApi, 'test-prompt-id')
const result = await fetchJobDetail(mockFetchApi, 'test-prompt-id')
expect(result).toBeUndefined()
})
})
describe('extractWorkflow', () => {
it('should extract workflow from job detail', () => {
const result = extractWorkflow(mockJobDetailResponse as any)
expect(result).toEqual(mockWorkflow)
})
it('should return undefined when job is undefined', () => {
const result = extractWorkflow(undefined)
expect(result).toBeUndefined()
})
it('should return undefined when workflow is missing extra_pnginfo', () => {
const jobWithoutWorkflow = {
...mockJobDetailResponse,
extra_data: {}
}
const result = extractWorkflow(jobWithoutWorkflow as any)
expect(result).toBeUndefined()
})
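The behaviour asserted here boils down to a short sketch: fetchJobDetail resolves to undefined on non-OK responses, thrown fetch errors, and malformed JSON, while extractWorkflow simply reads extra_data.extra_pnginfo.workflow. The JobDetail shape is approximated from mockJobDetailResponse; the canonical type ships with the jobs module.

// Approximate shape, reconstructed from the mock response above.
interface JobDetailSketch {
  id: string
  status: string
  extra_data?: { extra_pnginfo?: { workflow?: ComfyWorkflowJSON } }
  outputs?: Record<string, unknown>
}

async function fetchJobDetail(
  fetchApi: (route: string) => Promise<{ ok: boolean; json: () => Promise<unknown> }>,
  promptId: string
): Promise<JobDetailSketch | undefined> {
  try {
    const response = await fetchApi(`/jobs/${promptId}`)
    if (!response.ok) return undefined
    return (await response.json()) as JobDetailSketch
  } catch {
    // Network failures and invalid JSON both resolve to undefined.
    return undefined
  }
}

function extractWorkflow(
  job: JobDetailSketch | undefined
): ComfyWorkflowJSON | undefined {
  return job?.extra_data?.extra_pnginfo?.workflow
}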

View File

@@ -3,12 +3,7 @@ import { beforeEach, describe, expect, it, vi } from 'vitest'
import { useAssetsStore } from '@/stores/assetsStore'
import { api } from '@/scripts/api'
import type {
HistoryTaskItem,
TaskPrompt,
TaskStatus,
TaskOutput
} from '@/schemas/apiSchema'
import type { JobListItem } from '@/platform/remote/comfyui/jobs/types/jobTypes'
// Mock the api module
vi.mock('@/scripts/api', () => ({
@@ -50,24 +45,19 @@ vi.mock('@/stores/queueStore', () => ({
url: string
}
| undefined
public promptId: string
constructor(
public taskType: string,
public prompt: TaskPrompt,
public status: TaskStatus | undefined,
public outputs: TaskOutput
) {
this.flatOutputs = this.outputs
? [
{
supportsPreview: true,
filename: 'test.png',
subfolder: '',
type: 'output',
url: 'http://test.com/test.png'
}
]
: []
constructor(public job: JobListItem) {
this.promptId = job.id
this.flatOutputs = [
{
supportsPreview: true,
filename: 'test.png',
subfolder: '',
type: 'output',
url: 'http://test.com/test.png'
}
]
this.previewOutput = this.flatOutputs[0]
}
}
@@ -79,17 +69,17 @@ vi.mock('@/platform/assets/composables/media/assetMappers', () => ({
id: `${type}-${index}`,
name,
size: 0,
created_at: new Date(Date.now() - index * 1000).toISOString(), // Unique timestamps
created_at: new Date(Date.now() - index * 1000).toISOString(),
tags: [type],
preview_url: `http://test.com/${name}`
})),
mapTaskOutputToAssetItem: vi.fn((task, output) => {
const index = parseInt(task.prompt[1].split('_')[1]) || 0
const index = parseInt(task.promptId.split('_')[1]) || 0
return {
id: task.prompt[1], // Use promptId as asset ID
id: task.promptId,
name: output.filename,
size: 0,
created_at: new Date(Date.now() - index * 1000).toISOString(), // Unique timestamps
created_at: new Date(Date.now() - index * 1000).toISOString(),
tags: ['output'],
preview_url: output.url,
user_metadata: {}
@@ -100,43 +90,18 @@ vi.mock('@/platform/assets/composables/media/assetMappers', () => ({
describe('assetsStore - Refactored (Option A)', () => {
let store: ReturnType<typeof useAssetsStore>
// Helper function to create mock history items
const createMockHistoryItem = (index: number): HistoryTaskItem => ({
taskType: 'History' as const,
prompt: [
1000 + index, // queueIndex
`prompt_${index}`, // promptId
{}, // promptInputs
{
extra_pnginfo: {
workflow: {
last_node_id: 1,
last_link_id: 1,
nodes: [],
links: [],
groups: [],
config: {},
version: 1
}
}
}, // extraData
[] // outputsToExecute
],
status: {
status_str: 'success' as const,
completed: true,
messages: []
},
outputs: {
'1': {
images: [
{
filename: `output_${index}.png`,
subfolder: '',
type: 'output' as const
}
]
}
// Helper function to create mock job items
const createMockJobItem = (index: number): JobListItem => ({
id: `prompt_${index}`,
status: 'completed',
create_time: 1000 + index,
update_time: 1000 + index,
last_state_update: 1000 + index,
priority: 1000 + index,
preview_output: {
filename: `output_${index}.png`,
subfolder: '',
type: 'output'
}
})
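For orientation, the JobListItem shape these fixtures depend on is roughly the following (reconstructed from this diff; the canonical definition lives in jobTypes.ts). The list endpoint carries only preview_output, which is what keeps each history entry down to a few hundred bytes:

// Approximation of the fields exercised in these tests; other fields and
// statuses may exist on the real type.
interface JobListItemSketch {
  id: string
  status: 'pending' | 'in_progress' | 'completed'
  create_time: number
  update_time?: number
  last_state_update?: number
  priority: number
  preview_output?: {
    filename: string
    subfolder: string
    type: string
  }
}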
@@ -149,11 +114,9 @@ describe('assetsStore - Refactored (Option A)', () => {
describe('Initial Load', () => {
it('should load initial history items', async () => {
const mockHistory = Array.from({ length: 10 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValue({
History: mockHistory
})
vi.mocked(api.getHistory).mockResolvedValue(mockHistory)
await store.updateHistory()
@@ -166,11 +129,9 @@ describe('assetsStore - Refactored (Option A)', () => {
it('should set hasMoreHistory to true when batch is full', async () => {
const mockHistory = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValue({
History: mockHistory
})
vi.mocked(api.getHistory).mockResolvedValue(mockHistory)
await store.updateHistory()
@@ -194,11 +155,9 @@ describe('assetsStore - Refactored (Option A)', () => {
it('should accumulate items when loading more', async () => {
// First batch - full BATCH_SIZE
const firstBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: firstBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(firstBatch)
await store.updateHistory()
expect(store.historyAssets).toHaveLength(200)
@@ -206,11 +165,9 @@ describe('assetsStore - Refactored (Option A)', () => {
// Second batch - different items
const secondBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(200 + i)
createMockJobItem(200 + i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: secondBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(secondBatch)
await store.loadMoreHistory()
@@ -222,24 +179,20 @@ describe('assetsStore - Refactored (Option A)', () => {
it('should prevent duplicate items during pagination', async () => {
// First batch - full BATCH_SIZE
const firstBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: firstBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(firstBatch)
await store.updateHistory()
expect(store.historyAssets).toHaveLength(200)
// Second batch with some duplicates
const secondBatch = [
createMockHistoryItem(2), // Duplicate
createMockHistoryItem(5), // Duplicate
...Array.from({ length: 198 }, (_, i) => createMockHistoryItem(200 + i)) // New
createMockJobItem(2), // Duplicate
createMockJobItem(5), // Duplicate
...Array.from({ length: 198 }, (_, i) => createMockJobItem(200 + i)) // New
]
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: secondBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(secondBatch)
await store.loadMoreHistory()
@@ -255,11 +208,9 @@ describe('assetsStore - Refactored (Option A)', () => {
it('should stop loading when no more items', async () => {
// First batch - less than BATCH_SIZE
const firstBatch = Array.from({ length: 50 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: firstBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(firstBatch)
await store.updateHistory()
expect(store.hasMoreHistory).toBe(false)
@@ -274,11 +225,9 @@ describe('assetsStore - Refactored (Option A)', () => {
it('should handle race conditions with concurrent loads', async () => {
// Setup initial state with full batch
const initialBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: initialBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(initialBatch)
await store.updateHistory()
expect(store.hasMoreHistory).toBe(true)
@@ -286,12 +235,10 @@ describe('assetsStore - Refactored (Option A)', () => {
vi.mocked(api.getHistory).mockClear()
// Setup slow API response
let resolveLoadMore: (value: { History: HistoryTaskItem[] }) => void
const loadMorePromise = new Promise<{ History: HistoryTaskItem[] }>(
(resolve) => {
resolveLoadMore = resolve
}
)
let resolveLoadMore: (value: JobListItem[]) => void
const loadMorePromise = new Promise<JobListItem[]>((resolve) => {
resolveLoadMore = resolve
})
vi.mocked(api.getHistory).mockReturnValueOnce(loadMorePromise)
// Start first loadMore
@@ -302,9 +249,9 @@ describe('assetsStore - Refactored (Option A)', () => {
// Resolve
const secondBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(200 + i)
createMockJobItem(200 + i)
)
resolveLoadMore!({ History: secondBatch })
resolveLoadMore!(secondBatch)
await Promise.all([firstLoad, secondLoad])
@@ -317,21 +264,17 @@ describe('assetsStore - Refactored (Option A)', () => {
// Initial load
const firstBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: firstBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(firstBatch)
await store.updateHistory()
// Load additional batches
for (let batch = 1; batch < BATCH_COUNT; batch++) {
const items = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(batch * 200 + i)
createMockJobItem(batch * 200 + i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: items
})
vi.mocked(api.getHistory).mockResolvedValueOnce(items)
await store.loadMoreHistory()
}
@@ -344,21 +287,17 @@ describe('assetsStore - Refactored (Option A)', () => {
it('should maintain date sorting after pagination', async () => {
// First batch
const firstBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: firstBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(firstBatch)
await store.updateHistory()
// Second batch
const secondBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(200 + i)
createMockJobItem(200 + i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: secondBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(secondBatch)
await store.loadMoreHistory()
@@ -375,11 +314,9 @@ describe('assetsStore - Refactored (Option A)', () => {
it('should preserve existing data when loadMore fails', async () => {
// First successful load - full batch
const firstBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: firstBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(firstBatch)
await store.updateHistory()
expect(store.historyAssets).toHaveLength(200)
@@ -399,11 +336,9 @@ describe('assetsStore - Refactored (Option A)', () => {
it('should clear error state on successful retry', async () => {
// First load succeeds
const firstBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: firstBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(firstBatch)
await store.updateHistory()
@@ -416,11 +351,9 @@ describe('assetsStore - Refactored (Option A)', () => {
// Third load succeeds
const thirdBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(200 + i)
createMockJobItem(200 + i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: thirdBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(thirdBatch)
await store.loadMoreHistory()
@@ -447,11 +380,9 @@ describe('assetsStore - Refactored (Option A)', () => {
for (let batch = 0; batch < batches; batch++) {
const items = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(batch * 200 + i)
createMockJobItem(batch * 200 + i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: items
})
vi.mocked(api.getHistory).mockResolvedValueOnce(items)
if (batch === 0) {
await store.updateHistory()
@@ -473,11 +404,9 @@ describe('assetsStore - Refactored (Option A)', () => {
// Load items beyond limit
for (let batch = 0; batch < 6; batch++) {
const items = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(batch * 200 + i)
createMockJobItem(batch * 200 + i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: items
})
vi.mocked(api.getHistory).mockResolvedValueOnce(items)
if (batch === 0) {
await store.updateHistory()
@@ -500,11 +429,9 @@ describe('assetsStore - Refactored (Option A)', () => {
describe('jobDetailView Support', () => {
it('should include outputCount and allOutputs in user_metadata', async () => {
const mockHistory = Array.from({ length: 5 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValue({
History: mockHistory
})
vi.mocked(api.getHistory).mockResolvedValue(mockHistory)
await store.updateHistory()

View File

@@ -1,34 +1,39 @@
import { createPinia, setActivePinia } from 'pinia'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import type {
HistoryTaskItem,
PendingTaskItem,
RunningTaskItem,
TaskOutput,
TaskPrompt,
TaskStatus
} from '@/schemas/apiSchema'
import type { JobListItem } from '@/platform/remote/comfyui/jobs/types/jobTypes'
import type { TaskOutput } from '@/schemas/apiSchema'
import { api } from '@/scripts/api'
import { TaskItemImpl, useQueueStore } from '@/stores/queueStore'
// Fixture factories
const createTaskPrompt = (
queueIndex: number,
promptId: string,
inputs: Record<string, any> = {},
extraData: Record<string, any> = {},
outputsToExecute: any[] = []
): TaskPrompt => [queueIndex, promptId, inputs, extraData, outputsToExecute]
// Fixture factory for JobListItem
function createJob(
id: string,
status: JobListItem['status'],
createTime: number = Date.now(),
priority?: number
): JobListItem {
return {
id,
status,
create_time: createTime,
update_time: createTime,
last_state_update: createTime,
priority: priority ?? createTime
}
}
const createTaskStatus = (
statusStr: 'success' | 'error' = 'success',
messages: any[] = []
): TaskStatus => ({
status_str: statusStr,
completed: true,
messages
})
function createRunningJob(createTime: number, id: string): JobListItem {
return createJob(id, 'in_progress', createTime)
}
function createPendingJob(createTime: number, id: string): JobListItem {
return createJob(id, 'pending', createTime)
}
function createHistoryJob(createTime: number, id: string): JobListItem {
return createJob(id, 'completed', createTime)
}
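These factories feed the new two-argument TaskItemImpl constructor used throughout this file. The mapping the assertions below rely on is roughly the following sketch (anything beyond promptId and queueIndex is an assumption here):

// Sketch of how TaskItemImpl is assumed to derive its identity fields from a job.
class TaskItemSketch {
  readonly promptId: string
  readonly queueIndex: number

  constructor(
    readonly job: JobListItem,
    readonly outputs: TaskOutput = {}
  ) {
    this.promptId = job.id
    // queueIndex mirrors job.priority, which these fixtures derive from create_time.
    this.queueIndex = job.priority
  }
}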
const createTaskOutput = (
nodeId: string = 'node-1',
@@ -39,35 +44,6 @@ const createTaskOutput = (
}
})
const createRunningTask = (
queueIndex: number,
promptId: string
): RunningTaskItem => ({
taskType: 'Running',
prompt: createTaskPrompt(queueIndex, promptId),
remove: { name: 'Cancel', cb: () => {} }
})
const createPendingTask = (
queueIndex: number,
promptId: string
): PendingTaskItem => ({
taskType: 'Pending',
prompt: createTaskPrompt(queueIndex, promptId)
})
const createHistoryTask = (
queueIndex: number,
promptId: string,
outputs: TaskOutput = createTaskOutput(),
status: TaskStatus = createTaskStatus()
): HistoryTaskItem => ({
taskType: 'History',
prompt: createTaskPrompt(queueIndex, promptId),
status,
outputs
})
// Mock API
vi.mock('@/scripts/api', () => ({
api: {
@@ -83,17 +59,13 @@ vi.mock('@/scripts/api', () => ({
describe('TaskItemImpl', () => {
it('should remove animated property from outputs during construction', () => {
const taskItem = new TaskItemImpl(
'History',
[0, 'prompt-id', {}, { client_id: 'client-id' }, []],
{ status_str: 'success', messages: [], completed: true },
{
'node-1': {
images: [{ filename: 'test.png', type: 'output', subfolder: '' }],
animated: [false]
}
const job = createHistoryJob(0, 'prompt-id')
const taskItem = new TaskItemImpl(job, {
'node-1': {
images: [{ filename: 'test.png', type: 'output', subfolder: '' }],
animated: [false]
}
)
})
// Check that animated property was removed
expect('animated' in taskItem.outputs['node-1']).toBe(false)
@@ -103,90 +75,72 @@ describe('TaskItemImpl', () => {
})
it('should handle outputs without animated property', () => {
const taskItem = new TaskItemImpl(
'History',
[0, 'prompt-id', {}, { client_id: 'client-id' }, []],
{ status_str: 'success', messages: [], completed: true },
{
'node-1': {
images: [{ filename: 'test.png', type: 'output', subfolder: '' }]
}
const job = createHistoryJob(0, 'prompt-id')
const taskItem = new TaskItemImpl(job, {
'node-1': {
images: [{ filename: 'test.png', type: 'output', subfolder: '' }]
}
)
})
expect(taskItem.outputs['node-1'].images).toBeDefined()
expect(taskItem.outputs['node-1'].images?.[0]?.filename).toBe('test.png')
})
it('should recognize webm video from core', () => {
const taskItem = new TaskItemImpl(
'History',
[0, 'prompt-id', {}, { client_id: 'client-id' }, []],
{ status_str: 'success', messages: [], completed: true },
{
'node-1': {
video: [{ filename: 'test.webm', type: 'output', subfolder: '' }]
}
const job = createHistoryJob(0, 'prompt-id')
const taskItem = new TaskItemImpl(job, {
'node-1': {
video: [{ filename: 'test.webm', type: 'output', subfolder: '' }]
}
)
})
const output = taskItem.flatOutputs[0]
expect(output.htmlVideoType).toBe('video/webm')
expect(output.isVideo).toBe(true)
expect(output.isWebm).toBe(true)
expect(output.isVhsFormat).toBe(false)
expect(output.isImage).toBe(false)
})
// https://github.com/Kosinkadink/ComfyUI-VideoHelperSuite/blob/0a75c7958fe320efcb052f1d9f8451fd20c730a8/videohelpersuite/nodes.py#L578-L590
it('should recognize webm video from VHS', () => {
const taskItem = new TaskItemImpl(
'History',
[0, 'prompt-id', {}, { client_id: 'client-id' }, []],
{ status_str: 'success', messages: [], completed: true },
{
'node-1': {
gifs: [
{
filename: 'test.webm',
type: 'output',
subfolder: '',
format: 'video/webm',
frame_rate: 30
}
]
}
const job = createHistoryJob(0, 'prompt-id')
const taskItem = new TaskItemImpl(job, {
'node-1': {
gifs: [
{
filename: 'test.webm',
type: 'output',
subfolder: '',
format: 'video/webm',
frame_rate: 30
}
]
}
)
})
const output = taskItem.flatOutputs[0]
expect(output.htmlVideoType).toBe('video/webm')
expect(output.isVideo).toBe(true)
expect(output.isWebm).toBe(true)
expect(output.isVhsFormat).toBe(true)
expect(output.isImage).toBe(false)
})
it('should recognize mp4 video from core', () => {
const taskItem = new TaskItemImpl(
'History',
[0, 'prompt-id', {}, { client_id: 'client-id' }, []],
{ status_str: 'success', messages: [], completed: true },
{
'node-1': {
images: [
{
filename: 'test.mp4',
type: 'output',
subfolder: ''
}
],
animated: [true]
}
const job = createHistoryJob(0, 'prompt-id')
const taskItem = new TaskItemImpl(job, {
'node-1': {
images: [
{
filename: 'test.mp4',
type: 'output',
subfolder: ''
}
],
animated: [true]
}
)
})
const output = taskItem.flatOutputs[0]
@@ -205,22 +159,18 @@ describe('TaskItemImpl', () => {
audioFormats.forEach(({ extension, mimeType }) => {
it(`should recognize ${extension} audio`, () => {
const taskItem = new TaskItemImpl(
'History',
[0, 'prompt-id', {}, { client_id: 'client-id' }, []],
{ status_str: 'success', messages: [], completed: true },
{
'node-1': {
audio: [
{
filename: `test.${extension}`,
type: 'output',
subfolder: ''
}
]
}
const job = createHistoryJob(0, 'prompt-id')
const taskItem = new TaskItemImpl(job, {
'node-1': {
audio: [
{
filename: `test.${extension}`,
type: 'output',
subfolder: ''
}
]
}
)
})
const output = taskItem.flatOutputs[0]
@@ -267,15 +217,16 @@ describe('useQueueStore', () => {
describe('update() - basic functionality', () => {
it('should load running and pending tasks from API', async () => {
const runningTask = createRunningTask(1, 'run-1')
const pendingTask1 = createPendingTask(2, 'pend-1')
const pendingTask2 = createPendingTask(3, 'pend-2')
const runningJob = createRunningJob(1, 'run-1')
const pendingJob1 = createPendingJob(2, 'pend-1')
const pendingJob2 = createPendingJob(3, 'pend-2')
// API returns pre-sorted data (newest first)
mockGetQueue.mockResolvedValue({
Running: [runningTask],
Pending: [pendingTask1, pendingTask2]
Running: [runningJob],
Pending: [pendingJob2, pendingJob1] // Pre-sorted by create_time desc
})
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.update()
@@ -287,13 +238,11 @@ describe('useQueueStore', () => {
})
it('should load history tasks from API', async () => {
const historyTask1 = createHistoryTask(5, 'hist-1')
const historyTask2 = createHistoryTask(4, 'hist-2')
const historyJob1 = createHistoryJob(5, 'hist-1')
const historyJob2 = createHistoryJob(4, 'hist-2')
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({
History: [historyTask1, historyTask2]
})
mockGetHistory.mockResolvedValue([historyJob1, historyJob2])
await store.update()
@@ -304,7 +253,7 @@ describe('useQueueStore', () => {
it('should set loading state correctly', async () => {
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
expect(store.isLoading).toBe(false)
@@ -317,7 +266,7 @@ describe('useQueueStore', () => {
it('should clear loading state even if API fails', async () => {
mockGetQueue.mockRejectedValue(new Error('API error'))
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await expect(store.update()).rejects.toThrow('API error')
expect(store.isLoading).toBe(false)
@@ -326,14 +275,12 @@ describe('useQueueStore', () => {
describe('update() - sorting', () => {
it('should sort tasks by queueIndex descending', async () => {
const task1 = createHistoryTask(1, 'hist-1')
const task2 = createHistoryTask(5, 'hist-2')
const task3 = createHistoryTask(3, 'hist-3')
const job1 = createHistoryJob(1, 'hist-1')
const job2 = createHistoryJob(5, 'hist-2')
const job3 = createHistoryJob(3, 'hist-3')
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({
History: [task1, task2, task3]
})
mockGetHistory.mockResolvedValue([job1, job2, job3])
await store.update()
@@ -342,16 +289,17 @@ describe('useQueueStore', () => {
expect(store.historyTasks[2].queueIndex).toBe(1)
})
it('should sort pending tasks by queueIndex descending', async () => {
const pend1 = createPendingTask(10, 'pend-1')
const pend2 = createPendingTask(15, 'pend-2')
const pend3 = createPendingTask(12, 'pend-3')
it('should preserve API sort order for pending tasks', async () => {
const pend1 = createPendingJob(10, 'pend-1')
const pend2 = createPendingJob(15, 'pend-2')
const pend3 = createPendingJob(12, 'pend-3')
// API returns pre-sorted data (newest first)
mockGetQueue.mockResolvedValue({
Running: [],
Pending: [pend1, pend2, pend3]
Pending: [pend2, pend3, pend1] // Pre-sorted by create_time desc
})
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.update()
@@ -363,19 +311,17 @@ describe('useQueueStore', () => {
describe('update() - queue index collision (THE BUG FIX)', () => {
it('should NOT confuse different prompts with same queueIndex', async () => {
const hist1 = createHistoryTask(50, 'prompt-uuid-aaa')
const hist1 = createHistoryJob(50, 'prompt-uuid-aaa')
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [hist1] })
mockGetHistory.mockResolvedValue([hist1])
await store.update()
expect(store.historyTasks).toHaveLength(1)
expect(store.historyTasks[0].promptId).toBe('prompt-uuid-aaa')
const hist2 = createHistoryTask(51, 'prompt-uuid-bbb')
mockGetHistory.mockResolvedValue({
History: [hist2]
})
const hist2 = createHistoryJob(51, 'prompt-uuid-bbb')
mockGetHistory.mockResolvedValue([hist2])
await store.update()
@@ -385,19 +331,17 @@ describe('useQueueStore', () => {
})
it('should correctly reconcile when queueIndex is reused', async () => {
const hist1 = createHistoryTask(100, 'first-prompt-at-100')
const hist2 = createHistoryTask(99, 'prompt-at-99')
const hist1 = createHistoryJob(100, 'first-prompt-at-100')
const hist2 = createHistoryJob(99, 'prompt-at-99')
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [hist1, hist2] })
mockGetHistory.mockResolvedValue([hist1, hist2])
await store.update()
expect(store.historyTasks).toHaveLength(2)
const hist3 = createHistoryTask(101, 'second-prompt-at-101')
mockGetHistory.mockResolvedValue({
History: [hist3, hist2]
})
const hist3 = createHistoryJob(101, 'second-prompt-at-101')
mockGetHistory.mockResolvedValue([hist3, hist2])
await store.update()
@@ -409,23 +353,19 @@ describe('useQueueStore', () => {
})
it('should handle multiple queueIndex collisions simultaneously', async () => {
const hist1 = createHistoryTask(10, 'old-at-10')
const hist2 = createHistoryTask(20, 'old-at-20')
const hist3 = createHistoryTask(30, 'keep-at-30')
const hist1 = createHistoryJob(10, 'old-at-10')
const hist2 = createHistoryJob(20, 'old-at-20')
const hist3 = createHistoryJob(30, 'keep-at-30')
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({
History: [hist3, hist2, hist1]
})
mockGetHistory.mockResolvedValue([hist3, hist2, hist1])
await store.update()
expect(store.historyTasks).toHaveLength(3)
const newHist1 = createHistoryTask(31, 'new-at-31')
const newHist2 = createHistoryTask(32, 'new-at-32')
mockGetHistory.mockResolvedValue({
History: [newHist2, newHist1, hist3]
})
const newHist1 = createHistoryJob(31, 'new-at-31')
const newHist2 = createHistoryJob(32, 'new-at-32')
mockGetHistory.mockResolvedValue([newHist2, newHist1, hist3])
await store.update()
@@ -437,19 +377,17 @@ describe('useQueueStore', () => {
describe('update() - history reconciliation', () => {
it('should keep existing items still on server (by promptId)', async () => {
const hist1 = createHistoryTask(10, 'existing-1')
const hist2 = createHistoryTask(9, 'existing-2')
const hist1 = createHistoryJob(10, 'existing-1')
const hist2 = createHistoryJob(9, 'existing-2')
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [hist1, hist2] })
mockGetHistory.mockResolvedValue([hist1, hist2])
await store.update()
expect(store.historyTasks).toHaveLength(2)
const hist3 = createHistoryTask(11, 'new-1')
mockGetHistory.mockResolvedValue({
History: [hist3, hist1, hist2]
})
const hist3 = createHistoryJob(11, 'new-1')
mockGetHistory.mockResolvedValue([hist3, hist1, hist2])
await store.update()
@@ -460,16 +398,16 @@ describe('useQueueStore', () => {
})
it('should remove items no longer on server', async () => {
const hist1 = createHistoryTask(10, 'remove-me')
const hist2 = createHistoryTask(9, 'keep-me')
const hist1 = createHistoryJob(10, 'remove-me')
const hist2 = createHistoryJob(9, 'keep-me')
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [hist1, hist2] })
mockGetHistory.mockResolvedValue([hist1, hist2])
await store.update()
expect(store.historyTasks).toHaveLength(2)
mockGetHistory.mockResolvedValue({ History: [hist2] })
mockGetHistory.mockResolvedValue([hist2])
await store.update()
@@ -478,18 +416,16 @@ describe('useQueueStore', () => {
})
it('should add new items from server', async () => {
const hist1 = createHistoryTask(5, 'old-1')
const hist1 = createHistoryJob(5, 'old-1')
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [hist1] })
mockGetHistory.mockResolvedValue([hist1])
await store.update()
const hist2 = createHistoryTask(6, 'new-1')
const hist3 = createHistoryTask(7, 'new-2')
mockGetHistory.mockResolvedValue({
History: [hist3, hist2, hist1]
})
const hist2 = createHistoryJob(6, 'new-1')
const hist3 = createHistoryJob(7, 'new-2')
mockGetHistory.mockResolvedValue([hist3, hist2, hist1])
await store.update()
@@ -503,12 +439,12 @@ describe('useQueueStore', () => {
it('should enforce maxHistoryItems limit', async () => {
store.maxHistoryItems = 3
const tasks = Array.from({ length: 5 }, (_, i) =>
createHistoryTask(10 - i, `hist-${i}`)
const jobs = Array.from({ length: 5 }, (_, i) =>
createHistoryJob(10 - i, `hist-${i}`)
)
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: tasks })
mockGetHistory.mockResolvedValue(jobs)
await store.update()
@@ -522,21 +458,19 @@ describe('useQueueStore', () => {
store.maxHistoryItems = 5
const initial = Array.from({ length: 3 }, (_, i) =>
createHistoryTask(10 + i, `existing-${i}`)
createHistoryJob(10 + i, `existing-${i}`)
)
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: initial })
mockGetHistory.mockResolvedValue(initial)
await store.update()
expect(store.historyTasks).toHaveLength(3)
const newTasks = Array.from({ length: 4 }, (_, i) =>
createHistoryTask(20 + i, `new-${i}`)
const newJobs = Array.from({ length: 4 }, (_, i) =>
createHistoryJob(20 + i, `new-${i}`)
)
mockGetHistory.mockResolvedValue({
History: [...newTasks, ...initial]
})
mockGetHistory.mockResolvedValue([...newJobs, ...initial])
await store.update()
@@ -547,10 +481,10 @@ describe('useQueueStore', () => {
it('should handle maxHistoryItems = 0', async () => {
store.maxHistoryItems = 0
const tasks = [createHistoryTask(10, 'hist-1')]
const jobs = [createHistoryJob(10, 'hist-1')]
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: tasks })
mockGetHistory.mockResolvedValue(jobs)
await store.update()
@@ -560,13 +494,13 @@ describe('useQueueStore', () => {
it('should handle maxHistoryItems = 1', async () => {
store.maxHistoryItems = 1
const tasks = [
createHistoryTask(10, 'hist-1'),
createHistoryTask(9, 'hist-2')
const jobs = [
createHistoryJob(10, 'hist-1'),
createHistoryJob(9, 'hist-2')
]
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: tasks })
mockGetHistory.mockResolvedValue(jobs)
await store.update()
@@ -577,18 +511,18 @@ describe('useQueueStore', () => {
it('should dynamically adjust when maxHistoryItems changes', async () => {
store.maxHistoryItems = 10
const tasks = Array.from({ length: 15 }, (_, i) =>
createHistoryTask(20 - i, `hist-${i}`)
const jobs = Array.from({ length: 15 }, (_, i) =>
createHistoryJob(20 - i, `hist-${i}`)
)
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: tasks })
mockGetHistory.mockResolvedValue(jobs)
await store.update()
expect(store.historyTasks).toHaveLength(10)
store.maxHistoryItems = 5
mockGetHistory.mockResolvedValue({ History: tasks })
mockGetHistory.mockResolvedValue(jobs)
await store.update()
expect(store.historyTasks).toHaveLength(5)
@@ -597,19 +531,17 @@ describe('useQueueStore', () => {
describe('computed properties', () => {
it('tasks should combine pending, running, and history in correct order', async () => {
const running = createRunningTask(5, 'run-1')
const pending1 = createPendingTask(6, 'pend-1')
const pending2 = createPendingTask(7, 'pend-2')
const hist1 = createHistoryTask(3, 'hist-1')
const hist2 = createHistoryTask(4, 'hist-2')
const running = createRunningJob(5, 'run-1')
const pending1 = createPendingJob(6, 'pend-1')
const pending2 = createPendingJob(7, 'pend-2')
const hist1 = createHistoryJob(3, 'hist-1')
const hist2 = createHistoryJob(4, 'hist-2')
mockGetQueue.mockResolvedValue({
Running: [running],
Pending: [pending1, pending2]
})
mockGetHistory.mockResolvedValue({
History: [hist2, hist1]
})
mockGetHistory.mockResolvedValue([hist2, hist1])
await store.update()
@@ -624,9 +556,9 @@ describe('useQueueStore', () => {
it('hasPendingTasks should be true when pending tasks exist', async () => {
mockGetQueue.mockResolvedValue({
Running: [],
Pending: [createPendingTask(1, 'pend-1')]
Pending: [createPendingJob(1, 'pend-1')]
})
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.update()
expect(store.hasPendingTasks).toBe(true)
@@ -634,21 +566,19 @@ describe('useQueueStore', () => {
it('hasPendingTasks should be false when no pending tasks', async () => {
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.update()
expect(store.hasPendingTasks).toBe(false)
})
it('lastHistoryQueueIndex should return highest queue index', async () => {
const hist1 = createHistoryTask(10, 'hist-1')
const hist2 = createHistoryTask(25, 'hist-2')
const hist3 = createHistoryTask(15, 'hist-3')
const hist1 = createHistoryJob(10, 'hist-1')
const hist2 = createHistoryJob(25, 'hist-2')
const hist3 = createHistoryJob(15, 'hist-3')
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({
History: [hist1, hist2, hist3]
})
mockGetHistory.mockResolvedValue([hist1, hist2, hist3])
await store.update()
expect(store.lastHistoryQueueIndex).toBe(25)
@@ -656,7 +586,7 @@ describe('useQueueStore', () => {
it('lastHistoryQueueIndex should be -1 when no history', async () => {
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.update()
expect(store.lastHistoryQueueIndex).toBe(-1)
@@ -666,19 +596,17 @@ describe('useQueueStore', () => {
describe('clear()', () => {
beforeEach(async () => {
mockGetQueue.mockResolvedValue({
Running: [createRunningTask(1, 'run-1')],
Pending: [createPendingTask(2, 'pend-1')]
})
mockGetHistory.mockResolvedValue({
History: [createHistoryTask(3, 'hist-1')]
Running: [createRunningJob(1, 'run-1')],
Pending: [createPendingJob(2, 'pend-1')]
})
mockGetHistory.mockResolvedValue([createHistoryJob(3, 'hist-1')])
await store.update()
})
it('should clear both queue and history by default', async () => {
mockClearItems.mockResolvedValue(undefined)
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.clear()
@@ -693,9 +621,7 @@ describe('useQueueStore', () => {
it('should clear only queue when specified', async () => {
mockClearItems.mockResolvedValue(undefined)
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({
History: [createHistoryTask(3, 'hist-1')]
})
mockGetHistory.mockResolvedValue([createHistoryJob(3, 'hist-1')])
await store.clear(['queue'])
@@ -707,10 +633,10 @@ describe('useQueueStore', () => {
it('should clear only history when specified', async () => {
mockClearItems.mockResolvedValue(undefined)
mockGetQueue.mockResolvedValue({
Running: [createRunningTask(1, 'run-1')],
Pending: [createPendingTask(2, 'pend-1')]
Running: [createRunningJob(1, 'run-1')],
Pending: [createPendingJob(2, 'pend-1')]
})
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.clear(['history'])
@@ -729,11 +655,12 @@ describe('useQueueStore', () => {
describe('delete()', () => {
it('should delete task from queue', async () => {
const task = new TaskItemImpl('Pending', createTaskPrompt(1, 'pend-1'))
const job = createPendingJob(1, 'pend-1')
const task = new TaskItemImpl(job)
mockDeleteItem.mockResolvedValue(undefined)
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.delete(task)
@@ -741,16 +668,12 @@ describe('useQueueStore', () => {
})
it('should delete task from history', async () => {
const task = new TaskItemImpl(
'History',
createTaskPrompt(1, 'hist-1'),
createTaskStatus(),
createTaskOutput()
)
const job = createHistoryJob(1, 'hist-1')
const task = new TaskItemImpl(job, createTaskOutput())
mockDeleteItem.mockResolvedValue(undefined)
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.delete(task)
@@ -758,11 +681,12 @@ describe('useQueueStore', () => {
})
it('should refresh store after deletion', async () => {
const task = new TaskItemImpl('Pending', createTaskPrompt(1, 'pend-1'))
const job = createPendingJob(1, 'pend-1')
const task = new TaskItemImpl(job)
mockDeleteItem.mockResolvedValue(undefined)
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.delete(task)

View File

@@ -1,14 +1,11 @@
import { createPinia, setActivePinia } from 'pinia'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import type { ComfyApp } from '@/scripts/app'
import type { JobListItem } from '@/platform/remote/comfyui/jobs/types/jobTypes'
import type { ComfyWorkflowJSON } from '@/platform/workflow/validation/schemas/workflowSchema'
import type { ComfyApp } from '@/scripts/app'
import { TaskItemImpl } from '@/stores/queueStore'
import * as getWorkflowModule from '@/platform/workflow/cloud'
vi.mock('@/platform/distribution/types', () => ({
isCloud: true
}))
import * as jobsModule from '@/platform/remote/comfyui/jobs'
vi.mock('@/services/extensionService', () => ({
useExtensionService: vi.fn(() => ({
@@ -29,53 +26,44 @@ const mockWorkflow: ComfyWorkflowJSON = {
version: 0.4
}
const createHistoryTaskWithWorkflow = (): TaskItemImpl => {
return new TaskItemImpl(
'History',
[
0, // queueIndex
'test-prompt-id', // promptId
{}, // promptInputs
{
client_id: 'test-client',
extra_pnginfo: {
workflow: mockWorkflow
}
},
[] // outputsToExecute
],
{
status_str: 'success',
completed: true,
messages: []
},
{} // outputs
)
// Mock job detail response (matches actual /jobs/{id} API response structure)
const mockJobDetail = {
id: 'test-prompt-id',
status: 'completed' as const,
create_time: Date.now(),
execution_time: 10.5,
extra_data: {
extra_pnginfo: {
workflow: mockWorkflow
}
},
prompt: {},
outputs: {
'1': { images: [{ filename: 'test.png', subfolder: '', type: 'output' }] }
}
}
const createHistoryTaskWithoutWorkflow = (): TaskItemImpl => {
return new TaskItemImpl(
'History',
[
0,
'test-prompt-id',
{},
{
client_id: 'test-client'
// No extra_pnginfo.workflow
},
[]
],
{
status_str: 'success',
completed: true,
messages: []
},
{}
)
function createHistoryJob(id: string): JobListItem {
const now = Date.now()
return {
id,
status: 'completed',
create_time: now,
priority: now
}
}
function createRunningJob(id: string): JobListItem {
const now = Date.now()
return {
id,
status: 'in_progress',
create_time: now,
priority: now
}
}
describe('TaskItemImpl.loadWorkflow - cloud history workflow fetching', () => {
describe('TaskItemImpl.loadWorkflow - workflow fetching', () => {
let mockApp: ComfyApp
let mockFetchApi: ReturnType<typeof vi.fn>
@@ -91,30 +79,19 @@ describe('TaskItemImpl.loadWorkflow - cloud history workflow fetching', () => {
fetchApi: mockFetchApi
}
} as unknown as ComfyApp
vi.spyOn(getWorkflowModule, 'getWorkflowFromHistory')
})
it('should load workflow directly when workflow is in extra_pnginfo', async () => {
const task = createHistoryTaskWithWorkflow()
it('should fetch workflow from API for history tasks', async () => {
const job = createHistoryJob('test-prompt-id')
const task = new TaskItemImpl(job)
await task.loadWorkflow(mockApp)
expect(mockApp.loadGraphData).toHaveBeenCalledWith(mockWorkflow)
expect(mockFetchApi).not.toHaveBeenCalled()
})
it('should fetch workflow from cloud when workflow is missing from history task', async () => {
const task = createHistoryTaskWithoutWorkflow()
// Mock getWorkflowFromHistory to return workflow
vi.spyOn(getWorkflowModule, 'getWorkflowFromHistory').mockResolvedValue(
mockWorkflow
vi.spyOn(jobsModule, 'fetchJobDetail').mockResolvedValue(
mockJobDetail as jobsModule.JobDetail
)
await task.loadWorkflow(mockApp)
expect(getWorkflowModule.getWorkflowFromHistory).toHaveBeenCalledWith(
expect(jobsModule.fetchJobDetail).toHaveBeenCalledWith(
expect.any(Function),
'test-prompt-id'
)
@@ -122,54 +99,40 @@ describe('TaskItemImpl.loadWorkflow - cloud history workflow fetching', () => {
})
it('should not load workflow when fetch returns undefined', async () => {
const task = createHistoryTaskWithoutWorkflow()
const job = createHistoryJob('test-prompt-id')
const task = new TaskItemImpl(job)
vi.spyOn(getWorkflowModule, 'getWorkflowFromHistory').mockResolvedValue(
undefined
)
vi.spyOn(jobsModule, 'fetchJobDetail').mockResolvedValue(undefined)
await task.loadWorkflow(mockApp)
expect(getWorkflowModule.getWorkflowFromHistory).toHaveBeenCalled()
expect(jobsModule.fetchJobDetail).toHaveBeenCalled()
expect(mockApp.loadGraphData).not.toHaveBeenCalled()
})
it('should only fetch for history tasks, not running tasks', async () => {
const runningTask = new TaskItemImpl(
'Running',
[
0,
'test-prompt-id',
{},
{
client_id: 'test-client'
},
[]
],
undefined,
{}
)
const job = createRunningJob('test-prompt-id')
const runningTask = new TaskItemImpl(job)
vi.spyOn(getWorkflowModule, 'getWorkflowFromHistory').mockResolvedValue(
mockWorkflow
vi.spyOn(jobsModule, 'fetchJobDetail').mockResolvedValue(
mockJobDetail as jobsModule.JobDetail
)
await runningTask.loadWorkflow(mockApp)
expect(getWorkflowModule.getWorkflowFromHistory).not.toHaveBeenCalled()
expect(jobsModule.fetchJobDetail).not.toHaveBeenCalled()
expect(mockApp.loadGraphData).not.toHaveBeenCalled()
})
it('should handle fetch errors gracefully by returning undefined', async () => {
const task = createHistoryTaskWithoutWorkflow()
const job = createHistoryJob('test-prompt-id')
const task = new TaskItemImpl(job)
vi.spyOn(getWorkflowModule, 'getWorkflowFromHistory').mockResolvedValue(
undefined
)
vi.spyOn(jobsModule, 'fetchJobDetail').mockResolvedValue(undefined)
await task.loadWorkflow(mockApp)
expect(getWorkflowModule.getWorkflowFromHistory).toHaveBeenCalled()
expect(jobsModule.fetchJobDetail).toHaveBeenCalled()
expect(mockApp.loadGraphData).not.toHaveBeenCalled()
})
})
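In sum, these tests describe the lazy-loading path for history entries: completed jobs fetch their full detail on demand, while running jobs and failed fetches are no-ops. A minimal sketch of that flow, under the same assumptions as the mocks above (not the shipped method):

// Sketch of the lazy workflow load exercised by these tests.
async function loadWorkflowSketch(
  task: { job: JobListItem },
  app: {
    loadGraphData: (workflow: ComfyWorkflowJSON) => Promise<void> | void
    api: { fetchApi: (route: string) => Promise<Response> }
  }
): Promise<void> {
  // Only completed (history) jobs have persisted detail worth fetching.
  if (task.job.status !== 'completed') return

  const detail = await jobsModule.fetchJobDetail(
    (route: string) => app.api.fetchApi(route),
    task.job.id
  )
  const workflow = jobsModule.extractWorkflow(detail)
  if (!workflow) return

  await app.loadGraphData(workflow)
}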