Merge main into fix/remove-any-types-part7

Johnpaul committed 2026-01-16 23:15:38 +01:00
122 changed files with 3889 additions and 4342 deletions

src/AGENTS.md (new file, +26 lines)

@@ -0,0 +1,26 @@
# Source Code Guidelines
## Error Handling
- User-friendly and actionable messages
- Proper error propagation
## Security
- Sanitize HTML with DOMPurify
- Validate trusted sources
- Never log secrets
## State Management (Stores)
- Follow domain-driven design for organizing files/folders
- Clear public interfaces
- Restrict extension access
- Clean up subscriptions
## General Guidelines
- Use `es-toolkit` for utility functions
- Use TypeScript for type safety
- Avoid `@ts-expect-error` - fix the underlying issue
- Use `vue-i18n` for ALL user-facing strings (`src/locales/en/main.json`)
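A minimal TypeScript sketch, not part of the commit, of how the DOMPurify and vue-i18n guidelines above typically look in practice; the composable name and translation key are hypothetical.

```typescript
// Illustrative only: sanitize untrusted HTML before it reaches v-html, and
// resolve user-facing text through vue-i18n instead of hard-coded strings.
// Call from a component's setup context.
import DOMPurify from 'dompurify'
import { useI18n } from 'vue-i18n'

export function useSafeNotice() {
  const { t } = useI18n()

  // Strips scripts/event handlers from user-provided HTML
  const sanitize = (rawHtml: string): string => DOMPurify.sanitize(rawHtml)

  // User-facing string resolved via a key in src/locales/en/main.json
  const noticeText = (): string => t('notifications.unsavedChanges')

  return { sanitize, noticeText }
}
```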


@@ -1,57 +1,3 @@
# Source Code Guidelines
## Service Layer
### API Calls
- Use `api.apiURL()` for backend endpoints
- Use `api.fileURL()` for static files
#### ✅ Correct Usage
```typescript
// Backend API call
const response = await api.get(api.apiURL('/prompt'))
// Static file
const template = await fetch(api.fileURL('/templates/default.json'))
```
#### ❌ Incorrect Usage
```typescript
// WRONG - Direct URL construction
const response = await fetch('/api/prompt')
const template = await fetch('/templates/default.json')
```
### Error Handling
- User-friendly and actionable messages
- Proper error propagation
### Security
- Sanitize HTML with DOMPurify
- Validate trusted sources
- Never log secrets
## State Management (Stores)
### Store Design
- Follow domain-driven design
- Clear public interfaces
- Restrict extension access
### Best Practices
- Use TypeScript for type safety
- Implement proper error handling
- Clean up subscriptions
- Avoid @ts-expect-error
## General Guidelines
- Use es-toolkit for utility functions
- Implement proper TypeScript types
- Follow Vue 3 composition API style guide
- Use vue-i18n for ALL user-facing strings in `src/locales/en/main.json`
<!-- We forked the path, yet here we are again—
Maintaining two files where one would have been sane. -->
@AGENTS.md
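As a hedged aside (not part of the commit): the store guidance in both versions of this file tends to look like the following Pinia setup store; the store name and polling interval are invented.

```typescript
// Illustrative only: a store with a narrow public interface that cleans up its
// subscription-like resource when the store's effect scope is disposed.
import { defineStore } from 'pinia'
import { onScopeDispose, ref } from 'vue'

export const useJobPollingStore = defineStore('jobPolling', () => {
  const lastRefreshedAt = ref<number | null>(null)

  // Subscription-like resource owned by the store
  const timer = setInterval(() => {
    lastRefreshedAt.value = Date.now()
  }, 5_000)

  // Clean up subscriptions when the store's scope is disposed
  onScopeDispose(() => clearInterval(timer))

  // Expose only what callers need
  return { lastRefreshedAt }
})
```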

src/components/AGENTS.md (new file, +6 lines)

@@ -0,0 +1,6 @@
# Component Guidelines
## Component Communication
- Prefer `emit/@event-name` for state changes
- Use `defineExpose` only for imperative operations (`form.validate()`, `modal.open()`)
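A short hedged sketch of that split (not part of the commit), written as the `<script setup lang="ts">` of a hypothetical form field.

```typescript
// Illustrative only: state changes leave the component as events; defineExpose
// carries nothing but the imperative surface (e.g. form.validate()).
import { ref } from 'vue'

const emit = defineEmits<{ (e: 'update:modelValue', value: string): void }>()
const value = ref('')

function onInput(next: string) {
  value.value = next
  emit('update:modelValue', next) // prefer emit/@event-name for state changes
}

function validate(): boolean {
  return value.value.trim().length > 0
}

defineExpose({ validate }) // imperative operations only
```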


@@ -1,45 +1,3 @@
# Component Guidelines
## Vue 3 Composition API
- Use setup() function
- Destructure props (Vue 3.5 style)
- Use ref/reactive for state
- Implement computed() for derived state
- Use provide/inject for dependency injection
## Component Communication
- Prefer `emit/@event-name` for state changes
- Use `defineExpose` only for imperative operations (`form.validate()`, `modal.open()`)
- Events promote loose coupling
## UI Framework
- Deprecated PrimeVue component replacements:
- Dropdown → Select
- OverlayPanel → Popover
- Calendar → DatePicker
- InputSwitch → ToggleSwitch
- Sidebar → Drawer
- Chips → AutoComplete with multiple enabled
- TabMenu → Tabs without panels
- Steps → Stepper without panels
- InlineMessage → Message
## Styling
- Use Tailwind CSS only (no custom CSS)
- Use the correct tokens from style.css in the design system package
- For common operations, try to use existing VueUse composables that automatically handle effect scope
- Example: Use `useElementHover` instead of manually managing mouseover/mouseout event listeners
- Example: Use `useIntersectionObserver` for visibility detection instead of custom scroll handlers
## Best Practices
- Extract complex conditionals to computed
- Implement cleanup for async operations
- Use vue-i18n for ALL UI strings
- Use lifecycle hooks: onMounted, onUpdated
- Use Teleport/Suspense when needed
- Proper props and emits definitions
<!-- "Play nice with others," mother always said,
But Claude prefers its own file name instead. -->
@AGENTS.md
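For the VueUse guidance a few lines up, a hedged sketch (not part of the commit) of the `useElementHover` pattern it recommends, inside `<script setup lang="ts">`.

```typescript
// Illustrative only: useElementHover wires up the mouseenter/mouseleave
// listeners and their cleanup in the current effect scope, replacing
// hand-rolled mouseover/mouseout handling.
import { useElementHover } from '@vueuse/core'
import { computed, ref } from 'vue'

const cardRef = ref<HTMLElement | null>(null) // bound via ref="cardRef" in the template
const isHovered = useElementHover(cardRef)

// Derived state goes through computed(), per the Best Practices above
const overlayClass = computed(() => (isHovered.value ? 'opacity-100' : 'opacity-0'))
```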


@@ -1,5 +1,5 @@
<template>
<div class="flex h-full items-center">
<div class="flex h-full items-center" :class="cn(!isDocked && '-ml-2')">
<div
v-if="isDragging && !isDocked"
:class="actionbarClass"
@@ -77,7 +77,6 @@ const { isIdle: isExecutionIdle } = storeToRefs(useExecutionStore())
const position = computed(() => settingsStore.get('Comfy.UseNewMenu'))
const visible = computed(() => position.value !== 'Disabled')
const tabContainer = document.querySelector('.workflow-tabs-container')
const panelRef = ref<HTMLElement | null>(null)
const dragHandleRef = ref<HTMLElement | null>(null)
const isDocked = useLocalStorage('Comfy.MenuPosition.Docked', true)
@@ -88,14 +87,7 @@ const storedPosition = useLocalStorage('Comfy.MenuPosition.Floating', {
const { x, y, style, isDragging } = useDraggable(panelRef, {
initialValue: { x: 0, y: 0 },
handle: dragHandleRef,
containerElement: document.body,
onMove: (event) => {
// Prevent dragging the menu over the top of the tabs
const minY = tabContainer?.getBoundingClientRect().bottom ?? 40
if (event.y < minY) {
event.y = minY
}
}
containerElement: document.body
})
// Update storedPosition when x or y changes


@@ -55,17 +55,4 @@ const dialogStore = useDialogStore()
@apply p-2 2xl:p-[var(--p-dialog-content-padding)];
@apply pt-0;
}
.manager-dialog {
height: 80vh;
max-width: 1724px;
max-height: 1026px;
}
@media (min-width: 3000px) {
.manager-dialog {
max-width: 2200px;
max-height: 1320px;
}
}
</style>


@@ -262,7 +262,7 @@ const focusAssetInSidebar = async (item: JobListItem) => {
const inspectJobAsset = wrapWithErrorHandlingAsync(
async (item: JobListItem) => {
openResultGallery(item)
await openResultGallery(item)
await focusAssetInSidebar(item)
}
)


@@ -1,6 +1,9 @@
import type { Meta, StoryObj } from '@storybook/vue3-vite'
import type { TaskStatus } from '@/schemas/apiSchema'
import type {
JobListItem,
JobStatus
} from '@/platform/remote/comfyui/jobs/jobTypes'
import { useExecutionStore } from '@/stores/executionStore'
import { TaskItemImpl, useQueueStore } from '@/stores/queueStore'
@@ -37,91 +40,86 @@ function resetStores() {
exec.nodeProgressStatesByPrompt = {}
}
function makeTask(
id: string,
priority: number,
fields: Partial<JobListItem> & { status: JobStatus; create_time: number }
): TaskItemImpl {
const job: JobListItem = {
id,
priority,
last_state_update: null,
update_time: fields.create_time,
...fields
}
return new TaskItemImpl(job)
}
function makePendingTask(
id: string,
index: number,
createTimeMs?: number
priority: number,
createTimeMs: number
): TaskItemImpl {
const extraData = {
client_id: 'c1',
...(typeof createTimeMs === 'number' ? { create_time: createTimeMs } : {})
}
return new TaskItemImpl('Pending', [index, id, {}, extraData, []])
return makeTask(id, priority, {
status: 'pending',
create_time: createTimeMs
})
}
function makeRunningTask(
id: string,
index: number,
createTimeMs?: number
priority: number,
createTimeMs: number
): TaskItemImpl {
const extraData = {
client_id: 'c1',
...(typeof createTimeMs === 'number' ? { create_time: createTimeMs } : {})
}
return new TaskItemImpl('Running', [index, id, {}, extraData, []])
return makeTask(id, priority, {
status: 'in_progress',
create_time: createTimeMs
})
}
function makeRunningTaskWithStart(
id: string,
index: number,
priority: number,
startedSecondsAgo: number
): TaskItemImpl {
const start = Date.now() - startedSecondsAgo * 1000
const status: TaskStatus = {
status_str: 'success',
completed: false,
messages: [['execution_start', { prompt_id: id, timestamp: start } as any]]
}
return new TaskItemImpl(
'Running',
[index, id, {}, { client_id: 'c1', create_time: start - 5000 }, []],
status
)
return makeTask(id, priority, {
status: 'in_progress',
create_time: start - 5000,
update_time: start
})
}
function makeHistoryTask(
id: string,
index: number,
priority: number,
durationSec: number,
ok: boolean,
errorMessage?: string
): TaskItemImpl {
const start = Date.now() - durationSec * 1000 - 1000
const end = start + durationSec * 1000
const messages: TaskStatus['messages'] = ok
? [
['execution_start', { prompt_id: id, timestamp: start } as any],
['execution_success', { prompt_id: id, timestamp: end } as any]
]
: [
['execution_start', { prompt_id: id, timestamp: start } as any],
[
'execution_error',
{
prompt_id: id,
timestamp: end,
node_id: '1',
node_type: 'Node',
executed: [],
exception_message:
errorMessage || 'Demo error: Node failed during execution',
exception_type: 'RuntimeError',
traceback: [],
current_inputs: {},
current_outputs: {}
} as any
]
]
const status: TaskStatus = {
status_str: ok ? 'success' : 'error',
completed: true,
messages
}
return new TaskItemImpl(
'History',
[index, id, {}, { client_id: 'c1', create_time: start }, []],
status
)
const now = Date.now()
const executionEndTime = now
const executionStartTime = now - durationSec * 1000
return makeTask(id, priority, {
status: ok ? 'completed' : 'failed',
create_time: executionStartTime - 5000,
update_time: now,
execution_start_time: executionStartTime,
execution_end_time: executionEndTime,
execution_error: errorMessage
? {
prompt_id: id,
timestamp: now,
node_id: '1',
node_type: 'ExampleNode',
exception_message: errorMessage,
exception_type: 'RuntimeError',
traceback: [],
current_inputs: {},
current_outputs: {}
}
: undefined
})
}
export const Queued: Story = {
@@ -140,8 +138,12 @@ export const Queued: Story = {
makePendingTask(jobId, queueIndex, Date.now() - 90_000)
]
// Add some other pending jobs to give context
queue.pendingTasks.push(makePendingTask('job-older-1', 100))
queue.pendingTasks.push(makePendingTask('job-older-2', 101))
queue.pendingTasks.push(
makePendingTask('job-older-1', 100, Date.now() - 60_000)
)
queue.pendingTasks.push(
makePendingTask('job-older-2', 101, Date.now() - 30_000)
)
// Queued at (in metadata on prompt[4])


@@ -12,7 +12,7 @@
v-for="ji in group.items"
:key="ji.id"
:job-id="ji.id"
:workflow-id="ji.taskRef?.workflow?.id"
:workflow-id="ji.taskRef?.workflowId"
:state="ji.state"
:title="ji.title"
:right-text="ji.meta"


@@ -2,116 +2,49 @@ import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { computed, ref } from 'vue'
import type { ComputedRef } from 'vue'
import type { ExecutionErrorWsMessage } from '@/schemas/apiSchema'
import type { TaskItemImpl } from '@/stores/queueStore'
import type {
JobErrorDialogService,
UseJobErrorReportingOptions
} from '@/components/queue/job/useJobErrorReporting'
import * as jobErrorReporting from '@/components/queue/job/useJobErrorReporting'
import type { JobErrorDialogService } from '@/components/queue/job/useJobErrorReporting'
import { useJobErrorReporting } from '@/components/queue/job/useJobErrorReporting'
import type { ExecutionError } from '@/platform/remote/comfyui/jobs/jobTypes'
const createExecutionErrorMessage = (
overrides: Partial<ExecutionErrorWsMessage> = {}
): ExecutionErrorWsMessage => ({
prompt_id: 'prompt',
timestamp: 100,
node_id: 'node-1',
node_type: 'KSampler',
executed: [],
exception_message: 'default failure',
exception_type: 'RuntimeError',
traceback: ['Trace line'],
current_inputs: {},
current_outputs: {},
...overrides
})
const createTaskWithMessages = (
messages: Array<[string, unknown]> | undefined = []
const createTaskWithError = (
promptId: string,
errorMessage?: string,
executionError?: ExecutionError,
createTime?: number
): TaskItemImpl =>
({
status: {
status_str: 'error',
completed: false,
messages
}
}) as TaskItemImpl
describe('extractExecutionError', () => {
it('returns null when task has no execution error messages', () => {
expect(jobErrorReporting.extractExecutionError(null)).toBeNull()
expect(
jobErrorReporting.extractExecutionError({
status: undefined
} as TaskItemImpl)
).toBeNull()
expect(
jobErrorReporting.extractExecutionError({
status: {
status_str: 'error',
completed: false,
messages: {} as unknown as Array<[string, unknown]>
}
} as TaskItemImpl)
).toBeNull()
expect(
jobErrorReporting.extractExecutionError(createTaskWithMessages([]))
).toBeNull()
expect(
jobErrorReporting.extractExecutionError(
createTaskWithMessages([
['execution_start', { prompt_id: 'prompt', timestamp: 1 }]
] as Array<[string, unknown]>)
)
).toBeNull()
})
it('returns detail and message for execution_error entries', () => {
const detail = createExecutionErrorMessage({ exception_message: 'Kaboom' })
const result = jobErrorReporting.extractExecutionError(
createTaskWithMessages([
['execution_success', { prompt_id: 'prompt', timestamp: 2 }],
['execution_error', detail]
] as Array<[string, unknown]>)
)
expect(result).toEqual({
detail,
message: 'Kaboom'
})
})
it('falls back to an empty message when the tuple lacks detail', () => {
const result = jobErrorReporting.extractExecutionError(
createTaskWithMessages([
['execution_error'] as unknown as [string, ExecutionErrorWsMessage]
])
)
expect(result).toEqual({ detail: undefined, message: '' })
})
})
promptId,
errorMessage,
executionError,
createTime: createTime ?? Date.now()
}) as Partial<TaskItemImpl> as TaskItemImpl
describe('useJobErrorReporting', () => {
let taskState = ref<TaskItemImpl | null>(null)
let taskForJob: ComputedRef<TaskItemImpl | null>
let copyToClipboard: UseJobErrorReportingOptions['copyToClipboard']
let showExecutionErrorDialog: JobErrorDialogService['showExecutionErrorDialog']
let showErrorDialog: JobErrorDialogService['showErrorDialog']
let copyToClipboard: ReturnType<typeof vi.fn>
let showErrorDialog: ReturnType<typeof vi.fn>
let showExecutionErrorDialog: ReturnType<typeof vi.fn>
let dialog: JobErrorDialogService
let composable: ReturnType<typeof jobErrorReporting.useJobErrorReporting>
let composable: ReturnType<typeof useJobErrorReporting>
beforeEach(() => {
vi.clearAllMocks()
taskState = ref<TaskItemImpl | null>(null)
taskForJob = computed(() => taskState.value)
copyToClipboard = vi.fn()
showExecutionErrorDialog = vi.fn()
showErrorDialog = vi.fn()
showExecutionErrorDialog = vi.fn()
dialog = {
showExecutionErrorDialog,
showErrorDialog
}
composable = jobErrorReporting.useJobErrorReporting({
showErrorDialog,
showExecutionErrorDialog
} as unknown as JobErrorDialogService
composable = useJobErrorReporting({
taskForJob,
copyToClipboard,
copyToClipboard: copyToClipboard as (
value: string
) => void | Promise<void>,
dialog
})
})
@@ -121,73 +54,87 @@ describe('useJobErrorReporting', () => {
})
it('exposes a computed message that reflects the current task error', () => {
taskState.value = createTaskWithMessages([
[
'execution_error',
createExecutionErrorMessage({ exception_message: 'First failure' })
]
])
taskState.value = createTaskWithError('job-1', 'First failure')
expect(composable.errorMessageValue.value).toBe('First failure')
taskState.value = createTaskWithMessages([
[
'execution_error',
createExecutionErrorMessage({ exception_message: 'Second failure' })
]
])
taskState.value = createTaskWithError('job-2', 'Second failure')
expect(composable.errorMessageValue.value).toBe('Second failure')
})
it('returns empty string when no error message', () => {
taskState.value = createTaskWithError('job-1')
expect(composable.errorMessageValue.value).toBe('')
})
it('returns empty string when task is null', () => {
taskState.value = null
expect(composable.errorMessageValue.value).toBe('')
})
it('only calls the copy handler when a message exists', () => {
taskState.value = createTaskWithMessages([
[
'execution_error',
createExecutionErrorMessage({ exception_message: 'Clipboard failure' })
]
])
taskState.value = createTaskWithError('job-1', 'Clipboard failure')
composable.copyErrorMessage()
expect(copyToClipboard).toHaveBeenCalledTimes(1)
expect(copyToClipboard).toHaveBeenCalledWith('Clipboard failure')
vi.mocked(copyToClipboard).mockClear()
taskState.value = createTaskWithMessages([])
copyToClipboard.mockClear()
taskState.value = createTaskWithError('job-2')
composable.copyErrorMessage()
expect(copyToClipboard).not.toHaveBeenCalled()
})
it('prefers the detailed execution dialog when detail is available', () => {
const detail = createExecutionErrorMessage({
exception_message: 'Detailed failure'
})
taskState.value = createTaskWithMessages([['execution_error', detail]])
it('shows simple error dialog when only errorMessage present', () => {
taskState.value = createTaskWithError('job-1', 'Queue job error')
composable.reportJobError()
expect(showExecutionErrorDialog).toHaveBeenCalledTimes(1)
expect(showExecutionErrorDialog).toHaveBeenCalledWith(detail)
expect(showErrorDialog).not.toHaveBeenCalled()
})
it('shows a fallback dialog when only a message is available', () => {
const message = 'Queue job error'
taskState.value = createTaskWithMessages([])
const valueSpy = vi
.spyOn(composable.errorMessageValue, 'value', 'get')
.mockReturnValue(message)
expect(composable.errorMessageValue.value).toBe(message)
composable.reportJobError()
expect(showExecutionErrorDialog).not.toHaveBeenCalled()
expect(showErrorDialog).toHaveBeenCalledTimes(1)
const [errorArg, optionsArg] = vi.mocked(showErrorDialog).mock.calls[0]
const [errorArg, optionsArg] = showErrorDialog.mock.calls[0]
expect(errorArg).toBeInstanceOf(Error)
expect(errorArg.message).toBe(message)
expect(errorArg.message).toBe('Queue job error')
expect(optionsArg).toEqual({ reportType: 'queueJobError' })
valueSpy.mockRestore()
expect(showExecutionErrorDialog).not.toHaveBeenCalled()
})
it('does nothing when no error could be extracted', () => {
taskState.value = createTaskWithMessages([])
it('does nothing when no task exists', () => {
taskState.value = null
composable.reportJobError()
expect(showExecutionErrorDialog).not.toHaveBeenCalled()
expect(showErrorDialog).not.toHaveBeenCalled()
expect(showExecutionErrorDialog).not.toHaveBeenCalled()
})
it('shows rich error dialog when execution_error available on task', () => {
const executionError: ExecutionError = {
prompt_id: 'job-1',
timestamp: 12345,
node_id: '5',
node_type: 'KSampler',
executed: ['1', '2'],
exception_message: 'CUDA out of memory',
exception_type: 'RuntimeError',
traceback: ['line 1', 'line 2'],
current_inputs: {},
current_outputs: {}
}
taskState.value = createTaskWithError(
'job-1',
'CUDA out of memory',
executionError,
12345
)
composable.reportJobError()
expect(showExecutionErrorDialog).toHaveBeenCalledTimes(1)
expect(showExecutionErrorDialog).toHaveBeenCalledWith(executionError)
expect(showErrorDialog).not.toHaveBeenCalled()
})
it('does nothing when no error message and no execution_error', () => {
taskState.value = createTaskWithError('job-1')
composable.reportJobError()
expect(showErrorDialog).not.toHaveBeenCalled()
expect(showExecutionErrorDialog).not.toHaveBeenCalled()
})
})


@@ -1,13 +1,13 @@
import { computed } from 'vue'
import type { ComputedRef } from 'vue'
import type { ExecutionErrorWsMessage } from '@/schemas/apiSchema'
import type { ExecutionErrorDialogInput } from '@/services/dialogService'
import type { TaskItemImpl } from '@/stores/queueStore'
type CopyHandler = (value: string) => void | Promise<void>
export type JobErrorDialogService = {
showExecutionErrorDialog: (error: ExecutionErrorWsMessage) => void
showExecutionErrorDialog: (executionError: ExecutionErrorDialogInput) => void
showErrorDialog: (
error: Error,
options?: {
@@ -17,30 +17,7 @@ export type JobErrorDialogService = {
) => void
}
type JobExecutionError = {
detail?: ExecutionErrorWsMessage
message: string
}
export const extractExecutionError = (
task: TaskItemImpl | null
): JobExecutionError | null => {
const status = (task as TaskItemImpl | null)?.status
const messages = (status as { messages?: unknown[] } | undefined)?.messages
if (!Array.isArray(messages) || !messages.length) return null
const record = messages.find((entry: unknown) => {
return Array.isArray(entry) && entry[0] === 'execution_error'
}) as [string, ExecutionErrorWsMessage?] | undefined
if (!record) return null
const detail = record[1]
const message = String(detail?.exception_message ?? '')
return {
detail,
message
}
}
export type UseJobErrorReportingOptions = {
type UseJobErrorReportingOptions = {
taskForJob: ComputedRef<TaskItemImpl | null>
copyToClipboard: CopyHandler
dialog: JobErrorDialogService
@@ -51,10 +28,7 @@ export const useJobErrorReporting = ({
copyToClipboard,
dialog
}: UseJobErrorReportingOptions) => {
const errorMessageValue = computed(() => {
const error = extractExecutionError(taskForJob.value)
return error?.message ?? ''
})
const errorMessageValue = computed(() => taskForJob.value?.errorMessage ?? '')
const copyErrorMessage = () => {
if (errorMessageValue.value) {
@@ -63,11 +37,12 @@ export const useJobErrorReporting = ({
}
const reportJobError = () => {
const error = extractExecutionError(taskForJob.value)
if (error?.detail) {
dialog.showExecutionErrorDialog(error.detail)
const executionError = taskForJob.value?.executionError
if (executionError) {
dialog.showExecutionErrorDialog(executionError)
return
}
if (errorMessageValue.value) {
dialog.showErrorDialog(new Error(errorMessageValue.value), {
reportType: 'queueJobError'


@@ -240,12 +240,19 @@ import type { AssetItem } from '@/platform/assets/schemas/assetSchema'
import type { MediaKind } from '@/platform/assets/schemas/mediaAssetSchema'
import { isCloud } from '@/platform/distribution/types'
import { useSettingStore } from '@/platform/settings/settingStore'
import { getJobDetail } from '@/services/jobOutputCache'
import { useCommandStore } from '@/stores/commandStore'
import { useDialogStore } from '@/stores/dialogStore'
import { ResultItemImpl, useQueueStore } from '@/stores/queueStore'
import { formatDuration, getMediaTypeFromFilename } from '@/utils/formatUtil'
import { cn } from '@/utils/tailwindUtil'
interface JobOutputItem {
filename: string
subfolder: string
type: string
}
const { t, n } = useI18n()
const commandStore = useCommandStore()
const queueStore = useQueueStore()
@@ -492,6 +499,35 @@ function handleContextMenuHide() {
})
}
const handleBulkDownload = (assets: AssetItem[]) => {
downloadMultipleAssets(assets)
clearSelection()
}
const handleBulkDelete = async (assets: AssetItem[]) => {
await deleteMultipleAssets(assets)
clearSelection()
}
const handleClearQueue = async () => {
await commandStore.execute('Comfy.ClearPendingTasks')
}
const handleBulkAddToWorkflow = async (assets: AssetItem[]) => {
await addMultipleToWorkflow(assets)
clearSelection()
}
const handleBulkOpenWorkflow = async (assets: AssetItem[]) => {
await openMultipleWorkflows(assets)
clearSelection()
}
const handleBulkExportWorkflow = async (assets: AssetItem[]) => {
await exportMultipleWorkflows(assets)
clearSelection()
}
const handleZoomClick = (asset: AssetItem) => {
const mediaType = getMediaTypeFromFilename(asset.name)
@@ -519,16 +555,16 @@ const handleZoomClick = (asset: AssetItem) => {
}
}
const enterFolderView = (asset: AssetItem) => {
const enterFolderView = async (asset: AssetItem) => {
const metadata = getOutputAssetMetadata(asset.user_metadata)
if (!metadata) {
console.warn('Invalid output asset metadata')
return
}
const { promptId, allOutputs, executionTimeInSeconds } = metadata
const { promptId, allOutputs, executionTimeInSeconds, outputCount } = metadata
if (!promptId || !Array.isArray(allOutputs) || allOutputs.length === 0) {
if (!promptId) {
console.warn('Missing required folder view data')
return
}
@@ -536,7 +572,48 @@ const enterFolderView = (asset: AssetItem) => {
folderPromptId.value = promptId
folderExecutionTime.value = executionTimeInSeconds
folderAssets.value = allOutputs.map((output) => ({
// Determine which outputs to display
let outputsToDisplay = allOutputs ?? []
// If outputCount indicates more outputs than we have, fetch full outputs
const needsFullOutputs =
typeof outputCount === 'number' &&
outputCount > 1 &&
outputsToDisplay.length < outputCount
if (needsFullOutputs) {
try {
const jobDetail = await getJobDetail(promptId)
if (jobDetail?.outputs) {
// Convert job outputs to ResultItemImpl array
outputsToDisplay = Object.entries(jobDetail.outputs).flatMap(
([nodeId, nodeOutputs]) =>
Object.entries(nodeOutputs).flatMap(([mediaType, items]) =>
(items as JobOutputItem[])
.map(
(item) =>
new ResultItemImpl({
...item,
nodeId,
mediaType
})
)
.filter((r) => r.supportsPreview)
)
)
}
} catch (error) {
console.error('Failed to fetch job detail for folder view:', error)
outputsToDisplay = []
}
}
if (outputsToDisplay.length === 0) {
console.warn('No outputs available for folder view')
return
}
folderAssets.value = outputsToDisplay.map((output) => ({
id: `${output.nodeId}-${output.filename}`,
name: output.filename,
size: 0,
@@ -609,35 +686,6 @@ const handleDeleteSelected = async () => {
clearSelection()
}
const handleBulkDownload = (assets: AssetItem[]) => {
downloadMultipleAssets(assets)
clearSelection()
}
const handleBulkDelete = async (assets: AssetItem[]) => {
await deleteMultipleAssets(assets)
clearSelection()
}
const handleBulkAddToWorkflow = async (assets: AssetItem[]) => {
await addMultipleToWorkflow(assets)
clearSelection()
}
const handleBulkOpenWorkflow = async (assets: AssetItem[]) => {
await openMultipleWorkflows(assets)
clearSelection()
}
const handleBulkExportWorkflow = async (assets: AssetItem[]) => {
await exportMultipleWorkflows(assets)
clearSelection()
}
const handleClearQueue = async () => {
await commandStore.execute('Comfy.ClearPendingTasks')
}
const handleApproachEnd = useDebounceFn(async () => {
if (
activeTab.value === 'output' &&


@@ -1,5 +1,5 @@
<template>
<div class="flex h-full shrink-0 items-center gap-1">
<div class="flex h-full shrink-0 items-center gap-1 empty:hidden">
<Button
v-for="(button, index) in actionBarButtonStore.buttons"
:key="index"


@@ -47,7 +47,7 @@ const transform = computed(() => {
<template>
<div
ref="zoomPane"
class="contain-size flex place-content-center"
class="contain-size place-content-center"
@wheel="handleWheel"
@pointerdown.prevent="handleDown"
@pointermove="handleMove"


@@ -2,7 +2,7 @@
<div class="base-widget-layout rounded-2xl overflow-hidden relative">
<Button
v-show="!isRightPanelOpen && hasRightPanel"
size="icon"
size="lg"
:class="
cn('absolute top-4 right-18 z-10', 'transition-opacity duration-200', {
'opacity-0 pointer-events-none': isRightPanelOpen || !hasRightPanel
@@ -10,7 +10,7 @@
"
@click="toggleRightPanel"
>
<i class="icon-[lucide--panel-right] text-sm" />
<i class="icon-[lucide--panel-right]" />
</Button>
<Button
size="lg"
@@ -64,7 +64,7 @@
>
<Button
v-if="isRightPanelOpen && hasRightPanel"
size="icon"
size="lg"
@click="toggleRightPanel"
>
<i class="icon-[lucide--panel-right-close]" />
@@ -90,7 +90,7 @@
</div>
<aside
v-if="hasRightPanel && isRightPanelOpen"
class="w-1/4 min-w-40 max-w-80"
class="w-1/4 min-w-40 max-w-80 pt-16 pb-8"
>
<slot name="rightPanel"></slot>
</aside>
@@ -111,6 +111,10 @@ const { contentTitle } = defineProps<{
contentTitle: string
}>()
const isRightPanelOpen = defineModel<boolean>('rightPanelOpen', {
default: false
})
const BREAKPOINTS = { md: 880 }
const PANEL_SIZES = {
width: 'w-1/3',
@@ -125,7 +129,6 @@ const breakpoints = useBreakpoints(BREAKPOINTS)
const notMobile = breakpoints.greater('md')
const isLeftPanelOpen = ref<boolean>(true)
const isRightPanelOpen = ref<boolean>(false)
const mobileMenuOpen = ref<boolean>(false)
const hasRightPanel = computed(() => !!slots.rightPanel)


@@ -41,8 +41,8 @@ export const useCurrentUser = () => {
whenever(() => authStore.tokenRefreshTrigger, callback)
const onUserLogout = (callback: () => void) => {
watch(resolvedUserInfo, (user) => {
if (!user) callback()
watch(resolvedUserInfo, (user, prevUser) => {
if (prevUser && !user) callback()
})
}


@@ -1,41 +0,0 @@
import { breakpointsTailwind, useBreakpoints } from '@vueuse/core'
import { ref, watch } from 'vue'
type BreakpointKey = keyof typeof breakpointsTailwind
/**
* Composable for element with responsive collapsed state
* @param breakpointThreshold - Breakpoint at which the element should become collapsible
*/
export const useResponsiveCollapse = (
breakpointThreshold: BreakpointKey = 'lg'
) => {
const breakpoints = useBreakpoints(breakpointsTailwind)
const isSmallScreen = breakpoints.smallerOrEqual(breakpointThreshold)
const isOpen = ref(!isSmallScreen.value)
/**
* Handles screen size changes to automatically open/close the element
* when crossing the breakpoint threshold
*/
const onIsSmallScreenChange = () => {
if (isSmallScreen.value && isOpen.value) {
isOpen.value = false
} else if (!isSmallScreen.value && !isOpen.value) {
isOpen.value = true
}
}
watch(isSmallScreen, onIsSmallScreenChange)
return {
breakpoints,
isOpen,
isSmallScreen,
open: () => (isOpen.value = true),
close: () => (isOpen.value = false),
toggle: () => (isOpen.value = !isOpen.value)
}
}


@@ -16,7 +16,7 @@ type TestTask = {
executionTime?: number
executionEndTimestamp?: number
createTime?: number
workflow?: { id?: string }
workflowId?: string
}
const translations: Record<string, string> = {
@@ -161,7 +161,7 @@ const createTask = (
executionTime: overrides.executionTime,
executionEndTimestamp: overrides.executionEndTimestamp,
createTime: overrides.createTime,
workflow: overrides.workflow
workflowId: overrides.workflowId
})
const mountUseJobList = () => {
@@ -305,7 +305,7 @@ describe('useJobList', () => {
expect(vi.getTimerCount()).toBe(0)
})
it('sorts all tasks by queue index descending', async () => {
it('sorts all tasks by priority descending', async () => {
queueStoreMock.pendingTasks = [
createTask({ promptId: 'p', queueIndex: 1, mockState: 'pending' })
]
@@ -360,13 +360,13 @@ describe('useJobList', () => {
promptId: 'wf-1',
queueIndex: 2,
mockState: 'pending',
workflow: { id: 'workflow-1' }
workflowId: 'workflow-1'
}),
createTask({
promptId: 'wf-2',
queueIndex: 1,
mockState: 'pending',
workflow: { id: 'workflow-2' }
workflowId: 'workflow-2'
})
]


@@ -238,7 +238,7 @@ export function useJobList() {
const activeId = workflowStore.activeWorkflow?.activeState?.id
if (!activeId) return []
entries = entries.filter(({ task }) => {
const wid = task.workflow?.id
const wid = task.workflowId
return !!wid && wid === activeId
})
}


@@ -73,6 +73,7 @@ vi.mock('@/scripts/utils', () => ({
}))
const dialogServiceMock = {
showErrorDialog: vi.fn(),
showExecutionErrorDialog: vi.fn(),
prompt: vi.fn()
}
@@ -103,6 +104,11 @@ vi.mock('@/stores/queueStore', () => ({
useQueueStore: () => queueStoreMock
}))
const getJobWorkflowMock = vi.fn()
vi.mock('@/services/jobOutputCache', () => ({
getJobWorkflow: (...args: any[]) => getJobWorkflowMock(...args)
}))
const createAnnotatedPathMock = vi.fn()
vi.mock('@/utils/createAnnotatedPath', () => ({
createAnnotatedPath: (...args: any[]) => createAnnotatedPathMock(...args)
@@ -132,9 +138,7 @@ const createJobItem = (
title: overrides.title ?? 'Test job',
meta: overrides.meta ?? 'meta',
state: overrides.state ?? 'completed',
taskRef: overrides.taskRef as Partial<TaskItemImpl> | undefined as
| TaskItemImpl
| undefined,
taskRef: overrides.taskRef as TaskItemImpl | undefined,
iconName: overrides.iconName,
iconImageUrl: overrides.iconImageUrl,
showClear: overrides.showClear,
@@ -181,6 +185,8 @@ describe('useJobMenu', () => {
LoadVideo: { id: 'LoadVideo' },
LoadAudio: { id: 'LoadAudio' }
}
// Default: no workflow available via lazy loading
getJobWorkflowMock.mockResolvedValue(undefined)
})
const setCurrentItem = (item: JobListItem | null) => {
@@ -190,10 +196,13 @@ describe('useJobMenu', () => {
it('opens workflow when workflow data exists', async () => {
const { openJobWorkflow } = mountJobMenu()
const workflow = { nodes: [] }
setCurrentItem(createJobItem({ id: '55', taskRef: { workflow } }))
// Mock lazy loading via fetchJobDetail + extractWorkflow
getJobWorkflowMock.mockResolvedValue(workflow)
setCurrentItem(createJobItem({ id: '55' }))
await openJobWorkflow()
expect(getJobWorkflowMock).toHaveBeenCalledWith('55')
expect(workflowStoreMock.createTemporary).toHaveBeenCalledWith(
'Job 55.json',
workflow
@@ -268,11 +277,10 @@ describe('useJobMenu', () => {
it('copies error message from failed job entry', async () => {
const { jobMenuEntries } = mountJobMenu()
const error = { exception_message: 'boom' }
setCurrentItem(
createJobItem({
state: 'failed',
taskRef: { status: { messages: [['execution_error', error]] } } as any
taskRef: { errorMessage: 'Something went wrong' } as any
})
)
@@ -280,31 +288,75 @@ describe('useJobMenu', () => {
const entry = findActionEntry(jobMenuEntries.value, 'copy-error')
await entry?.onClick?.()
expect(copyToClipboardMock).toHaveBeenCalledWith('boom')
expect(copyToClipboardMock).toHaveBeenCalledWith('Something went wrong')
})
it('reports error via dialog when entry triggered', async () => {
it('reports error via rich dialog when execution_error available', async () => {
const executionError = {
prompt_id: 'job-1',
timestamp: 12345,
node_id: '5',
node_type: 'KSampler',
executed: ['1', '2'],
exception_message: 'CUDA out of memory',
exception_type: 'RuntimeError',
traceback: ['line 1', 'line 2'],
current_inputs: {},
current_outputs: {}
}
const { jobMenuEntries } = mountJobMenu()
const error = { exception_message: 'bad', extra: 1 }
setCurrentItem(
createJobItem({
state: 'failed',
taskRef: { status: { messages: [['execution_error', error]] } } as any
taskRef: {
errorMessage: 'CUDA out of memory',
executionError,
createTime: 12345
} as any
})
)
await nextTick()
const entry = findActionEntry(jobMenuEntries.value, 'report-error')
void entry?.onClick?.()
await entry?.onClick?.()
expect(dialogServiceMock.showExecutionErrorDialog).toHaveBeenCalledTimes(1)
expect(dialogServiceMock.showExecutionErrorDialog).toHaveBeenCalledWith(
error
executionError
)
expect(dialogServiceMock.showErrorDialog).not.toHaveBeenCalled()
})
it('falls back to simple error dialog when no execution_error', async () => {
const { jobMenuEntries } = mountJobMenu()
setCurrentItem(
createJobItem({
state: 'failed',
taskRef: { errorMessage: 'Job failed with error' } as any
})
)
await nextTick()
const entry = findActionEntry(jobMenuEntries.value, 'report-error')
await entry?.onClick?.()
expect(dialogServiceMock.showExecutionErrorDialog).not.toHaveBeenCalled()
expect(dialogServiceMock.showErrorDialog).toHaveBeenCalledTimes(1)
const [errorArg, optionsArg] =
dialogServiceMock.showErrorDialog.mock.calls[0]
expect(errorArg).toBeInstanceOf(Error)
expect(errorArg.message).toBe('Job failed with error')
expect(optionsArg).toEqual({ reportType: 'queueJobError' })
})
it('ignores error actions when message missing', async () => {
const { jobMenuEntries } = mountJobMenu()
setCurrentItem(createJobItem({ state: 'failed', taskRef: { status: {} } }))
setCurrentItem(
createJobItem({
state: 'failed',
taskRef: { errorMessage: undefined } as any
})
)
await nextTick()
const copyEntry = findActionEntry(jobMenuEntries.value, 'copy-error')
@@ -313,6 +365,7 @@ describe('useJobMenu', () => {
await reportEntry?.onClick?.()
expect(copyToClipboardMock).not.toHaveBeenCalled()
expect(dialogServiceMock.showErrorDialog).not.toHaveBeenCalled()
expect(dialogServiceMock.showExecutionErrorDialog).not.toHaveBeenCalled()
})
@@ -488,12 +541,13 @@ describe('useJobMenu', () => {
})
it('exports workflow with default filename when prompting disabled', async () => {
const workflow = { foo: 'bar' }
getJobWorkflowMock.mockResolvedValue(workflow)
const { jobMenuEntries } = mountJobMenu()
setCurrentItem(
createJobItem({
id: '7',
state: 'completed',
taskRef: { workflow: { foo: 'bar' } }
state: 'completed'
})
)
@@ -513,11 +567,11 @@ describe('useJobMenu', () => {
it('prompts for filename when setting enabled', async () => {
settingStoreMock.get.mockReturnValue(true)
dialogServiceMock.prompt.mockResolvedValue('custom-name')
getJobWorkflowMock.mockResolvedValue({})
const { jobMenuEntries } = mountJobMenu()
setCurrentItem(
createJobItem({
state: 'completed',
taskRef: { workflow: {} }
state: 'completed'
})
)
@@ -537,12 +591,12 @@ describe('useJobMenu', () => {
it('keeps existing json extension when exporting workflow', async () => {
settingStoreMock.get.mockReturnValue(true)
dialogServiceMock.prompt.mockResolvedValue('existing.json')
getJobWorkflowMock.mockResolvedValue({ foo: 'bar' })
const { jobMenuEntries } = mountJobMenu()
setCurrentItem(
createJobItem({
id: '42',
state: 'completed',
taskRef: { workflow: { foo: 'bar' } }
state: 'completed'
})
)
@@ -558,11 +612,11 @@ describe('useJobMenu', () => {
it('abandons export when prompt cancelled', async () => {
settingStoreMock.get.mockReturnValue(true)
dialogServiceMock.prompt.mockResolvedValue('')
getJobWorkflowMock.mockResolvedValue({})
const { jobMenuEntries } = mountJobMenu()
setCurrentItem(
createJobItem({
state: 'completed',
taskRef: { workflow: {} }
state: 'completed'
})
)
@@ -682,7 +736,12 @@ describe('useJobMenu', () => {
it('returns failed menu entries with error actions', async () => {
const { jobMenuEntries } = mountJobMenu()
setCurrentItem(createJobItem({ state: 'failed', taskRef: { status: {} } }))
setCurrentItem(
createJobItem({
state: 'failed',
taskRef: { errorMessage: 'Some error' } as any
})
)
await nextTick()
expect(jobMenuEntries.value.map((entry) => entry.key)).toEqual([


@@ -9,15 +9,11 @@ import { useMediaAssetActions } from '@/platform/assets/composables/useMediaAsse
import { useSettingStore } from '@/platform/settings/settingStore'
import { useWorkflowService } from '@/platform/workflow/core/services/workflowService'
import { useWorkflowStore } from '@/platform/workflow/management/stores/workflowStore'
import type {
ExecutionErrorWsMessage,
ResultItem,
ResultItemType,
TaskStatus
} from '@/schemas/apiSchema'
import type { ResultItem, ResultItemType } from '@/schemas/apiSchema'
import { api } from '@/scripts/api'
import { downloadBlob } from '@/scripts/utils'
import { useDialogService } from '@/services/dialogService'
import { getJobWorkflow } from '@/services/jobOutputCache'
import { useLitegraphService } from '@/services/litegraphService'
import { useNodeDefStore } from '@/stores/nodeDefStore'
import { useQueueStore } from '@/stores/queueStore'
@@ -59,7 +55,7 @@ export function useJobMenu(
const openJobWorkflow = async (item?: JobListItem | null) => {
const target = resolveItem(item)
if (!target) return
const data = target.taskRef?.workflow
const data = await getJobWorkflow(target.id)
if (!data) return
const filename = `Job ${target.id}.json`
const temp = workflowStore.createTemporary(filename, data)
@@ -83,37 +79,39 @@ export function useJobMenu(
await queueStore.update()
}
const findExecutionError = (
messages: TaskStatus['messages'] | undefined
): ExecutionErrorWsMessage | undefined => {
const errMessage = messages?.find((m) => m[0] === 'execution_error')
if (errMessage && errMessage[0] === 'execution_error') {
return errMessage[1]
}
return undefined
}
const copyErrorMessage = async (item?: JobListItem | null) => {
const target = resolveItem(item)
if (!target) return
const err = findExecutionError(target.taskRef?.status?.messages)
const message = err?.exception_message
if (message) await copyToClipboard(String(message))
const message = target?.taskRef?.errorMessage
if (message) await copyToClipboard(message)
}
const reportError = (item?: JobListItem | null) => {
const target = resolveItem(item)
if (!target) return
const err = findExecutionError(target.taskRef?.status?.messages)
if (err) useDialogService().showExecutionErrorDialog(err)
// Use execution_error from list response if available
const executionError = target.taskRef?.executionError
if (executionError) {
useDialogService().showExecutionErrorDialog(executionError)
return
}
// Fall back to simple error dialog
const message = target.taskRef?.errorMessage
if (message) {
useDialogService().showErrorDialog(new Error(message), {
reportType: 'queueJobError'
})
}
}
// This is very magical only because it matches the respective backend implementation
// There is or will be a better way to do this
const addOutputLoaderNode = async (item?: JobListItem | null) => {
const target = resolveItem(item)
if (!target) return
const result: ResultItemImpl | undefined = target.taskRef?.previewOutput
const addOutputLoaderNode = async () => {
const item = currentMenuItem()
if (!item) return
const result: ResultItemImpl | undefined = item.taskRef?.previewOutput
if (!result) return
let nodeType: 'LoadImage' | 'LoadVideo' | 'LoadAudio' | null = null
@@ -161,10 +159,10 @@ export function useJobMenu(
/**
* Trigger a download of the job's previewable output asset.
*/
const downloadPreviewAsset = (item?: JobListItem | null) => {
const target = resolveItem(item)
if (!target) return
const result: ResultItemImpl | undefined = target.taskRef?.previewOutput
const downloadPreviewAsset = () => {
const item = currentMenuItem()
if (!item) return
const result: ResultItemImpl | undefined = item.taskRef?.previewOutput
if (!result) return
downloadFile(result.url)
}
@@ -172,14 +170,14 @@ export function useJobMenu(
/**
* Export the workflow JSON attached to the job.
*/
const exportJobWorkflow = async (item?: JobListItem | null) => {
const target = resolveItem(item)
if (!target) return
const data = target.taskRef?.workflow
const exportJobWorkflow = async () => {
const item = currentMenuItem()
if (!item) return
const data = await getJobWorkflow(item.id)
if (!data) return
const settingStore = useSettingStore()
let filename = `Job ${target.id}.json`
let filename = `Job ${item.id}.json`
if (settingStore.get('Comfy.PromptFilename')) {
const input = await useDialogService().prompt({
@@ -196,10 +194,10 @@ export function useJobMenu(
downloadBlob(filename, blob)
}
const deleteJobAsset = async (item?: JobListItem | null) => {
const target = resolveItem(item)
if (!target) return
const task = target.taskRef as TaskItemImpl | undefined
const deleteJobAsset = async () => {
const item = currentMenuItem()
if (!item) return
const task = item.taskRef as TaskItemImpl | undefined
const preview = task?.previewOutput
if (!task || !preview) return
@@ -210,8 +208,8 @@ export function useJobMenu(
}
}
const removeFailedJob = async (item?: JobListItem | null) => {
const task = resolveItem(item)?.taskRef as TaskItemImpl | undefined
const removeFailedJob = async () => {
const task = currentMenuItem()?.taskRef as TaskItemImpl | undefined
if (!task) return
await queueStore.delete(task)
}
@@ -242,8 +240,8 @@ export function useJobMenu(
icon: 'icon-[lucide--zoom-in]',
onClick: onInspectAsset
? () => {
const current = resolveItem()
if (current) onInspectAsset(current)
const item = currentMenuItem()
if (item) onInspectAsset(item)
}
: undefined
},
@@ -254,33 +252,33 @@ export function useJobMenu(
'Add to current workflow'
),
icon: 'icon-[comfy--node]',
onClick: () => addOutputLoaderNode(resolveItem())
onClick: addOutputLoaderNode
},
{
key: 'download',
label: st('queue.jobMenu.download', 'Download'),
icon: 'icon-[lucide--download]',
onClick: () => downloadPreviewAsset(resolveItem())
onClick: downloadPreviewAsset
},
{ kind: 'divider', key: 'd1' },
{
key: 'open-workflow',
label: jobMenuOpenWorkflowLabel.value,
icon: 'icon-[comfy--workflow]',
onClick: () => openJobWorkflow(resolveItem())
onClick: openJobWorkflow
},
{
key: 'export-workflow',
label: st('queue.jobMenu.exportWorkflow', 'Export workflow'),
icon: 'icon-[comfy--file-output]',
onClick: () => exportJobWorkflow(resolveItem())
onClick: exportJobWorkflow
},
{ kind: 'divider', key: 'd2' },
{
key: 'copy-id',
label: jobMenuCopyJobIdLabel.value,
icon: 'icon-[lucide--copy]',
onClick: () => copyJobId(resolveItem())
onClick: copyJobId
},
{ kind: 'divider', key: 'd3' },
...(hasDeletableAsset
@@ -289,7 +287,7 @@ export function useJobMenu(
key: 'delete',
label: st('queue.jobMenu.deleteAsset', 'Delete asset'),
icon: 'icon-[lucide--trash-2]',
onClick: () => deleteJobAsset(resolveItem())
onClick: deleteJobAsset
}
]
: [])
@@ -301,33 +299,33 @@ export function useJobMenu(
key: 'open-workflow',
label: jobMenuOpenWorkflowFailedLabel.value,
icon: 'icon-[comfy--workflow]',
onClick: () => openJobWorkflow(resolveItem())
onClick: openJobWorkflow
},
{ kind: 'divider', key: 'd1' },
{
key: 'copy-id',
label: jobMenuCopyJobIdLabel.value,
icon: 'icon-[lucide--copy]',
onClick: () => copyJobId(resolveItem())
onClick: copyJobId
},
{
key: 'copy-error',
label: st('queue.jobMenu.copyErrorMessage', 'Copy error message'),
icon: 'icon-[lucide--copy]',
onClick: () => copyErrorMessage(resolveItem())
onClick: copyErrorMessage
},
{
key: 'report-error',
label: st('queue.jobMenu.reportError', 'Report error'),
icon: 'icon-[lucide--message-circle-warning]',
onClick: () => reportError(resolveItem())
onClick: reportError
},
{ kind: 'divider', key: 'd2' },
{
key: 'delete',
label: st('queue.jobMenu.removeJob', 'Remove job'),
icon: 'icon-[lucide--circle-minus]',
onClick: () => removeFailedJob(resolveItem())
onClick: removeFailedJob
}
]
}
@@ -336,21 +334,21 @@ export function useJobMenu(
key: 'open-workflow',
label: jobMenuOpenWorkflowLabel.value,
icon: 'icon-[comfy--workflow]',
onClick: () => openJobWorkflow(resolveItem())
onClick: openJobWorkflow
},
{ kind: 'divider', key: 'd1' },
{
key: 'copy-id',
label: jobMenuCopyJobIdLabel.value,
icon: 'icon-[lucide--copy]',
onClick: () => copyJobId(resolveItem())
onClick: copyJobId
},
{ kind: 'divider', key: 'd2' },
{
key: 'cancel-job',
label: jobMenuCancelLabel.value,
icon: 'icon-[lucide--x]',
onClick: () => cancelJob(resolveItem())
onClick: cancelJob
}
]
})


@@ -1,35 +1,74 @@
import { describe, it, expect } from 'vitest'
import { createPinia, setActivePinia } from 'pinia'
import { beforeEach, describe, expect, it } from 'vitest'
import { useResultGallery } from '@/composables/queue/useResultGallery'
import type { JobListItem } from '@/composables/queue/useJobList'
import type { JobListItem as JobListViewItem } from '@/composables/queue/useJobList'
import type { JobListItem } from '@/platform/remote/comfyui/jobs/jobTypes'
import { ResultItemImpl, TaskItemImpl } from '@/stores/queueStore'
type PreviewLike = { url: string; supportsPreview: boolean }
const createResultItem = (
url: string,
supportsPreview = true
): ResultItemImpl => {
const item = new ResultItemImpl({
filename: url,
subfolder: '',
type: 'output',
nodeId: 'node-1',
mediaType: supportsPreview ? 'images' : 'unknown'
})
// Override url getter for test matching
Object.defineProperty(item, 'url', { get: () => url })
Object.defineProperty(item, 'supportsPreview', { get: () => supportsPreview })
return item
}
const createPreview = (url: string, supportsPreview = true): PreviewLike => ({
url,
supportsPreview
const createMockJob = (id: string, outputsCount = 1): JobListItem => ({
id,
status: 'completed',
create_time: Date.now(),
preview_output: null,
outputs_count: outputsCount,
priority: 0
})
const createTask = (preview?: PreviewLike) => ({
previewOutput: preview
})
const createTask = (
preview?: ResultItemImpl,
allOutputs?: ResultItemImpl[],
outputsCount = 1
): TaskItemImpl => {
const job = createMockJob(
`task-${Math.random().toString(36).slice(2)}`,
outputsCount
)
const flatOutputs = allOutputs ?? (preview ? [preview] : [])
return new TaskItemImpl(job, {}, flatOutputs)
}
const createJobItem = (id: string, preview?: PreviewLike): JobListItem =>
const createJobViewItem = (
id: string,
taskRef?: TaskItemImpl
): JobListViewItem =>
({
id,
title: `Job ${id}`,
meta: '',
state: 'completed',
showClear: false,
taskRef: preview ? { previewOutput: preview } : undefined
}) as JobListItem
taskRef
}) as JobListViewItem
describe('useResultGallery', () => {
it('collects only previewable outputs and preserves their order', () => {
const previewable = [createPreview('p-1'), createPreview('p-2')]
beforeEach(() => {
setActivePinia(createPinia())
})
it('collects only previewable outputs and preserves their order', async () => {
const previewable = [createResultItem('p-1'), createResultItem('p-2')]
const nonPreviewable = createResultItem('skip-me', false)
const tasks = [
createTask(previewable[0]),
createTask({ url: 'skip-me', supportsPreview: false }),
createTask(nonPreviewable),
createTask(previewable[1]),
createTask()
]
@@ -38,28 +77,28 @@ describe('useResultGallery', () => {
() => tasks
)
onViewItem(createJobItem('job-1', previewable[0]))
await onViewItem(createJobViewItem('job-1', tasks[0]))
expect(galleryItems.value).toEqual(previewable)
expect(galleryItems.value).toEqual([previewable[0]])
expect(galleryActiveIndex.value).toBe(0)
})
it('does not change state when there are no previewable tasks', () => {
it('does not change state when there are no previewable tasks', async () => {
const { galleryItems, galleryActiveIndex, onViewItem } = useResultGallery(
() => []
)
onViewItem(createJobItem('job-missing'))
await onViewItem(createJobViewItem('job-missing'))
expect(galleryItems.value).toEqual([])
expect(galleryActiveIndex.value).toBe(-1)
})
it('activates the index that matches the viewed preview URL', () => {
it('activates the index that matches the viewed preview URL', async () => {
const previewable = [
createPreview('p-1'),
createPreview('p-2'),
createPreview('p-3')
createResultItem('p-1'),
createResultItem('p-2'),
createResultItem('p-3')
]
const tasks = previewable.map((preview) => createTask(preview))
@@ -67,37 +106,66 @@ describe('useResultGallery', () => {
() => tasks
)
onViewItem(createJobItem('job-2', createPreview('p-2')))
await onViewItem(createJobViewItem('job-2', tasks[1]))
expect(galleryItems.value).toEqual(previewable)
expect(galleryActiveIndex.value).toBe(1)
expect(galleryItems.value).toEqual([previewable[1]])
expect(galleryActiveIndex.value).toBe(0)
})
it('defaults to the first entry when the clicked job lacks a preview', () => {
const previewable = [createPreview('p-1'), createPreview('p-2')]
it('defaults to the first entry when the clicked job lacks a preview', async () => {
const previewable = [createResultItem('p-1'), createResultItem('p-2')]
const tasks = previewable.map((preview) => createTask(preview))
const { galleryItems, galleryActiveIndex, onViewItem } = useResultGallery(
() => tasks
)
onViewItem(createJobItem('job-no-preview'))
await onViewItem(createJobViewItem('job-no-preview'))
expect(galleryItems.value).toEqual(previewable)
expect(galleryActiveIndex.value).toBe(0)
})
it('defaults to the first entry when no gallery item matches the preview URL', () => {
const previewable = [createPreview('p-1'), createPreview('p-2')]
it('defaults to the first entry when no gallery item matches the preview URL', async () => {
const previewable = [createResultItem('p-1'), createResultItem('p-2')]
const tasks = previewable.map((preview) => createTask(preview))
const { galleryItems, galleryActiveIndex, onViewItem } = useResultGallery(
() => tasks
)
onViewItem(createJobItem('job-mismatch', createPreview('missing')))
const taskWithMismatchedPreview = createTask(createResultItem('missing'))
await onViewItem(
createJobViewItem('job-mismatch', taskWithMismatchedPreview)
)
expect(galleryItems.value).toEqual(previewable)
expect(galleryItems.value).toEqual([createResultItem('missing')])
expect(galleryActiveIndex.value).toBe(0)
})
it('loads full outputs when task has only preview outputs', async () => {
const previewOutput = createResultItem('preview-1')
const fullOutputs = [
createResultItem('full-1'),
createResultItem('full-2'),
createResultItem('full-3')
]
// Create a task with outputsCount > 1 to trigger lazy loading
const job = createMockJob('task-1', 3)
const task = new TaskItemImpl(job, {}, [previewOutput])
// Mock loadFullOutputs to return full outputs
const loadedTask = new TaskItemImpl(job, {}, fullOutputs)
task.loadFullOutputs = async () => loadedTask
const { galleryItems, galleryActiveIndex, onViewItem } = useResultGallery(
() => [task]
)
await onViewItem(createJobViewItem('job-1', task))
expect(galleryItems.value).toEqual(fullOutputs)
expect(galleryActiveIndex.value).toBe(0)
})
})


@@ -1,39 +1,42 @@
import { ref, shallowRef } from 'vue'
import type { JobListItem } from '@/composables/queue/useJobList'
/** Minimal preview item interface for gallery filtering. */
interface PreviewItem {
url: string
supportsPreview: boolean
}
/** Minimal task interface for gallery preview. */
interface TaskWithPreview<T extends PreviewItem = PreviewItem> {
previewOutput?: T
}
import { findActiveIndex, getOutputsForTask } from '@/services/jobOutputCache'
import type { ResultItemImpl, TaskItemImpl } from '@/stores/queueStore'
/**
* Manages result gallery state and activation for queue items.
*/
export function useResultGallery<T extends PreviewItem>(
getFilteredTasks: () => TaskWithPreview<T>[]
) {
export function useResultGallery(getFilteredTasks: () => TaskItemImpl[]) {
const galleryActiveIndex = ref(-1)
const galleryItems = shallowRef<T[]>([])
const galleryItems = shallowRef<ResultItemImpl[]>([])
const onViewItem = (item: JobListItem) => {
const items: T[] = getFilteredTasks().flatMap((t) => {
const preview = t.previewOutput
return preview && preview.supportsPreview ? [preview] : []
})
async function onViewItem(item: JobListItem) {
const tasks = getFilteredTasks()
if (!tasks.length) return
const targetTask = item.taskRef
const targetOutputs = targetTask
? await getOutputsForTask(targetTask)
: null
// Request was superseded by a newer one
if (targetOutputs === null && targetTask) return
// Use target's outputs if available, otherwise fall back to all previews
const items = targetOutputs?.length
? targetOutputs
: tasks
.map((t) => t.previewOutput)
.filter((o): o is ResultItemImpl => !!o)
if (!items.length) return
galleryItems.value = items
const activeUrl: string | undefined = item.taskRef?.previewOutput?.url
const idx = activeUrl ? items.findIndex((o) => o.url === activeUrl) : 0
galleryActiveIndex.value = idx >= 0 ? idx : 0
galleryActiveIndex.value = findActiveIndex(
items,
item.taskRef?.previewOutput?.url
)
}
return {


@@ -17,7 +17,8 @@ export enum ServerFeatureFlag {
ONBOARDING_SURVEY_ENABLED = 'onboarding_survey_enabled',
HUGGINGFACE_MODEL_IMPORT_ENABLED = 'huggingface_model_import_enabled',
LINEAR_TOGGLE_ENABLED = 'linear_toggle_enabled',
ASYNC_MODEL_UPLOAD_ENABLED = 'async_model_upload_enabled'
ASYNC_MODEL_UPLOAD_ENABLED = 'async_model_upload_enabled',
TEAM_WORKSPACES_ENABLED = 'team_workspaces_enabled'
}
/**
@@ -92,6 +93,12 @@ export function useFeatureFlags() {
false
)
)
},
get teamWorkspacesEnabled() {
return (
remoteConfig.value.team_workspaces_enabled ??
api.getServerFeature(ServerFeatureFlag.TEAM_WORKSPACES_ENABLED, false)
)
}
})


@@ -0,0 +1,41 @@
# Litegraph Guidelines
## Code Philosophy
- Write concise, legible, and easily maintainable code
- Avoid repetition where possible, but not at the expense of legibility
- Prefer running single tests, not the whole suite, for performance
## Code Style
- Prefer single line `if` syntax for concise expressions
- Take advantage of `TypedArray` `subarray` when appropriate
- The `size` and `pos` properties of `Rectangle` share the same array buffer
- Prefer returning `undefined` over `null`
- Type assertions are a last resort (acceptable for legacy code interop)
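A tiny hedged sketch of the `subarray` point above (illustrative only; the real `Rectangle` class in litegraph is not reproduced here).

```typescript
// Illustrative only: subarray views share one buffer, so a Rectangle-like type
// can expose `pos` and `size` without copying.
const data = new Float64Array(4) // [x, y, width, height]
const pos = data.subarray(0, 2)  // view over x, y
const size = data.subarray(2, 4) // view over width, height

pos[0] = 10
size[1] = 200
console.log(Array.from(data))    // [10, 0, 0, 200] - both writes hit `data`
```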
## Circular Dependencies in Tests
**CRITICAL**: Always import from the barrel export for subgraph code:
```typescript
// ✅ Correct - barrel import
import { LGraph, Subgraph, SubgraphNode } from "@/lib/litegraph/src/litegraph"
// ❌ Wrong - causes circular dependency
import { LGraph } from "@/lib/litegraph/src/LGraph"
```
**Root cause**: `LGraph` ↔ `Subgraph` circular dependency (Subgraph extends LGraph, LGraph creates Subgraph instances).
## Test Helpers
```typescript
import { createTestSubgraph, createTestSubgraphNode } from "./fixtures/subgraphHelpers"
function createTestSetup() {
const subgraph = createTestSubgraph()
const subgraphNode = createTestSubgraphNode(subgraph)
return { subgraph, subgraphNode }
}
```


@@ -1,62 +1,3 @@
- This codebase has extensive eslint autofix rules and IDEs are configured to use eslint as the format on save tool. Run ESLint instead of manually figuring out whitespace fixes or other trivial style concerns. Review the results and correct any remaining eslint errors.
- Take advantage of `TypedArray` `subarray` when appropriate.
- The `size` and `pos` properties of `Rectangle` share the same array buffer (`subarray`); they may be used to set the rectangles size and position.
- Prefer single line `if` syntax over adding curly braces when the statement has a very concise expression and a concise, single-line statement.
- Do not replace `&&=` or `||=` with `=` when there is no reason to do so. If you do find a reason to remove either `&&=` or `||=`, leave a comment explaining why the removal occurred.
- You are allowed to research code on https://developer.mozilla.org/ and https://stackoverflow.com without asking.
- When adding features, always write vitest unit tests using cursor rules in @.cursor
- When writing methods, prefer returning idiomatic JavaScript `undefined` over `null`.
# Bash commands
- `pnpm typecheck` Run the typechecker
- `pnpm build` Build the project
- `pnpm lint:fix` Run ESLint
# Code style
- Always prefer best practices when writing code.
- Write using concise, legible, and easily maintainable code.
- Avoid repetition where possible, but not at the expense of code legibility.
- Type assertions are an absolute last resort. In almost all cases, they are a crutch that leads to brittle code.
# Workflow
- Be sure to typecheck when you're done making a series of code changes
- Prefer running single tests, and not the whole test suite, for performance
# Testing Guidelines
## Avoiding Circular Dependencies in Tests
**CRITICAL**: When writing tests for subgraph-related code, always import from the barrel export to avoid circular dependency issues:
```typescript
// ✅ CORRECT - Use barrel import
import { LGraph, Subgraph, SubgraphNode } from "@/lib/litegraph/src/litegraph"
// ❌ WRONG - Direct imports cause circular dependency
import { LGraph } from "@/lib/litegraph/src/LGraph"
import { Subgraph } from "@/lib/litegraph/src/subgraph/Subgraph"
import { SubgraphNode } from "@/lib/litegraph/src/subgraph/SubgraphNode"
```
**Root cause**: `LGraph` and `Subgraph` have a circular dependency:
- `LGraph.ts` imports `Subgraph` (creates instances with `new Subgraph()`)
- `Subgraph.ts` extends `LGraph`
The barrel export (`@/litegraph`) handles this properly, but direct imports cause module loading failures.
## Test Setup for Subgraphs
Use the provided test helpers for consistent setup:
```typescript
import { createTestSubgraph, createTestSubgraphNode } from "./fixtures/subgraphHelpers"
function createTestSetup() {
const subgraph = createTestSubgraph()
const subgraphNode = createTestSubgraphNode(subgraph)
return { subgraph, subgraphNode }
}
```
<!-- The graphs connect, the nodes align with grace,
Yet Claude alone won't join the commonplace. -->
@AGENTS.md

View File

@@ -8,6 +8,7 @@ import { useLayoutMutations } from '@/renderer/core/layout/operations/layoutMuta
import { layoutStore } from '@/renderer/core/layout/store/layoutStore'
import { LayoutSource } from '@/renderer/core/layout/types'
import { removeNodeTitleHeight } from '@/renderer/core/layout/utils/nodeSizeUtil'
import { forEachNode } from '@/utils/graphTraversalUtil'
import { CanvasPointer } from './CanvasPointer'
import type { ContextMenu } from './ContextMenu'
@@ -4057,6 +4058,8 @@ export class LGraphCanvas implements CustomEventDispatcher<LGraphCanvasEventMap>
layoutStore.batchUpdateNodeBounds(newPositions)
this.selectItems(created)
forEachNode(graph, (n) => n.onGraphConfigured?.())
forEachNode(graph, (n) => n.onAfterGraphConfigured?.())
graph.afterChange()
this.emitAfterChange()

View File

@@ -848,6 +848,7 @@
"releaseTitle": "إصدار {package} {version}",
"reloadToApplyChanges": "أعد التحميل لتطبيق التغييرات",
"removeImage": "إزالة الصورة",
"removeTag": "إزالة الوسم",
"removeVideo": "إزالة الفيديو",
"rename": "إعادة تسمية",
"reportIssue": "إرسال تقرير",
@@ -2575,6 +2576,21 @@
"exportWorkflow": "تصدير سير العمل",
"saveWorkflow": "حفظ سير العمل"
},
"workspace": {
"unsavedChanges": {
"message": "لديك تغييرات غير محفوظة. هل تريد تجاهلها والانتقال إلى مساحة عمل أخرى؟",
"title": "تغييرات غير محفوظة"
}
},
"workspaceAuth": {
"errors": {
"accessDenied": "ليس لديك صلاحية الوصول إلى هذه مساحة العمل",
"invalidFirebaseToken": "فشل التحقق من الهوية. يرجى محاولة تسجيل الدخول مرة أخرى.",
"notAuthenticated": "يجب تسجيل الدخول للوصول إلى مساحات العمل",
"tokenExchangeFailed": "فشل التحقق من مساحة العمل: {error}",
"workspaceNotFound": "لم يتم العثور على مساحة العمل"
}
},
"zoomControls": {
"hideMinimap": "إخفاء الخريطة المصغرة",
"label": "عناصر التحكم في التكبير",

View File

@@ -273,7 +273,7 @@
"noItems": "No items"
},
"manager": {
"title": "Custom Nodes Manager",
"title": "Nodes Manager",
"legacyMenuNotAvailable": "Legacy manager menu is not available, defaulting to the new manager menu.",
"legacyManagerUI": "Use Legacy UI",
"legacyManagerUIDescription": "To use the legacy Manager UI, start ComfyUI with --enable-manager-legacy-ui",
@@ -2496,7 +2496,7 @@
},
"linearMode": {
"linearMode": "Simple Mode",
"beta": "Beta - Give Feedback",
"beta": "Simple Mode in Beta - Feedback",
"graphMode": "Graph Mode",
"dragAndDropImage": "Click to browse or drag an image",
"runCount": "Run count:",
@@ -2592,5 +2592,20 @@
"completed": "Completed",
"failed": "Failed"
}
},
"workspace": {
"unsavedChanges": {
"title": "Unsaved Changes",
"message": "You have unsaved changes. Do you want to discard them and switch workspaces?"
}
},
"workspaceAuth": {
"errors": {
"notAuthenticated": "You must be logged in to access workspaces",
"invalidFirebaseToken": "Authentication failed. Please try logging in again.",
"accessDenied": "You do not have access to this workspace",
"workspaceNotFound": "Workspace not found",
"tokenExchangeFailed": "Failed to authenticate with workspace: {error}"
}
}
}
}

View File

@@ -848,6 +848,7 @@
"releaseTitle": "Lanzamiento de {package} {version}",
"reloadToApplyChanges": "Recargar para aplicar cambios",
"removeImage": "Eliminar imagen",
"removeTag": "Eliminar etiqueta",
"removeVideo": "Eliminar video",
"rename": "Renombrar",
"reportIssue": "Enviar informe",
@@ -2575,6 +2576,21 @@
"exportWorkflow": "Exportar flujo de trabajo",
"saveWorkflow": "Guardar flujo de trabajo"
},
"workspace": {
"unsavedChanges": {
"message": "Tienes cambios no guardados. ¿Quieres descartarlos y cambiar de espacio de trabajo?",
"title": "Cambios no guardados"
}
},
"workspaceAuth": {
"errors": {
"accessDenied": "No tienes acceso a este espacio de trabajo",
"invalidFirebaseToken": "La autenticación ha fallado. Por favor, intenta iniciar sesión de nuevo.",
"notAuthenticated": "Debes iniciar sesión para acceder a los espacios de trabajo",
"tokenExchangeFailed": "No se pudo autenticar con el espacio de trabajo: {error}",
"workspaceNotFound": "Espacio de trabajo no encontrado"
}
},
"zoomControls": {
"hideMinimap": "Ocultar minimapa",
"label": "Controles de zoom",

View File

@@ -848,6 +848,7 @@
"releaseTitle": "انتشار {package} نسخه {version}",
"reloadToApplyChanges": "برای اعمال تغییرات بارگذاری مجدد کنید",
"removeImage": "حذف تصویر",
"removeTag": "حذف برچسب",
"removeVideo": "حذف ویدیو",
"rename": "تغییر نام",
"reportIssue": "ارسال گزارش",
@@ -2586,6 +2587,21 @@
"exportWorkflow": "خروجی گرفتن از workflow",
"saveWorkflow": "ذخیره workflow"
},
"workspace": {
"unsavedChanges": {
"message": "شما تغییرات ذخیره‌نشده دارید. آیا می‌خواهید آن‌ها را رها کرده و فضای کاری را تغییر دهید؟",
"title": "تغییرات ذخیره‌نشده"
}
},
"workspaceAuth": {
"errors": {
"accessDenied": "شما به این فضای کاری دسترسی ندارید.",
"invalidFirebaseToken": "احراز هویت ناموفق بود. لطفاً دوباره وارد شوید.",
"notAuthenticated": "برای دسترسی به فضاهای کاری باید وارد شوید.",
"tokenExchangeFailed": "احراز هویت با فضای کاری ناموفق بود: {error}",
"workspaceNotFound": "فضای کاری پیدا نشد."
}
},
"zoomControls": {
"hideMinimap": "مخفی‌سازی نقشه کوچک",
"label": "کنترل‌های بزرگ‌نمایی",

View File

@@ -848,6 +848,7 @@
"releaseTitle": "Publication de {package} {version}",
"reloadToApplyChanges": "Recharger pour appliquer les modifications",
"removeImage": "Supprimer l'image",
"removeTag": "Supprimer le tag",
"removeVideo": "Supprimer la vidéo",
"rename": "Renommer",
"reportIssue": "Envoyer le rapport",
@@ -2575,6 +2576,21 @@
"exportWorkflow": "Exporter le flux de travail",
"saveWorkflow": "Enregistrer le flux de travail"
},
"workspace": {
"unsavedChanges": {
"message": "Vous avez des modifications non enregistrées. Voulez-vous les abandonner et changer despace de travail ?",
"title": "Modifications non enregistrées"
}
},
"workspaceAuth": {
"errors": {
"accessDenied": "Vous navez pas accès à cet espace de travail",
"invalidFirebaseToken": "Échec de lauthentification. Veuillez vous reconnecter.",
"notAuthenticated": "Vous devez être connecté pour accéder aux espaces de travail",
"tokenExchangeFailed": "Échec de lauthentification avec lespace de travail : {error}",
"workspaceNotFound": "Espace de travail introuvable"
}
},
"zoomControls": {
"hideMinimap": "Masquer la mini-carte",
"label": "Contrôles de zoom",

View File

@@ -848,6 +848,7 @@
"releaseTitle": "{package} {version} リリース",
"reloadToApplyChanges": "変更を適用するには再読み込みしてください",
"removeImage": "画像を削除",
"removeTag": "タグを削除",
"removeVideo": "ビデオを削除",
"rename": "名前を変更",
"reportIssue": "報告する",
@@ -2575,6 +2576,21 @@
"exportWorkflow": "ワークフローをエクスポート",
"saveWorkflow": "ワークフローを保存"
},
"workspace": {
"unsavedChanges": {
"message": "未保存の変更があります。破棄してワークスペースを切り替えますか?",
"title": "未保存の変更"
}
},
"workspaceAuth": {
"errors": {
"accessDenied": "このワークスペースへのアクセス権がありません",
"invalidFirebaseToken": "認証に失敗しました。もう一度ログインしてください。",
"notAuthenticated": "ワークスペースにアクセスするにはログインが必要です",
"tokenExchangeFailed": "ワークスペースの認証に失敗しました: {error}",
"workspaceNotFound": "ワークスペースが見つかりません"
}
},
"zoomControls": {
"hideMinimap": "ミニマップを非表示",
"label": "ズームコントロール",

View File

@@ -848,6 +848,7 @@
"releaseTitle": "{package} {version} 릴리스",
"reloadToApplyChanges": "변경 사항을 적용하려면 새로 고침하세요.",
"removeImage": "이미지 제거",
"removeTag": "태그 제거",
"removeVideo": "비디오 제거",
"rename": "이름 바꾸기",
"reportIssue": "보고서 보내기",
@@ -2575,6 +2576,21 @@
"exportWorkflow": "워크플로 내보내기",
"saveWorkflow": "워크플로 저장"
},
"workspace": {
"unsavedChanges": {
"message": "저장되지 않은 변경 사항이 있습니다. 변경 사항을 취소하고 워크스페이스를 전환하시겠습니까?",
"title": "저장되지 않은 변경 사항"
}
},
"workspaceAuth": {
"errors": {
"accessDenied": "이 워크스페이스에 접근할 수 없습니다.",
"invalidFirebaseToken": "인증에 실패했습니다. 다시 로그인해 주세요.",
"notAuthenticated": "워크스페이스에 접근하려면 로그인해야 합니다.",
"tokenExchangeFailed": "워크스페이스 인증에 실패했습니다: {error}",
"workspaceNotFound": "워크스페이스를 찾을 수 없습니다."
}
},
"zoomControls": {
"hideMinimap": "미니맵 숨기기",
"label": "줌 컨트롤",

View File

@@ -848,6 +848,7 @@
"releaseTitle": "Lançamento {package} {version}",
"reloadToApplyChanges": "Recarregue para aplicar as alterações",
"removeImage": "Remover imagem",
"removeTag": "Remover tag",
"removeVideo": "Remover vídeo",
"rename": "Renomear",
"reportIssue": "Enviar relatório",
@@ -2586,6 +2587,21 @@
"exportWorkflow": "Exportar Fluxo de Trabalho",
"saveWorkflow": "Salvar fluxo de trabalho"
},
"workspace": {
"unsavedChanges": {
"message": "Você tem alterações não salvas. Deseja descartá-las e trocar de espaço de trabalho?",
"title": "Alterações não salvas"
}
},
"workspaceAuth": {
"errors": {
"accessDenied": "Você não tem acesso a este espaço de trabalho",
"invalidFirebaseToken": "Falha na autenticação. Por favor, tente fazer login novamente.",
"notAuthenticated": "Você precisa estar logado para acessar os espaços de trabalho",
"tokenExchangeFailed": "Falha ao autenticar com o espaço de trabalho: {error}",
"workspaceNotFound": "Espaço de trabalho não encontrado"
}
},
"zoomControls": {
"hideMinimap": "Ocultar Minimapa",
"label": "Controles de Zoom",

View File

@@ -848,6 +848,7 @@
"releaseTitle": "Релиз {package} {version}",
"reloadToApplyChanges": "Перезагрузите, чтобы применить изменения",
"removeImage": "Удалить изображение",
"removeTag": "Удалить тег",
"removeVideo": "Удалить видео",
"rename": "Переименовать",
"reportIssue": "Отправить отчёт",
@@ -2575,6 +2576,21 @@
"exportWorkflow": "Экспорт рабочего процесса",
"saveWorkflow": "Сохранить рабочий процесс"
},
"workspace": {
"unsavedChanges": {
"message": "У вас есть несохранённые изменения. Хотите их отменить и переключиться на другое рабочее пространство?",
"title": "Несохранённые изменения"
}
},
"workspaceAuth": {
"errors": {
"accessDenied": "У вас нет доступа к этому рабочему пространству",
"invalidFirebaseToken": "Ошибка аутентификации. Пожалуйста, попробуйте войти снова.",
"notAuthenticated": "Вы должны войти в систему, чтобы получить доступ к рабочим пространствам",
"tokenExchangeFailed": "Не удалось выполнить аутентификацию с рабочим пространством: {error}",
"workspaceNotFound": "Рабочее пространство не найдено"
}
},
"zoomControls": {
"hideMinimap": "Скрыть миникарту",
"label": "Управление масштабом",

View File

@@ -848,6 +848,7 @@
"releaseTitle": "{package} {version} Sürümü",
"reloadToApplyChanges": "Değişiklikleri uygulamak için yeniden yükleyin",
"removeImage": "Görüntüyü kaldır",
"removeTag": "Etiketi kaldır",
"removeVideo": "Videoyu kaldır",
"rename": "Yeniden Adlandır",
"reportIssue": "Rapor Gönder",
@@ -2575,6 +2576,21 @@
"exportWorkflow": "İş Akışını Dışa Aktar",
"saveWorkflow": "İş akışını kaydet"
},
"workspace": {
"unsavedChanges": {
"message": "Kaydedilmemiş değişiklikleriniz var. Bunları iptal edip çalışma alanlarını değiştirmek istiyor musunuz?",
"title": "Kaydedilmemiş Değişiklikler"
}
},
"workspaceAuth": {
"errors": {
"accessDenied": "Bu çalışma alanına erişiminiz yok",
"invalidFirebaseToken": "Kimlik doğrulama başarısız oldu. Lütfen tekrar giriş yapmayı deneyin.",
"notAuthenticated": "Çalışma alanlarına erişmek için giriş yapmalısınız",
"tokenExchangeFailed": "Çalışma alanı ile kimlik doğrulama başarısız oldu: {error}",
"workspaceNotFound": "Çalışma alanı bulunamadı"
}
},
"zoomControls": {
"hideMinimap": "Mini Haritayı Gizle",
"label": "Yakınlaştırma Kontrolleri",

View File

@@ -848,6 +848,7 @@
"releaseTitle": "{package} {version} 版本發佈",
"reloadToApplyChanges": "重新載入以套用變更",
"removeImage": "移除圖片",
"removeTag": "移除標籤",
"removeVideo": "移除影片",
"rename": "重新命名",
"reportIssue": "送出回報",
@@ -2575,6 +2576,21 @@
"exportWorkflow": "匯出工作流程",
"saveWorkflow": "儲存工作流程"
},
"workspace": {
"unsavedChanges": {
"message": "您有未儲存的變更。是否要捨棄這些變更並切換工作區?",
"title": "未儲存的變更"
}
},
"workspaceAuth": {
"errors": {
"accessDenied": "您沒有存取此工作區的權限",
"invalidFirebaseToken": "驗證失敗。請重新登入。",
"notAuthenticated": "您必須登入才能存取工作區",
"tokenExchangeFailed": "與工作區驗證失敗:{error}",
"workspaceNotFound": "找不到工作區"
}
},
"zoomControls": {
"hideMinimap": "隱藏小地圖",
"label": "縮放控制",

View File

@@ -848,6 +848,7 @@
"releaseTitle": "{package} {version} 发布",
"reloadToApplyChanges": "重新加载以应用更改",
"removeImage": "移除图片",
"removeTag": "移除标签",
"removeVideo": "移除视频",
"rename": "重命名",
"reportIssue": "发送报告",
@@ -2586,6 +2587,21 @@
"exportWorkflow": "导出工作流",
"saveWorkflow": "保存工作流"
},
"workspace": {
"unsavedChanges": {
"message": "您有未保存的更改。是否要放弃这些更改并切换工作区?",
"title": "未保存的更改"
}
},
"workspaceAuth": {
"errors": {
"accessDenied": "您无权访问此工作区",
"invalidFirebaseToken": "身份验证失败。请重新登录。",
"notAuthenticated": "您必须登录才能访问工作区",
"tokenExchangeFailed": "与工作区认证失败:{error}",
"workspaceNotFound": "未找到工作区"
}
},
"zoomControls": {
"hideMinimap": "隐藏小地图",
"label": "缩放控制",

View File

@@ -32,7 +32,6 @@ export function mapTaskOutputToAssetItem(
subfolder: output.subfolder,
executionTimeInSeconds: taskItem.executionTimeInSeconds,
format: output.format,
workflow: taskItem.workflow,
create_time: taskItem.createTime
}

View File

@@ -1,5 +1,6 @@
import { api } from '@/scripts/api'
import { isCloud } from '@/platform/distribution/types'
import { remoteConfig } from '@/platform/remoteConfig/remoteConfig'
import { api } from '@/scripts/api'
import { useFirebaseAuthStore } from '@/stores/firebaseAuthStore'
/**
@@ -10,31 +11,59 @@ export const useSessionCookie = () => {
/**
* Creates or refreshes the session cookie.
* Called after login and on token refresh.
*
* When team_workspaces_enabled is true, uses Firebase token directly
* (since getAuthHeader() returns workspace token which shouldn't be used for session creation).
* When disabled, uses getAuthHeader() for backward compatibility.
*/
const createSession = async (): Promise<void> => {
if (!isCloud) return
const authStore = useFirebaseAuthStore()
const authHeader = await authStore.getAuthHeader()
try {
const authStore = useFirebaseAuthStore()
if (!authHeader) {
throw new Error('No auth header available for session creation')
}
let authHeader: Record<string, string>
const response = await fetch(api.apiURL('/auth/session'), {
method: 'POST',
credentials: 'include',
headers: {
...authHeader,
'Content-Type': 'application/json'
if (remoteConfig.value.team_workspaces_enabled) {
const firebaseToken = await authStore.getIdToken()
if (!firebaseToken) {
console.warn(
'Failed to create session cookie:',
'No Firebase token available for session creation'
)
return
}
authHeader = { Authorization: `Bearer ${firebaseToken}` }
} else {
const header = await authStore.getAuthHeader()
if (!header) {
console.warn(
'Failed to create session cookie:',
'No auth header available for session creation'
)
return
}
authHeader = header
}
})
if (!response.ok) {
const errorData = await response.json().catch(() => ({}))
throw new Error(
`Failed to create session: ${errorData.message || response.statusText}`
)
const response = await fetch(api.apiURL('/auth/session'), {
method: 'POST',
credentials: 'include',
headers: {
...authHeader,
'Content-Type': 'application/json'
}
})
if (!response.ok) {
const errorData = await response.json().catch(() => ({}))
console.warn(
'Failed to create session cookie:',
errorData.message || response.statusText
)
}
} catch (error) {
console.warn('Failed to create session cookie:', error)
}
}
@@ -45,16 +74,21 @@ export const useSessionCookie = () => {
const deleteSession = async (): Promise<void> => {
if (!isCloud) return
const response = await fetch(api.apiURL('/auth/session'), {
method: 'DELETE',
credentials: 'include'
})
try {
const response = await fetch(api.apiURL('/auth/session'), {
method: 'DELETE',
credentials: 'include'
})
if (!response.ok) {
const errorData = await response.json().catch(() => ({}))
throw new Error(
`Failed to delete session: ${errorData.message || response.statusText}`
)
if (!response.ok) {
const errorData = await response.json().catch(() => ({}))
console.warn(
'Failed to delete session cookie:',
errorData.message || response.statusText
)
}
} catch (error) {
console.warn('Failed to delete session cookie:', error)
}
}

View File

@@ -0,0 +1,670 @@
import { createPinia, setActivePinia, storeToRefs } from 'pinia'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
useWorkspaceAuthStore,
WorkspaceAuthError
} from '@/stores/workspaceAuthStore'
import { WORKSPACE_STORAGE_KEYS } from './workspaceConstants'
const mockGetIdToken = vi.fn()
vi.mock('@/stores/firebaseAuthStore', () => ({
useFirebaseAuthStore: () => ({
getIdToken: mockGetIdToken
})
}))
vi.mock('@/scripts/api', () => ({
api: {
apiURL: (route: string) => `https://api.example.com/api${route}`
}
}))
vi.mock('@/i18n', () => ({
t: (key: string) => key
}))
const mockRemoteConfig = vi.hoisted(() => ({
value: {
team_workspaces_enabled: true
}
}))
vi.mock('@/platform/remoteConfig/remoteConfig', () => ({
remoteConfig: mockRemoteConfig
}))
const mockWorkspace = {
id: 'workspace-123',
name: 'Test Workspace',
type: 'team' as const
}
const mockWorkspaceWithRole = {
...mockWorkspace,
role: 'owner' as const
}
const mockTokenResponse = {
token: 'workspace-token-abc',
expires_at: new Date(Date.now() + 3600 * 1000).toISOString(),
workspace: mockWorkspace,
role: 'owner' as const,
permissions: ['owner:*']
}
describe('useWorkspaceAuthStore', () => {
beforeEach(() => {
setActivePinia(createPinia())
vi.clearAllMocks()
vi.useFakeTimers()
sessionStorage.clear()
})
afterEach(() => {
vi.useRealTimers()
})
describe('initial state', () => {
it('has correct initial state values', () => {
const store = useWorkspaceAuthStore()
const {
currentWorkspace,
workspaceToken,
isAuthenticated,
isLoading,
error
} = storeToRefs(store)
expect(currentWorkspace.value).toBeNull()
expect(workspaceToken.value).toBeNull()
expect(isAuthenticated.value).toBe(false)
expect(isLoading.value).toBe(false)
expect(error.value).toBeNull()
})
})
describe('initializeFromSession', () => {
it('returns true and populates state when valid session data exists', () => {
const futureExpiry = Date.now() + 3600 * 1000
sessionStorage.setItem(
WORKSPACE_STORAGE_KEYS.CURRENT_WORKSPACE,
JSON.stringify(mockWorkspaceWithRole)
)
sessionStorage.setItem(WORKSPACE_STORAGE_KEYS.TOKEN, 'valid-token')
sessionStorage.setItem(
WORKSPACE_STORAGE_KEYS.EXPIRES_AT,
futureExpiry.toString()
)
const store = useWorkspaceAuthStore()
const { currentWorkspace, workspaceToken } = storeToRefs(store)
const result = store.initializeFromSession()
expect(result).toBe(true)
expect(currentWorkspace.value).toEqual(mockWorkspaceWithRole)
expect(workspaceToken.value).toBe('valid-token')
})
it('returns false when sessionStorage is empty', () => {
const store = useWorkspaceAuthStore()
const result = store.initializeFromSession()
expect(result).toBe(false)
})
it('returns false and clears storage when token is expired', () => {
const pastExpiry = Date.now() - 1000
sessionStorage.setItem(
WORKSPACE_STORAGE_KEYS.CURRENT_WORKSPACE,
JSON.stringify(mockWorkspaceWithRole)
)
sessionStorage.setItem(WORKSPACE_STORAGE_KEYS.TOKEN, 'expired-token')
sessionStorage.setItem(
WORKSPACE_STORAGE_KEYS.EXPIRES_AT,
pastExpiry.toString()
)
const store = useWorkspaceAuthStore()
const result = store.initializeFromSession()
expect(result).toBe(false)
expect(
sessionStorage.getItem(WORKSPACE_STORAGE_KEYS.CURRENT_WORKSPACE)
).toBeNull()
expect(sessionStorage.getItem(WORKSPACE_STORAGE_KEYS.TOKEN)).toBeNull()
expect(
sessionStorage.getItem(WORKSPACE_STORAGE_KEYS.EXPIRES_AT)
).toBeNull()
})
it('returns false and clears storage when data is malformed', () => {
sessionStorage.setItem(
WORKSPACE_STORAGE_KEYS.CURRENT_WORKSPACE,
'invalid-json{'
)
sessionStorage.setItem(WORKSPACE_STORAGE_KEYS.TOKEN, 'some-token')
sessionStorage.setItem(WORKSPACE_STORAGE_KEYS.EXPIRES_AT, 'not-a-number')
const store = useWorkspaceAuthStore()
const result = store.initializeFromSession()
expect(result).toBe(false)
expect(
sessionStorage.getItem(WORKSPACE_STORAGE_KEYS.CURRENT_WORKSPACE)
).toBeNull()
expect(sessionStorage.getItem(WORKSPACE_STORAGE_KEYS.TOKEN)).toBeNull()
expect(
sessionStorage.getItem(WORKSPACE_STORAGE_KEYS.EXPIRES_AT)
).toBeNull()
})
it('returns false when partial session data exists (missing token)', () => {
sessionStorage.setItem(
WORKSPACE_STORAGE_KEYS.CURRENT_WORKSPACE,
JSON.stringify(mockWorkspaceWithRole)
)
sessionStorage.setItem(
WORKSPACE_STORAGE_KEYS.EXPIRES_AT,
(Date.now() + 3600 * 1000).toString()
)
const store = useWorkspaceAuthStore()
const result = store.initializeFromSession()
expect(result).toBe(false)
})
})
describe('switchWorkspace', () => {
it('successfully exchanges Firebase token for workspace token', async () => {
mockGetIdToken.mockResolvedValue('firebase-token-xyz')
vi.stubGlobal(
'fetch',
vi.fn().mockResolvedValue({
ok: true,
json: () => Promise.resolve(mockTokenResponse)
})
)
const store = useWorkspaceAuthStore()
const { currentWorkspace, workspaceToken, isAuthenticated } =
storeToRefs(store)
await store.switchWorkspace('workspace-123')
expect(currentWorkspace.value).toEqual(mockWorkspaceWithRole)
expect(workspaceToken.value).toBe('workspace-token-abc')
expect(isAuthenticated.value).toBe(true)
})
it('stores workspace data in sessionStorage', async () => {
mockGetIdToken.mockResolvedValue('firebase-token-xyz')
vi.stubGlobal(
'fetch',
vi.fn().mockResolvedValue({
ok: true,
json: () => Promise.resolve(mockTokenResponse)
})
)
const store = useWorkspaceAuthStore()
await store.switchWorkspace('workspace-123')
expect(
sessionStorage.getItem(WORKSPACE_STORAGE_KEYS.CURRENT_WORKSPACE)
).toBe(JSON.stringify(mockWorkspaceWithRole))
expect(sessionStorage.getItem(WORKSPACE_STORAGE_KEYS.TOKEN)).toBe(
'workspace-token-abc'
)
expect(
sessionStorage.getItem(WORKSPACE_STORAGE_KEYS.EXPIRES_AT)
).toBeTruthy()
})
it('sets isLoading to true during operation', async () => {
mockGetIdToken.mockResolvedValue('firebase-token-xyz')
let resolveResponse: (value: unknown) => void
const responsePromise = new Promise((resolve) => {
resolveResponse = resolve
})
vi.stubGlobal('fetch', vi.fn().mockReturnValue(responsePromise))
const store = useWorkspaceAuthStore()
const { isLoading } = storeToRefs(store)
const switchPromise = store.switchWorkspace('workspace-123')
expect(isLoading.value).toBe(true)
resolveResponse!({
ok: true,
json: () => Promise.resolve(mockTokenResponse)
})
await switchPromise
expect(isLoading.value).toBe(false)
})
it('throws WorkspaceAuthError with code NOT_AUTHENTICATED when Firebase token unavailable', async () => {
mockGetIdToken.mockResolvedValue(undefined)
const store = useWorkspaceAuthStore()
const { error } = storeToRefs(store)
await expect(store.switchWorkspace('workspace-123')).rejects.toThrow(
WorkspaceAuthError
)
expect(error.value).toBeInstanceOf(WorkspaceAuthError)
expect((error.value as WorkspaceAuthError).code).toBe('NOT_AUTHENTICATED')
})
it('throws WorkspaceAuthError with code ACCESS_DENIED on 403 response', async () => {
mockGetIdToken.mockResolvedValue('firebase-token-xyz')
vi.stubGlobal(
'fetch',
vi.fn().mockResolvedValue({
ok: false,
status: 403,
statusText: 'Forbidden',
json: () => Promise.resolve({ message: 'Access denied' })
})
)
const store = useWorkspaceAuthStore()
const { error } = storeToRefs(store)
await expect(store.switchWorkspace('workspace-123')).rejects.toThrow(
WorkspaceAuthError
)
expect(error.value).toBeInstanceOf(WorkspaceAuthError)
expect((error.value as WorkspaceAuthError).code).toBe('ACCESS_DENIED')
})
it('throws WorkspaceAuthError with code WORKSPACE_NOT_FOUND on 404 response', async () => {
mockGetIdToken.mockResolvedValue('firebase-token-xyz')
vi.stubGlobal(
'fetch',
vi.fn().mockResolvedValue({
ok: false,
status: 404,
statusText: 'Not Found',
json: () => Promise.resolve({ message: 'Workspace not found' })
})
)
const store = useWorkspaceAuthStore()
const { error } = storeToRefs(store)
await expect(store.switchWorkspace('workspace-123')).rejects.toThrow(
WorkspaceAuthError
)
expect(error.value).toBeInstanceOf(WorkspaceAuthError)
expect((error.value as WorkspaceAuthError).code).toBe(
'WORKSPACE_NOT_FOUND'
)
})
it('throws WorkspaceAuthError with code INVALID_FIREBASE_TOKEN on 401 response', async () => {
mockGetIdToken.mockResolvedValue('firebase-token-xyz')
vi.stubGlobal(
'fetch',
vi.fn().mockResolvedValue({
ok: false,
status: 401,
statusText: 'Unauthorized',
json: () => Promise.resolve({ message: 'Invalid token' })
})
)
const store = useWorkspaceAuthStore()
const { error } = storeToRefs(store)
await expect(store.switchWorkspace('workspace-123')).rejects.toThrow(
WorkspaceAuthError
)
expect(error.value).toBeInstanceOf(WorkspaceAuthError)
expect((error.value as WorkspaceAuthError).code).toBe(
'INVALID_FIREBASE_TOKEN'
)
})
it('throws WorkspaceAuthError with code TOKEN_EXCHANGE_FAILED on other errors', async () => {
mockGetIdToken.mockResolvedValue('firebase-token-xyz')
vi.stubGlobal(
'fetch',
vi.fn().mockResolvedValue({
ok: false,
status: 500,
statusText: 'Internal Server Error',
json: () => Promise.resolve({ message: 'Server error' })
})
)
const store = useWorkspaceAuthStore()
const { error } = storeToRefs(store)
await expect(store.switchWorkspace('workspace-123')).rejects.toThrow(
WorkspaceAuthError
)
expect(error.value).toBeInstanceOf(WorkspaceAuthError)
expect((error.value as WorkspaceAuthError).code).toBe(
'TOKEN_EXCHANGE_FAILED'
)
})
it('sends correct request to API', async () => {
mockGetIdToken.mockResolvedValue('firebase-token-xyz')
const mockFetch = vi.fn().mockResolvedValue({
ok: true,
json: () => Promise.resolve(mockTokenResponse)
})
vi.stubGlobal('fetch', mockFetch)
const store = useWorkspaceAuthStore()
await store.switchWorkspace('workspace-123')
expect(mockFetch).toHaveBeenCalledWith(
'https://api.example.com/api/auth/token',
{
method: 'POST',
headers: {
Authorization: 'Bearer firebase-token-xyz',
'Content-Type': 'application/json'
},
body: JSON.stringify({ workspace_id: 'workspace-123' })
}
)
})
})
describe('clearWorkspaceContext', () => {
it('clears all state refs', async () => {
mockGetIdToken.mockResolvedValue('firebase-token-xyz')
vi.stubGlobal(
'fetch',
vi.fn().mockResolvedValue({
ok: true,
json: () => Promise.resolve(mockTokenResponse)
})
)
const store = useWorkspaceAuthStore()
const { currentWorkspace, workspaceToken, error, isAuthenticated } =
storeToRefs(store)
await store.switchWorkspace('workspace-123')
expect(isAuthenticated.value).toBe(true)
store.clearWorkspaceContext()
expect(currentWorkspace.value).toBeNull()
expect(workspaceToken.value).toBeNull()
expect(error.value).toBeNull()
expect(isAuthenticated.value).toBe(false)
})
it('clears sessionStorage', async () => {
sessionStorage.setItem(
WORKSPACE_STORAGE_KEYS.CURRENT_WORKSPACE,
JSON.stringify(mockWorkspaceWithRole)
)
sessionStorage.setItem(WORKSPACE_STORAGE_KEYS.TOKEN, 'some-token')
sessionStorage.setItem(WORKSPACE_STORAGE_KEYS.EXPIRES_AT, '12345')
const store = useWorkspaceAuthStore()
store.clearWorkspaceContext()
expect(
sessionStorage.getItem(WORKSPACE_STORAGE_KEYS.CURRENT_WORKSPACE)
).toBeNull()
expect(sessionStorage.getItem(WORKSPACE_STORAGE_KEYS.TOKEN)).toBeNull()
expect(
sessionStorage.getItem(WORKSPACE_STORAGE_KEYS.EXPIRES_AT)
).toBeNull()
})
})
describe('getWorkspaceAuthHeader', () => {
it('returns null when no workspace token', () => {
const store = useWorkspaceAuthStore()
const header = store.getWorkspaceAuthHeader()
expect(header).toBeNull()
})
it('returns proper Authorization header when workspace token exists', async () => {
mockGetIdToken.mockResolvedValue('firebase-token-xyz')
vi.stubGlobal(
'fetch',
vi.fn().mockResolvedValue({
ok: true,
json: () => Promise.resolve(mockTokenResponse)
})
)
const store = useWorkspaceAuthStore()
await store.switchWorkspace('workspace-123')
const header = store.getWorkspaceAuthHeader()
expect(header).toEqual({
Authorization: 'Bearer workspace-token-abc'
})
})
})
describe('token refresh scheduling', () => {
it('schedules token refresh 5 minutes before expiry', async () => {
mockGetIdToken.mockResolvedValue('firebase-token-xyz')
const expiresInMs = 3600 * 1000
const tokenResponseWithFutureExpiry = {
...mockTokenResponse,
expires_at: new Date(Date.now() + expiresInMs).toISOString()
}
const mockFetch = vi.fn().mockResolvedValue({
ok: true,
json: () => Promise.resolve(tokenResponseWithFutureExpiry)
})
vi.stubGlobal('fetch', mockFetch)
const store = useWorkspaceAuthStore()
await store.switchWorkspace('workspace-123')
expect(mockFetch).toHaveBeenCalledTimes(1)
const refreshBufferMs = 5 * 60 * 1000
const refreshDelay = expiresInMs - refreshBufferMs
vi.advanceTimersByTime(refreshDelay - 1)
expect(mockFetch).toHaveBeenCalledTimes(1)
await vi.advanceTimersByTimeAsync(1)
expect(mockFetch).toHaveBeenCalledTimes(2)
})
it('clears context when refresh fails with ACCESS_DENIED', async () => {
mockGetIdToken.mockResolvedValue('firebase-token-xyz')
const expiresInMs = 3600 * 1000
const tokenResponseWithFutureExpiry = {
...mockTokenResponse,
expires_at: new Date(Date.now() + expiresInMs).toISOString()
}
const mockFetch = vi
.fn()
.mockResolvedValueOnce({
ok: true,
json: () => Promise.resolve(tokenResponseWithFutureExpiry)
})
.mockResolvedValueOnce({
ok: false,
status: 403,
statusText: 'Forbidden',
json: () => Promise.resolve({ message: 'Access denied' })
})
vi.stubGlobal('fetch', mockFetch)
const store = useWorkspaceAuthStore()
const { currentWorkspace, workspaceToken } = storeToRefs(store)
await store.switchWorkspace('workspace-123')
expect(workspaceToken.value).toBe('workspace-token-abc')
const refreshBufferMs = 5 * 60 * 1000
const refreshDelay = expiresInMs - refreshBufferMs
vi.advanceTimersByTime(refreshDelay)
await vi.waitFor(() => {
expect(currentWorkspace.value).toBeNull()
})
expect(workspaceToken.value).toBeNull()
})
})
describe('refreshToken', () => {
it('does nothing when no current workspace', async () => {
const mockFetch = vi.fn()
vi.stubGlobal('fetch', mockFetch)
const store = useWorkspaceAuthStore()
await store.refreshToken()
expect(mockFetch).not.toHaveBeenCalled()
})
it('refreshes token for current workspace', async () => {
mockGetIdToken.mockResolvedValue('firebase-token-xyz')
const mockFetch = vi.fn().mockResolvedValue({
ok: true,
json: () => Promise.resolve(mockTokenResponse)
})
vi.stubGlobal('fetch', mockFetch)
const store = useWorkspaceAuthStore()
const { workspaceToken } = storeToRefs(store)
await store.switchWorkspace('workspace-123')
expect(mockFetch).toHaveBeenCalledTimes(1)
mockFetch.mockResolvedValue({
ok: true,
json: () =>
Promise.resolve({
...mockTokenResponse,
token: 'refreshed-token'
})
})
await store.refreshToken()
expect(mockFetch).toHaveBeenCalledTimes(2)
expect(workspaceToken.value).toBe('refreshed-token')
})
})
describe('isAuthenticated computed', () => {
it('returns true when both workspace and token are present', async () => {
mockGetIdToken.mockResolvedValue('firebase-token-xyz')
vi.stubGlobal(
'fetch',
vi.fn().mockResolvedValue({
ok: true,
json: () => Promise.resolve(mockTokenResponse)
})
)
const store = useWorkspaceAuthStore()
const { isAuthenticated } = storeToRefs(store)
await store.switchWorkspace('workspace-123')
expect(isAuthenticated.value).toBe(true)
})
it('returns false when workspace is null', () => {
const store = useWorkspaceAuthStore()
const { isAuthenticated } = storeToRefs(store)
expect(isAuthenticated.value).toBe(false)
})
it('returns false when currentWorkspace is set but workspaceToken is null', async () => {
mockGetIdToken.mockResolvedValue(null)
const store = useWorkspaceAuthStore()
const { currentWorkspace, workspaceToken, isAuthenticated } =
storeToRefs(store)
currentWorkspace.value = mockWorkspaceWithRole
workspaceToken.value = null
expect(isAuthenticated.value).toBe(false)
})
})
describe('feature flag disabled', () => {
beforeEach(() => {
mockRemoteConfig.value.team_workspaces_enabled = false
})
afterEach(() => {
mockRemoteConfig.value.team_workspaces_enabled = true
})
it('initializeFromSession returns false when flag disabled', () => {
const futureExpiry = Date.now() + 3600 * 1000
sessionStorage.setItem(
WORKSPACE_STORAGE_KEYS.CURRENT_WORKSPACE,
JSON.stringify(mockWorkspaceWithRole)
)
sessionStorage.setItem(WORKSPACE_STORAGE_KEYS.TOKEN, 'valid-token')
sessionStorage.setItem(
WORKSPACE_STORAGE_KEYS.EXPIRES_AT,
futureExpiry.toString()
)
const store = useWorkspaceAuthStore()
const { currentWorkspace, workspaceToken } = storeToRefs(store)
const result = store.initializeFromSession()
expect(result).toBe(false)
expect(currentWorkspace.value).toBeNull()
expect(workspaceToken.value).toBeNull()
})
it('switchWorkspace is a no-op when flag disabled', async () => {
mockGetIdToken.mockResolvedValue('firebase-token-xyz')
const mockFetch = vi.fn()
vi.stubGlobal('fetch', mockFetch)
const store = useWorkspaceAuthStore()
const { currentWorkspace, workspaceToken, isLoading } = storeToRefs(store)
await store.switchWorkspace('workspace-123')
expect(mockFetch).not.toHaveBeenCalled()
expect(currentWorkspace.value).toBeNull()
expect(workspaceToken.value).toBeNull()
expect(isLoading.value).toBe(false)
})
})
})
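A sketch of how the header getter might be consumed when issuing a workspace-scoped request. The endpoint and helper name are illustrative; only the `getWorkspaceAuthHeader()` shape asserted in the tests above is assumed, and the import paths match those shown in this diff:

```typescript
import { api } from '@/scripts/api'
import { useWorkspaceAuthStore } from '@/stores/workspaceAuthStore'

// Illustrative only: attach the workspace token (when present) to a request.
async function fetchWithWorkspaceAuth(route: string): Promise<Response> {
  const workspaceHeader = useWorkspaceAuthStore().getWorkspaceAuthHeader()
  return fetch(api.apiURL(route), {
    headers: {
      ...(workspaceHeader ?? {}),
      'Content-Type': 'application/json'
    }
  })
}
```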

View File

@@ -0,0 +1,166 @@
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { useWorkspaceSwitch } from '@/platform/auth/workspace/useWorkspaceSwitch'
import type { WorkspaceWithRole } from '@/platform/auth/workspace/workspaceTypes'
const mockSwitchWorkspace = vi.hoisted(() => vi.fn())
const mockCurrentWorkspace = vi.hoisted(() => ({
value: null as WorkspaceWithRole | null
}))
vi.mock('@/stores/workspaceAuthStore', () => ({
useWorkspaceAuthStore: () => ({
switchWorkspace: mockSwitchWorkspace
})
}))
vi.mock('pinia', () => ({
storeToRefs: () => ({
currentWorkspace: mockCurrentWorkspace
})
}))
const mockModifiedWorkflows = vi.hoisted(
() => [] as Array<{ isModified: boolean }>
)
vi.mock('@/platform/workflow/management/stores/workflowStore', () => ({
useWorkflowStore: () => ({
get modifiedWorkflows() {
return mockModifiedWorkflows
}
})
}))
const mockConfirm = vi.hoisted(() => vi.fn())
vi.mock('@/services/dialogService', () => ({
useDialogService: () => ({
confirm: mockConfirm
})
}))
vi.mock('vue-i18n', () => ({
useI18n: () => ({
t: (key: string) => key
})
}))
const mockReload = vi.fn()
describe('useWorkspaceSwitch', () => {
beforeEach(() => {
vi.clearAllMocks()
mockCurrentWorkspace.value = {
id: 'workspace-1',
name: 'Test Workspace',
type: 'personal',
role: 'owner'
}
mockModifiedWorkflows.length = 0
vi.stubGlobal('location', { reload: mockReload })
})
afterEach(() => {
vi.unstubAllGlobals()
})
describe('hasUnsavedChanges', () => {
it('returns true when there are modified workflows', () => {
mockModifiedWorkflows.push({ isModified: true })
const { hasUnsavedChanges } = useWorkspaceSwitch()
expect(hasUnsavedChanges()).toBe(true)
})
it('returns true when multiple workflows are modified', () => {
mockModifiedWorkflows.push({ isModified: true }, { isModified: true })
const { hasUnsavedChanges } = useWorkspaceSwitch()
expect(hasUnsavedChanges()).toBe(true)
})
it('returns false when no workflows are modified', () => {
mockModifiedWorkflows.length = 0
const { hasUnsavedChanges } = useWorkspaceSwitch()
expect(hasUnsavedChanges()).toBe(false)
})
})
describe('switchWithConfirmation', () => {
it('returns true immediately if switching to the same workspace', async () => {
const { switchWithConfirmation } = useWorkspaceSwitch()
const result = await switchWithConfirmation('workspace-1')
expect(result).toBe(true)
expect(mockSwitchWorkspace).not.toHaveBeenCalled()
expect(mockConfirm).not.toHaveBeenCalled()
})
it('switches directly without dialog when no unsaved changes', async () => {
mockModifiedWorkflows.length = 0
mockSwitchWorkspace.mockResolvedValue(undefined)
const { switchWithConfirmation } = useWorkspaceSwitch()
const result = await switchWithConfirmation('workspace-2')
expect(result).toBe(true)
expect(mockConfirm).not.toHaveBeenCalled()
expect(mockSwitchWorkspace).toHaveBeenCalledWith('workspace-2')
expect(mockReload).toHaveBeenCalled()
})
it('shows confirmation dialog when there are unsaved changes', async () => {
mockModifiedWorkflows.push({ isModified: true })
mockConfirm.mockResolvedValue(true)
mockSwitchWorkspace.mockResolvedValue(undefined)
const { switchWithConfirmation } = useWorkspaceSwitch()
await switchWithConfirmation('workspace-2')
expect(mockConfirm).toHaveBeenCalledWith({
title: 'workspace.unsavedChanges.title',
message: 'workspace.unsavedChanges.message',
type: 'dirtyClose'
})
})
it('returns false if user cancels the confirmation dialog', async () => {
mockModifiedWorkflows.push({ isModified: true })
mockConfirm.mockResolvedValue(false)
const { switchWithConfirmation } = useWorkspaceSwitch()
const result = await switchWithConfirmation('workspace-2')
expect(result).toBe(false)
expect(mockSwitchWorkspace).not.toHaveBeenCalled()
expect(mockReload).not.toHaveBeenCalled()
})
it('calls switchWorkspace and reloads page after user confirms', async () => {
mockModifiedWorkflows.push({ isModified: true })
mockConfirm.mockResolvedValue(true)
mockSwitchWorkspace.mockResolvedValue(undefined)
const { switchWithConfirmation } = useWorkspaceSwitch()
const result = await switchWithConfirmation('workspace-2')
expect(result).toBe(true)
expect(mockSwitchWorkspace).toHaveBeenCalledWith('workspace-2')
expect(mockReload).toHaveBeenCalled()
})
it('returns false if switchWorkspace throws an error', async () => {
mockModifiedWorkflows.length = 0
mockSwitchWorkspace.mockRejectedValue(new Error('Switch failed'))
const { switchWithConfirmation } = useWorkspaceSwitch()
const result = await switchWithConfirmation('workspace-2')
expect(result).toBe(false)
expect(mockReload).not.toHaveBeenCalled()
})
})
})

View File

@@ -0,0 +1,49 @@
import { storeToRefs } from 'pinia'
import { useI18n } from 'vue-i18n'
import { useWorkflowStore } from '@/platform/workflow/management/stores/workflowStore'
import { useDialogService } from '@/services/dialogService'
import { useWorkspaceAuthStore } from '@/stores/workspaceAuthStore'
export function useWorkspaceSwitch() {
const { t } = useI18n()
const workspaceAuthStore = useWorkspaceAuthStore()
const { currentWorkspace } = storeToRefs(workspaceAuthStore)
const workflowStore = useWorkflowStore()
const dialogService = useDialogService()
function hasUnsavedChanges(): boolean {
return workflowStore.modifiedWorkflows.length > 0
}
async function switchWithConfirmation(workspaceId: string): Promise<boolean> {
if (currentWorkspace.value?.id === workspaceId) {
return true
}
if (hasUnsavedChanges()) {
const confirmed = await dialogService.confirm({
title: t('workspace.unsavedChanges.title'),
message: t('workspace.unsavedChanges.message'),
type: 'dirtyClose'
})
if (!confirmed) {
return false
}
}
try {
await workspaceAuthStore.switchWorkspace(workspaceId)
window.location.reload()
return true
} catch {
return false
}
}
return {
hasUnsavedChanges,
switchWithConfirmation
}
}
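A usage sketch for the composable; the handler name is illustrative, the import path matches the test file above, and only the return contract shown here is assumed:

```typescript
// Illustrative wiring inside a workspace picker component's setup.
import { useWorkspaceSwitch } from '@/platform/auth/workspace/useWorkspaceSwitch'

const { switchWithConfirmation } = useWorkspaceSwitch()

async function onSelectWorkspace(workspaceId: string): Promise<void> {
  const switched = await switchWithConfirmation(workspaceId)
  if (!switched) {
    // The user kept their unsaved changes or the switch failed;
    // the page only reloads after a successful switch.
  }
}
```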

View File

@@ -0,0 +1,7 @@
export const WORKSPACE_STORAGE_KEYS = {
CURRENT_WORKSPACE: 'Comfy.Workspace.Current',
TOKEN: 'Comfy.Workspace.Token',
EXPIRES_AT: 'Comfy.Workspace.ExpiresAt'
} as const
export const TOKEN_REFRESH_BUFFER_MS = 5 * 60 * 1000
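A sketch of how the buffer is typically applied when scheduling a refresh; the timing mirrors what the store tests above assert, and `expiresAt`/`refresh` are illustrative parameters:

```typescript
// Illustrative scheduling: fire the refresh TOKEN_REFRESH_BUFFER_MS before expiry.
function scheduleRefresh(
  expiresAt: number,
  refresh: () => Promise<void>
): ReturnType<typeof setTimeout> {
  const delay = Math.max(expiresAt - Date.now() - TOKEN_REFRESH_BUFFER_MS, 0)
  return setTimeout(() => {
    void refresh()
  }, delay)
}
```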

View File

@@ -0,0 +1,6 @@
export interface WorkspaceWithRole {
id: string
name: string
type: 'personal' | 'team'
role: 'owner' | 'member'
}

View File

@@ -1,380 +0,0 @@
/**
* @fileoverview Test fixtures for history tests.
*/
import type { HistoryResponseV2 } from '@/platform/remote/comfyui/history/types/historyV2Types'
import type { HistoryTaskItem } from '@/schemas/apiSchema'
/**
* V1 API raw response format (object with prompt IDs as keys)
*/
export const historyV1RawResponse: Record<
string,
Omit<HistoryTaskItem, 'taskType'>
> = {
'complete-item-id': {
prompt: [
24,
'complete-item-id',
{},
{
client_id: 'test-client',
extra_pnginfo: {
workflow: {
id: '44f0c9f9-b5a7-48de-99fc-7e80c1570241',
revision: 0,
last_node_id: 9,
last_link_id: 9,
nodes: [],
links: [],
groups: [],
config: {},
extra: {},
version: 0.4
}
}
},
['9']
],
outputs: {
'9': {
images: [
{
filename: 'test.png',
subfolder: '',
type: 'output'
}
]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_start',
{ prompt_id: 'complete-item-id', timestamp: 1234567890 }
],
[
'execution_success',
{ prompt_id: 'complete-item-id', timestamp: 1234567900 }
]
]
},
meta: {
'9': {
node_id: '9',
display_node: '9'
}
}
},
'no-status-id': {
prompt: [
23,
'no-status-id',
{},
{
client_id: 'inference'
},
['10']
],
outputs: {
'10': {
images: []
}
},
status: undefined,
meta: {
'10': {
node_id: '10',
display_node: '10'
}
}
}
}
/**
* V2 response with multiple edge cases:
* - Item 0: Complete with all fields
* - Item 1: Missing optional status field
* - Item 2: Missing optional meta field
* - Item 3: Multiple output nodes
*/
export const historyV2Fixture: HistoryResponseV2 = {
history: [
{
prompt_id: 'complete-item-id',
prompt: {
priority: 24,
prompt_id: 'complete-item-id',
extra_data: {
client_id: 'test-client',
extra_pnginfo: {
workflow: {
id: '44f0c9f9-b5a7-48de-99fc-7e80c1570241',
revision: 0,
last_node_id: 9,
last_link_id: 9,
nodes: [],
links: [],
groups: [],
config: {},
extra: {},
version: 0.4
}
}
}
},
outputs: {
'9': {
images: [
{
filename: 'test.png',
subfolder: '',
type: 'output'
}
]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_start',
{ prompt_id: 'complete-item-id', timestamp: 1234567890 }
],
[
'execution_success',
{ prompt_id: 'complete-item-id', timestamp: 1234567900 }
]
]
},
meta: {
'9': {
node_id: '9',
display_node: '9'
}
}
},
{
prompt_id: 'no-status-id',
prompt: {
priority: 23,
prompt_id: 'no-status-id',
extra_data: {
client_id: 'inference'
}
},
outputs: {
'10': {
images: []
}
},
meta: {
'10': {
node_id: '10',
display_node: '10'
}
}
},
{
prompt_id: 'no-meta-id',
prompt: {
priority: 22,
prompt_id: 'no-meta-id',
extra_data: {
client_id: 'web-ui'
}
},
outputs: {
'11': {
audio: []
}
},
status: {
status_str: 'error',
completed: false,
messages: []
}
},
{
prompt_id: 'multi-output-id',
prompt: {
priority: 21,
prompt_id: 'multi-output-id',
extra_data: {
client_id: 'batch-processor'
}
},
outputs: {
'3': {
images: [{ filename: 'img1.png', type: 'output', subfolder: '' }]
},
'9': {
images: [{ filename: 'img2.png', type: 'output', subfolder: '' }]
},
'12': {
video: [{ filename: 'video.mp4', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: []
},
meta: {
'3': { node_id: '3', display_node: '3' },
'9': { node_id: '9', display_node: '9' },
'12': { node_id: '12', display_node: '12' }
}
}
]
}
/**
* Expected V1 transformation of historyV2Fixture
* Priority is now synthetic based on execution_success timestamp:
* - complete-item-id: has timestamp → priority 1 (only one with timestamp)
* - no-status-id: no status → priority 0
* - no-meta-id: empty messages → priority 0
* - multi-output-id: empty messages → priority 0
*/
export const expectedV1Fixture: HistoryTaskItem[] = [
{
taskType: 'History',
prompt: [
1,
'complete-item-id',
{},
{
client_id: 'test-client',
extra_pnginfo: {
workflow: {
id: '44f0c9f9-b5a7-48de-99fc-7e80c1570241',
revision: 0,
last_node_id: 9,
last_link_id: 9,
nodes: [],
links: [],
groups: [],
config: {},
extra: {},
version: 0.4
}
}
},
['9']
],
outputs: {
'9': {
images: [
{
filename: 'test.png',
subfolder: '',
type: 'output'
}
]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_start',
{ prompt_id: 'complete-item-id', timestamp: 1234567890 }
],
[
'execution_success',
{ prompt_id: 'complete-item-id', timestamp: 1234567900 }
]
]
},
meta: {
'9': {
node_id: '9',
display_node: '9'
}
}
},
{
taskType: 'History',
prompt: [
0,
'no-status-id',
{},
{
client_id: 'inference'
},
['10']
],
outputs: {
'10': {
images: []
}
},
status: undefined,
meta: {
'10': {
node_id: '10',
display_node: '10'
}
}
},
{
taskType: 'History',
prompt: [
0,
'no-meta-id',
{},
{
client_id: 'web-ui'
},
['11']
],
outputs: {
'11': {
audio: []
}
},
status: {
status_str: 'error',
completed: false,
messages: []
},
meta: undefined
},
{
taskType: 'History',
prompt: [
0,
'multi-output-id',
{},
{
client_id: 'batch-processor'
},
['3', '9', '12']
],
outputs: {
'3': {
images: [{ filename: 'img1.png', type: 'output', subfolder: '' }]
},
'9': {
images: [{ filename: 'img2.png', type: 'output', subfolder: '' }]
},
'12': {
video: [{ filename: 'video.mp4', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: []
},
meta: {
'3': { node_id: '3', display_node: '3' },
'9': { node_id: '9', display_node: '9' },
'12': { node_id: '12', display_node: '12' }
}
}
]

View File

@@ -1,434 +0,0 @@
/**
* @fileoverview Unit tests for V2 to V1 history adapter.
*/
import { describe, expect, it } from 'vitest'
import { mapHistoryV2toHistory } from '@/platform/remote/comfyui/history/adapters/v2ToV1Adapter'
import { zRawHistoryItemV2 } from '@/platform/remote/comfyui/history/types/historyV2Types'
import type { HistoryResponseV2 } from '@/platform/remote/comfyui/history/types/historyV2Types'
import {
expectedV1Fixture,
historyV2Fixture
} from '@/platform/remote/comfyui/history/__fixtures__/historyFixtures'
import type { HistoryTaskItem } from '@/platform/remote/comfyui/history/types/historyV1Types'
const historyV2WithMissingTimestamp: HistoryResponseV2 = {
history: [
{
prompt_id: 'item-timestamp-1000',
prompt: {
priority: 0,
prompt_id: 'item-timestamp-1000',
extra_data: {
client_id: 'test-client'
}
},
outputs: {
'1': {
images: [{ filename: 'test1.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_success',
{ prompt_id: 'item-timestamp-1000', timestamp: 1000 }
]
]
}
},
{
prompt_id: 'item-timestamp-2000',
prompt: {
priority: 0,
prompt_id: 'item-timestamp-2000',
extra_data: {
client_id: 'test-client'
}
},
outputs: {
'2': {
images: [{ filename: 'test2.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_success',
{ prompt_id: 'item-timestamp-2000', timestamp: 2000 }
]
]
}
},
{
prompt_id: 'item-no-timestamp',
prompt: {
priority: 0,
prompt_id: 'item-no-timestamp',
extra_data: {
client_id: 'test-client'
}
},
outputs: {
'3': {
images: [{ filename: 'test3.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: []
}
}
]
}
const historyV2FiveItemsSorting: HistoryResponseV2 = {
history: [
{
prompt_id: 'item-timestamp-3000',
prompt: {
priority: 0,
prompt_id: 'item-timestamp-3000',
extra_data: { client_id: 'test-client' }
},
outputs: {
'1': {
images: [{ filename: 'test1.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_success',
{ prompt_id: 'item-timestamp-3000', timestamp: 3000 }
]
]
}
},
{
prompt_id: 'item-timestamp-1000',
prompt: {
priority: 0,
prompt_id: 'item-timestamp-1000',
extra_data: { client_id: 'test-client' }
},
outputs: {
'2': {
images: [{ filename: 'test2.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_success',
{ prompt_id: 'item-timestamp-1000', timestamp: 1000 }
]
]
}
},
{
prompt_id: 'item-timestamp-5000',
prompt: {
priority: 0,
prompt_id: 'item-timestamp-5000',
extra_data: { client_id: 'test-client' }
},
outputs: {
'3': {
images: [{ filename: 'test3.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_success',
{ prompt_id: 'item-timestamp-5000', timestamp: 5000 }
]
]
}
},
{
prompt_id: 'item-timestamp-2000',
prompt: {
priority: 0,
prompt_id: 'item-timestamp-2000',
extra_data: { client_id: 'test-client' }
},
outputs: {
'4': {
images: [{ filename: 'test4.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_success',
{ prompt_id: 'item-timestamp-2000', timestamp: 2000 }
]
]
}
},
{
prompt_id: 'item-timestamp-4000',
prompt: {
priority: 0,
prompt_id: 'item-timestamp-4000',
extra_data: { client_id: 'test-client' }
},
outputs: {
'5': {
images: [{ filename: 'test5.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: [
[
'execution_success',
{ prompt_id: 'item-timestamp-4000', timestamp: 4000 }
]
]
}
}
]
}
const historyV2MultipleNoTimestamp: HistoryResponseV2 = {
history: [
{
prompt_id: 'item-no-timestamp-1',
prompt: {
priority: 0,
prompt_id: 'item-no-timestamp-1',
extra_data: { client_id: 'test-client' }
},
outputs: {
'1': {
images: [{ filename: 'test1.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: []
}
},
{
prompt_id: 'item-no-timestamp-2',
prompt: {
priority: 0,
prompt_id: 'item-no-timestamp-2',
extra_data: { client_id: 'test-client' }
},
outputs: {
'2': {
images: [{ filename: 'test2.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: []
}
},
{
prompt_id: 'item-no-timestamp-3',
prompt: {
priority: 0,
prompt_id: 'item-no-timestamp-3',
extra_data: { client_id: 'test-client' }
},
outputs: {
'3': {
images: [{ filename: 'test3.png', type: 'output', subfolder: '' }]
}
},
status: {
status_str: 'success',
completed: true,
messages: []
}
}
]
}
function findResultByPromptId(
result: HistoryTaskItem[],
promptId: string
): HistoryTaskItem {
const item = result.find((item) => item.prompt[1] === promptId)
if (!item) {
throw new Error(`Expected item with promptId ${promptId} not found`)
}
return item
}
describe('mapHistoryV2toHistory', () => {
describe('fixture validation', () => {
it('should have valid fixture data', () => {
// Validate all items in the fixture to ensure test data is correct
historyV2Fixture.history.forEach((item: unknown) => {
expect(() => zRawHistoryItemV2.parse(item)).not.toThrow()
})
})
})
describe('given a complete V2 history response with edge cases', () => {
const history = mapHistoryV2toHistory(historyV2Fixture)
it('should transform all items to V1 format with correct structure', () => {
expect(history).toEqual(expectedV1Fixture)
})
it('should add taskType "History" to all items', () => {
history.forEach((item) => {
expect(item.taskType).toBe('History')
})
})
it('should transform prompt to V1 tuple [priority, id, {}, extra_data, outputNodeIds]', () => {
const firstItem = history[0]
expect(firstItem.prompt[0]).toBe(1) // Synthetic priority based on timestamp
expect(firstItem.prompt[1]).toBe('complete-item-id')
expect(firstItem.prompt[2]).toEqual({}) // history v2 does not return this data
expect(firstItem.prompt[3]).toMatchObject({ client_id: 'test-client' })
expect(firstItem.prompt[4]).toEqual(['9'])
})
it('should handle missing optional status field', () => {
expect(history[1].prompt[1]).toBe('no-status-id')
expect(history[1].status).toBeUndefined()
})
it('should handle missing optional meta field', () => {
expect(history[2].prompt[1]).toBe('no-meta-id')
expect(history[2].meta).toBeUndefined()
})
it('should derive output node IDs from outputs object keys', () => {
const multiOutputItem = history[3]
expect(multiOutputItem.prompt[4]).toEqual(
expect.arrayContaining(['3', '9', '12'])
)
expect(multiOutputItem.prompt[4]).toHaveLength(3)
})
})
describe('given empty history array', () => {
it('should return empty array', () => {
const emptyResponse: HistoryResponseV2 = { history: [] }
const history = mapHistoryV2toHistory(emptyResponse)
expect(history).toEqual([])
})
})
describe('given empty outputs object', () => {
it('should return empty array for output node IDs', () => {
const v2Response: HistoryResponseV2 = {
history: [
{
prompt_id: 'test-id',
prompt: {
priority: 0,
prompt_id: 'test-id',
extra_data: { client_id: 'test' }
},
outputs: {}
}
]
}
const history = mapHistoryV2toHistory(v2Response)
expect(history[0].prompt[4]).toEqual([])
})
})
describe('given missing client_id', () => {
it('should accept history items without client_id', () => {
const v2Response: HistoryResponseV2 = {
history: [
{
prompt_id: 'test-id',
prompt: {
priority: 0,
prompt_id: 'test-id',
extra_data: {}
},
outputs: {}
}
]
}
const history = mapHistoryV2toHistory(v2Response)
expect(history[0].prompt[3].client_id).toBeUndefined()
})
})
describe('timestamp-based priority assignment', () => {
it('assigns priority 0 to items without execution_success timestamp', () => {
const result = mapHistoryV2toHistory(historyV2WithMissingTimestamp)
expect(result).toHaveLength(3)
const item1000 = findResultByPromptId(result, 'item-timestamp-1000')
const item2000 = findResultByPromptId(result, 'item-timestamp-2000')
const itemNoTimestamp = findResultByPromptId(result, 'item-no-timestamp')
expect(item2000.prompt[0]).toBe(2)
expect(item1000.prompt[0]).toBe(1)
expect(itemNoTimestamp.prompt[0]).toBe(0)
})
it('correctly sorts and assigns priorities for multiple items', () => {
const result = mapHistoryV2toHistory(historyV2FiveItemsSorting)
expect(result).toHaveLength(5)
const item1000 = findResultByPromptId(result, 'item-timestamp-1000')
const item2000 = findResultByPromptId(result, 'item-timestamp-2000')
const item3000 = findResultByPromptId(result, 'item-timestamp-3000')
const item4000 = findResultByPromptId(result, 'item-timestamp-4000')
const item5000 = findResultByPromptId(result, 'item-timestamp-5000')
expect(item5000.prompt[0]).toBe(5)
expect(item4000.prompt[0]).toBe(4)
expect(item3000.prompt[0]).toBe(3)
expect(item2000.prompt[0]).toBe(2)
expect(item1000.prompt[0]).toBe(1)
})
it('assigns priority 0 to all items when multiple items lack timestamps', () => {
const result = mapHistoryV2toHistory(historyV2MultipleNoTimestamp)
expect(result).toHaveLength(3)
const item1 = findResultByPromptId(result, 'item-no-timestamp-1')
const item2 = findResultByPromptId(result, 'item-no-timestamp-2')
const item3 = findResultByPromptId(result, 'item-no-timestamp-3')
expect(item1.prompt[0]).toBe(0)
expect(item2.prompt[0]).toBe(0)
expect(item3.prompt[0]).toBe(0)
})
})
})

View File

@@ -1,74 +0,0 @@
/**
* @fileoverview Adapter to convert V2 history format to V1 format
* @module platform/remote/comfyui/history/adapters/v2ToV1Adapter
*/
import type { HistoryTaskItem, TaskPrompt } from '../types/historyV1Types'
import type {
HistoryResponseV2,
RawHistoryItemV2,
TaskOutput,
TaskPromptV2
} from '../types/historyV2Types'
function mapPromptV2toV1(
promptV2: TaskPromptV2,
outputs: TaskOutput,
syntheticPriority: number,
createTime?: number
): TaskPrompt {
const extraData = {
...(promptV2.extra_data ?? {}),
...(typeof createTime === 'number' ? { create_time: createTime } : {})
}
return [
syntheticPriority,
promptV2.prompt_id,
{},
extraData,
Object.keys(outputs)
]
}
function getExecutionSuccessTimestamp(item: RawHistoryItemV2): number {
return (
item.status?.messages?.find((m) => m[0] === 'execution_success')?.[1]
?.timestamp ?? 0
)
}
export function mapHistoryV2toHistory(
historyV2Response: HistoryResponseV2
): HistoryTaskItem[] {
const { history } = historyV2Response
// Sort by execution_success timestamp, descending (newest first)
history.sort((a, b) => {
return getExecutionSuccessTimestamp(b) - getExecutionSuccessTimestamp(a)
})
// Count items with valid timestamps for synthetic priority calculation
const countWithTimestamps = history.filter(
(item) => getExecutionSuccessTimestamp(item) > 0
).length
return history.map((item, index): HistoryTaskItem => {
const { prompt, outputs, status, meta } = item
const timestamp = getExecutionSuccessTimestamp(item)
// Items with timestamps get priority based on sorted position (highest first)
const syntheticPriority = timestamp > 0 ? countWithTimestamps - index : 0
return {
taskType: 'History' as const,
prompt: mapPromptV2toV1(
prompt,
outputs,
syntheticPriority,
item.create_time
),
status,
outputs,
meta
}
})
}

View File

@@ -1,52 +0,0 @@
/**
* @fileoverview Unit tests for V1 history fetcher.
*/
import { describe, expect, it, vi } from 'vitest'
import { fetchHistoryV1 } from '@/platform/remote/comfyui/history/fetchers/fetchHistoryV1'
import { historyV1RawResponse } from '@/platform/remote/comfyui/history/__fixtures__/historyFixtures'
describe('fetchHistoryV1', () => {
const mockFetchApi = vi.fn().mockResolvedValue({
json: async () => historyV1RawResponse
})
it('should fetch from /history endpoint with default max_items', async () => {
await fetchHistoryV1(mockFetchApi)
expect(mockFetchApi).toHaveBeenCalledWith('/history?max_items=200')
})
it('should fetch with custom max_items parameter', async () => {
await fetchHistoryV1(mockFetchApi, 50)
expect(mockFetchApi).toHaveBeenCalledWith('/history?max_items=50')
})
it('should transform object response to array with taskType and preserve fields', async () => {
const result = await fetchHistoryV1(mockFetchApi)
expect(result.History).toHaveLength(2)
result.History.forEach((item) => {
expect(item.taskType).toBe('History')
})
expect(result.History[0]).toMatchObject({
taskType: 'History',
prompt: [24, 'complete-item-id', {}, expect.any(Object), ['9']],
outputs: expect.any(Object),
status: expect.any(Object),
meta: expect.any(Object)
})
})
it('should handle empty response object', async () => {
const emptyMock = vi.fn().mockResolvedValue({
json: async () => ({})
})
const result = await fetchHistoryV1(emptyMock)
expect(result.History).toEqual([])
})
})

View File

@@ -1,51 +0,0 @@
/**
* @fileoverview V1 History Fetcher - Desktop/localhost API
* @module platform/remote/comfyui/history/fetchers/fetchHistoryV1
*
* Fetches history directly from V1 API endpoint.
* Used by desktop and localhost distributions.
*/
import type {
HistoryTaskItem,
HistoryV1Response
} from '../types/historyV1Types'
/**
* Fetches history from V1 API endpoint
 * @param fetchApi - Function that performs the API request (e.g. a bound `api.fetchApi`)
* @param maxItems - Maximum number of history items to fetch
* @param offset - Offset for pagination (must be non-negative integer)
* @returns Promise resolving to V1 history response
* @throws Error if offset is invalid (negative or non-integer)
*/
export async function fetchHistoryV1(
fetchApi: (url: string) => Promise<Response>,
maxItems: number = 200,
offset?: number
): Promise<HistoryV1Response> {
// Validate offset parameter
if (offset !== undefined && (offset < 0 || !Number.isInteger(offset))) {
throw new Error(
`Invalid offset parameter: ${offset}. Must be a non-negative integer.`
)
}
const params = new URLSearchParams({ max_items: maxItems.toString() })
if (offset !== undefined) {
params.set('offset', offset.toString())
}
const url = `/history?${params.toString()}`
const res = await fetchApi(url)
const json: Record<
string,
Omit<HistoryTaskItem, 'taskType'>
> = await res.json()
return {
History: Object.values(json).map((item) => ({
...item,
taskType: 'History'
}))
}
}
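
A brief call-site sketch for the V1 fetcher, assuming an async context and that `api.fetchApi` is the client's bound request function:

```typescript
import { api } from '@/scripts/api'
import { fetchHistoryV1 } from '@/platform/remote/comfyui/history/fetchers/fetchHistoryV1'

async function loadRecentHistory() {
  // GET /history?max_items=50&offset=10; offset is validated before the request.
  const { History } = await fetchHistoryV1(api.fetchApi.bind(api), 50, 10)
  return History // every item is tagged taskType: 'History'
}
```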

View File

@@ -1,41 +0,0 @@
/**
* @fileoverview Unit tests for V2 history fetcher.
*/
import { describe, expect, it, vi } from 'vitest'
import { fetchHistoryV2 } from '@/platform/remote/comfyui/history/fetchers/fetchHistoryV2'
import {
expectedV1Fixture,
historyV2Fixture
} from '@/platform/remote/comfyui/history/__fixtures__/historyFixtures'
describe('fetchHistoryV2', () => {
const mockFetchApi = vi.fn().mockResolvedValue({
json: async () => historyV2Fixture
})
it('should fetch from /history_v2 endpoint with default max_items', async () => {
await fetchHistoryV2(mockFetchApi)
expect(mockFetchApi).toHaveBeenCalledWith('/history_v2?max_items=200')
})
it('should fetch with custom max_items parameter', async () => {
await fetchHistoryV2(mockFetchApi, 50)
expect(mockFetchApi).toHaveBeenCalledWith('/history_v2?max_items=50')
})
it('should adapt V2 response to V1-compatible format', async () => {
const result = await fetchHistoryV2(mockFetchApi)
expect(result.History).toEqual(expectedV1Fixture)
expect(result).toHaveProperty('History')
expect(Array.isArray(result.History)).toBe(true)
result.History.forEach((item) => {
expect(item.taskType).toBe('History')
expect(item.prompt).toHaveLength(5)
})
})
})

View File

@@ -1,42 +0,0 @@
/**
* @fileoverview V2 History Fetcher - Cloud API with adapter
* @module platform/remote/comfyui/history/fetchers/fetchHistoryV2
*
* Fetches history from V2 API endpoint and converts to V1 format.
* Used exclusively by cloud distribution.
*/
import { mapHistoryV2toHistory } from '../adapters/v2ToV1Adapter'
import type { HistoryV1Response } from '../types/historyV1Types'
import type { HistoryResponseV2 } from '../types/historyV2Types'
/**
* Fetches history from V2 API endpoint and adapts to V1 format
 * @param fetchApi - Function that performs the API request (e.g. a bound `api.fetchApi`)
* @param maxItems - Maximum number of history items to fetch
* @param offset - Offset for pagination (must be non-negative integer)
* @returns Promise resolving to V1 history response (adapted from V2)
* @throws Error if offset is invalid (negative or non-integer)
*/
export async function fetchHistoryV2(
fetchApi: (url: string) => Promise<Response>,
maxItems: number = 200,
offset?: number
): Promise<HistoryV1Response> {
// Validate offset parameter
if (offset !== undefined && (offset < 0 || !Number.isInteger(offset))) {
throw new Error(
`Invalid offset parameter: ${offset}. Must be a non-negative integer.`
)
}
const params = new URLSearchParams({ max_items: maxItems.toString() })
if (offset !== undefined) {
params.set('offset', offset.toString())
}
const url = `/history_v2?${params.toString()}`
const res = await fetchApi(url)
const rawData: HistoryResponseV2 = await res.json()
const adaptedHistory = mapHistoryV2toHistory(rawData)
return { History: adaptedHistory }
}

View File

@@ -1,29 +0,0 @@
/**
* @fileoverview History API module - Distribution-aware exports
* @module platform/remote/comfyui/history
*
* This module provides a unified history fetching interface that automatically
 * uses the correct implementation based on the build-time distribution constant.
*
* - Cloud builds: Uses V2 API with adapter (tree-shakes V1 fetcher)
* - Desktop/localhost builds: Uses V1 API directly (tree-shakes V2 fetcher + adapter)
*
* The rest of the application only needs to import from this module and use
* V1 types - all distribution-specific details are encapsulated here.
*/
import { isCloud } from '@/platform/distribution/types'
import { fetchHistoryV1 } from './fetchers/fetchHistoryV1'
import { fetchHistoryV2 } from './fetchers/fetchHistoryV2'
/**
* Fetches history using the appropriate API for the current distribution.
* Build-time constant enables dead code elimination - only one implementation
* will be included in the final bundle.
*/
export const fetchHistory = isCloud ? fetchHistoryV2 : fetchHistoryV1
/**
* Export only V1 types publicly - consumers don't need to know about V2
*/
export type * from './types'
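
A consumer-side sketch: the call site is identical on every distribution, and the build-time constant decides which fetcher survives tree-shaking (assumes an async context):

```typescript
import { api } from '@/scripts/api'
import { fetchHistory } from '@/platform/remote/comfyui/history'

async function refreshHistory() {
  // Cloud builds hit /history_v2 through the adapter; desktop/localhost hit /history.
  // Either way the result is the V1-shaped { History: HistoryTaskItem[] }.
  const { History } = await fetchHistory(api.fetchApi.bind(api), 200)
  return History
}
```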

View File

@@ -1,335 +0,0 @@
/**
* @fileoverview Tests for history reconciliation (V1 and V2)
*/
import { beforeEach, describe, expect, it, vi } from 'vitest'
import { reconcileHistory } from '@/platform/remote/comfyui/history/reconciliation'
import type { TaskItem } from '@/schemas/apiSchema'
// Mock distribution types
vi.mock('@/platform/distribution/types', () => ({
isCloud: false,
isDesktop: true
}))
function createHistoryItem(promptId: string, queueIndex = 0): TaskItem {
return {
taskType: 'History',
prompt: [queueIndex, promptId, {}, {}, []],
status: { status_str: 'success', completed: true, messages: [] },
outputs: {}
}
}
function getAllPromptIds(result: TaskItem[]): string[] {
return result.map((item) => item.prompt[1])
}
describe('reconcileHistory (V1)', () => {
beforeEach(async () => {
const distTypes = await import('@/platform/distribution/types')
vi.mocked(distTypes).isCloud = false
})
describe('when filtering by queueIndex', () => {
it('should retain items with queueIndex greater than lastKnownQueueIndex', () => {
const serverHistory = [
createHistoryItem('new-1', 11),
createHistoryItem('new-2', 10),
createHistoryItem('old', 5)
]
const clientHistory = [createHistoryItem('old', 5)]
const result = reconcileHistory(serverHistory, clientHistory, 10, 9)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(3)
expect(promptIds).toContain('new-1')
expect(promptIds).toContain('new-2')
expect(promptIds).toContain('old')
})
it('should evict items with queueIndex less than or equal to lastKnownQueueIndex', () => {
const serverHistory = [
createHistoryItem('new', 11),
createHistoryItem('existing', 10),
createHistoryItem('old-should-not-appear', 5)
]
const clientHistory = [createHistoryItem('existing', 10)]
const result = reconcileHistory(serverHistory, clientHistory, 10, 10)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(2)
expect(promptIds).toContain('new')
expect(promptIds).toContain('existing')
expect(promptIds).not.toContain('old-should-not-appear')
})
it('should retain all server items when lastKnownQueueIndex is undefined', () => {
const serverHistory = [
createHistoryItem('item-1', 5),
createHistoryItem('item-2', 4)
]
const result = reconcileHistory(serverHistory, [], 10, undefined)
expect(result).toHaveLength(2)
expect(result[0].prompt[1]).toBe('item-1')
expect(result[1].prompt[1]).toBe('item-2')
})
})
describe('when reconciling with existing client items', () => {
it('should retain client items that still exist on server', () => {
const serverHistory = [
createHistoryItem('new', 11),
createHistoryItem('existing-1', 9),
createHistoryItem('existing-2', 8)
]
const clientHistory = [
createHistoryItem('existing-1', 9),
createHistoryItem('existing-2', 8)
]
const result = reconcileHistory(serverHistory, clientHistory, 10, 10)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(3)
expect(promptIds).toContain('new')
expect(promptIds).toContain('existing-1')
expect(promptIds).toContain('existing-2')
})
it('should evict client items that no longer exist on server', () => {
const serverHistory = [
createHistoryItem('new', 11),
createHistoryItem('keep', 9)
]
const clientHistory = [
createHistoryItem('keep', 9),
createHistoryItem('removed-from-server', 8)
]
const result = reconcileHistory(serverHistory, clientHistory, 10, 10)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(2)
expect(promptIds).toContain('new')
expect(promptIds).toContain('keep')
expect(promptIds).not.toContain('removed-from-server')
})
})
describe('when limiting the result count', () => {
it('should respect the maxItems constraint', () => {
const serverHistory = Array.from({ length: 10 }, (_, i) =>
createHistoryItem(`item-${i}`, 20 + i)
)
const result = reconcileHistory(serverHistory, [], 5, 15)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(5)
})
it('should evict lowest priority items when exceeding capacity', () => {
const serverHistory = [
createHistoryItem('new-1', 13),
createHistoryItem('new-2', 12),
createHistoryItem('new-3', 11),
createHistoryItem('existing', 9)
]
const clientHistory = [createHistoryItem('existing', 9)]
const result = reconcileHistory(serverHistory, clientHistory, 2, 10)
expect(result).toHaveLength(2)
expect(result[0].prompt[1]).toBe('new-1')
expect(result[1].prompt[1]).toBe('new-2')
})
})
describe('when handling empty collections', () => {
it('should return all server items when client history is empty', () => {
const serverHistory = [
createHistoryItem('item-1', 10),
createHistoryItem('item-2', 9)
]
const result = reconcileHistory(serverHistory, [], 10, 8)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(2)
})
it('should return empty result when server history is empty', () => {
const clientHistory = [createHistoryItem('item-1', 5)]
const result = reconcileHistory([], clientHistory, 10, 5)
expect(result).toHaveLength(0)
})
it('should return empty result when both collections are empty', () => {
const result = reconcileHistory([], [], 10, undefined)
expect(result).toHaveLength(0)
})
})
})
describe('reconcileHistory (V2/Cloud)', () => {
beforeEach(async () => {
const distTypes = await import('@/platform/distribution/types')
vi.mocked(distTypes).isCloud = true
})
describe('when adding new items from server', () => {
it('should retain items with promptIds not present in client history', () => {
const serverHistory = [
createHistoryItem('new-item'),
createHistoryItem('existing-item')
]
const clientHistory = [createHistoryItem('existing-item')]
const result = reconcileHistory(serverHistory, clientHistory, 10)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(2)
expect(promptIds).toContain('new-item')
expect(promptIds).toContain('existing-item')
})
it('should respect priority ordering when retaining multiple new items', () => {
const serverHistory = [
createHistoryItem('new-1'),
createHistoryItem('new-2'),
createHistoryItem('existing')
]
const clientHistory = [createHistoryItem('existing')]
const result = reconcileHistory(serverHistory, clientHistory, 10)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(3)
expect(promptIds).toContain('new-1')
expect(promptIds).toContain('new-2')
expect(promptIds).toContain('existing')
})
})
describe('when reconciling with existing client items', () => {
it('should retain client items that still exist on server', () => {
const serverHistory = [
createHistoryItem('item-1'),
createHistoryItem('item-2')
]
const clientHistory = [
createHistoryItem('item-1'),
createHistoryItem('item-2')
]
const result = reconcileHistory(serverHistory, clientHistory, 10)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(2)
expect(promptIds).toContain('item-1')
expect(promptIds).toContain('item-2')
})
it('should evict client items that no longer exist on server', () => {
const serverHistory = [createHistoryItem('item-1')]
const clientHistory = [
createHistoryItem('item-1'),
createHistoryItem('old-item')
]
const result = reconcileHistory(serverHistory, clientHistory, 10)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(1)
expect(promptIds).toContain('item-1')
expect(promptIds).not.toContain('old-item')
})
})
describe('when detecting new items by promptId', () => {
it('should retain new items regardless of queueIndex values', () => {
const serverHistory = [
createHistoryItem('existing', 100),
createHistoryItem('new-item', 50)
]
const clientHistory = [createHistoryItem('existing', 100)]
const result = reconcileHistory(serverHistory, clientHistory, 10)
const promptIds = getAllPromptIds(result)
expect(promptIds).toContain('new-item')
expect(promptIds).toContain('existing')
})
})
describe('when limiting the result count', () => {
it('should respect the maxItems constraint', () => {
const serverHistory = Array.from({ length: 10 }, (_, i) =>
createHistoryItem(`server-${i}`)
)
const clientHistory = Array.from({ length: 5 }, (_, i) =>
createHistoryItem(`client-${i}`)
)
const result = reconcileHistory(serverHistory, clientHistory, 5)
const promptIds = getAllPromptIds(result)
expect(promptIds).toHaveLength(5)
})
it('should evict lowest priority items when exceeding capacity', () => {
const serverHistory = [
createHistoryItem('new-1'),
createHistoryItem('new-2'),
createHistoryItem('existing')
]
const clientHistory = [createHistoryItem('existing')]
const result = reconcileHistory(serverHistory, clientHistory, 2)
expect(result).toHaveLength(2)
expect(result[0].prompt[1]).toBe('new-1')
expect(result[1].prompt[1]).toBe('new-2')
})
})
describe('when handling empty collections', () => {
it('should return all server items when client history is empty', () => {
const serverHistory = [
createHistoryItem('item-1'),
createHistoryItem('item-2')
]
const result = reconcileHistory(serverHistory, [], 10)
expect(result).toHaveLength(2)
expect(result[0].prompt[1]).toBe('item-1')
expect(result[1].prompt[1]).toBe('item-2')
})
it('should return empty result when server history is empty', () => {
const clientHistory = [
createHistoryItem('item-1'),
createHistoryItem('item-2')
]
const result = reconcileHistory([], clientHistory, 10)
expect(result).toHaveLength(0)
})
it('should return empty result when both collections are empty', () => {
const result = reconcileHistory([], [], 10)
expect(result).toHaveLength(0)
})
})
})

View File

@@ -1,122 +0,0 @@
/**
* @fileoverview History reconciliation for V1 and V2 APIs
* @module platform/remote/comfyui/history/reconciliation
*
 * Returns the list of items that should be displayed, sorted by queueIndex (newest first).
* Caller is responsible for mapping to their own class instances.
*
* V1: QueueIndex-based filtering for stable monotonic indices
* V2: PromptId-based merging for synthetic priorities (V2 assigns synthetic
* priorities after timestamp sorting, so new items may have lower priority
* than existing items)
*/
import { isCloud } from '@/platform/distribution/types'
import type { TaskItem } from '@/schemas/apiSchema'
/**
* V1 reconciliation: QueueIndex-based filtering works because V1 has stable,
* monotonically increasing queue indices.
*
* Sort order: Sorts serverHistory by queueIndex descending (newest first) to ensure
* consistent ordering. JavaScript .filter() maintains iteration order, so filtered
* results remain sorted. clientHistory is assumed already sorted from previous update.
*
* @returns All items to display, sorted by queueIndex descending (newest first)
*/
function reconcileHistoryV1(
serverHistory: TaskItem[],
clientHistory: TaskItem[],
maxItems: number,
lastKnownQueueIndex: number | undefined
): TaskItem[] {
const sortedServerHistory = serverHistory.sort(
(a, b) => b.prompt[0] - a.prompt[0]
)
const serverPromptIds = new Set(
sortedServerHistory.map((item) => item.prompt[1])
)
// If undefined, treat as initial sync (all items are new)
const itemsAddedSinceLastSync =
lastKnownQueueIndex === undefined
? sortedServerHistory
: sortedServerHistory.filter(
(item) => item.prompt[0] > lastKnownQueueIndex
)
const clientItemsStillOnServer = clientHistory.filter((item) =>
serverPromptIds.has(item.prompt[1])
)
// Merge new and reused items, sort by queueIndex descending, limit to maxItems
return [...itemsAddedSinceLastSync, ...clientItemsStillOnServer]
.sort((a, b) => b.prompt[0] - a.prompt[0])
.slice(0, maxItems)
}
/**
* V2 reconciliation: PromptId-based merging because V2 assigns synthetic
* priorities after sorting by timestamp.
*
* Sort order: Sorts serverHistory by queueIndex descending (newest first) to ensure
* consistent ordering. JavaScript .filter() maintains iteration order, so filtered
* results remain sorted. clientHistory is assumed already sorted from previous update.
*
* @returns All items to display, sorted by queueIndex descending (newest first)
*/
function reconcileHistoryV2(
serverHistory: TaskItem[],
clientHistory: TaskItem[],
maxItems: number
): TaskItem[] {
const sortedServerHistory = serverHistory.sort(
(a, b) => b.prompt[0] - a.prompt[0]
)
const serverPromptIds = new Set(
sortedServerHistory.map((item) => item.prompt[1])
)
const clientPromptIds = new Set(clientHistory.map((item) => item.prompt[1]))
const newItems = sortedServerHistory.filter(
(item) => !clientPromptIds.has(item.prompt[1])
)
const clientItemsStillOnServer = clientHistory.filter((item) =>
serverPromptIds.has(item.prompt[1])
)
// Merge new and reused items, sort by queueIndex descending, limit to maxItems
return [...newItems, ...clientItemsStillOnServer]
.sort((a, b) => b.prompt[0] - a.prompt[0])
.slice(0, maxItems)
}
/**
* Reconciles server history with client history.
* Automatically uses V1 (queueIndex-based) or V2 (promptId-based) algorithm based on
* distribution type.
*
* @param serverHistory - Server's current history items
* @param clientHistory - Client's existing history items
* @param maxItems - Maximum number of items to return
* @param lastKnownQueueIndex - Last queue index seen (V1 only, optional for V2)
* @returns All items that should be displayed, sorted by queueIndex descending
*/
export function reconcileHistory(
serverHistory: TaskItem[],
clientHistory: TaskItem[],
maxItems: number,
lastKnownQueueIndex?: number
): TaskItem[] {
if (isCloud) {
return reconcileHistoryV2(serverHistory, clientHistory, maxItems)
}
return reconcileHistoryV1(
serverHistory,
clientHistory,
maxItems,
lastKnownQueueIndex
)
}
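
A sketch of how a store might call this during a poll, assuming it tracks the highest queue index it has already rendered (the `lastSeenQueueIndex` name and the capacity of 64 are illustrative):

```typescript
import { reconcileHistory } from '@/platform/remote/comfyui/history/reconciliation'
import type { TaskItem } from '@/schemas/apiSchema'

function mergeServerUpdate(
  serverHistory: TaskItem[],
  clientHistory: TaskItem[],
  lastSeenQueueIndex: number | undefined
): TaskItem[] {
  // V1 builds keep items with queueIndex > lastSeenQueueIndex; cloud builds ignore
  // the index and merge by promptId. Both return newest-first, capped at 64 items.
  return reconcileHistory(serverHistory, clientHistory, 64, lastSeenQueueIndex)
}
```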

View File

@@ -1,15 +0,0 @@
/**
* @fileoverview History V1 types - Public interface used throughout the app
* @module platform/remote/comfyui/history/types/historyV1Types
*
* These types represent the V1 history format that the application expects.
* Both desktop (direct V1 API) and cloud (V2 API + adapter) return data in this format.
*/
import type { HistoryTaskItem, TaskPrompt } from '@/schemas/apiSchema'
export interface HistoryV1Response {
History: HistoryTaskItem[]
}
export type { HistoryTaskItem, TaskPrompt }

View File

@@ -1,46 +0,0 @@
/**
* @fileoverview History V2 types and schemas - Internal cloud API format
* @module platform/remote/comfyui/history/types/historyV2Types
*
* These types and schemas represent the V2 history format returned by the cloud API.
* They are only used internally and are converted to V1 format via adapter.
*
* IMPORTANT: These types should NOT be used outside this history module.
*/
import { z } from 'zod'
import {
zExtraData,
zPromptId,
zQueueIndex,
zStatus,
zTaskMeta,
zTaskOutput
} from '@/schemas/apiSchema'
const zTaskPromptV2 = z.object({
priority: zQueueIndex,
prompt_id: zPromptId,
extra_data: zExtraData
})
const zRawHistoryItemV2 = z.object({
prompt_id: zPromptId,
prompt: zTaskPromptV2,
status: zStatus.optional(),
outputs: zTaskOutput,
meta: zTaskMeta.optional(),
create_time: z.number().int().optional()
})
const zHistoryResponseV2 = z.object({
history: z.array(zRawHistoryItemV2)
})
export type TaskPromptV2 = z.infer<typeof zTaskPromptV2>
export type RawHistoryItemV2 = z.infer<typeof zRawHistoryItemV2>
export type HistoryResponseV2 = z.infer<typeof zHistoryResponseV2>
export type TaskOutput = z.infer<typeof zTaskOutput>
export { zRawHistoryItemV2 }
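
Since `zRawHistoryItemV2` is exported, a small validation sketch (a hypothetical guard, not part of the module) could look like:

```typescript
import { zRawHistoryItemV2 } from '@/platform/remote/comfyui/history/types/historyV2Types'

// Returns true when the candidate matches the raw V2 item schema; logs the
// zod issues otherwise so malformed cloud responses are easy to spot.
function isRawHistoryItemV2(candidate: unknown): boolean {
  const parsed = zRawHistoryItemV2.safeParse(candidate)
  if (!parsed.success) {
    console.warn('Malformed V2 history item', parsed.error.issues)
  }
  return parsed.success
}
```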

View File

@@ -1,9 +0,0 @@
/**
* @fileoverview Public history types export
* @module platform/remote/comfyui/history/types
*
* Only V1 types are exported publicly - the rest of the app
* should never need to know about V2 types or implementation details.
*/
export type * from './historyV1Types'

View File

@@ -16,17 +16,13 @@ type JobsListResponse = z.infer<typeof zJobsListResponse>
function createMockJob(
id: string,
status: 'pending' | 'in_progress' | 'completed' = 'completed',
status: 'pending' | 'in_progress' | 'completed' | 'failed' = 'completed',
overrides: Partial<RawJobListItem> = {}
): RawJobListItem {
return {
id,
status,
create_time: Date.now(),
execution_start_time: null,
execution_end_time: null,
preview_output: null,
outputs_count: 0,
...overrides
}
}
@@ -63,7 +59,7 @@ describe('fetchJobs', () => {
const result = await fetchHistory(mockFetch)
expect(mockFetch).toHaveBeenCalledWith(
'/jobs?status=completed&limit=200&offset=0'
'/jobs?status=completed,failed,cancelled&limit=200&offset=0'
)
expect(result).toHaveLength(2)
expect(result[0].id).toBe('job1')
@@ -113,7 +109,7 @@ describe('fetchJobs', () => {
const result = await fetchHistory(mockFetch, 200, 5)
expect(mockFetch).toHaveBeenCalledWith(
'/jobs?status=completed&limit=200&offset=5'
'/jobs?status=completed,failed,cancelled&limit=200&offset=5'
)
// Priority base is total - offset = 10 - 5 = 5
expect(result[0].priority).toBe(5) // (total - offset) - 0

View File

@@ -68,7 +68,7 @@ function assignPriority(
}
/**
* Fetches history (completed jobs)
* Fetches history (terminal state jobs: completed, failed, cancelled)
* Assigns synthetic priority starting from total (lower than queue jobs).
*/
export async function fetchHistory(
@@ -78,7 +78,7 @@ export async function fetchHistory(
): Promise<JobListItem[]> {
const { jobs, total } = await fetchJobsRaw(
fetchApi,
['completed'],
['completed', 'failed', 'cancelled'],
maxItems,
offset
)
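
A call-site sketch of the widened history fetch, assuming an async context; failed jobs now come back alongside completed and cancelled ones:

```typescript
import { api } from '@/scripts/api'
import { fetchHistory } from '@/platform/remote/comfyui/jobs/fetchJobs'

async function loadFailedJobs() {
  // GET /jobs?status=completed,failed,cancelled&limit=200&offset=0; each item
  // carries a synthetic priority so history sorts below anything still queued.
  const jobs = await fetchHistory(api.fetchApi.bind(api), 200, 0)
  return jobs.filter((job) => job.status === 'failed')
}
```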

View File

@@ -21,12 +21,15 @@ const zJobStatus = z.enum([
const zPreviewOutput = z.object({
filename: z.string(),
subfolder: z.string(),
type: resultItemType
type: resultItemType,
nodeId: z.string(),
mediaType: z.string()
})
/**
* Execution error details for error jobs.
* Contains the same structure as ExecutionErrorWsMessage from WebSocket.
* Execution error from Jobs API.
* Similar to ExecutionErrorWsMessage but with optional prompt_id/timestamp/executed
* since these may not be present in stored errors or infrastructure-generated errors.
*/
const zExecutionError = z
.object({
@@ -43,6 +46,8 @@ const zExecutionError = z
})
.passthrough()
export type ExecutionError = z.infer<typeof zExecutionError>
/**
* Raw job from API - uses passthrough to allow extra fields
*/
@@ -105,3 +110,9 @@ export type RawJobListItem = z.infer<typeof zRawJobListItem>
/** Job list item with priority always set (server-provided or synthetic) */
export type JobListItem = RawJobListItem & { priority: number }
export type JobDetail = z.infer<typeof zJobDetail>
/** Task type used in the API (queue vs history endpoints) */
export type APITaskType = 'queue' | 'history'
/** Internal task type derived from job status for UI display */
export type TaskType = 'Running' | 'Pending' | 'History'
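
One way the derived `TaskType` might be computed from a job's status; this mapping only illustrates the intent of the type and is not necessarily the exact logic used elsewhere in the codebase:

```typescript
import type { JobListItem, TaskType } from '@/platform/remote/comfyui/jobs/jobTypes'

// Active jobs map to 'Running'/'Pending'; every terminal status renders as 'History'.
function toTaskType(job: JobListItem): TaskType {
  if (job.status === 'in_progress') return 'Running'
  if (job.status === 'pending') return 'Pending'
  return 'History'
}
```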

View File

@@ -42,4 +42,5 @@ export type RemoteConfig = {
huggingface_model_import_enabled?: boolean
linear_toggle_enabled?: boolean
async_model_upload_enabled?: boolean
team_workspaces_enabled?: boolean
}

View File

@@ -413,6 +413,7 @@ export type ExecutionTriggerSource =
| 'keybinding'
| 'legacy_ui'
| 'unknown'
| 'linear'
/**
* Union type for all possible telemetry event properties

View File

@@ -1,11 +1,13 @@
import { describe, expect, it, vi } from 'vitest'
import type { JobDetail } from '@/platform/remote/comfyui/jobs/jobTypes'
import type { ComfyWorkflowJSON } from '@/platform/workflow/validation/schemas/workflowSchema'
import { getWorkflowFromHistory } from '@/platform/workflow/cloud/getWorkflowFromHistory'
import {
extractWorkflow,
fetchJobDetail
} from '@/platform/remote/comfyui/jobs/fetchJobs'
const mockWorkflow: ComfyWorkflowJSON = {
id: 'test-workflow-id',
revision: 0,
last_node_id: 5,
last_link_id: 3,
nodes: [],
@@ -16,75 +18,63 @@ const mockWorkflow: ComfyWorkflowJSON = {
version: 0.4
}
const mockHistoryResponse = {
'test-prompt-id': {
prompt: {
priority: 1,
prompt_id: 'test-prompt-id',
extra_data: {
client_id: 'test-client',
extra_pnginfo: {
workflow: mockWorkflow
}
// Jobs API detail response structure (matches actual /jobs/{id} response)
// workflow is nested at: workflow.extra_data.extra_pnginfo.workflow
const mockJobDetailResponse: JobDetail = {
id: 'test-prompt-id',
status: 'completed',
create_time: 1234567890,
update_time: 1234567900,
workflow: {
extra_data: {
extra_pnginfo: {
workflow: mockWorkflow
}
},
outputs: {},
status: {
status_str: 'success',
completed: true,
messages: []
}
},
outputs: {
'20': {
images: [
{ filename: 'test.png', subfolder: '', type: 'output' },
{ filename: 'test2.png', subfolder: '', type: 'output' }
]
}
}
}
describe('getWorkflowFromHistory', () => {
it('should fetch workflow from /history_v2/{prompt_id} endpoint', async () => {
describe('fetchJobDetail', () => {
it('should fetch job detail from /jobs/{prompt_id} endpoint', async () => {
const mockFetchApi = vi.fn().mockResolvedValue({
json: async () => mockHistoryResponse
ok: true,
json: async () => mockJobDetailResponse
})
await getWorkflowFromHistory(mockFetchApi, 'test-prompt-id')
await fetchJobDetail(mockFetchApi, 'test-prompt-id')
expect(mockFetchApi).toHaveBeenCalledWith('/history_v2/test-prompt-id')
expect(mockFetchApi).toHaveBeenCalledWith('/jobs/test-prompt-id')
})
it('should extract and return workflow from response', async () => {
it('should return job detail with workflow and outputs', async () => {
const mockFetchApi = vi.fn().mockResolvedValue({
json: async () => mockHistoryResponse
ok: true,
json: async () => mockJobDetailResponse
})
const result = await getWorkflowFromHistory(mockFetchApi, 'test-prompt-id')
const result = await fetchJobDetail(mockFetchApi, 'test-prompt-id')
expect(result).toEqual(mockWorkflow)
expect(result).toBeDefined()
expect(result?.id).toBe('test-prompt-id')
expect(result?.outputs).toEqual(mockJobDetailResponse.outputs)
expect(result?.workflow).toBeDefined()
})
it('should return undefined when prompt_id not found in response', async () => {
it('should return undefined when job not found (non-OK response)', async () => {
const mockFetchApi = vi.fn().mockResolvedValue({
json: async () => ({})
ok: false,
status: 404
})
const result = await getWorkflowFromHistory(mockFetchApi, 'nonexistent-id')
expect(result).toBeUndefined()
})
it('should return undefined when workflow is missing from extra_pnginfo', async () => {
const mockFetchApi = vi.fn().mockResolvedValue({
json: async () => ({
'test-prompt-id': {
prompt: {
priority: 1,
prompt_id: 'test-prompt-id',
extra_data: {
client_id: 'test-client'
}
},
outputs: {}
}
})
})
const result = await getWorkflowFromHistory(mockFetchApi, 'test-prompt-id')
const result = await fetchJobDetail(mockFetchApi, 'nonexistent-id')
expect(result).toBeUndefined()
})
@@ -92,19 +82,45 @@ describe('getWorkflowFromHistory', () => {
it('should handle fetch errors gracefully', async () => {
const mockFetchApi = vi.fn().mockRejectedValue(new Error('Network error'))
const result = await getWorkflowFromHistory(mockFetchApi, 'test-prompt-id')
const result = await fetchJobDetail(mockFetchApi, 'test-prompt-id')
expect(result).toBeUndefined()
})
it('should handle malformed JSON responses', async () => {
const mockFetchApi = vi.fn().mockResolvedValue({
ok: true,
json: async () => {
throw new Error('Invalid JSON')
}
})
const result = await getWorkflowFromHistory(mockFetchApi, 'test-prompt-id')
const result = await fetchJobDetail(mockFetchApi, 'test-prompt-id')
expect(result).toBeUndefined()
})
})
describe('extractWorkflow', () => {
it('should extract workflow from job detail', async () => {
const result = await extractWorkflow(mockJobDetailResponse)
expect(result).toEqual(mockWorkflow)
})
it('should return undefined when job is undefined', async () => {
const result = await extractWorkflow(undefined)
expect(result).toBeUndefined()
})
it('should return undefined when workflow is missing', async () => {
const jobWithoutWorkflow: JobDetail = {
...mockJobDetailResponse,
workflow: {}
}
const result = await extractWorkflow(jobWithoutWorkflow)
expect(result).toBeUndefined()
})

View File

@@ -1,21 +0,0 @@
import type { ComfyWorkflowJSON } from '@/platform/workflow/validation/schemas/workflowSchema'
import type { PromptId } from '@/schemas/apiSchema'
export async function getWorkflowFromHistory(
fetchApi: (url: string) => Promise<Response>,
promptId: PromptId
): Promise<ComfyWorkflowJSON | undefined> {
try {
const res = await fetchApi(`/history_v2/${promptId}`)
const json = await res.json()
const historyItem = json[promptId]
if (!historyItem) return undefined
const workflow = historyItem.prompt?.extra_data?.extra_pnginfo?.workflow
return workflow ?? undefined
} catch (error) {
console.error(`Failed to fetch workflow for prompt ${promptId}:`, error)
return undefined
}
}

View File

@@ -1,10 +0,0 @@
/**
* Cloud: Fetches workflow by prompt_id. Desktop: Returns undefined (workflows already in history).
*/
import { isCloud } from '@/platform/distribution/types'
import { getWorkflowFromHistory as cloudImpl } from './getWorkflowFromHistory'
export const getWorkflowFromHistory = isCloud
? cloudImpl
: async () => undefined

View File

@@ -8,17 +8,18 @@ import type { AssetItem } from '@/platform/assets/schemas/assetSchema'
import { getOutputAssetMetadata } from '@/platform/assets/schemas/assetMetadataSchema'
import { getAssetUrl } from '@/platform/assets/utils/assetUrlUtil'
import { getWorkflowDataFromFile } from '@/scripts/metadata/parser'
import { getJobWorkflow } from '@/services/jobOutputCache'
/**
* Extract workflow from AssetItem (async - may need file fetch)
* Tries metadata first (for output assets), then falls back to extracting from file
* This supports both output assets (with embedded metadata) and input assets (PNG with workflow)
* Extract workflow from AssetItem using jobs API
* For output assets: uses jobs API (getJobWorkflow)
* For input assets: extracts from file metadata
*
* @param asset The asset item to extract workflow from
* @returns WorkflowSource with workflow and generated filename
*
* @example
* const asset = { name: 'output.png', user_metadata: { workflow: {...} } }
* const asset = { name: 'output.png', user_metadata: { promptId: '123' } }
* const { workflow, filename } = await extractWorkflowFromAsset(asset)
*/
export async function extractWorkflowFromAsset(asset: AssetItem): Promise<{
@@ -27,17 +28,14 @@ export async function extractWorkflowFromAsset(asset: AssetItem): Promise<{
}> {
const baseFilename = asset.name.replace(/\.[^/.]+$/, '.json')
// Strategy 1: Try metadata first (for output assets)
// For output assets: use jobs API (with caching and validation)
const metadata = getOutputAssetMetadata(asset.user_metadata)
if (metadata?.workflow) {
return {
workflow: metadata.workflow as ComfyWorkflowJSON,
filename: baseFilename
}
if (metadata?.promptId) {
const workflow = await getJobWorkflow(metadata.promptId)
return { workflow: workflow ?? null, filename: baseFilename }
}
// Strategy 2: Try extracting from file (for input assets with embedded workflow)
// This supports PNG, WEBP, FLAC, and other formats with metadata
// For input assets: extract from file metadata (PNG/WEBP/FLAC with embedded workflow)
try {
const fileUrl = getAssetUrl(asset)
const response = await fetch(fileUrl)
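
A usage sketch of the reworked helper, assuming `extractWorkflowFromAsset` is imported from its module (the file path is not visible in this hunk) and that the asset literal below stands in for a real output asset:

```typescript
import type { AssetItem } from '@/platform/assets/schemas/assetSchema'

const outputAsset = {
  name: 'ComfyUI_00001_.png',
  user_metadata: { promptId: 'abc-123' }
} as unknown as AssetItem

// Output assets resolve through the Jobs API (cached via getJobWorkflow);
// workflow is null when no workflow is stored for that promptId.
const { workflow, filename } = await extractWorkflowFromAsset(outputAsset)
// filename === 'ComfyUI_00001_.json'
```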

View File

@@ -18,6 +18,7 @@ const height = ref('')
ref="imageRef"
:src
v-bind="slotProps"
class="h-full object-contain w-full"
@load="
() => {
if (!imageRef) return

View File

@@ -118,9 +118,6 @@ async function runButtonClick(e: Event) {
? 'Comfy.QueuePromptFront'
: 'Comfy.QueuePrompt'
useTelemetry()?.trackUiButtonClicked({
button_id: props.mobile ? 'queue_run_linear_mobile' : 'queue_run_linear'
})
if (batchCount.value > 1) {
useTelemetry()?.trackUiButtonClicked({
button_id: 'queue_run_multiple_batches_submitted'
@@ -129,7 +126,7 @@ async function runButtonClick(e: Event) {
await commandStore.execute(commandId, {
metadata: {
subscribe_to_run: false,
trigger_source: 'button'
trigger_source: 'linear'
}
})
} finally {

View File

@@ -178,7 +178,7 @@ async function rerun(e: Event) {
/>
<img
v-else
class="pointer-events-none object-contain flex-1 max-h-full md:contain-size brightness-50 opacity-10"
class="pointer-events-none flex-1 max-h-full md:contain-size brightness-50 opacity-10"
src="/assets/images/comfy-logo-mono.svg"
/>
</template>

View File

@@ -247,18 +247,16 @@ useEventListener(document.body, 'keydown', (e: KeyboardEvent) => {
"
/>
</section>
<section
v-for="(item, index) in outputs.media.value"
:key="index"
data-testid="linear-job"
class="py-3 not-md:h-24 border-border-subtle flex md:flex-col md:w-full px-1 first:border-t-0 first:border-l-0 md:border-t-2 not-md:border-l-2"
>
<template v-for="(item, index) in outputs.media.value" :key="index">
<div
class="border-border-subtle not-md:border-l md:border-t first:border-none not-md:h-21 md:w-full m-3"
/>
<template v-for="(output, key) in allOutputs(item)" :key>
<img
v-if="getMediaType(output) === 'images'"
:class="
cn(
'p-1 rounded-lg aspect-square object-cover',
'p-1 rounded-lg aspect-square object-cover not-md:h-20 md:w-full',
index === selectedIndex[0] &&
key === selectedIndex[1] &&
'border-2'
@@ -286,7 +284,7 @@ useEventListener(document.body, 'keydown', (e: KeyboardEvent) => {
/>
</div>
</template>
</section>
</template>
</article>
</div>
<Teleport

View File

@@ -1,18 +1,14 @@
import { z } from 'zod'
import { LinkMarkerShape } from '@/lib/litegraph/src/litegraph'
import {
zComfyWorkflow,
zNodeId
} from '@/platform/workflow/validation/schemas/workflowSchema'
import { zNodeId } from '@/platform/workflow/validation/schemas/workflowSchema'
import { colorPalettesSchema } from '@/schemas/colorPaletteSchema'
import { zKeybinding } from '@/schemas/keyBindingSchema'
import { NodeBadgeMode } from '@/types/nodeSource'
import { LinkReleaseTriggerAction } from '@/types/searchBoxTypes'
const zNodeType = z.string()
export const zQueueIndex = z.number()
export const zPromptId = z.string()
const zPromptId = z.string()
export type PromptId = z.infer<typeof zPromptId>
export const resultItemType = z.enum(['input', 'output', 'temp'])
export type ResultItemType = z.infer<typeof resultItemType>
@@ -173,142 +169,9 @@ export type AssetDownloadWsMessage = z.infer<typeof zAssetDownloadWsMessage>
export type NotificationWsMessage = z.infer<typeof zNotificationWsMessage>
const zPromptInputItem = z.object({
inputs: z.record(z.string(), z.any()),
class_type: zNodeType
})
const zPromptInputs = z.record(zPromptInputItem)
const zExtraPngInfo = z
.object({
workflow: zComfyWorkflow
})
.passthrough()
export const zExtraData = z
.object({
/** extra_pnginfo can be missing if backend execution gets a validation error. */
extra_pnginfo: zExtraPngInfo.optional(),
client_id: z.string().optional(),
// Cloud/Adapters: creation time in milliseconds when available
create_time: z.number().int().optional()
})
// Allow backend/adapters/extensions to add arbitrary metadata
.passthrough()
const zOutputsToExecute = z.array(zNodeId)
const zExecutionStartMessage = z.tuple([
z.literal('execution_start'),
zExecutionStartWsMessage
])
const zExecutionSuccessMessage = z.tuple([
z.literal('execution_success'),
zExecutionSuccessWsMessage
])
const zExecutionCachedMessage = z.tuple([
z.literal('execution_cached'),
zExecutionCachedWsMessage
])
const zExecutionInterruptedMessage = z.tuple([
z.literal('execution_interrupted'),
zExecutionInterruptedWsMessage
])
const zExecutionErrorMessage = z.tuple([
z.literal('execution_error'),
zExecutionErrorWsMessage
])
const zStatusMessage = z.union([
zExecutionStartMessage,
zExecutionSuccessMessage,
zExecutionCachedMessage,
zExecutionInterruptedMessage,
zExecutionErrorMessage
])
export const zStatus = z.object({
status_str: z.enum(['success', 'error']),
completed: z.boolean(),
messages: z.array(zStatusMessage)
})
const zTaskPrompt = z.tuple([
zQueueIndex,
zPromptId,
zPromptInputs,
zExtraData,
zOutputsToExecute
])
const zRunningTaskItem = z.object({
taskType: z.literal('Running'),
prompt: zTaskPrompt,
// @Deprecated
remove: z.object({
name: z.literal('Cancel'),
cb: z.function()
})
})
const zPendingTaskItem = z.object({
taskType: z.literal('Pending'),
prompt: zTaskPrompt
})
export const zTaskOutput = z.record(zNodeId, zOutputs)
const zNodeOutputsMeta = z.object({
node_id: zNodeId,
display_node: zNodeId,
prompt_id: zPromptId.optional(),
read_node_id: zNodeId.optional()
})
export const zTaskMeta = z.record(zNodeId, zNodeOutputsMeta)
const zHistoryTaskItem = z.object({
taskType: z.literal('History'),
prompt: zTaskPrompt,
status: zStatus.optional(),
outputs: zTaskOutput,
meta: zTaskMeta.optional()
})
const zTaskItem = z.union([
zRunningTaskItem,
zPendingTaskItem,
zHistoryTaskItem
])
const zTaskType = z.union([
z.literal('Running'),
z.literal('Pending'),
z.literal('History')
])
export type TaskType = z.infer<typeof zTaskType>
export type TaskPrompt = z.infer<typeof zTaskPrompt>
export type TaskStatus = z.infer<typeof zStatus>
export type TaskOutput = z.infer<typeof zTaskOutput>
// Individual TaskPrompt components for raw queue response handling
export type QueueIndex = z.infer<typeof zQueueIndex>
export type PromptInputs = z.infer<typeof zPromptInputs>
export type ExtraData = z.infer<typeof zExtraData>
export type OutputsToExecute = z.infer<typeof zOutputsToExecute>
// `/queue`
export type RunningTaskItem = z.infer<typeof zRunningTaskItem>
export type PendingTaskItem = z.infer<typeof zPendingTaskItem>
// `/history`
export type HistoryTaskItem = z.infer<typeof zHistoryTaskItem>
export type TaskItem = z.infer<typeof zTaskItem>
const zEmbeddingsResponse = z.array(z.string())
const zExtensionsResponse = z.array(z.string())
const zError = z.object({

View File

@@ -10,6 +10,7 @@ import type {
} from '@/platform/assets/schemas/assetSchema'
import { isCloud } from '@/platform/distribution/types'
import { useToastStore } from '@/platform/updates/common/toastStore'
import type { IFuseOptions } from 'fuse.js'
import {
type TemplateInfo,
type WorkflowTemplates
@@ -30,37 +31,35 @@ import type {
ExecutionStartWsMessage,
ExecutionSuccessWsMessage,
ExtensionsResponse,
ExtraData,
FeatureFlagsWsMessage,
HistoryTaskItem,
LogsRawResponse,
LogsWsMessage,
NotificationWsMessage,
OutputsToExecute,
PendingTaskItem,
PreviewMethod,
ProgressStateWsMessage,
ProgressTextWsMessage,
ProgressWsMessage,
PromptId,
PromptInputs,
PromptResponse,
QueueIndex,
RunningTaskItem,
Settings,
StatusWsMessage,
StatusWsMessageStatus,
SystemStats,
TaskPrompt,
User,
UserDataFullInfo
} from '@/schemas/apiSchema'
import type {
JobDetail,
JobListItem
} from '@/platform/remote/comfyui/jobs/jobTypes'
import type { ComfyNodeDef } from '@/schemas/nodeDefSchema'
import type { useFirebaseAuthStore } from '@/stores/firebaseAuthStore'
import type { AuthHeader } from '@/types/authTypes'
import type { NodeExecutionId } from '@/types/nodeIdentification'
import { fetchHistory } from '@/platform/remote/comfyui/history'
import type { IFuseOptions } from 'fuse.js'
import {
fetchHistory,
fetchJobDetail,
fetchQueue
} from '@/platform/remote/comfyui/jobs/fetchJobs'
interface QueuePromptRequestBody {
client_id: string
@@ -676,7 +675,6 @@ export class ComfyApi extends EventTarget {
case 'logs':
case 'b_preview':
case 'notification':
case 'asset_download':
this.dispatchCustomEvent(msg.type, msg.data)
break
case 'feature_flags':
@@ -899,70 +897,13 @@ export class ComfyApi extends EventTarget {
* @returns The currently running and queued items
*/
async getQueue(): Promise<{
Running: RunningTaskItem[]
Pending: PendingTaskItem[]
Running: JobListItem[]
Pending: JobListItem[]
}> {
try {
const res = await this.fetchApi('/queue')
const data = await res.json()
// Raw queue prompt tuple types from different backends:
// - V1 Backend: [idx, prompt_id, inputs, extra_data, outputs_to_execute]
// - Cloud: [idx, prompt_id, inputs, outputs_to_execute, metadata]
type V1RawPrompt = [
QueueIndex,
PromptId,
PromptInputs,
ExtraData,
OutputsToExecute
]
type CloudRawPrompt = [
QueueIndex,
PromptId,
PromptInputs,
OutputsToExecute,
Record<string, unknown>
]
type RawQueuePrompt = V1RawPrompt | CloudRawPrompt
const normalizeQueuePrompt = (prompt: RawQueuePrompt): TaskPrompt => {
if (!Array.isArray(prompt)) {
console.warn('Unexpected non-array queue prompt:', prompt)
return prompt as TaskPrompt
}
const fourth = prompt[3]
// Cloud shape: 4th is array (outputs), 5th is metadata object
if (Array.isArray(fourth)) {
const cloudPrompt = prompt as CloudRawPrompt
const extraData: ExtraData = { ...cloudPrompt[4] }
return [
cloudPrompt[0],
cloudPrompt[1],
cloudPrompt[2],
extraData,
cloudPrompt[3]
]
}
// V1 shape already matches TaskPrompt
return prompt as V1RawPrompt
}
return {
// Running action uses a different endpoint for cancelling
Running: data.queue_running.map((prompt: RawQueuePrompt) => {
const np = normalizeQueuePrompt(prompt)
return {
taskType: 'Running' as const,
prompt: np,
// prompt[1] is the prompt id
remove: { name: 'Cancel' as const, cb: () => api.interrupt(np[1]) }
}
}),
Pending: data.queue_pending.map((prompt: RawQueuePrompt) => ({
taskType: 'Pending' as const,
prompt: normalizeQueuePrompt(prompt)
}))
}
return await fetchQueue(this.fetchApi.bind(this))
} catch (error) {
console.error(error)
console.error('Failed to fetch queue:', error)
return { Running: [], Pending: [] }
}
}
@@ -974,7 +915,7 @@ export class ComfyApi extends EventTarget {
async getHistory(
max_items: number = 200,
options?: { offset?: number }
): Promise<{ History: HistoryTaskItem[] }> {
): Promise<JobListItem[]> {
try {
return await fetchHistory(
this.fetchApi.bind(this),
@@ -983,10 +924,19 @@ export class ComfyApi extends EventTarget {
)
} catch (error) {
console.error(error)
return { History: [] }
return []
}
}
/**
* Gets detailed job info including outputs and workflow
* @param jobId The job/prompt ID
* @returns Full job details or undefined if not found
*/
async getJobDetail(jobId: string): Promise<JobDetail | undefined> {
return fetchJobDetail(this.fetchApi.bind(this), jobId)
}
/**
* Gets system & device stats
* @returns System stats such as python version, OS, per device info
@@ -1296,29 +1246,6 @@ export class ComfyApi extends EventTarget {
}
}
/**
* Gets the Fuse options from the server.
*
* @returns The Fuse options, or null if not found or invalid
*/
async getFuseOptions(): Promise<IFuseOptions<TemplateInfo> | null> {
try {
const res = await axios.get(
this.fileURL('/templates/fuse_options.json'),
{
headers: {
'Content-Type': 'application/json'
}
}
)
const contentType = res.headers['content-type']
return contentType?.includes('application/json') ? res.data : null
} catch (error) {
console.error('Error loading fuse options:', error)
return null
}
}
/**
* Gets the custom nodes i18n data from the server.
*
@@ -1354,6 +1281,24 @@ export class ComfyApi extends EventTarget {
getServerFeatures(): Record<string, unknown> {
return { ...this.serverFeatureFlags }
}
async getFuseOptions(): Promise<IFuseOptions<TemplateInfo> | null> {
try {
const res = await axios.get(
this.fileURL('/templates/fuse_options.json'),
{
headers: {
'Content-Type': 'application/json'
}
}
)
const contentType = res.headers['content-type']
return contentType?.includes('application/json') ? res.data : null
} catch (error) {
console.error('Error loading fuse options:', error)
return null
}
}
}
export const api = new ComfyApi()
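
A sketch of the new client-side flow: the list call returns lightweight `JobListItem`s, and full outputs/workflow are fetched per job on demand:

```typescript
import { api } from '@/scripts/api'

async function openMostRecentJob() {
  const history = await api.getHistory(50) // JobListItem[] of terminal-state jobs
  const latest = history[0]
  if (!latest) return undefined
  // Detail includes outputs and (when stored) the embedded workflow.
  return api.getJobDetail(latest.id)
}
```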

View File

@@ -1,6 +1,6 @@
import { useSettingStore } from '@/platform/settings/settingStore'
import { WORKFLOW_ACCEPT_STRING } from '@/platform/workflow/core/types/formats'
import { type StatusWsMessageStatus, type TaskItem } from '@/schemas/apiSchema'
import { type StatusWsMessageStatus } from '@/schemas/apiSchema'
import { useDialogService } from '@/services/dialogService'
import { isCloud } from '@/platform/distribution/types'
import { useTelemetry } from '@/platform/telemetry'
@@ -33,6 +33,17 @@ type Props = {
type Children = Element[] | Element | string | string[]
/**
* @deprecated Legacy queue item structure from old history API.
* Will be removed when ComfyList is migrated to Jobs API.
*/
interface LegacyQueueItem {
prompt: [unknown, string, unknown, { extra_pnginfo: { workflow: unknown } }]
outputs?: Record<string, unknown>
meta?: Record<string, { display_node?: string }>
remove?: { name: string; cb: () => Promise<void> | void }
}
type ElementType<K extends string> = K extends keyof HTMLElementTagNameMap
? HTMLElementTagNameMap[K]
: HTMLElement
@@ -259,29 +270,28 @@ class ComfyList {
$el('div.comfy-list-items', [
// @ts-expect-error fixme ts strict error
...(this.#reverse ? items[section].reverse() : items[section]).map(
(item: TaskItem) => {
(item: LegacyQueueItem) => {
// Allow items to specify a custom remove action (e.g. for interrupting the current prompt)
const removeAction =
'remove' in item
? item.remove
: {
name: 'Delete',
cb: () => api.deleteItem(this.#type, item.prompt[1])
}
const removeAction = item.remove ?? {
name: 'Delete',
cb: () => api.deleteItem(this.#type, item.prompt[1])
}
return $el('div', { textContent: item.prompt[0] + ': ' }, [
$el('button', {
textContent: 'Load',
onclick: async () => {
await app.loadGraphData(
// @ts-expect-error fixme ts strict error
item.prompt[3].extra_pnginfo.workflow,
item.prompt[3].extra_pnginfo.workflow as Parameters<
typeof app.loadGraphData
>[0],
true,
false
)
if ('outputs' in item) {
if ('outputs' in item && item.outputs) {
app.nodeOutputs = {}
for (const [key, value] of Object.entries(item.outputs)) {
const realKey = item['meta']?.[key]?.display_node ?? key
// @ts-expect-error fixme ts strict error
app.nodeOutputs[realKey] = value
}
}

View File

@@ -19,15 +19,12 @@ import { useTelemetry } from '@/platform/telemetry'
import { isCloud } from '@/platform/distribution/types'
import { useSubscription } from '@/platform/cloud/subscription/composables/useSubscription'
import SettingDialogContent from '@/platform/settings/components/SettingDialogContent.vue'
import type { ExecutionErrorWsMessage } from '@/schemas/apiSchema'
import { useDialogStore } from '@/stores/dialogStore'
import type {
DialogComponentProps,
ShowDialogOptions
} from '@/stores/dialogStore'
import ManagerDialogContent from '@/workbench/extensions/manager/components/manager/ManagerDialogContent.vue'
import ManagerHeader from '@/workbench/extensions/manager/components/manager/ManagerHeader.vue'
import ImportFailedNodeContent from '@/workbench/extensions/manager/components/manager/ImportFailedNodeContent.vue'
import ImportFailedNodeFooter from '@/workbench/extensions/manager/components/manager/ImportFailedNodeFooter.vue'
import ImportFailedNodeHeader from '@/workbench/extensions/manager/components/manager/ImportFailedNodeHeader.vue'
@@ -45,6 +42,18 @@ export type ConfirmationDialogType =
| 'dirtyClose'
| 'reinstall'
/**
* Minimal interface for execution error dialogs.
* Satisfied by both ExecutionErrorWsMessage (WebSocket) and ExecutionError (Jobs API).
*/
export interface ExecutionErrorDialogInput {
exception_type: string
exception_message: string
node_id: string | number
node_type: string
traceback: string[]
}
export const useDialogService = () => {
const dialogStore = useDialogStore()
@@ -115,7 +124,7 @@ export const useDialogService = () => {
})
}
function showExecutionErrorDialog(executionError: ExecutionErrorWsMessage) {
function showExecutionErrorDialog(executionError: ExecutionErrorDialogInput) {
const props: ComponentAttrs<typeof ErrorDialogContent> = {
error: {
exceptionType: executionError.exception_type,
@@ -141,32 +150,6 @@ export const useDialogService = () => {
})
}
function showManagerDialog(
props: ComponentAttrs<typeof ManagerDialogContent> = {}
) {
dialogStore.showDialog({
key: 'global-manager',
component: ManagerDialogContent,
headerComponent: ManagerHeader,
dialogComponentProps: {
closable: true,
pt: {
pcCloseButton: {
root: {
class: 'bg-dialog-surface w-9 h-9 p-1.5 rounded-full text-white'
}
},
header: { class: 'py-0! px-6 m-0! h-[68px]' },
content: {
class: 'p-0! h-full w-[90vw] max-w-full flex-1 overflow-hidden'
},
root: { class: 'manager-dialog' }
}
},
props
})
}
function parseError(error: Error) {
const filename =
'fileName' in error
@@ -408,20 +391,10 @@ export const useDialogService = () => {
}
}
function toggleManagerDialog(
props?: ComponentAttrs<typeof ManagerDialogContent>
) {
if (dialogStore.isDialogOpen('global-manager')) {
dialogStore.closeDialog({ key: 'global-manager' })
} else {
showManagerDialog(props)
}
}
function showLayoutDialog(options: {
key: string
component: Component
props: { onClose: () => void }
props: { onClose: () => void } & Record<string, unknown>
dialogComponentProps?: DialogComponentProps
}) {
const layoutDefaultProps: DialogComponentProps = {
@@ -552,7 +525,6 @@ export const useDialogService = () => {
showSettingsDialog,
showAboutDialog,
showExecutionErrorDialog,
showManagerDialog,
showApiNodesSignInDialog,
showSignInDialog,
showSubscriptionRequiredDialog,
@@ -562,7 +534,6 @@ export const useDialogService = () => {
prompt,
showErrorDialog,
confirm,
toggleManagerDialog,
showLayoutDialog,
showImportFailedNodeDialog,
showNodeConflictDialog
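
A sketch showing how a Jobs API error can reuse the same dialog entry point now that the input type is the minimal `ExecutionErrorDialogInput` (the error values below are invented for illustration):

```typescript
import { useDialogService } from '@/services/dialogService'
import type { ExecutionErrorDialogInput } from '@/services/dialogService'

// Works for both WebSocket ExecutionErrorWsMessage and Jobs API ExecutionError,
// since both satisfy the minimal dialog input shape.
function reportJobError() {
  const jobError: ExecutionErrorDialogInput = {
    exception_type: 'RuntimeError',
    exception_message: 'CUDA out of memory',
    node_id: 42,
    node_type: 'KSampler',
    traceback: ['Traceback (most recent call last): ...']
  }
  useDialogService().showExecutionErrorDialog(jobError)
}
```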

View File

@@ -0,0 +1,278 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
import type {
JobDetail,
JobListItem
} from '@/platform/remote/comfyui/jobs/jobTypes'
import { ResultItemImpl, TaskItemImpl } from '@/stores/queueStore'
vi.mock('@/platform/remote/comfyui/jobs/fetchJobs', () => ({
fetchJobDetail: vi.fn(),
extractWorkflow: vi.fn()
}))
function createResultItem(url: string, supportsPreview = true): ResultItemImpl {
const item = new ResultItemImpl({
filename: url,
subfolder: '',
type: 'output',
nodeId: 'node-1',
mediaType: supportsPreview ? 'images' : 'unknown'
})
Object.defineProperty(item, 'url', { get: () => url })
Object.defineProperty(item, 'supportsPreview', { get: () => supportsPreview })
return item
}
function createMockJob(id: string, outputsCount = 1): JobListItem {
return {
id,
status: 'completed',
create_time: Date.now(),
preview_output: null,
outputs_count: outputsCount,
priority: 0
}
}
function createTask(
preview?: ResultItemImpl,
allOutputs?: ResultItemImpl[],
outputsCount = 1
): TaskItemImpl {
const job = createMockJob(
`task-${Math.random().toString(36).slice(2)}`,
outputsCount
)
const flatOutputs = allOutputs ?? (preview ? [preview] : [])
return new TaskItemImpl(job, {}, flatOutputs)
}
describe('jobOutputCache', () => {
beforeEach(() => {
vi.resetModules()
vi.clearAllMocks()
})
describe('findActiveIndex', () => {
it('returns index of matching URL', async () => {
const { findActiveIndex } = await import('@/services/jobOutputCache')
const items = [
createResultItem('a'),
createResultItem('b'),
createResultItem('c')
]
expect(findActiveIndex(items, 'b')).toBe(1)
})
it('returns 0 when URL not found', async () => {
const { findActiveIndex } = await import('@/services/jobOutputCache')
const items = [createResultItem('a'), createResultItem('b')]
expect(findActiveIndex(items, 'missing')).toBe(0)
})
it('returns 0 when URL is undefined', async () => {
const { findActiveIndex } = await import('@/services/jobOutputCache')
const items = [createResultItem('a'), createResultItem('b')]
expect(findActiveIndex(items, undefined)).toBe(0)
})
})
describe('getOutputsForTask', () => {
it('returns previewable outputs directly when no lazy load needed', async () => {
const { getOutputsForTask } = await import('@/services/jobOutputCache')
const outputs = [createResultItem('p-1'), createResultItem('p-2')]
const task = createTask(undefined, outputs, 1)
const result = await getOutputsForTask(task)
expect(result).toEqual(outputs)
})
it('lazy loads when outputsCount > 1', async () => {
const { getOutputsForTask } = await import('@/services/jobOutputCache')
const previewOutput = createResultItem('preview')
const fullOutputs = [
createResultItem('full-1'),
createResultItem('full-2')
]
const job = createMockJob('task-1', 3)
const task = new TaskItemImpl(job, {}, [previewOutput])
const loadedTask = new TaskItemImpl(job, {}, fullOutputs)
task.loadFullOutputs = vi.fn().mockResolvedValue(loadedTask)
const result = await getOutputsForTask(task)
expect(result).toEqual(fullOutputs)
expect(task.loadFullOutputs).toHaveBeenCalled()
})
it('caches loaded tasks', async () => {
const { getOutputsForTask } = await import('@/services/jobOutputCache')
const fullOutputs = [createResultItem('full-1')]
const job = createMockJob('task-1', 3)
const task = new TaskItemImpl(job, {}, [createResultItem('preview')])
const loadedTask = new TaskItemImpl(job, {}, fullOutputs)
task.loadFullOutputs = vi.fn().mockResolvedValue(loadedTask)
// First call should load
await getOutputsForTask(task)
expect(task.loadFullOutputs).toHaveBeenCalledTimes(1)
// Second call should use cache
await getOutputsForTask(task)
expect(task.loadFullOutputs).toHaveBeenCalledTimes(1)
})
it('falls back to preview outputs on load error', async () => {
const { getOutputsForTask } = await import('@/services/jobOutputCache')
const previewOutput = createResultItem('preview')
const job = createMockJob('task-1', 3)
const task = new TaskItemImpl(job, {}, [previewOutput])
task.loadFullOutputs = vi
.fn()
.mockRejectedValue(new Error('Network error'))
const result = await getOutputsForTask(task)
expect(result).toEqual([previewOutput])
})
it('returns null when request is superseded', async () => {
const { getOutputsForTask } = await import('@/services/jobOutputCache')
const job1 = createMockJob('task-1', 3)
const job2 = createMockJob('task-2', 3)
const task1 = new TaskItemImpl(job1, {}, [createResultItem('preview-1')])
const task2 = new TaskItemImpl(job2, {}, [createResultItem('preview-2')])
const loadedTask1 = new TaskItemImpl(job1, {}, [
createResultItem('full-1')
])
const loadedTask2 = new TaskItemImpl(job2, {}, [
createResultItem('full-2')
])
// Task1 loads slowly, task2 loads quickly
task1.loadFullOutputs = vi.fn().mockImplementation(
() =>
new Promise((resolve) => {
setTimeout(() => resolve(loadedTask1), 50)
})
)
task2.loadFullOutputs = vi.fn().mockResolvedValue(loadedTask2)
// Start task1, then immediately start task2
const promise1 = getOutputsForTask(task1)
const promise2 = getOutputsForTask(task2)
const [result1, result2] = await Promise.all([promise1, promise2])
// Task2 should succeed, task1 should return null (superseded)
expect(result1).toBeNull()
expect(result2).toEqual([createResultItem('full-2')])
})
})
describe('getJobDetail', () => {
it('fetches and caches job detail', async () => {
const { getJobDetail } = await import('@/services/jobOutputCache')
const { fetchJobDetail } =
await import('@/platform/remote/comfyui/jobs/fetchJobs')
const mockDetail: JobDetail = {
id: 'job-1',
status: 'completed',
create_time: Date.now(),
priority: 0,
outputs: {}
}
vi.mocked(fetchJobDetail).mockResolvedValue(mockDetail)
const result = await getJobDetail('job-1')
expect(result).toEqual(mockDetail)
expect(fetchJobDetail).toHaveBeenCalledWith(expect.any(Function), 'job-1')
})
it('returns cached job detail on subsequent calls', async () => {
const { getJobDetail } = await import('@/services/jobOutputCache')
const { fetchJobDetail } =
await import('@/platform/remote/comfyui/jobs/fetchJobs')
const mockDetail: JobDetail = {
id: 'job-2',
status: 'completed',
create_time: Date.now(),
priority: 0,
outputs: {}
}
vi.mocked(fetchJobDetail).mockResolvedValue(mockDetail)
// First call
await getJobDetail('job-2')
expect(fetchJobDetail).toHaveBeenCalledTimes(1)
// Second call should use cache
const result = await getJobDetail('job-2')
expect(result).toEqual(mockDetail)
expect(fetchJobDetail).toHaveBeenCalledTimes(1)
})
it('returns undefined on fetch error', async () => {
const { getJobDetail } = await import('@/services/jobOutputCache')
const { fetchJobDetail } =
await import('@/platform/remote/comfyui/jobs/fetchJobs')
vi.mocked(fetchJobDetail).mockRejectedValue(new Error('Network error'))
const result = await getJobDetail('job-error')
expect(result).toBeUndefined()
})
})
describe('getJobWorkflow', () => {
it('fetches job detail and extracts workflow', async () => {
const { getJobWorkflow } = await import('@/services/jobOutputCache')
const { fetchJobDetail, extractWorkflow } =
await import('@/platform/remote/comfyui/jobs/fetchJobs')
const mockDetail: JobDetail = {
id: 'job-wf',
status: 'completed',
create_time: Date.now(),
priority: 0,
outputs: {}
}
const mockWorkflow = { version: 1 }
vi.mocked(fetchJobDetail).mockResolvedValue(mockDetail)
vi.mocked(extractWorkflow).mockResolvedValue(mockWorkflow as any)
const result = await getJobWorkflow('job-wf')
expect(result).toEqual(mockWorkflow)
expect(extractWorkflow).toHaveBeenCalledWith(mockDetail)
})
it('returns undefined when job detail not found', async () => {
const { getJobWorkflow } = await import('@/services/jobOutputCache')
const { fetchJobDetail, extractWorkflow } =
await import('@/platform/remote/comfyui/jobs/fetchJobs')
vi.mocked(fetchJobDetail).mockResolvedValue(undefined)
vi.mocked(extractWorkflow).mockResolvedValue(undefined)
const result = await getJobWorkflow('missing')
expect(result).toBeUndefined()
})
})
})

View File

@@ -0,0 +1,103 @@
/**
* @fileoverview Job output cache for caching and managing job data
* @module services/jobOutputCache
*
* Centralizes job output and detail caching with LRU eviction.
* Provides helpers for working with previewable outputs and workflows.
*/
import QuickLRU from '@alloc/quick-lru'
import type { JobDetail } from '@/platform/remote/comfyui/jobs/jobTypes'
import { extractWorkflow } from '@/platform/remote/comfyui/jobs/fetchJobs'
import type { ComfyWorkflowJSON } from '@/platform/workflow/validation/schemas/workflowSchema'
import { api } from '@/scripts/api'
import { ResultItemImpl } from '@/stores/queueStore'
import type { TaskItemImpl } from '@/stores/queueStore'
const MAX_TASK_CACHE_SIZE = 50
const MAX_JOB_DETAIL_CACHE_SIZE = 50
const taskCache = new QuickLRU<string, TaskItemImpl>({
maxSize: MAX_TASK_CACHE_SIZE
})
const jobDetailCache = new QuickLRU<string, JobDetail>({
maxSize: MAX_JOB_DETAIL_CACHE_SIZE
})
// Track latest request to dedupe stale responses
let latestTaskRequestId: string | null = null
// ===== Task Output Caching =====
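/**
 * Resolves the index of the output whose url matches the given url.
 * Falls back to index 0 when the url is undefined or not found.
 */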
export function findActiveIndex(
items: readonly ResultItemImpl[],
url?: string
): number {
return ResultItemImpl.findByUrl(items, url)
}
/**
* Gets previewable outputs for a task, with lazy loading, caching, and request deduping.
* Returns null if a newer request superseded this one while loading.
*/
export async function getOutputsForTask(
task: TaskItemImpl
): Promise<ResultItemImpl[] | null> {
const requestId = String(task.promptId)
latestTaskRequestId = requestId
const outputsCount = task.outputsCount ?? 0
const needsLazyLoad = outputsCount > 1
if (!needsLazyLoad) {
return [...task.previewableOutputs]
}
const cached = taskCache.get(requestId)
if (cached) {
return [...cached.previewableOutputs]
}
try {
const loadedTask = await task.loadFullOutputs()
// Check if request was superseded while loading
if (latestTaskRequestId !== requestId) {
return null
}
taskCache.set(requestId, loadedTask)
return [...loadedTask.previewableOutputs]
} catch (error) {
console.warn('Failed to load full outputs, using preview:', error)
return [...task.previewableOutputs]
}
}
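// Note: latestTaskRequestId is module-level state, so only the most recent call "wins".
// If a call for task A is still awaiting loadFullOutputs when a call for task B starts,
// the call for A resolves to null and its result should be discarded by the caller.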
// ===== Job Detail Caching =====
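/**
 * Fetches a job's detail record and caches successful results by job id.
 * Returns undefined (after logging a warning) when the fetch fails.
 */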
export async function getJobDetail(
jobId: string
): Promise<JobDetail | undefined> {
const cached = jobDetailCache.get(jobId)
if (cached) return cached
try {
const detail = await api.getJobDetail(jobId)
if (detail) {
jobDetailCache.set(jobId, detail)
}
return detail
} catch (error) {
console.warn('Failed to fetch job detail:', error)
return undefined
}
}
export async function getJobWorkflow(
jobId: string
): Promise<ComfyWorkflowJSON | undefined> {
const detail = await getJobDetail(jobId)
return await extractWorkflow(detail)
}
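
A minimal usage sketch for the helpers above; the calling component and its rendering steps are assumptions, not part of this module:

```typescript
import { getJobWorkflow, getOutputsForTask } from '@/services/jobOutputCache'
import type { TaskItemImpl } from '@/stores/queueStore'

// Resolve outputs for the task the user selected. A null result means a newer
// selection superseded this request while it was loading, so it is ignored.
async function showTaskOutputs(task: TaskItemImpl) {
  const outputs = await getOutputsForTask(task)
  if (outputs === null) return
  // ...render `outputs` in a gallery...
}

// Load the workflow recorded for a finished job, if the backend stored one.
async function openJobWorkflow(jobId: string) {
  const workflow = await getJobWorkflow(jobId)
  if (!workflow) return
  // ...hand `workflow` to app.loadGraphData(...) or equivalent...
}
```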

View File

@@ -3,12 +3,7 @@ import { beforeEach, describe, expect, it, vi } from 'vitest'
import { useAssetsStore } from '@/stores/assetsStore'
import { api } from '@/scripts/api'
import type {
HistoryTaskItem,
TaskPrompt,
TaskStatus,
TaskOutput
} from '@/schemas/apiSchema'
import type { JobListItem } from '@/platform/remote/comfyui/jobs/jobTypes'
// Mock the api module
vi.mock('@/scripts/api', () => ({
@@ -53,26 +48,25 @@ vi.mock('@/stores/queueStore', () => ({
url: string
}
| undefined
public promptId: string
constructor(
public taskType: string,
public prompt: TaskPrompt,
public status: TaskStatus | undefined,
public outputs: TaskOutput
) {
this.flatOutputs = this.outputs
? [
{
supportsPreview: true,
filename: 'test.png',
subfolder: '',
type: 'output',
url: 'http://test.com/test.png'
}
]
: []
constructor(public job: JobListItem) {
this.promptId = job.id
this.flatOutputs = [
{
supportsPreview: true,
filename: 'test.png',
subfolder: '',
type: 'output',
url: 'http://test.com/test.png'
}
]
this.previewOutput = this.flatOutputs[0]
}
get previewableOutputs() {
return this.flatOutputs.filter((o) => o.supportsPreview)
}
}
}))
@@ -82,17 +76,17 @@ vi.mock('@/platform/assets/composables/media/assetMappers', () => ({
id: `${type}-${index}`,
name,
size: 0,
created_at: new Date(Date.now() - index * 1000).toISOString(), // Unique timestamps
created_at: new Date(Date.now() - index * 1000).toISOString(),
tags: [type],
preview_url: `http://test.com/${name}`
})),
mapTaskOutputToAssetItem: vi.fn((task, output) => {
const index = parseInt(task.prompt[1].split('_')[1]) || 0
const index = parseInt(task.promptId.split('_')[1]) || 0
return {
id: task.prompt[1], // Use promptId as asset ID
id: task.promptId,
name: output.filename,
size: 0,
created_at: new Date(Date.now() - index * 1000).toISOString(), // Unique timestamps
created_at: new Date(Date.now() - index * 1000).toISOString(),
tags: ['output'],
preview_url: output.url,
user_metadata: {}
@@ -103,43 +97,20 @@ vi.mock('@/platform/assets/composables/media/assetMappers', () => ({
describe('assetsStore - Refactored (Option A)', () => {
let store: ReturnType<typeof useAssetsStore>
// Helper function to create mock history items
const createMockHistoryItem = (index: number): HistoryTaskItem => ({
taskType: 'History' as const,
prompt: [
1000 + index, // queueIndex
`prompt_${index}`, // promptId
{}, // promptInputs
{
extra_pnginfo: {
workflow: {
last_node_id: 1,
last_link_id: 1,
nodes: [],
links: [],
groups: [],
config: {},
version: 1
}
}
}, // extraData
[] // outputsToExecute
],
status: {
status_str: 'success' as const,
completed: true,
messages: []
},
outputs: {
'1': {
images: [
{
filename: `output_${index}.png`,
subfolder: '',
type: 'output' as const
}
]
}
// Helper function to create mock job items
const createMockJobItem = (index: number): JobListItem => ({
id: `prompt_${index}`,
status: 'completed',
create_time: 1000 + index,
update_time: 1000 + index,
last_state_update: 1000 + index,
priority: 1000 + index,
preview_output: {
filename: `output_${index}.png`,
subfolder: '',
type: 'output',
nodeId: 'node_1',
mediaType: 'images'
}
})
@@ -152,11 +123,9 @@ describe('assetsStore - Refactored (Option A)', () => {
describe('Initial Load', () => {
it('should load initial history items', async () => {
const mockHistory = Array.from({ length: 10 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValue({
History: mockHistory
})
vi.mocked(api.getHistory).mockResolvedValue(mockHistory)
await store.updateHistory()
@@ -169,11 +138,9 @@ describe('assetsStore - Refactored (Option A)', () => {
it('should set hasMoreHistory to true when batch is full', async () => {
const mockHistory = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValue({
History: mockHistory
})
vi.mocked(api.getHistory).mockResolvedValue(mockHistory)
await store.updateHistory()
@@ -197,11 +164,9 @@ describe('assetsStore - Refactored (Option A)', () => {
it('should accumulate items when loading more', async () => {
// First batch - full BATCH_SIZE
const firstBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: firstBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(firstBatch)
await store.updateHistory()
expect(store.historyAssets).toHaveLength(200)
@@ -209,11 +174,9 @@ describe('assetsStore - Refactored (Option A)', () => {
// Second batch - different items
const secondBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(200 + i)
createMockJobItem(200 + i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: secondBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(secondBatch)
await store.loadMoreHistory()
@@ -225,24 +188,20 @@ describe('assetsStore - Refactored (Option A)', () => {
it('should prevent duplicate items during pagination', async () => {
// First batch - full BATCH_SIZE
const firstBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: firstBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(firstBatch)
await store.updateHistory()
expect(store.historyAssets).toHaveLength(200)
// Second batch with some duplicates
const secondBatch = [
createMockHistoryItem(2), // Duplicate
createMockHistoryItem(5), // Duplicate
...Array.from({ length: 198 }, (_, i) => createMockHistoryItem(200 + i)) // New
createMockJobItem(2), // Duplicate
createMockJobItem(5), // Duplicate
...Array.from({ length: 198 }, (_, i) => createMockJobItem(200 + i)) // New
]
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: secondBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(secondBatch)
await store.loadMoreHistory()
@@ -258,11 +217,9 @@ describe('assetsStore - Refactored (Option A)', () => {
it('should stop loading when no more items', async () => {
// First batch - less than BATCH_SIZE
const firstBatch = Array.from({ length: 50 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: firstBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(firstBatch)
await store.updateHistory()
expect(store.hasMoreHistory).toBe(false)
@@ -277,11 +234,9 @@ describe('assetsStore - Refactored (Option A)', () => {
it('should handle race conditions with concurrent loads', async () => {
// Setup initial state with full batch
const initialBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: initialBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(initialBatch)
await store.updateHistory()
expect(store.hasMoreHistory).toBe(true)
@@ -289,12 +244,10 @@ describe('assetsStore - Refactored (Option A)', () => {
vi.mocked(api.getHistory).mockClear()
// Setup slow API response
let resolveLoadMore: (value: { History: HistoryTaskItem[] }) => void
const loadMorePromise = new Promise<{ History: HistoryTaskItem[] }>(
(resolve) => {
resolveLoadMore = resolve
}
)
let resolveLoadMore: (value: JobListItem[]) => void
const loadMorePromise = new Promise<JobListItem[]>((resolve) => {
resolveLoadMore = resolve
})
vi.mocked(api.getHistory).mockReturnValueOnce(loadMorePromise)
// Start first loadMore
@@ -305,9 +258,9 @@ describe('assetsStore - Refactored (Option A)', () => {
// Resolve
const secondBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(200 + i)
createMockJobItem(200 + i)
)
resolveLoadMore!({ History: secondBatch })
resolveLoadMore!(secondBatch)
await Promise.all([firstLoad, secondLoad])
@@ -320,21 +273,17 @@ describe('assetsStore - Refactored (Option A)', () => {
// Initial load
const firstBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: firstBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(firstBatch)
await store.updateHistory()
// Load additional batches
for (let batch = 1; batch < BATCH_COUNT; batch++) {
const items = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(batch * 200 + i)
createMockJobItem(batch * 200 + i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: items
})
vi.mocked(api.getHistory).mockResolvedValueOnce(items)
await store.loadMoreHistory()
}
@@ -347,21 +296,17 @@ describe('assetsStore - Refactored (Option A)', () => {
it('should maintain date sorting after pagination', async () => {
// First batch
const firstBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: firstBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(firstBatch)
await store.updateHistory()
// Second batch
const secondBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(200 + i)
createMockJobItem(200 + i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: secondBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(secondBatch)
await store.loadMoreHistory()
@@ -378,11 +323,9 @@ describe('assetsStore - Refactored (Option A)', () => {
it('should preserve existing data when loadMore fails', async () => {
// First successful load - full batch
const firstBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: firstBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(firstBatch)
await store.updateHistory()
expect(store.historyAssets).toHaveLength(200)
@@ -402,11 +345,9 @@ describe('assetsStore - Refactored (Option A)', () => {
it('should clear error state on successful retry', async () => {
// First load succeeds
const firstBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: firstBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(firstBatch)
await store.updateHistory()
@@ -419,11 +360,9 @@ describe('assetsStore - Refactored (Option A)', () => {
// Third load succeeds
const thirdBatch = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(200 + i)
createMockJobItem(200 + i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: thirdBatch
})
vi.mocked(api.getHistory).mockResolvedValueOnce(thirdBatch)
await store.loadMoreHistory()
@@ -450,11 +389,9 @@ describe('assetsStore - Refactored (Option A)', () => {
for (let batch = 0; batch < batches; batch++) {
const items = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(batch * 200 + i)
createMockJobItem(batch * 200 + i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: items
})
vi.mocked(api.getHistory).mockResolvedValueOnce(items)
if (batch === 0) {
await store.updateHistory()
@@ -476,11 +413,9 @@ describe('assetsStore - Refactored (Option A)', () => {
// Load items beyond limit
for (let batch = 0; batch < 6; batch++) {
const items = Array.from({ length: 200 }, (_, i) =>
createMockHistoryItem(batch * 200 + i)
createMockJobItem(batch * 200 + i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce({
History: items
})
vi.mocked(api.getHistory).mockResolvedValueOnce(items)
if (batch === 0) {
await store.updateHistory()
@@ -503,11 +438,9 @@ describe('assetsStore - Refactored (Option A)', () => {
describe('jobDetailView Support', () => {
it('should include outputCount and allOutputs in user_metadata', async () => {
const mockHistory = Array.from({ length: 5 }, (_, i) =>
createMockHistoryItem(i)
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValue({
History: mockHistory
})
vi.mocked(api.getHistory).mockResolvedValue(mockHistory)
await store.updateHistory()

View File

@@ -9,7 +9,7 @@ import {
import type { AssetItem } from '@/platform/assets/schemas/assetSchema'
import { assetService } from '@/platform/assets/services/assetService'
import { isCloud } from '@/platform/distribution/types'
import type { TaskItem } from '@/schemas/apiSchema'
import type { JobListItem } from '@/platform/remote/comfyui/jobs/jobTypes'
import { api } from '@/scripts/api'
import { TaskItemImpl } from './queueStore'
@@ -48,27 +48,18 @@ async function fetchInputFilesFromCloud(): Promise<AssetItem[]> {
}
/**
* Convert history task items to asset items
* Convert history job items to asset items
*/
function mapHistoryToAssets(historyItems: TaskItem[]): AssetItem[] {
function mapHistoryToAssets(historyItems: JobListItem[]): AssetItem[] {
const assetItems: AssetItem[] = []
for (const item of historyItems) {
// Type guard for HistoryTaskItem which has status and outputs
if (item.taskType !== 'History') {
for (const job of historyItems) {
// Only process completed jobs with preview output
if (job.status !== 'completed' || !job.preview_output) {
continue
}
if (!item.outputs || !item.status || item.status?.status_str === 'error') {
continue
}
const task = new TaskItemImpl(
'History',
item.prompt,
item.status,
item.outputs
)
const task = new TaskItemImpl(job)
if (!task.previewOutput) {
continue
@@ -76,11 +67,10 @@ function mapHistoryToAssets(historyItems: TaskItem[]): AssetItem[] {
const assetItem = mapTaskOutputToAssetItem(task, task.previewOutput)
const supportedOutputs = task.flatOutputs.filter((o) => o.supportsPreview)
assetItem.user_metadata = {
...assetItem.user_metadata,
outputCount: supportedOutputs.length,
allOutputs: supportedOutputs
outputCount: job.outputs_count,
allOutputs: task.previewableOutputs
}
assetItems.push(assetItem)
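
For reference, a minimal sketch of a job that the mapping above accepts; the field values are illustrative:

```typescript
import type { JobListItem } from '@/platform/remote/comfyui/jobs/jobTypes'

// A completed job with a preview image passes the status/preview_output guard,
// becomes a TaskItemImpl, and maps to one AssetItem whose user_metadata carries
// outputCount (from outputs_count) and the previewable outputs.
const exampleJob: JobListItem = {
  id: 'prompt_42',
  status: 'completed',
  create_time: Date.now(),
  priority: Date.now(),
  outputs_count: 2,
  preview_output: {
    filename: 'output_42.png',
    subfolder: '',
    type: 'output',
    nodeId: 'node_1',
    mediaType: 'images'
  }
}
```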
@@ -143,8 +133,8 @@ export const useAssetsStore = defineStore('assets', () => {
offset: historyOffset.value
})
// Convert TaskItems to AssetItems
const newAssets = mapHistoryToAssets(history.History)
// Convert JobListItems to AssetItems
const newAssets = mapHistoryToAssets(history)
if (loadMore) {
// Filter out duplicates and insert in sorted order
@@ -176,7 +166,7 @@ export const useAssetsStore = defineStore('assets', () => {
// Update pagination state
historyOffset.value += BATCH_SIZE
hasMoreHistory.value = history.History.length === BATCH_SIZE
hasMoreHistory.value = history.length === BATCH_SIZE
if (allHistoryItems.value.length > MAX_HISTORY_ITEMS) {
const removed = allHistoryItems.value.slice(MAX_HISTORY_ITEMS)

View File

@@ -396,10 +396,8 @@ export const useExecutionStore = defineStore('execution', () => {
error: e.detail.exception_message
})
}
const pid = e.detail?.prompt_id
// Clear initialization for errored prompt if present
if (e.detail?.prompt_id) clearInitializationByPromptId(e.detail.prompt_id)
resetExecutionState(pid)
clearInitializationByPromptId(e.detail.prompt_id)
resetExecutionState(e.detail.prompt_id)
}
/**

View File

@@ -23,7 +23,9 @@ import { useFirebaseAuth } from 'vuefire'
import { getComfyApiBaseUrl } from '@/config/comfyApi'
import { t } from '@/i18n'
import { WORKSPACE_STORAGE_KEYS } from '@/platform/auth/workspace/workspaceConstants'
import { isCloud } from '@/platform/distribution/types'
import { remoteConfig } from '@/platform/remoteConfig/remoteConfig'
import { useTelemetry } from '@/platform/telemetry'
import { useDialogService } from '@/services/dialogService'
import { useApiKeyAuthStore } from '@/stores/apiKeyAuthStore'
@@ -107,6 +109,15 @@ export const useFirebaseAuthStore = defineStore('firebaseAuth', () => {
isInitialized.value = true
if (user === null) {
lastTokenUserId.value = null
// Clear workspace sessionStorage on logout to prevent stale tokens
try {
sessionStorage.removeItem(WORKSPACE_STORAGE_KEYS.CURRENT_WORKSPACE)
sessionStorage.removeItem(WORKSPACE_STORAGE_KEYS.TOKEN)
sessionStorage.removeItem(WORKSPACE_STORAGE_KEYS.EXPIRES_AT)
} catch {
// Ignore sessionStorage errors (e.g., in private browsing mode)
}
}
// Reset balance when auth state changes
@@ -152,16 +163,34 @@ export const useFirebaseAuthStore = defineStore('firebaseAuth', () => {
/**
* Retrieves the appropriate authentication header for API requests.
* Checks for authentication in the following order:
* 1. Firebase authentication token (if user is logged in)
* 2. API key (if stored in the browser's credential manager)
* 1. Workspace token (if team_workspaces_enabled and user has active workspace context)
* 2. Firebase authentication token (if user is logged in)
* 3. API key (if stored in the browser's credential manager)
*
* @returns {Promise<AuthHeader | null>}
* - A LoggedInAuthHeader with Bearer token if Firebase authenticated
* - A LoggedInAuthHeader with Bearer token (workspace or Firebase)
* - An ApiKeyAuthHeader with X-API-KEY if API key exists
* - null if neither authentication method is available
* - null if no authentication method is available
*/
const getAuthHeader = async (): Promise<AuthHeader | null> => {
// If available, set the header with the JWT used to identify the user to the Firebase service
if (remoteConfig.value.team_workspaces_enabled) {
const workspaceToken = sessionStorage.getItem(
WORKSPACE_STORAGE_KEYS.TOKEN
)
const expiresAt = sessionStorage.getItem(
WORKSPACE_STORAGE_KEYS.EXPIRES_AT
)
if (workspaceToken && expiresAt) {
const expiryTime = parseInt(expiresAt, 10)
if (Date.now() < expiryTime) {
return {
Authorization: `Bearer ${workspaceToken}`
}
}
}
}
const token = await getIdToken()
if (token) {
return {
@@ -169,7 +198,6 @@ export const useFirebaseAuthStore = defineStore('firebaseAuth', () => {
}
}
// If not authenticated with Firebase, try falling back to API key if available
return useApiKeyAuthStore().getAuthHeader()
}
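
A short sketch of how a caller might consume this resolution order; the store import path and the assumption that `getAuthHeader` is exposed on the store's public interface are illustrative:

```typescript
import { useFirebaseAuthStore } from '@/stores/firebaseAuthStore'

// Attaches whichever header resolves first:
// workspace token -> Firebase ID token -> stored API key -> no auth header.
async function fetchWithAuth(url: string): Promise<Response> {
  const authHeader = await useFirebaseAuthStore().getAuthHeader()
  return fetch(url, { headers: { ...(authHeader ?? {}) } })
}
```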

View File

@@ -1,14 +1,14 @@
import { createPinia, setActivePinia } from 'pinia'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import type { ComfyApp } from '@/scripts/app'
import type {
JobDetail,
JobListItem
} from '@/platform/remote/comfyui/jobs/jobTypes'
import type { ComfyWorkflowJSON } from '@/platform/workflow/validation/schemas/workflowSchema'
import type { ComfyApp } from '@/scripts/app'
import { TaskItemImpl } from '@/stores/queueStore'
import * as getWorkflowModule from '@/platform/workflow/cloud'
vi.mock('@/platform/distribution/types', () => ({
isCloud: true
}))
import * as jobOutputCache from '@/services/jobOutputCache'
vi.mock('@/services/extensionService', () => ({
useExtensionService: vi.fn(() => ({
@@ -17,8 +17,6 @@ vi.mock('@/services/extensionService', () => ({
}))
const mockWorkflow: ComfyWorkflowJSON = {
id: 'test-workflow-id',
revision: 0,
last_node_id: 5,
last_link_id: 3,
nodes: [],
@@ -29,53 +27,46 @@ const mockWorkflow: ComfyWorkflowJSON = {
version: 0.4
}
const createHistoryTaskWithWorkflow = (): TaskItemImpl => {
return new TaskItemImpl(
'History',
[
0, // queueIndex
'test-prompt-id', // promptId
{}, // promptInputs
{
client_id: 'test-client',
extra_pnginfo: {
workflow: mockWorkflow
}
},
[] // outputsToExecute
],
{
status_str: 'success',
completed: true,
messages: []
},
{} // outputs
)
// Mock job detail response (matches actual /jobs/{id} API response structure)
// workflow is nested at: workflow.extra_data.extra_pnginfo.workflow
const mockJobDetail = {
id: 'test-prompt-id',
status: 'completed' as const,
create_time: Date.now(),
update_time: Date.now(),
workflow: {
extra_data: {
extra_pnginfo: {
workflow: mockWorkflow
}
}
},
outputs: {
'1': { images: [{ filename: 'test.png', subfolder: '', type: 'output' }] }
}
}
const createHistoryTaskWithoutWorkflow = (): TaskItemImpl => {
return new TaskItemImpl(
'History',
[
0,
'test-prompt-id',
{},
{
client_id: 'test-client'
// No extra_pnginfo.workflow
},
[]
],
{
status_str: 'success',
completed: true,
messages: []
},
{}
)
function createHistoryJob(id: string): JobListItem {
const now = Date.now()
return {
id,
status: 'completed',
create_time: now,
priority: now
}
}
describe('TaskItemImpl.loadWorkflow - cloud history workflow fetching', () => {
function createRunningJob(id: string): JobListItem {
const now = Date.now()
return {
id,
status: 'in_progress',
create_time: now,
priority: now
}
}
describe('TaskItemImpl.loadWorkflow - workflow fetching', () => {
let mockApp: ComfyApp
let mockFetchApi: ReturnType<typeof vi.fn>
@@ -91,85 +82,57 @@ describe('TaskItemImpl.loadWorkflow - cloud history workflow fetching', () => {
fetchApi: mockFetchApi
}
} as unknown as ComfyApp
vi.spyOn(getWorkflowModule, 'getWorkflowFromHistory')
})
it('should load workflow directly when workflow is in extra_pnginfo', async () => {
const task = createHistoryTaskWithWorkflow()
it('should fetch workflow from API for history tasks', async () => {
const job = createHistoryJob('test-prompt-id')
const task = new TaskItemImpl(job)
await task.loadWorkflow(mockApp)
expect(mockApp.loadGraphData).toHaveBeenCalledWith(mockWorkflow)
expect(mockFetchApi).not.toHaveBeenCalled()
})
it('should fetch workflow from cloud when workflow is missing from history task', async () => {
const task = createHistoryTaskWithoutWorkflow()
// Mock getWorkflowFromHistory to return workflow
vi.spyOn(getWorkflowModule, 'getWorkflowFromHistory').mockResolvedValue(
mockWorkflow
vi.spyOn(jobOutputCache, 'getJobDetail').mockResolvedValue(
mockJobDetail as JobDetail
)
await task.loadWorkflow(mockApp)
expect(getWorkflowModule.getWorkflowFromHistory).toHaveBeenCalledWith(
expect.any(Function),
'test-prompt-id'
)
expect(jobOutputCache.getJobDetail).toHaveBeenCalledWith('test-prompt-id')
expect(mockApp.loadGraphData).toHaveBeenCalledWith(mockWorkflow)
})
it('should not load workflow when fetch returns undefined', async () => {
const task = createHistoryTaskWithoutWorkflow()
const job = createHistoryJob('test-prompt-id')
const task = new TaskItemImpl(job)
vi.spyOn(getWorkflowModule, 'getWorkflowFromHistory').mockResolvedValue(
undefined
)
vi.spyOn(jobOutputCache, 'getJobDetail').mockResolvedValue(undefined)
await task.loadWorkflow(mockApp)
expect(getWorkflowModule.getWorkflowFromHistory).toHaveBeenCalled()
expect(jobOutputCache.getJobDetail).toHaveBeenCalled()
expect(mockApp.loadGraphData).not.toHaveBeenCalled()
})
it('should only fetch for history tasks, not running tasks', async () => {
const runningTask = new TaskItemImpl(
'Running',
[
0,
'test-prompt-id',
{},
{
client_id: 'test-client'
},
[]
],
undefined,
{}
)
const job = createRunningJob('test-prompt-id')
const runningTask = new TaskItemImpl(job)
vi.spyOn(getWorkflowModule, 'getWorkflowFromHistory').mockResolvedValue(
mockWorkflow
vi.spyOn(jobOutputCache, 'getJobDetail').mockResolvedValue(
mockJobDetail as JobDetail
)
await runningTask.loadWorkflow(mockApp)
expect(getWorkflowModule.getWorkflowFromHistory).not.toHaveBeenCalled()
expect(jobOutputCache.getJobDetail).not.toHaveBeenCalled()
expect(mockApp.loadGraphData).not.toHaveBeenCalled()
})
it('should handle fetch errors gracefully by returning undefined', async () => {
const task = createHistoryTaskWithoutWorkflow()
const job = createHistoryJob('test-prompt-id')
const task = new TaskItemImpl(job)
vi.spyOn(getWorkflowModule, 'getWorkflowFromHistory').mockResolvedValue(
undefined
)
vi.spyOn(jobOutputCache, 'getJobDetail').mockResolvedValue(undefined)
await task.loadWorkflow(mockApp)
expect(getWorkflowModule.getWorkflowFromHistory).toHaveBeenCalled()
expect(jobOutputCache.getJobDetail).toHaveBeenCalled()
expect(mockApp.loadGraphData).not.toHaveBeenCalled()
})
})

View File

@@ -1,34 +1,39 @@
import { createPinia, setActivePinia } from 'pinia'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import type {
HistoryTaskItem,
PendingTaskItem,
RunningTaskItem,
TaskOutput,
TaskPrompt,
TaskStatus
} from '@/schemas/apiSchema'
import type { JobListItem } from '@/platform/remote/comfyui/jobs/jobTypes'
import type { TaskOutput } from '@/schemas/apiSchema'
import { api } from '@/scripts/api'
import { TaskItemImpl, useQueueStore } from '@/stores/queueStore'
// Fixture factories
const createTaskPrompt = (
queueIndex: number,
promptId: string,
inputs: Record<string, any> = {},
extraData: Record<string, any> = {},
outputsToExecute: any[] = []
): TaskPrompt => [queueIndex, promptId, inputs, extraData, outputsToExecute]
// Fixture factory for JobListItem
function createJob(
id: string,
status: JobListItem['status'],
createTime: number = Date.now(),
priority?: number
): JobListItem {
return {
id,
status,
create_time: createTime,
update_time: createTime,
last_state_update: createTime,
priority: priority ?? createTime
}
}
const createTaskStatus = (
statusStr: 'success' | 'error' = 'success',
messages: any[] = []
): TaskStatus => ({
status_str: statusStr,
completed: true,
messages
})
function createRunningJob(createTime: number, id: string): JobListItem {
return createJob(id, 'in_progress', createTime)
}
function createPendingJob(createTime: number, id: string): JobListItem {
return createJob(id, 'pending', createTime)
}
function createHistoryJob(createTime: number, id: string): JobListItem {
return createJob(id, 'completed', createTime)
}
const createTaskOutput = (
nodeId: string = 'node-1',
@@ -39,35 +44,6 @@ const createTaskOutput = (
}
})
const createRunningTask = (
queueIndex: number,
promptId: string
): RunningTaskItem => ({
taskType: 'Running',
prompt: createTaskPrompt(queueIndex, promptId),
remove: { name: 'Cancel', cb: () => {} }
})
const createPendingTask = (
queueIndex: number,
promptId: string
): PendingTaskItem => ({
taskType: 'Pending',
prompt: createTaskPrompt(queueIndex, promptId)
})
const createHistoryTask = (
queueIndex: number,
promptId: string,
outputs: TaskOutput = createTaskOutput(),
status: TaskStatus = createTaskStatus()
): HistoryTaskItem => ({
taskType: 'History',
prompt: createTaskPrompt(queueIndex, promptId),
status,
outputs
})
// Mock API
vi.mock('@/scripts/api', () => ({
api: {
@@ -83,17 +59,13 @@ vi.mock('@/scripts/api', () => ({
describe('TaskItemImpl', () => {
it('should remove animated property from outputs during construction', () => {
const taskItem = new TaskItemImpl(
'History',
[0, 'prompt-id', {}, { client_id: 'client-id' }, []],
{ status_str: 'success', messages: [], completed: true },
{
'node-1': {
images: [{ filename: 'test.png', type: 'output', subfolder: '' }],
animated: [false]
}
const job = createHistoryJob(0, 'prompt-id')
const taskItem = new TaskItemImpl(job, {
'node-1': {
images: [{ filename: 'test.png', type: 'output', subfolder: '' }],
animated: [false]
}
)
})
// Check that animated property was removed
expect('animated' in taskItem.outputs['node-1']).toBe(false)
@@ -103,90 +75,72 @@ describe('TaskItemImpl', () => {
})
it('should handle outputs without animated property', () => {
const taskItem = new TaskItemImpl(
'History',
[0, 'prompt-id', {}, { client_id: 'client-id' }, []],
{ status_str: 'success', messages: [], completed: true },
{
'node-1': {
images: [{ filename: 'test.png', type: 'output', subfolder: '' }]
}
const job = createHistoryJob(0, 'prompt-id')
const taskItem = new TaskItemImpl(job, {
'node-1': {
images: [{ filename: 'test.png', type: 'output', subfolder: '' }]
}
)
})
expect(taskItem.outputs['node-1'].images).toBeDefined()
expect(taskItem.outputs['node-1'].images?.[0]?.filename).toBe('test.png')
})
it('should recognize webm video from core', () => {
const taskItem = new TaskItemImpl(
'History',
[0, 'prompt-id', {}, { client_id: 'client-id' }, []],
{ status_str: 'success', messages: [], completed: true },
{
'node-1': {
video: [{ filename: 'test.webm', type: 'output', subfolder: '' }]
}
const job = createHistoryJob(0, 'prompt-id')
const taskItem = new TaskItemImpl(job, {
'node-1': {
video: [{ filename: 'test.webm', type: 'output', subfolder: '' }]
}
)
})
const output = taskItem.flatOutputs[0]
expect(output.htmlVideoType).toBe('video/webm')
expect(output.isVideo).toBe(true)
expect(output.isWebm).toBe(true)
expect(output.isVhsFormat).toBe(false)
expect(output.isImage).toBe(false)
})
// https://github.com/Kosinkadink/ComfyUI-VideoHelperSuite/blob/0a75c7958fe320efcb052f1d9f8451fd20c730a8/videohelpersuite/nodes.py#L578-L590
it('should recognize webm video from VHS', () => {
const taskItem = new TaskItemImpl(
'History',
[0, 'prompt-id', {}, { client_id: 'client-id' }, []],
{ status_str: 'success', messages: [], completed: true },
{
'node-1': {
gifs: [
{
filename: 'test.webm',
type: 'output',
subfolder: '',
format: 'video/webm',
frame_rate: 30
}
]
}
const job = createHistoryJob(0, 'prompt-id')
const taskItem = new TaskItemImpl(job, {
'node-1': {
gifs: [
{
filename: 'test.webm',
type: 'output',
subfolder: '',
format: 'video/webm',
frame_rate: 30
}
]
}
)
})
const output = taskItem.flatOutputs[0]
expect(output.htmlVideoType).toBe('video/webm')
expect(output.isVideo).toBe(true)
expect(output.isWebm).toBe(true)
expect(output.isVhsFormat).toBe(true)
expect(output.isImage).toBe(false)
})
it('should recognize mp4 video from core', () => {
const taskItem = new TaskItemImpl(
'History',
[0, 'prompt-id', {}, { client_id: 'client-id' }, []],
{ status_str: 'success', messages: [], completed: true },
{
'node-1': {
images: [
{
filename: 'test.mp4',
type: 'output',
subfolder: ''
}
],
animated: [true]
}
const job = createHistoryJob(0, 'prompt-id')
const taskItem = new TaskItemImpl(job, {
'node-1': {
images: [
{
filename: 'test.mp4',
type: 'output',
subfolder: ''
}
],
animated: [true]
}
)
})
const output = taskItem.flatOutputs[0]
@@ -205,22 +159,18 @@ describe('TaskItemImpl', () => {
audioFormats.forEach(({ extension, mimeType }) => {
it(`should recognize ${extension} audio`, () => {
const taskItem = new TaskItemImpl(
'History',
[0, 'prompt-id', {}, { client_id: 'client-id' }, []],
{ status_str: 'success', messages: [], completed: true },
{
'node-1': {
audio: [
{
filename: `test.${extension}`,
type: 'output',
subfolder: ''
}
]
}
const job = createHistoryJob(0, 'prompt-id')
const taskItem = new TaskItemImpl(job, {
'node-1': {
audio: [
{
filename: `test.${extension}`,
type: 'output',
subfolder: ''
}
]
}
)
})
const output = taskItem.flatOutputs[0]
@@ -232,6 +182,58 @@ describe('TaskItemImpl', () => {
})
})
})
describe('error extraction getters', () => {
it('errorMessage returns undefined when no execution_error', () => {
const job = createHistoryJob(0, 'prompt-id')
const taskItem = new TaskItemImpl(job)
expect(taskItem.errorMessage).toBeUndefined()
})
it('errorMessage returns the exception_message from execution_error', () => {
const job: JobListItem = {
...createHistoryJob(0, 'prompt-id'),
status: 'failed',
execution_error: {
node_id: 'node-1',
node_type: 'KSampler',
exception_message: 'GPU out of memory',
exception_type: 'RuntimeError',
traceback: ['line 1', 'line 2'],
current_inputs: {},
current_outputs: {}
}
}
const taskItem = new TaskItemImpl(job)
expect(taskItem.errorMessage).toBe('GPU out of memory')
})
it('executionError returns undefined when no execution_error', () => {
const job = createHistoryJob(0, 'prompt-id')
const taskItem = new TaskItemImpl(job)
expect(taskItem.executionError).toBeUndefined()
})
it('executionError returns the full error object from execution_error', () => {
const errorDetail = {
node_id: 'node-1',
node_type: 'KSampler',
executed: ['node-0'],
exception_message: 'Invalid dimensions',
exception_type: 'ValueError',
traceback: ['traceback line'],
current_inputs: { input1: 'value' },
current_outputs: {}
}
const job: JobListItem = {
...createHistoryJob(0, 'prompt-id'),
status: 'failed',
execution_error: errorDetail
}
const taskItem = new TaskItemImpl(job)
expect(taskItem.executionError).toEqual(errorDetail)
})
})
})
describe('useQueueStore', () => {
@@ -267,15 +269,16 @@ describe('useQueueStore', () => {
describe('update() - basic functionality', () => {
it('should load running and pending tasks from API', async () => {
const runningTask = createRunningTask(1, 'run-1')
const pendingTask1 = createPendingTask(2, 'pend-1')
const pendingTask2 = createPendingTask(3, 'pend-2')
const runningJob = createRunningJob(1, 'run-1')
const pendingJob1 = createPendingJob(2, 'pend-1')
const pendingJob2 = createPendingJob(3, 'pend-2')
// API returns pre-sorted data (newest first)
mockGetQueue.mockResolvedValue({
Running: [runningTask],
Pending: [pendingTask1, pendingTask2]
Running: [runningJob],
Pending: [pendingJob2, pendingJob1] // Pre-sorted by create_time desc
})
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.update()
@@ -287,13 +290,11 @@ describe('useQueueStore', () => {
})
it('should load history tasks from API', async () => {
const historyTask1 = createHistoryTask(5, 'hist-1')
const historyTask2 = createHistoryTask(4, 'hist-2')
const historyJob1 = createHistoryJob(5, 'hist-1')
const historyJob2 = createHistoryJob(4, 'hist-2')
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({
History: [historyTask1, historyTask2]
})
mockGetHistory.mockResolvedValue([historyJob1, historyJob2])
await store.update()
@@ -304,7 +305,7 @@ describe('useQueueStore', () => {
it('should set loading state correctly', async () => {
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
expect(store.isLoading).toBe(false)
@@ -317,7 +318,7 @@ describe('useQueueStore', () => {
it('should clear loading state even if API fails', async () => {
mockGetQueue.mockRejectedValue(new Error('API error'))
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await expect(store.update()).rejects.toThrow('API error')
expect(store.isLoading).toBe(false)
@@ -326,14 +327,12 @@ describe('useQueueStore', () => {
describe('update() - sorting', () => {
it('should sort tasks by queueIndex descending', async () => {
const task1 = createHistoryTask(1, 'hist-1')
const task2 = createHistoryTask(5, 'hist-2')
const task3 = createHistoryTask(3, 'hist-3')
const job1 = createHistoryJob(1, 'hist-1')
const job2 = createHistoryJob(5, 'hist-2')
const job3 = createHistoryJob(3, 'hist-3')
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({
History: [task1, task2, task3]
})
mockGetHistory.mockResolvedValue([job1, job2, job3])
await store.update()
@@ -342,16 +341,17 @@ describe('useQueueStore', () => {
expect(store.historyTasks[2].queueIndex).toBe(1)
})
it('should sort pending tasks by queueIndex descending', async () => {
const pend1 = createPendingTask(10, 'pend-1')
const pend2 = createPendingTask(15, 'pend-2')
const pend3 = createPendingTask(12, 'pend-3')
it('should preserve API sort order for pending tasks', async () => {
const pend1 = createPendingJob(10, 'pend-1')
const pend2 = createPendingJob(15, 'pend-2')
const pend3 = createPendingJob(12, 'pend-3')
// API returns pre-sorted data (newest first)
mockGetQueue.mockResolvedValue({
Running: [],
Pending: [pend1, pend2, pend3]
Pending: [pend2, pend3, pend1] // Pre-sorted by create_time desc
})
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.update()
@@ -363,19 +363,17 @@ describe('useQueueStore', () => {
describe('update() - queue index collision (THE BUG FIX)', () => {
it('should NOT confuse different prompts with same queueIndex', async () => {
const hist1 = createHistoryTask(50, 'prompt-uuid-aaa')
const hist1 = createHistoryJob(50, 'prompt-uuid-aaa')
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [hist1] })
mockGetHistory.mockResolvedValue([hist1])
await store.update()
expect(store.historyTasks).toHaveLength(1)
expect(store.historyTasks[0].promptId).toBe('prompt-uuid-aaa')
const hist2 = createHistoryTask(51, 'prompt-uuid-bbb')
mockGetHistory.mockResolvedValue({
History: [hist2]
})
const hist2 = createHistoryJob(51, 'prompt-uuid-bbb')
mockGetHistory.mockResolvedValue([hist2])
await store.update()
@@ -385,19 +383,17 @@ describe('useQueueStore', () => {
})
it('should correctly reconcile when queueIndex is reused', async () => {
const hist1 = createHistoryTask(100, 'first-prompt-at-100')
const hist2 = createHistoryTask(99, 'prompt-at-99')
const hist1 = createHistoryJob(100, 'first-prompt-at-100')
const hist2 = createHistoryJob(99, 'prompt-at-99')
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [hist1, hist2] })
mockGetHistory.mockResolvedValue([hist1, hist2])
await store.update()
expect(store.historyTasks).toHaveLength(2)
const hist3 = createHistoryTask(101, 'second-prompt-at-101')
mockGetHistory.mockResolvedValue({
History: [hist3, hist2]
})
const hist3 = createHistoryJob(101, 'second-prompt-at-101')
mockGetHistory.mockResolvedValue([hist3, hist2])
await store.update()
@@ -409,23 +405,19 @@ describe('useQueueStore', () => {
})
it('should handle multiple queueIndex collisions simultaneously', async () => {
const hist1 = createHistoryTask(10, 'old-at-10')
const hist2 = createHistoryTask(20, 'old-at-20')
const hist3 = createHistoryTask(30, 'keep-at-30')
const hist1 = createHistoryJob(10, 'old-at-10')
const hist2 = createHistoryJob(20, 'old-at-20')
const hist3 = createHistoryJob(30, 'keep-at-30')
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({
History: [hist3, hist2, hist1]
})
mockGetHistory.mockResolvedValue([hist3, hist2, hist1])
await store.update()
expect(store.historyTasks).toHaveLength(3)
const newHist1 = createHistoryTask(31, 'new-at-31')
const newHist2 = createHistoryTask(32, 'new-at-32')
mockGetHistory.mockResolvedValue({
History: [newHist2, newHist1, hist3]
})
const newHist1 = createHistoryJob(31, 'new-at-31')
const newHist2 = createHistoryJob(32, 'new-at-32')
mockGetHistory.mockResolvedValue([newHist2, newHist1, hist3])
await store.update()
@@ -437,19 +429,17 @@ describe('useQueueStore', () => {
describe('update() - history reconciliation', () => {
it('should keep existing items still on server (by promptId)', async () => {
const hist1 = createHistoryTask(10, 'existing-1')
const hist2 = createHistoryTask(9, 'existing-2')
const hist1 = createHistoryJob(10, 'existing-1')
const hist2 = createHistoryJob(9, 'existing-2')
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [hist1, hist2] })
mockGetHistory.mockResolvedValue([hist1, hist2])
await store.update()
expect(store.historyTasks).toHaveLength(2)
const hist3 = createHistoryTask(11, 'new-1')
mockGetHistory.mockResolvedValue({
History: [hist3, hist1, hist2]
})
const hist3 = createHistoryJob(11, 'new-1')
mockGetHistory.mockResolvedValue([hist3, hist1, hist2])
await store.update()
@@ -460,16 +450,16 @@ describe('useQueueStore', () => {
})
it('should remove items no longer on server', async () => {
const hist1 = createHistoryTask(10, 'remove-me')
const hist2 = createHistoryTask(9, 'keep-me')
const hist1 = createHistoryJob(10, 'remove-me')
const hist2 = createHistoryJob(9, 'keep-me')
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [hist1, hist2] })
mockGetHistory.mockResolvedValue([hist1, hist2])
await store.update()
expect(store.historyTasks).toHaveLength(2)
mockGetHistory.mockResolvedValue({ History: [hist2] })
mockGetHistory.mockResolvedValue([hist2])
await store.update()
@@ -478,18 +468,16 @@ describe('useQueueStore', () => {
})
it('should add new items from server', async () => {
const hist1 = createHistoryTask(5, 'old-1')
const hist1 = createHistoryJob(5, 'old-1')
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [hist1] })
mockGetHistory.mockResolvedValue([hist1])
await store.update()
const hist2 = createHistoryTask(6, 'new-1')
const hist3 = createHistoryTask(7, 'new-2')
mockGetHistory.mockResolvedValue({
History: [hist3, hist2, hist1]
})
const hist2 = createHistoryJob(6, 'new-1')
const hist3 = createHistoryJob(7, 'new-2')
mockGetHistory.mockResolvedValue([hist3, hist2, hist1])
await store.update()
@@ -497,18 +485,69 @@ describe('useQueueStore', () => {
expect(store.historyTasks.map((t) => t.promptId)).toContain('new-1')
expect(store.historyTasks.map((t) => t.promptId)).toContain('new-2')
})
it('should recreate TaskItemImpl when outputs_count changes', async () => {
// Initial load without outputs_count
const jobWithoutOutputsCount = createHistoryJob(10, 'job-1')
delete (jobWithoutOutputsCount as any).outputs_count
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue([jobWithoutOutputsCount])
await store.update()
expect(store.historyTasks).toHaveLength(1)
const initialTask = store.historyTasks[0]
expect(initialTask.outputsCount).toBeUndefined()
// Second load with outputs_count now populated
const jobWithOutputsCount = {
...createHistoryJob(10, 'job-1'),
outputs_count: 2
}
mockGetHistory.mockResolvedValue([jobWithOutputsCount])
await store.update()
// Should have recreated the TaskItemImpl with new outputs_count
expect(store.historyTasks).toHaveLength(1)
const updatedTask = store.historyTasks[0]
expect(updatedTask.outputsCount).toBe(2)
// Should be a different instance
expect(updatedTask).not.toBe(initialTask)
})
it('should reuse TaskItemImpl when outputs_count unchanged', async () => {
const job = {
...createHistoryJob(10, 'job-1'),
outputs_count: 2
}
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue([job])
await store.update()
const initialTask = store.historyTasks[0]
// Same job with same outputs_count
mockGetHistory.mockResolvedValue([{ ...job }])
await store.update()
// Should reuse the same instance
expect(store.historyTasks[0]).toBe(initialTask)
})
})
describe('update() - maxHistoryItems limit', () => {
it('should enforce maxHistoryItems limit', async () => {
store.maxHistoryItems = 3
const tasks = Array.from({ length: 5 }, (_, i) =>
createHistoryTask(10 - i, `hist-${i}`)
const jobs = Array.from({ length: 5 }, (_, i) =>
createHistoryJob(10 - i, `hist-${i}`)
)
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: tasks })
mockGetHistory.mockResolvedValue(jobs)
await store.update()
@@ -522,21 +561,19 @@ describe('useQueueStore', () => {
store.maxHistoryItems = 5
const initial = Array.from({ length: 3 }, (_, i) =>
createHistoryTask(10 + i, `existing-${i}`)
createHistoryJob(10 + i, `existing-${i}`)
)
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: initial })
mockGetHistory.mockResolvedValue(initial)
await store.update()
expect(store.historyTasks).toHaveLength(3)
const newTasks = Array.from({ length: 4 }, (_, i) =>
createHistoryTask(20 + i, `new-${i}`)
const newJobs = Array.from({ length: 4 }, (_, i) =>
createHistoryJob(20 + i, `new-${i}`)
)
mockGetHistory.mockResolvedValue({
History: [...newTasks, ...initial]
})
mockGetHistory.mockResolvedValue([...newJobs, ...initial])
await store.update()
@@ -547,10 +584,10 @@ describe('useQueueStore', () => {
it('should handle maxHistoryItems = 0', async () => {
store.maxHistoryItems = 0
const tasks = [createHistoryTask(10, 'hist-1')]
const jobs = [createHistoryJob(10, 'hist-1')]
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: tasks })
mockGetHistory.mockResolvedValue(jobs)
await store.update()
@@ -560,13 +597,13 @@ describe('useQueueStore', () => {
it('should handle maxHistoryItems = 1', async () => {
store.maxHistoryItems = 1
const tasks = [
createHistoryTask(10, 'hist-1'),
createHistoryTask(9, 'hist-2')
const jobs = [
createHistoryJob(10, 'hist-1'),
createHistoryJob(9, 'hist-2')
]
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: tasks })
mockGetHistory.mockResolvedValue(jobs)
await store.update()
@@ -577,18 +614,18 @@ describe('useQueueStore', () => {
it('should dynamically adjust when maxHistoryItems changes', async () => {
store.maxHistoryItems = 10
const tasks = Array.from({ length: 15 }, (_, i) =>
createHistoryTask(20 - i, `hist-${i}`)
const jobs = Array.from({ length: 15 }, (_, i) =>
createHistoryJob(20 - i, `hist-${i}`)
)
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: tasks })
mockGetHistory.mockResolvedValue(jobs)
await store.update()
expect(store.historyTasks).toHaveLength(10)
store.maxHistoryItems = 5
mockGetHistory.mockResolvedValue({ History: tasks })
mockGetHistory.mockResolvedValue(jobs)
await store.update()
expect(store.historyTasks).toHaveLength(5)
@@ -597,19 +634,17 @@ describe('useQueueStore', () => {
describe('computed properties', () => {
it('tasks should combine pending, running, and history in correct order', async () => {
const running = createRunningTask(5, 'run-1')
const pending1 = createPendingTask(6, 'pend-1')
const pending2 = createPendingTask(7, 'pend-2')
const hist1 = createHistoryTask(3, 'hist-1')
const hist2 = createHistoryTask(4, 'hist-2')
const running = createRunningJob(5, 'run-1')
const pending1 = createPendingJob(6, 'pend-1')
const pending2 = createPendingJob(7, 'pend-2')
const hist1 = createHistoryJob(3, 'hist-1')
const hist2 = createHistoryJob(4, 'hist-2')
mockGetQueue.mockResolvedValue({
Running: [running],
Pending: [pending1, pending2]
})
mockGetHistory.mockResolvedValue({
History: [hist2, hist1]
})
mockGetHistory.mockResolvedValue([hist2, hist1])
await store.update()
@@ -624,9 +659,9 @@ describe('useQueueStore', () => {
it('hasPendingTasks should be true when pending tasks exist', async () => {
mockGetQueue.mockResolvedValue({
Running: [],
Pending: [createPendingTask(1, 'pend-1')]
Pending: [createPendingJob(1, 'pend-1')]
})
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.update()
expect(store.hasPendingTasks).toBe(true)
@@ -634,21 +669,19 @@ describe('useQueueStore', () => {
it('hasPendingTasks should be false when no pending tasks', async () => {
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.update()
expect(store.hasPendingTasks).toBe(false)
})
it('lastHistoryQueueIndex should return highest queue index', async () => {
const hist1 = createHistoryTask(10, 'hist-1')
const hist2 = createHistoryTask(25, 'hist-2')
const hist3 = createHistoryTask(15, 'hist-3')
const hist1 = createHistoryJob(10, 'hist-1')
const hist2 = createHistoryJob(25, 'hist-2')
const hist3 = createHistoryJob(15, 'hist-3')
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({
History: [hist1, hist2, hist3]
})
mockGetHistory.mockResolvedValue([hist1, hist2, hist3])
await store.update()
expect(store.lastHistoryQueueIndex).toBe(25)
@@ -656,7 +689,7 @@ describe('useQueueStore', () => {
it('lastHistoryQueueIndex should be -1 when no history', async () => {
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.update()
expect(store.lastHistoryQueueIndex).toBe(-1)
@@ -666,19 +699,17 @@ describe('useQueueStore', () => {
describe('clear()', () => {
beforeEach(async () => {
mockGetQueue.mockResolvedValue({
Running: [createRunningTask(1, 'run-1')],
Pending: [createPendingTask(2, 'pend-1')]
})
mockGetHistory.mockResolvedValue({
History: [createHistoryTask(3, 'hist-1')]
Running: [createRunningJob(1, 'run-1')],
Pending: [createPendingJob(2, 'pend-1')]
})
mockGetHistory.mockResolvedValue([createHistoryJob(3, 'hist-1')])
await store.update()
})
it('should clear both queue and history by default', async () => {
mockClearItems.mockResolvedValue(undefined)
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.clear()
@@ -693,9 +724,7 @@ describe('useQueueStore', () => {
it('should clear only queue when specified', async () => {
mockClearItems.mockResolvedValue(undefined)
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({
History: [createHistoryTask(3, 'hist-1')]
})
mockGetHistory.mockResolvedValue([createHistoryJob(3, 'hist-1')])
await store.clear(['queue'])
@@ -707,10 +736,10 @@ describe('useQueueStore', () => {
it('should clear only history when specified', async () => {
mockClearItems.mockResolvedValue(undefined)
mockGetQueue.mockResolvedValue({
Running: [createRunningTask(1, 'run-1')],
Pending: [createPendingTask(2, 'pend-1')]
Running: [createRunningJob(1, 'run-1')],
Pending: [createPendingJob(2, 'pend-1')]
})
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.clear(['history'])
@@ -729,11 +758,12 @@ describe('useQueueStore', () => {
describe('delete()', () => {
it('should delete task from queue', async () => {
const task = new TaskItemImpl('Pending', createTaskPrompt(1, 'pend-1'))
const job = createPendingJob(1, 'pend-1')
const task = new TaskItemImpl(job)
mockDeleteItem.mockResolvedValue(undefined)
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.delete(task)
@@ -741,16 +771,12 @@ describe('useQueueStore', () => {
})
it('should delete task from history', async () => {
const task = new TaskItemImpl(
'History',
createTaskPrompt(1, 'hist-1'),
createTaskStatus(),
createTaskOutput()
)
const job = createHistoryJob(1, 'hist-1')
const task = new TaskItemImpl(job, createTaskOutput())
mockDeleteItem.mockResolvedValue(undefined)
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.delete(task)
@@ -758,11 +784,12 @@ describe('useQueueStore', () => {
})
it('should refresh store after deletion', async () => {
const task = new TaskItemImpl('Pending', createTaskPrompt(1, 'pend-1'))
const job = createPendingJob(1, 'pend-1')
const task = new TaskItemImpl(job)
mockDeleteItem.mockResolvedValue(undefined)
mockGetQueue.mockResolvedValue({ Running: [], Pending: [] })
mockGetHistory.mockResolvedValue({ History: [] })
mockGetHistory.mockResolvedValue([])
await store.delete(task)

View File

@@ -2,34 +2,27 @@ import _ from 'es-toolkit/compat'
import { defineStore } from 'pinia'
import { computed, ref, shallowRef, toRaw, toValue } from 'vue'
import { isCloud } from '@/platform/distribution/types'
import { reconcileHistory } from '@/platform/remote/comfyui/history/reconciliation'
import { useSettingStore } from '@/platform/settings/settingStore'
import { getWorkflowFromHistory } from '@/platform/workflow/cloud'
import { extractWorkflow } from '@/platform/remote/comfyui/jobs/fetchJobs'
import type {
ComfyWorkflowJSON,
NodeId
} from '@/platform/workflow/validation/schemas/workflowSchema'
APITaskType,
JobListItem,
TaskType
} from '@/platform/remote/comfyui/jobs/jobTypes'
import type { NodeId } from '@/platform/workflow/validation/schemas/workflowSchema'
import type {
HistoryTaskItem,
ResultItem,
StatusWsMessageStatus,
TaskItem,
TaskOutput,
TaskPrompt,
TaskStatus,
TaskType
TaskOutput
} from '@/schemas/apiSchema'
import { api } from '@/scripts/api'
import type { ComfyApp } from '@/scripts/app'
import { useExtensionService } from '@/services/extensionService'
import { getJobDetail } from '@/services/jobOutputCache'
import { useNodeOutputStore } from '@/stores/imagePreviewStore'
import { useExecutionStore } from '@/stores/executionStore'
import { useSettingStore } from '@/platform/settings/settingStore'
import { getMediaTypeFromFilename } from '@/utils/formatUtil'
// Task type used in the API.
type APITaskType = 'queue' | 'history'
enum TaskItemDisplayStatus {
Running = 'Running',
Pending = 'Pending',
@@ -212,32 +205,44 @@ export class ResultItemImpl {
get supportsPreview(): boolean {
return this.isImage || this.isVideo || this.isAudio || this.is3D
}
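// Shared helpers (also used by jobOutputCache): filterPreviewable narrows a list to
// outputs that can be rendered inline, and findByUrl resolves the index of the output
// matching a url, falling back to 0 when the url is missing or not found.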
static filterPreviewable(
outputs: readonly ResultItemImpl[]
): ResultItemImpl[] {
return outputs.filter((o) => o.supportsPreview)
}
static findByUrl(items: readonly ResultItemImpl[], url?: string): number {
if (!url) return 0
const idx = items.findIndex((o) => o.url === url)
return idx >= 0 ? idx : 0
}
}
export class TaskItemImpl {
readonly taskType: TaskType
readonly prompt: TaskPrompt
readonly status?: TaskStatus
readonly job: JobListItem
readonly outputs: TaskOutput
readonly flatOutputs: ReadonlyArray<ResultItemImpl>
constructor(
taskType: TaskType,
prompt: TaskPrompt,
status?: TaskStatus,
job: JobListItem,
outputs?: TaskOutput,
flatOutputs?: ReadonlyArray<ResultItemImpl>
) {
this.taskType = taskType
this.prompt = prompt
this.status = status
this.job = job
// If no outputs provided but job has preview_output, create synthetic outputs
// using the real nodeId and mediaType from the backend response
const effectiveOutputs =
outputs ??
(job.preview_output
? {
[job.preview_output.nodeId]: {
[job.preview_output.mediaType]: [job.preview_output]
}
}
: {})
// Remove animated outputs from the outputs object
// outputs.animated is an array of boolean values indicating whether the
// corresponding images in the result are animated.
// The queueStore does not use this information.
// It is part of the legacy API response. We should redesign the backend API.
// https://github.com/Comfy-Org/ComfyUI_frontend/issues/2739
this.outputs = _.mapValues(outputs ?? {}, (nodeOutputs) =>
this.outputs = _.mapValues(effectiveOutputs, (nodeOutputs) =>
_.omit(nodeOutputs, 'animated')
)
this.flatOutputs = flatOutputs ?? this.calculateFlatOutputs()
@@ -261,15 +266,31 @@ export class TaskItemImpl {
)
}
/** All outputs that support preview (images, videos, audio, 3D) */
get previewableOutputs(): readonly ResultItemImpl[] {
return ResultItemImpl.filterPreviewable(this.flatOutputs)
}
get previewOutput(): ResultItemImpl | undefined {
const previewable = this.previewableOutputs
// Prefer saved media files over the temp previews
return (
this.flatOutputs.find(
// Prefer saved media files over the temp previews
(output) => output.type === 'output' && output.supportsPreview
) ?? this.flatOutputs.find((output) => output.supportsPreview)
previewable.find((output) => output.type === 'output') ?? previewable[0]
)
}
// Derive taskType from job status
get taskType(): TaskType {
switch (this.job.status) {
case 'in_progress':
return 'Running'
case 'pending':
return 'Pending'
default:
return 'History'
}
}
get apiTaskType(): APITaskType {
switch (this.taskType) {
case 'Running':
@@ -285,61 +306,42 @@ export class TaskItemImpl {
}
get queueIndex() {
return this.prompt[0]
return this.job.priority
}
get promptId() {
return this.prompt[1]
return this.job.id
}
get promptInputs() {
return this.prompt[2]
get outputsCount(): number | undefined {
return this.job.outputs_count ?? undefined
}
get extraData() {
return this.prompt[3]
get status() {
return this.job.status
}
get outputsToExecute() {
return this.prompt[4]
get errorMessage(): string | undefined {
return this.job.execution_error?.exception_message ?? undefined
}
get extraPngInfo() {
return this.extraData.extra_pnginfo
get executionError() {
return this.job.execution_error ?? undefined
}
get clientId() {
return this.extraData.client_id
get workflowId(): string | undefined {
return this.job.workflow_id ?? undefined
}
get workflow(): ComfyWorkflowJSON | undefined {
return this.extraPngInfo?.workflow
get createTime(): number {
return this.job.create_time
}
get messages() {
return this.status?.messages || []
}
/**
* Server-provided creation time in milliseconds, when available.
*
* Sources:
* - Queue: 5th tuple element may be a metadata object with { create_time }.
* - History (Cloud V2): Adapter injects create_time into prompt[3].extra_data.
*/
get createTime(): number | undefined {
const extra = (this.extraData as any) || {}
const fromExtra =
typeof extra.create_time === 'number' ? extra.create_time : undefined
if (typeof fromExtra === 'number') return fromExtra
return undefined
}
get interrupted() {
return _.some(
this.messages,
(message) => message[0] === 'execution_interrupted'
get interrupted(): boolean {
return (
this.job.status === 'failed' &&
this.job.execution_error?.exception_type ===
'InterruptProcessingException'
)
}
@@ -352,42 +354,26 @@ export class TaskItemImpl {
}
get displayStatus(): TaskItemDisplayStatus {
switch (this.taskType) {
case 'Running':
switch (this.job.status) {
case 'in_progress':
return TaskItemDisplayStatus.Running
case 'Pending':
case 'pending':
return TaskItemDisplayStatus.Pending
case 'History':
if (this.interrupted) return TaskItemDisplayStatus.Cancelled
switch (this.status!.status_str) {
case 'success':
return TaskItemDisplayStatus.Completed
case 'error':
return TaskItemDisplayStatus.Failed
}
case 'completed':
return TaskItemDisplayStatus.Completed
case 'failed':
return TaskItemDisplayStatus.Failed
case 'cancelled':
return TaskItemDisplayStatus.Cancelled
}
}
get executionStartTimestamp() {
const message = this.messages.find(
(message) => message[0] === 'execution_start'
)
return message ? message[1].timestamp : undefined
return this.job.execution_start_time ?? undefined
}
get executionEndTimestamp() {
const messages = this.messages.filter((message) =>
[
'execution_success',
'execution_interrupted',
'execution_error'
].includes(message[0])
)
if (!messages.length) {
return undefined
}
return _.max(messages.map((message) => message[1].timestamp))
return this.job.execution_end_time ?? undefined
}
get executionTime() {
@@ -403,28 +389,48 @@ export class TaskItemImpl {
: undefined
}
public async loadWorkflow(app: ComfyApp) {
let workflowData = this.workflow
/**
* Loads full outputs for tasks that only have preview data
* Returns a new TaskItemImpl with full outputs and execution status
*/
public async loadFullOutputs(): Promise<TaskItemImpl> {
// Only load for history tasks (caller checks outputsCount > 1)
if (!this.isHistory) {
return this
}
const jobDetail = await getJobDetail(this.promptId)
if (isCloud && !workflowData && this.isHistory) {
workflowData = await getWorkflowFromHistory(
(url) => app.api.fetchApi(url),
this.promptId
)
if (!jobDetail?.outputs) {
return this
}
// Create new TaskItemImpl with full outputs
return new TaskItemImpl(this.job, jobDetail.outputs)
}
public async loadWorkflow(app: ComfyApp) {
if (!this.isHistory) {
return
}
// Single fetch for both workflow and outputs (with caching)
const jobDetail = await getJobDetail(this.promptId)
const workflowData = await extractWorkflow(jobDetail)
if (!workflowData) {
return
}
await app.loadGraphData(toRaw(workflowData))
if (!this.outputs) {
// Use full outputs from job detail, or fall back to existing outputs
const outputsToLoad = jobDetail?.outputs ?? this.outputs
if (!outputsToLoad) {
return
}
const nodeOutputsStore = useNodeOutputStore()
const rawOutputs = toRaw(this.outputs)
const rawOutputs = toRaw(outputsToLoad)
for (const nodeExecutionId in rawOutputs) {
nodeOutputsStore.setNodeOutputsByExecutionId(
nodeExecutionId,
@@ -445,15 +451,10 @@ export class TaskItemImpl {
return this.flatOutputs.map(
(output: ResultItemImpl, i: number) =>
new TaskItemImpl(
this.taskType,
[
this.queueIndex,
`${this.promptId}-${i}`,
this.promptInputs,
this.extraData,
this.outputsToExecute
],
this.status,
{
...this.job,
id: `${this.promptId}-${i}`
},
{
[output.nodeId]: {
[output.mediaType]: [output]
@@ -463,32 +464,8 @@ export class TaskItemImpl {
)
)
}
public toTaskItem(): TaskItem {
const item: HistoryTaskItem = {
taskType: 'History',
prompt: this.prompt,
status: this.status!,
outputs: this.outputs
}
return item
}
}
const sortNewestFirst = (a: TaskItemImpl, b: TaskItemImpl) =>
b.queueIndex - a.queueIndex
const toTaskItemImpls = (tasks: TaskItem[]): TaskItemImpl[] =>
tasks.map(
(task) =>
new TaskItemImpl(
task.taskType,
task.prompt,
'status' in task ? task.status : undefined,
'outputs' in task ? task.outputs : undefined
)
)
export const useQueueStore = defineStore('queue', () => {
// Use shallowRef because TaskItemImpl instances are immutable and arrays are
// replaced entirely (not mutated), so deep reactivity would only add overhead
@@ -525,8 +502,9 @@ export const useQueueStore = defineStore('queue', () => {
api.getHistory(maxHistoryItems.value)
])
runningTasks.value = toTaskItemImpls(queue.Running).sort(sortNewestFirst)
pendingTasks.value = toTaskItemImpls(queue.Pending).sort(sortNewestFirst)
// API returns pre-sorted data (sort_by=create_time&order=desc)
runningTasks.value = queue.Running.map((job) => new TaskItemImpl(job))
pendingTasks.value = queue.Pending.map((job) => new TaskItemImpl(job))
const currentHistory = toValue(historyTasks)
@@ -534,7 +512,7 @@ export const useQueueStore = defineStore('queue', () => {
const executionStore = useExecutionStore()
appearedTasks.forEach((task) => {
const promptIdString = String(task.promptId)
const workflowId = task.workflow?.id
const workflowId = task.workflowId
if (workflowId && promptIdString) {
executionStore.registerPromptWorkflowIdMapping(
promptIdString,
@@ -543,22 +521,26 @@ export const useQueueStore = defineStore('queue', () => {
}
})
const items = reconcileHistory(
history.History,
currentHistory.map((impl) => impl.toTaskItem()),
toValue(maxHistoryItems),
toValue(lastHistoryQueueIndex)
)
// Sort by create_time descending and limit to maxItems
const sortedHistory = [...history]
.sort((a, b) => b.create_time - a.create_time)
.slice(0, toValue(maxHistoryItems))
// Reuse existing TaskItemImpl instances or create new ones
// Must recreate if outputs_count changed (e.g., API started returning it)
const existingByPromptId = new Map(
currentHistory.map((impl) => [impl.promptId, impl])
)
historyTasks.value = items.map(
(item) =>
existingByPromptId.get(item.prompt[1]) ?? toTaskItemImpls([item])[0]
)
historyTasks.value = sortedHistory.map((job) => {
const existing = existingByPromptId.get(job.id)
if (!existing) return new TaskItemImpl(job)
// Recreate if outputs_count changed to ensure lazy loading works
if (existing.outputsCount !== (job.outputs_count ?? undefined)) {
return new TaskItemImpl(job)
}
return existing
})
} finally {
isLoading.value = false
}
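For orientation, a job that only carries `preview_output` ends up with a synthetic outputs map keyed by the backend-provided node id and media type. A minimal sketch of that mapping with placeholder values (the file fields on `preview_output` are omitted here):

```typescript
// Illustrative values only; not taken from a real API response.
const job = {
  id: 'hist-1',
  status: 'completed',
  preview_output: { nodeId: '9', mediaType: 'images' /* plus the file fields */ }
}

// The TaskItemImpl constructor above derives an outputs map equivalent to:
const syntheticOutputs = {
  [job.preview_output.nodeId]: {
    [job.preview_output.mediaType]: [job.preview_output]
  }
}
```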


@@ -0,0 +1,373 @@
import { defineStore } from 'pinia'
import { computed, ref, shallowRef } from 'vue'
import { z } from 'zod'
import { fromZodError } from 'zod-validation-error'
import { t } from '@/i18n'
import {
TOKEN_REFRESH_BUFFER_MS,
WORKSPACE_STORAGE_KEYS
} from '@/platform/auth/workspace/workspaceConstants'
import { remoteConfig } from '@/platform/remoteConfig/remoteConfig'
import { api } from '@/scripts/api'
import { useFirebaseAuthStore } from '@/stores/firebaseAuthStore'
import type { AuthHeader } from '@/types/authTypes'
import type { WorkspaceWithRole } from '@/platform/auth/workspace/workspaceTypes'
const WorkspaceWithRoleSchema = z.object({
id: z.string(),
name: z.string(),
type: z.enum(['personal', 'team']),
role: z.enum(['owner', 'member'])
})
const WorkspaceTokenResponseSchema = z.object({
token: z.string(),
expires_at: z.string(),
workspace: z.object({
id: z.string(),
name: z.string(),
type: z.enum(['personal', 'team'])
}),
role: z.enum(['owner', 'member']),
permissions: z.array(z.string())
})
export class WorkspaceAuthError extends Error {
constructor(
message: string,
public readonly code?: string
) {
super(message)
this.name = 'WorkspaceAuthError'
}
}
export const useWorkspaceAuthStore = defineStore('workspaceAuth', () => {
// State
const currentWorkspace = shallowRef<WorkspaceWithRole | null>(null)
const workspaceToken = ref<string | null>(null)
const isLoading = ref(false)
const error = ref<Error | null>(null)
// Timer state
let refreshTimerId: ReturnType<typeof setTimeout> | null = null
// Request ID to prevent stale refresh operations from overwriting newer workspace contexts
let refreshRequestId = 0
// Getters
const isAuthenticated = computed(
() => currentWorkspace.value !== null && workspaceToken.value !== null
)
// Private helpers
function stopRefreshTimer(): void {
if (refreshTimerId !== null) {
clearTimeout(refreshTimerId)
refreshTimerId = null
}
}
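// Schedule a refresh TOKEN_REFRESH_BUFFER_MS before expiry; if that moment has already passed, refresh immediately.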
function scheduleTokenRefresh(expiresAt: number): void {
stopRefreshTimer()
const now = Date.now()
const refreshAt = expiresAt - TOKEN_REFRESH_BUFFER_MS
const delay = Math.max(0, refreshAt - now)
refreshTimerId = setTimeout(() => {
void refreshToken()
}, delay)
}
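// sessionStorage keeps the persisted context scoped to the current tab and drops it when the tab closes.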
function persistToSession(
workspace: WorkspaceWithRole,
token: string,
expiresAt: number
): void {
try {
sessionStorage.setItem(
WORKSPACE_STORAGE_KEYS.CURRENT_WORKSPACE,
JSON.stringify(workspace)
)
sessionStorage.setItem(WORKSPACE_STORAGE_KEYS.TOKEN, token)
sessionStorage.setItem(
WORKSPACE_STORAGE_KEYS.EXPIRES_AT,
expiresAt.toString()
)
} catch {
console.warn('Failed to persist workspace context to sessionStorage')
}
}
function clearSessionStorage(): void {
try {
sessionStorage.removeItem(WORKSPACE_STORAGE_KEYS.CURRENT_WORKSPACE)
sessionStorage.removeItem(WORKSPACE_STORAGE_KEYS.TOKEN)
sessionStorage.removeItem(WORKSPACE_STORAGE_KEYS.EXPIRES_AT)
} catch {
console.warn('Failed to clear workspace context from sessionStorage')
}
}
// Actions
function init(): void {
initializeFromSession()
}
function destroy(): void {
stopRefreshTimer()
}
function initializeFromSession(): boolean {
if (!remoteConfig.value.team_workspaces_enabled) {
return false
}
try {
const workspaceJson = sessionStorage.getItem(
WORKSPACE_STORAGE_KEYS.CURRENT_WORKSPACE
)
const token = sessionStorage.getItem(WORKSPACE_STORAGE_KEYS.TOKEN)
const expiresAtStr = sessionStorage.getItem(
WORKSPACE_STORAGE_KEYS.EXPIRES_AT
)
if (!workspaceJson || !token || !expiresAtStr) {
return false
}
const expiresAt = parseInt(expiresAtStr, 10)
if (isNaN(expiresAt) || expiresAt <= Date.now()) {
clearSessionStorage()
return false
}
const parsedWorkspace = JSON.parse(workspaceJson)
const parseResult = WorkspaceWithRoleSchema.safeParse(parsedWorkspace)
if (!parseResult.success) {
clearSessionStorage()
return false
}
currentWorkspace.value = parseResult.data
workspaceToken.value = token
error.value = null
scheduleTokenRefresh(expiresAt)
return true
} catch {
clearSessionStorage()
return false
}
}
async function switchWorkspace(workspaceId: string): Promise<void> {
if (!remoteConfig.value.team_workspaces_enabled) {
return
}
// Only increment request ID when switching to a different workspace
// This invalidates stale refresh operations for the old workspace
// but allows refresh operations for the same workspace to complete
if (currentWorkspace.value?.id !== workspaceId) {
refreshRequestId++
}
isLoading.value = true
error.value = null
try {
const firebaseAuthStore = useFirebaseAuthStore()
const firebaseToken = await firebaseAuthStore.getIdToken()
if (!firebaseToken) {
throw new WorkspaceAuthError(
t('workspaceAuth.errors.notAuthenticated'),
'NOT_AUTHENTICATED'
)
}
const response = await fetch(api.apiURL('/auth/token'), {
method: 'POST',
headers: {
Authorization: `Bearer ${firebaseToken}`,
'Content-Type': 'application/json'
},
body: JSON.stringify({ workspace_id: workspaceId })
})
if (!response.ok) {
const errorData = await response.json().catch(() => ({}))
const message = errorData.message || response.statusText
if (response.status === 401) {
throw new WorkspaceAuthError(
t('workspaceAuth.errors.invalidFirebaseToken'),
'INVALID_FIREBASE_TOKEN'
)
}
if (response.status === 403) {
throw new WorkspaceAuthError(
t('workspaceAuth.errors.accessDenied'),
'ACCESS_DENIED'
)
}
if (response.status === 404) {
throw new WorkspaceAuthError(
t('workspaceAuth.errors.workspaceNotFound'),
'WORKSPACE_NOT_FOUND'
)
}
throw new WorkspaceAuthError(
t('workspaceAuth.errors.tokenExchangeFailed', { error: message }),
'TOKEN_EXCHANGE_FAILED'
)
}
const rawData = await response.json()
const parseResult = WorkspaceTokenResponseSchema.safeParse(rawData)
if (!parseResult.success) {
throw new WorkspaceAuthError(
t('workspaceAuth.errors.tokenExchangeFailed', {
error: fromZodError(parseResult.error).message
}),
'TOKEN_EXCHANGE_FAILED'
)
}
const data = parseResult.data
const expiresAt = new Date(data.expires_at).getTime()
if (isNaN(expiresAt)) {
throw new WorkspaceAuthError(
t('workspaceAuth.errors.tokenExchangeFailed', {
error: 'Invalid expiry timestamp'
}),
'TOKEN_EXCHANGE_FAILED'
)
}
const workspaceWithRole: WorkspaceWithRole = {
...data.workspace,
role: data.role
}
currentWorkspace.value = workspaceWithRole
workspaceToken.value = data.token
persistToSession(workspaceWithRole, data.token, expiresAt)
scheduleTokenRefresh(expiresAt)
} catch (err) {
error.value = err instanceof Error ? err : new Error(String(err))
throw error.value
} finally {
isLoading.value = false
}
}
async function refreshToken(): Promise<void> {
if (!currentWorkspace.value) {
return
}
const workspaceId = currentWorkspace.value.id
// Capture the current request ID to detect if workspace context changed during refresh
const capturedRequestId = refreshRequestId
const maxRetries = 3
const baseDelayMs = 1000
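// Transient failures back off exponentially: 1 s, 2 s, then 4 s between the four attempts.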
for (let attempt = 0; attempt <= maxRetries; attempt++) {
// Check if workspace context changed since refresh started (user switched workspaces)
if (capturedRequestId !== refreshRequestId) {
console.warn(
'Aborting stale token refresh: workspace context changed during refresh'
)
return
}
try {
await switchWorkspace(workspaceId)
return
} catch (err) {
const isAuthError = err instanceof WorkspaceAuthError
const isPermanentError =
isAuthError &&
(err.code === 'ACCESS_DENIED' ||
err.code === 'WORKSPACE_NOT_FOUND' ||
err.code === 'INVALID_FIREBASE_TOKEN' ||
err.code === 'NOT_AUTHENTICATED')
if (isPermanentError) {
// Only clear context if this refresh is still for the current workspace
if (capturedRequestId === refreshRequestId) {
console.error('Workspace access revoked or auth invalid:', err)
clearWorkspaceContext()
}
return
}
const isTransientError =
isAuthError && err.code === 'TOKEN_EXCHANGE_FAILED'
if (isTransientError && attempt < maxRetries) {
const delay = baseDelayMs * Math.pow(2, attempt)
console.warn(
`Token refresh failed (attempt ${attempt + 1}/${maxRetries + 1}), retrying in ${delay}ms:`,
err
)
await new Promise((resolve) => setTimeout(resolve, delay))
continue
}
// Only clear context if this refresh is still for the current workspace
if (capturedRequestId === refreshRequestId) {
console.error('Failed to refresh workspace token after retries:', err)
clearWorkspaceContext()
}
}
}
}
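/** Authorization header for workspace-scoped requests, or null when no workspace token is active. */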
function getWorkspaceAuthHeader(): AuthHeader | null {
if (!workspaceToken.value) {
return null
}
return {
Authorization: `Bearer ${workspaceToken.value}`
}
}
function clearWorkspaceContext(): void {
// Increment request ID to invalidate any in-flight stale refresh operations
refreshRequestId++
stopRefreshTimer()
currentWorkspace.value = null
workspaceToken.value = null
error.value = null
clearSessionStorage()
}
return {
// State
currentWorkspace,
workspaceToken,
isLoading,
error,
// Getters
isAuthenticated,
// Actions
init,
destroy,
initializeFromSession,
switchWorkspace,
refreshToken,
getWorkspaceAuthHeader,
clearWorkspaceContext
}
})
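A minimal consumer sketch, assuming the store's import path (the new file's location is not visible in this hunk) and a plain `fetch` call site:

```typescript
import { onMounted, onUnmounted } from 'vue'
// Path assumed for illustration; adjust to wherever the store file actually lives.
import { useWorkspaceAuthStore } from '@/platform/auth/workspace/workspaceAuthStore'

const workspaceAuth = useWorkspaceAuthStore()

onMounted(() => {
  // Restores a still-valid workspace context from sessionStorage, if present.
  workspaceAuth.init()
})

onUnmounted(() => {
  // Stops the scheduled token refresh timer.
  workspaceAuth.destroy()
})

async function callWorkspaceApi(path: string) {
  const authHeader = workspaceAuth.getWorkspaceAuthHeader()
  if (!authHeader) throw new Error('No active workspace')
  return fetch(path, { headers: authHeader })
}
```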


@@ -1,180 +1,218 @@
<template>
<div
class="mx-auto flex h-full flex-col overflow-hidden"
:aria-label="$t('manager.title')"
<BaseModalLayout
v-model:right-panel-open="isRightPanelOpen"
:content-title="$t('manager.discoverCommunityContent')"
class="manager-dialog"
>
<ContentDivider :width="0.3" />
<Button
v-if="isSmallScreen"
variant="secondary"
size="icon"
:class="
cn(
'absolute top-1/2 z-10 -translate-y-1/2',
isSideNavOpen ? 'left-[12rem]' : 'left-2'
)
"
@click="toggleSideNav"
>
<i
:class="isSideNavOpen ? 'pi pi-chevron-left' : 'pi pi-chevron-right'"
/>
</Button>
<div class="relative flex flex-1 overflow-hidden">
<ManagerNavSidebar
v-if="isSideNavOpen"
v-model:selected-tab="selectedTab"
:tabs="tabs"
/>
<template #leftPanel>
<LeftSidePanel v-model="selectedNavId" :nav-items="navItems">
<template #header-icon>
<i class="icon-[lucide--puzzle]" />
</template>
<template #header-title>
<span class="text-neutral text-base">{{ $t('manager.title') }}</span>
</template>
</LeftSidePanel>
</template>
<template #header>
<div class="flex items-center gap-2">
<SingleSelect
v-model="searchMode"
class="min-w-34"
:options="filterOptions"
/>
<AutoCompletePlus
v-model.lazy="searchQuery"
:suggestions="suggestions"
:placeholder="$t('manager.searchPlaceholder')"
:complete-on-focus="false"
:delay="8"
option-label="query"
class="w-full min-w-md max-w-lg"
:pt="{
pcInputText: {
root: {
autofocus: true,
class: 'w-full rounded-lg h-10'
}
},
loader: { style: 'display: none' }
}"
:show-empty-message="false"
@complete="stubTrue"
@option-select="onOptionSelect"
/>
</div>
</template>
<template #contentFilter>
<!-- Conflict Warning Banner -->
<div
class="flex-1 overflow-auto bg-base-background"
:class="{
'transition-all duration-300': isSmallScreen
}"
v-if="shouldShowManagerBanner"
class="relative mx-6 mt-3 mb-4 flex items-center gap-6 rounded-lg bg-yellow-500/20 p-4"
>
<div class="flex h-full flex-col px-6">
<!-- Conflict Warning Banner -->
<div
v-if="shouldShowManagerBanner"
class="relative mt-3 mb-4 flex items-center gap-6 rounded-lg bg-yellow-500/20 p-4"
<i
class="icon-[lucide--triangle-alert] text-lg text-warning-background"
/>
<div class="flex flex-1 flex-col gap-2">
<p class="m-0 text-sm font-bold">
{{ $t('manager.conflicts.warningBanner.title') }}
</p>
<p class="m-0 text-xs">
{{ $t('manager.conflicts.warningBanner.message') }}
</p>
<p
class="m-0 cursor-pointer text-sm font-bold"
@click="onClickWarningLink"
>
<i
class="icon-[lucide--triangle-alert] text-lg text-warning-background"
/>
<div class="flex flex-1 flex-col gap-2">
<p class="m-0 text-sm font-bold">
{{ $t('manager.conflicts.warningBanner.title') }}
</p>
<p class="m-0 text-xs">
{{ $t('manager.conflicts.warningBanner.message') }}
</p>
<p
class="m-0 cursor-pointer text-sm font-bold"
@click="onClickWarningLink"
>
{{ $t('manager.conflicts.warningBanner.button') }}
</p>
</div>
<Button
class="absolute top-0 right-0"
variant="textonly"
size="icon"
@click="dismissWarningBanner"
>
<i class="pi pi-times text-xs text-base-foreground"></i>
</Button>
</div>
<RegistrySearchBar
v-model:search-query="searchQuery"
v-model:search-mode="searchMode"
v-model:sort-field="sortField"
:search-results="searchResults"
:suggestions="suggestions"
:is-missing-tab="isMissingTab"
:sort-options="sortOptions"
:is-update-available-tab="isUpdateAvailableTab"
{{ $t('manager.conflicts.warningBanner.button') }}
</p>
</div>
<Button
class="absolute top-0 right-0"
variant="textonly"
size="icon"
@click="dismissWarningBanner"
>
<i class="pi pi-times text-xs text-base-foreground"></i>
</Button>
</div>
<!-- Filters Row -->
<div class="relative flex flex-wrap justify-between gap-2 px-6 pb-4">
<div>
<PackInstallButton
v-if="isMissingTab && missingNodePacks.length > 0"
:disabled="isMissingLoading || !!missingError"
:node-packs="missingNodePacks"
size="lg"
:label="$t('manager.installAllMissingNodes')"
/>
<div class="flex-1 overflow-auto">
<div
v-if="isLoading"
class="h-full scrollbar-hide w-full overflow-auto"
>
<GridSkeleton :grid-style="GRID_STYLE" :skeleton-card-count />
</div>
<NoResultsPlaceholder
v-else-if="searchResults.length === 0"
:title="
comfyManagerStore.error
? $t('manager.errorConnecting')
: $t('manager.noResultsFound')
"
:message="
comfyManagerStore.error
? $t('manager.tryAgainLater')
: $t('manager.tryDifferentSearch')
"
/>
<div v-else class="h-full" @click="handleGridContainerClick">
<VirtualGrid
id="results-grid"
:items="resultsWithKeys"
:buffer-rows="4"
:grid-style="GRID_STYLE"
@approach-end="onApproachEnd"
>
<template #item="{ item }">
<PackCard
:node-pack="item"
:is-selected="
selectedNodePacks.some((pack) => pack.id === item.id)
"
@click.stop="
(event: MouseEvent) => selectNodePack(item, event)
"
/>
</template>
</VirtualGrid>
</div>
</div>
<PackUpdateButton
v-if="isUpdateAvailableTab && hasUpdateAvailable"
:node-packs="enabledUpdateAvailableNodePacks"
:has-disabled-update-packs="hasDisabledUpdatePacks"
size="lg"
/>
</div>
<!-- Sort Options on right -->
<div>
<SingleSelect
v-model="sortField"
:label="$t('g.sort')"
:options="availableSortOptions"
class="w-48"
>
<template #icon>
<i class="icon-[lucide--arrow-up-down] text-muted-foreground" />
</template>
</SingleSelect>
</div>
</div>
<div class="z-20 flex w-[clamp(250px,33%,306px)] border-l-0">
<ContentDivider orientation="vertical" :width="0.2" />
<div class="isolate flex w-full flex-col">
<InfoPanel
v-if="!hasMultipleSelections && selectedNodePack"
:node-pack="selectedNodePack"
/>
<InfoPanelMultiItem v-else :node-packs="selectedNodePacks" />
</div>
</template>
<template #content>
<div v-if="isLoading" class="scrollbar-hide h-full w-full overflow-auto">
<GridSkeleton :grid-style="GRID_STYLE" :skeleton-card-count />
</div>
</div>
</div>
<NoResultsPlaceholder
v-else-if="searchResults.length === 0"
:title="
comfyManagerStore.error
? $t('manager.errorConnecting')
: $t('manager.noResultsFound')
"
:message="
comfyManagerStore.error
? $t('manager.tryAgainLater')
: $t('manager.tryDifferentSearch')
"
/>
<div v-else class="h-full" @click="handleGridContainerClick">
<VirtualGrid
id="results-grid"
:items="resultsWithKeys"
:buffer-rows="4"
:grid-style="GRID_STYLE"
@approach-end="onApproachEnd"
>
<template #item="{ item }">
<PackCard
:node-pack="item"
:is-selected="
selectedNodePacks.some((pack) => pack.id === item.id)
"
@click.stop="(event: MouseEvent) => selectNodePack(item, event)"
/>
</template>
</VirtualGrid>
</div>
</template>
<template #rightPanel>
<InfoPanel
v-if="!hasMultipleSelections && selectedNodePack"
:node-pack="selectedNodePack"
/>
<InfoPanelMultiItem v-else :node-packs="selectedNodePacks" />
</template>
</BaseModalLayout>
</template>
<script setup lang="ts">
import { whenever } from '@vueuse/core'
import { merge } from 'es-toolkit/compat'
import { merge, stubTrue } from 'es-toolkit/compat'
import type { AutoCompleteOptionSelectEvent } from 'primevue/autocomplete'
import {
computed,
onBeforeUnmount,
onMounted,
onUnmounted,
provide,
ref,
watch,
watchEffect
} from 'vue'
import { useI18n } from 'vue-i18n'
import ContentDivider from '@/components/common/ContentDivider.vue'
import NoResultsPlaceholder from '@/components/common/NoResultsPlaceholder.vue'
import VirtualGrid from '@/components/common/VirtualGrid.vue'
import SingleSelect from '@/components/input/SingleSelect.vue'
import AutoCompletePlus from '@/components/primevueOverride/AutoCompletePlus.vue'
import Button from '@/components/ui/button/Button.vue'
import { useResponsiveCollapse } from '@/composables/element/useResponsiveCollapse'
import BaseModalLayout from '@/components/widget/layout/BaseModalLayout.vue'
import LeftSidePanel from '@/components/widget/panel/LeftSidePanel.vue'
import { useExternalLink } from '@/composables/useExternalLink'
import { useComfyRegistryStore } from '@/stores/comfyRegistryStore'
import type { components } from '@/types/comfyRegistryTypes'
import { cn } from '@/utils/tailwindUtil'
import ManagerNavSidebar from '@/workbench/extensions/manager/components/manager/ManagerNavSidebar.vue'
import type { NavItemData } from '@/types/navTypes'
import { OnCloseKey } from '@/types/widgetTypes'
import PackInstallButton from '@/workbench/extensions/manager/components/manager/button/PackInstallButton.vue'
import PackUpdateButton from '@/workbench/extensions/manager/components/manager/button/PackUpdateButton.vue'
import InfoPanel from '@/workbench/extensions/manager/components/manager/infoPanel/InfoPanel.vue'
import InfoPanelMultiItem from '@/workbench/extensions/manager/components/manager/infoPanel/InfoPanelMultiItem.vue'
import PackCard from '@/workbench/extensions/manager/components/manager/packCard/PackCard.vue'
import RegistrySearchBar from '@/workbench/extensions/manager/components/manager/registrySearchBar/RegistrySearchBar.vue'
import GridSkeleton from '@/workbench/extensions/manager/components/manager/skeleton/GridSkeleton.vue'
import { useInstalledPacks } from '@/workbench/extensions/manager/composables/nodePack/useInstalledPacks'
import { useMissingNodes } from '@/workbench/extensions/manager/composables/nodePack/useMissingNodes'
import { usePackUpdateStatus } from '@/workbench/extensions/manager/composables/nodePack/usePackUpdateStatus'
import { useUpdateAvailableNodes } from '@/workbench/extensions/manager/composables/nodePack/useUpdateAvailableNodes'
import { useWorkflowPacks } from '@/workbench/extensions/manager/composables/nodePack/useWorkflowPacks'
import { useConflictAcknowledgment } from '@/workbench/extensions/manager/composables/useConflictAcknowledgment'
import { useManagerStatePersistence } from '@/workbench/extensions/manager/composables/useManagerStatePersistence'
import { useRegistrySearch } from '@/workbench/extensions/manager/composables/useRegistrySearch'
import { useComfyManagerStore } from '@/workbench/extensions/manager/stores/comfyManagerStore'
import type { TabItem } from '@/workbench/extensions/manager/types/comfyManagerTypes'
import { ManagerTab } from '@/workbench/extensions/manager/types/comfyManagerTypes'
const { initialTab } = defineProps<{
const { initialTab, onClose } = defineProps<{
initialTab?: ManagerTab
onClose: () => void
}>()
provide(OnCloseKey, onClose)
const { t } = useI18n()
const { buildDocsUrl } = useExternalLink()
const comfyManagerStore = useComfyManagerStore()
@@ -186,46 +224,56 @@ const initialState = persistedState.loadStoredState()
const GRID_STYLE = {
display: 'grid',
gridTemplateColumns: 'repeat(auto-fill, minmax(17rem, 1fr))',
padding: '0.5rem',
gap: '1.5rem'
gap: '1.5rem',
padding: '0'
} as const
const {
isSmallScreen,
isOpen: isSideNavOpen,
toggle: toggleSideNav
} = useResponsiveCollapse()
// Use conflict acknowledgment state from composable
const {
shouldShowManagerBanner,
dismissWarningBanner,
dismissRedDotNotification
} = conflictAcknowledgment
const tabs = ref<TabItem[]>([
{ id: ManagerTab.All, label: t('g.all'), icon: 'pi-list' },
{ id: ManagerTab.Installed, label: t('g.installed'), icon: 'pi-box' },
// Missing nodes composable
const {
missingNodePacks,
isLoading: isMissingLoading,
error: missingError
} = useMissingNodes()
// Update available nodes composable
const {
hasUpdateAvailable,
enabledUpdateAvailableNodePacks,
hasDisabledUpdatePacks
} = useUpdateAvailableNodes()
// Navigation items for LeftSidePanel
const navItems = computed<NavItemData[]>(() => [
{ id: ManagerTab.All, label: t('g.all'), icon: 'pi pi-list' },
{ id: ManagerTab.Installed, label: t('g.installed'), icon: 'pi pi-box' },
{
id: ManagerTab.Workflow,
label: t('manager.inWorkflow'),
icon: 'pi-folder'
icon: 'pi pi-folder'
},
{
id: ManagerTab.Missing,
label: t('g.missing'),
icon: 'pi-exclamation-circle'
icon: 'pi pi-exclamation-circle'
},
{
id: ManagerTab.UpdateAvailable,
label: t('g.updateAvailable'),
icon: 'pi-sync'
icon: 'pi pi-sync'
}
])
const initialTabId = initialTab ?? initialState.selectedTabId
const selectedTab = ref<TabItem>(
tabs.value.find((tab) => tab.id === initialTabId) || tabs.value[0]
const initialTabId = initialTab ?? initialState.selectedTabId ?? ManagerTab.All
const selectedNavId = ref<string | null>(initialTabId)
const selectedTab = computed(() =>
navItems.value.find((item) => item.id === selectedNavId.value)
)
const {
@@ -243,6 +291,25 @@ const {
initialSearchQuery: initialState.searchQuery
})
pageNumber.value = 0
// Filter and sort options for SingleSelect
const filterOptions = computed(() => [
{ name: t('manager.filter.nodePack'), value: 'packs' },
{ name: t('g.nodes'), value: 'nodes' }
])
const availableSortOptions = computed(() => {
if (!sortOptions.value) return []
return sortOptions.value.map((field) => ({
name: field.label,
value: field.id
}))
})
const onOptionSelect = (event: AutoCompleteOptionSelectEvent) => {
searchQuery.value = event.value.query
}
const onApproachEnd = () => {
pageNumber.value++
}
@@ -433,6 +500,14 @@ const selectedNodePacks = ref<components['schemas']['Node'][]>([])
const selectedNodePack = computed<components['schemas']['Node'] | null>(() =>
selectedNodePacks.value.length === 1 ? selectedNodePacks.value[0] : null
)
const isRightPanelOpen = ref(false)
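// Open the details panel whenever at least one pack is selected; close it when the selection is cleared.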
watch(
() => selectedNodePacks.value.length,
(length) => {
isRightPanelOpen.value = length > 0
}
)
const getLoadingCount = () => {
switch (selectedTab.value?.id) {
@@ -452,28 +527,26 @@ const getLoadingCount = () => {
const skeletonCardCount = computed(() => {
const loadingCount = getLoadingCount()
if (loadingCount) return loadingCount
return isSmallScreen.value ? 12 : 16
return 16
})
const selectNodePack = (
nodePack: components['schemas']['Node'],
event: MouseEvent
) => {
// Handle multi-select with Shift or Ctrl/Cmd key
if (event.shiftKey || event.ctrlKey || event.metaKey) {
const index = selectedNodePacks.value.findIndex(
(pack) => pack.id === nodePack.id
)
if (index === -1) {
// Add to selection if not already selected
selectedNodePacks.value.push(nodePack)
selectedNodePacks.value = [...selectedNodePacks.value, nodePack]
} else {
// Remove from selection if already selected
selectedNodePacks.value.splice(index, 1)
selectedNodePacks.value = selectedNodePacks.value.filter(
(pack) => pack.id !== nodePack.id
)
}
} else {
// Single select behavior
selectedNodePacks.value = [nodePack]
}
}
@@ -490,32 +563,24 @@ const handleGridContainerClick = (event: MouseEvent) => {
const hasMultipleSelections = computed(() => selectedNodePacks.value.length > 1)
// Track the last pack ID for which we've fetched full registry data
const lastFetchedPackId = ref<string | null>(null)
// Whenever a single pack is selected, fetch its full info once
whenever(selectedNodePack, async () => {
// Cancel any in-flight requests from previously selected node pack
getPackById.cancel()
// If only a single node pack is selected, fetch full node pack info from registry
const pack = selectedNodePack.value
if (!pack?.id) return
if (hasMultipleSelections.value) return
// Only fetch if we haven't already for this pack
if (lastFetchedPackId.value === pack.id) return
const data = await getPackById.call(pack.id)
// If selected node hasn't changed since request, merge registry & Algolia data
if (data?.id === pack.id) {
lastFetchedPackId.value = pack.id
const mergedPack = merge({}, pack, data)
// Update the pack in current selection without changing selection state
const packIndex = selectedNodePacks.value.findIndex(
(p) => p.id === mergedPack.id
)
if (packIndex !== -1) {
selectedNodePacks.value.splice(packIndex, 1, mergedPack)
}
// Replace pack in displayPacks so that children receive a fresh prop reference
const idx = displayPacks.value.findIndex((p) => p.id === mergedPack.id)
if (idx !== -1) {
displayPacks.value.splice(idx, 1, mergedPack)
@@ -527,7 +592,7 @@ let gridContainer: HTMLElement | null = null
onMounted(() => {
gridContainer = document.getElementById('results-grid')
})
watch([searchQuery, selectedTab], () => {
watch([searchQuery, selectedNavId], () => {
gridContainer ??= document.getElementById('results-grid')
if (gridContainer) {
pageNumber.value = 0
@@ -541,7 +606,7 @@ watchEffect(() => {
onBeforeUnmount(() => {
persistedState.persistState({
selectedTabId: selectedTab.value?.id,
selectedTabId: (selectedTab.value?.id as ManagerTab) ?? ManagerTab.All,
searchQuery: searchQuery.value,
searchMode: searchMode.value,
sortField: sortField.value


@@ -1,45 +0,0 @@
import { mount } from '@vue/test-utils'
import { createPinia } from 'pinia'
import PrimeVue from 'primevue/config'
import { describe, expect, it } from 'vitest'
import { createI18n } from 'vue-i18n'
import enMessages from '@/locales/en/main.json' with { type: 'json' }
import ManagerHeader from './ManagerHeader.vue'
const i18n = createI18n({
legacy: false,
locale: 'en',
messages: {
en: enMessages
}
})
describe('ManagerHeader', () => {
const createWrapper = () => {
return mount(ManagerHeader, {
global: {
plugins: [createPinia(), PrimeVue, i18n]
}
})
}
it('renders the component title', () => {
const wrapper = createWrapper()
expect(wrapper.find('h2').text()).toBe(
enMessages.manager.discoverCommunityContent
)
})
it('has proper structure with flex container', () => {
const wrapper = createWrapper()
const flexContainer = wrapper.find('.flex.items-center')
expect(flexContainer.exists()).toBe(true)
const title = flexContainer.find('h2')
expect(title.exists()).toBe(true)
})
})


@@ -1,11 +0,0 @@
<template>
<div class="w-full">
<div class="flex items-center">
<h2 class="text-left text-lg font-normal">
{{ $t('manager.discoverCommunityContent') }}
</h2>
</div>
</div>
</template>
<script setup lang="ts"></script>


@@ -1,42 +0,0 @@
<template>
<aside
class="z-5 flex w-3/12 max-w-[250px] translate-x-0 transition-transform duration-300 ease-in-out"
>
<ScrollPanel class="flex-1">
<Listbox
v-model="selectedTab"
:options="tabs"
option-label="label"
list-style="max-height:unset"
class="w-full border-0 bg-transparent shadow-none"
:pt="{
list: { class: 'p-3 gap-2' },
option: { class: 'px-4 py-2 text-lg rounded-lg' },
optionGroup: { class: 'p-0 text-left text-inherit' }
}"
>
<template #option="slotProps">
<div class="flex items-center text-left">
<i :class="['pi', slotProps.option.icon, 'mr-2 text-sm']" />
<span class="text-sm">{{ slotProps.option.label }}</span>
</div>
</template>
</Listbox>
</ScrollPanel>
<ContentDivider orientation="vertical" :width="0.3" />
</aside>
</template>
<script setup lang="ts">
import Listbox from 'primevue/listbox'
import ScrollPanel from 'primevue/scrollpanel'
import ContentDivider from '@/components/common/ContentDivider.vue'
import type { TabItem } from '@/workbench/extensions/manager/types/comfyManagerTypes'
defineProps<{
tabs: TabItem[]
}>()
const selectedTab = defineModel<TabItem>('selectedTab')
</script>


@@ -3,9 +3,8 @@
v-tooltip.top="
hasDisabledUpdatePacks ? $t('manager.disabledNodesWontUpdate') : null
"
variant="textonly"
class="border"
size="sm"
:size
:disabled="isUpdating"
@click="updateAllPacks"
>
@@ -19,14 +18,20 @@ import { ref } from 'vue'
import DotSpinner from '@/components/common/DotSpinner.vue'
import Button from '@/components/ui/button/Button.vue'
import type { ButtonVariants } from '@/components/ui/button/button.variants'
import type { components } from '@/types/comfyRegistryTypes'
import { useComfyManagerStore } from '@/workbench/extensions/manager/stores/comfyManagerStore'
type NodePack = components['schemas']['Node']
const { nodePacks, hasDisabledUpdatePacks } = defineProps<{
const {
nodePacks,
hasDisabledUpdatePacks,
size = 'sm'
} = defineProps<{
nodePacks: NodePack[]
hasDisabledUpdatePacks?: boolean
size?: ButtonVariants['size']
}>()
const isUpdating = ref<boolean>(false)


@@ -1,130 +0,0 @@
<template>
<div class="relative w-full p-6">
<div class="flex h-12 items-center justify-between gap-1">
<div class="flex w-5/12 items-center">
<AutoComplete
v-model.lazy="searchQuery"
:suggestions="suggestions || []"
:placeholder="$t('manager.searchPlaceholder')"
:complete-on-focus="false"
:delay="8"
option-label="query"
class="w-full"
:pt="{
pcInputText: {
root: {
autofocus: true,
class: 'w-full rounded-2xl'
}
},
loader: {
style: 'display: none'
}
}"
:show-empty-message="false"
@complete="stubTrue"
@option-select="onOptionSelect"
/>
</div>
<PackInstallButton
v-if="isMissingTab && missingNodePacks.length > 0"
:disabled="isLoading || !!error"
:node-packs="missingNodePacks"
:label="$t('manager.installAllMissingNodes')"
/>
<PackUpdateButton
v-if="isUpdateAvailableTab && hasUpdateAvailable"
:node-packs="enabledUpdateAvailableNodePacks"
:has-disabled-update-packs="hasDisabledUpdatePacks"
/>
</div>
<div class="mt-3 flex text-sm">
<div class="ml-1 flex gap-6">
<SearchFilterDropdown
v-model:model-value="searchMode"
:options="filterOptions"
:label="$t('g.filter')"
/>
<SearchFilterDropdown
v-model:model-value="sortField"
:options="availableSortOptions"
:label="$t('g.sort')"
/>
</div>
<div class="ml-6 flex items-center gap-4">
<small v-if="hasResults" class="text-color-secondary">
{{ $t('g.resultsCount', { count: searchResults?.length || 0 }) }}
</small>
</div>
</div>
</div>
</template>
<script setup lang="ts">
import { stubTrue } from 'es-toolkit/compat'
import type { AutoCompleteOptionSelectEvent } from 'primevue/autocomplete'
import AutoComplete from 'primevue/autocomplete'
import { computed } from 'vue'
import { useI18n } from 'vue-i18n'
import type { components } from '@/types/comfyRegistryTypes'
import type {
QuerySuggestion,
SearchMode,
SortableField
} from '@/types/searchServiceTypes'
import PackInstallButton from '@/workbench/extensions/manager/components/manager/button/PackInstallButton.vue'
import PackUpdateButton from '@/workbench/extensions/manager/components/manager/button/PackUpdateButton.vue'
import SearchFilterDropdown from '@/workbench/extensions/manager/components/manager/registrySearchBar/SearchFilterDropdown.vue'
import { useMissingNodes } from '@/workbench/extensions/manager/composables/nodePack/useMissingNodes'
import { useUpdateAvailableNodes } from '@/workbench/extensions/manager/composables/nodePack/useUpdateAvailableNodes'
import { SortableAlgoliaField } from '@/workbench/extensions/manager/types/comfyManagerTypes'
import type { SearchOption } from '@/workbench/extensions/manager/types/comfyManagerTypes'
const { searchResults, sortOptions } = defineProps<{
searchResults?: components['schemas']['Node'][]
suggestions?: QuerySuggestion[]
sortOptions?: SortableField[]
isMissingTab?: boolean
isUpdateAvailableTab?: boolean
}>()
const searchQuery = defineModel<string>('searchQuery')
const searchMode = defineModel<SearchMode>('searchMode', { default: 'packs' })
const sortField = defineModel<string>('sortField', {
default: SortableAlgoliaField.Downloads
})
const { t } = useI18n()
// Get missing node packs from workflow with loading and error states
const { missingNodePacks, isLoading, error } = useMissingNodes()
// Use the composable to get update available nodes
const {
hasUpdateAvailable,
enabledUpdateAvailableNodePacks,
hasDisabledUpdatePacks
} = useUpdateAvailableNodes()
const hasResults = computed(
() => searchQuery.value?.trim() && searchResults?.length
)
const availableSortOptions = computed<SearchOption<string>[]>(() => {
if (!sortOptions) return []
return sortOptions.map((field) => ({
id: field.id,
label: field.label
}))
})
const filterOptions: SearchOption<SearchMode>[] = [
{ id: 'packs', label: t('manager.filter.nodePack') },
{ id: 'nodes', label: t('g.nodes') }
]
// When a dropdown query suggestion is selected, update the search query
const onOptionSelect = (event: AutoCompleteOptionSelectEvent) => {
searchQuery.value = event.value.query
}
</script>


@@ -1,36 +0,0 @@
<template>
<div class="flex items-center gap-1">
<span class="text-muted">{{ label }}:</span>
<Dropdown
:model-value="modelValue"
:options="options"
option-label="label"
option-value="id"
class="min-w-[6rem] border-none bg-transparent shadow-none"
:pt="{
input: { class: 'py-0 px-1 border-none' },
trigger: { class: 'hidden' },
panel: { class: 'shadow-md' },
item: { class: 'py-2 px-3 text-sm' }
}"
@update:model-value="$emit('update:modelValue', $event)"
/>
</div>
</template>
<script setup lang="ts" generic="T">
// oxlint-disable-next-line no-restricted-imports -- TODO: Migrate to Select component
import Dropdown from 'primevue/dropdown'
import type { SearchOption } from '@/workbench/extensions/manager/types/comfyManagerTypes'
defineProps<{
options: SearchOption<T>[]
label: string
modelValue: T
}>()
defineEmits<{
'update:modelValue': [value: T]
}>()
</script>


@@ -14,7 +14,6 @@ import { useComfyManagerStore } from '@/workbench/extensions/manager/stores/comf
/**
* Composable to find missing NodePacks from workflow
* Uses the same filtering approach as ManagerDialogContent.vue
* Automatically fetches workflow pack data when initialized
* This is a shared singleton composable - all components use the same instance
*/
@@ -25,7 +24,6 @@ export const useMissingNodes = createSharedComposable(() => {
const { workflowPacks, isLoading, error, startFetchWorkflowPacks } =
useWorkflowPacks()
// Same filtering logic as ManagerDialogContent.vue
const filterMissingPacks = (packs: components['schemas']['Node'][]) =>
packs.filter((pack) => !comfyManagerStore.isPackInstalled(pack.id))


@@ -7,7 +7,6 @@ import { useComfyManagerStore } from '@/workbench/extensions/manager/stores/comf
/**
* Composable to find NodePacks that have updates available
* Uses the same filtering approach as ManagerDialogContent.vue
* Automatically fetches installed pack data when initialized
*/
export const useUpdateAvailableNodes = () => {
@@ -34,7 +33,6 @@ export const useUpdateAvailableNodes = () => {
return compare(latestVersion, installedVersion) > 0
}
// Same filtering logic as ManagerDialogContent.vue
const filterOutdatedPacks = (packs: components['schemas']['Node'][]) =>
packs.filter(isOutdatedPack)


@@ -0,0 +1,36 @@
import { useDialogService } from '@/services/dialogService'
import { useDialogStore } from '@/stores/dialogStore'
import type { ManagerTab } from '@/workbench/extensions/manager/types/comfyManagerTypes'
import ManagerDialog from '@/workbench/extensions/manager/components/manager/ManagerDialog.vue'
const DIALOG_KEY = 'global-manager'
export function useManagerDialog() {
const dialogService = useDialogService()
const dialogStore = useDialogStore()
function hide() {
dialogStore.closeDialog({ key: DIALOG_KEY })
}
function show(initialTab?: ManagerTab) {
dialogService.showLayoutDialog({
key: DIALOG_KEY,
component: ManagerDialog,
props: {
onClose: hide,
initialTab
},
dialogComponentProps: {
pt: {
content: { class: '!px-0 overflow-hidden h-full !py-0' }
}
}
})
}
return {
show,
hide
}
}
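For reference, a caller uses the composable like this (the real call site is the `useManagerState` change below; the surrounding snippet is illustrative):

```typescript
import { useManagerDialog } from '@/workbench/extensions/manager/composables/useManagerDialog'
import { ManagerTab } from '@/workbench/extensions/manager/types/comfyManagerTypes'

const managerDialog = useManagerDialog()

// Open the manager on a specific tab; omit the argument to fall back to the persisted tab.
managerDialog.show(ManagerTab.UpdateAvailable)

// The fixed dialog key lets hide() close it from anywhere.
managerDialog.hide()
```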


@@ -53,6 +53,13 @@ vi.mock('@/stores/toastStore', () => ({
}))
}))
vi.mock('@/workbench/extensions/manager/composables/useManagerDialog', () => ({
useManagerDialog: vi.fn(() => ({
show: vi.fn(),
hide: vi.fn()
}))
}))
describe('useManagerState', () => {
beforeEach(() => {
vi.clearAllMocks()


@@ -7,6 +7,7 @@ import { api } from '@/scripts/api'
import { useDialogService } from '@/services/dialogService'
import { useCommandStore } from '@/stores/commandStore'
import { useSystemStatsStore } from '@/stores/systemStatsStore'
import { useManagerDialog } from '@/workbench/extensions/manager/composables/useManagerDialog'
import { ManagerTab } from '@/workbench/extensions/manager/types/comfyManagerTypes'
export enum ManagerUIState {
@@ -19,6 +20,7 @@ export function useManagerState() {
const systemStatsStore = useSystemStatsStore()
const { systemStats, isInitialized: systemInitialized } =
storeToRefs(systemStatsStore)
const managerDialog = useManagerDialog()
/**
* The current manager UI state.
@@ -186,11 +188,9 @@ export function useManagerState() {
detail: t('manager.legacyMenuNotAvailable'),
life: 3000
})
dialogService.showManagerDialog({ initialTab: ManagerTab.All })
await managerDialog.show(ManagerTab.All)
} else {
dialogService.showManagerDialog(
options?.initialTab ? { initialTab: options.initialTab } : undefined
)
await managerDialog.show(options?.initialTab)
}
break
}


@@ -20,12 +20,6 @@ export enum ManagerTab {
UpdateAvailable = 'updateAvailable'
}
export interface TabItem {
id: ManagerTab
label: string
icon: string
}
export type TaskLog = {
taskName: string
taskId: string
@@ -37,11 +31,6 @@ export interface UseNodePacksOptions {
maxConcurrent?: number
}
export interface SearchOption<T> {
id: T
label: string
}
export enum SortableAlgoliaField {
Downloads = 'total_install',
Created = 'create_time',