Compare commits

...

4 Commits

Author SHA1 Message Date
Kelly Yang
5f1aab5d1b fix: route text preview_output outside media parsing pipeline
Text preview_output items (mediaType: 'text') carry a content field
instead of filename and are structurally incompatible with ResultItem.
Exclude them from synthetic effectiveOutputs and construct their
ResultItemImpl directly in calculateFlatOutputs, bypassing
parseNodeOutput and isResultItem entirely.

Also tighten isResultItem to require filename as a string, correctly
rejecting arbitrary non-media objects from custom node outputs.
2026-04-15 15:03:40 -07:00
Kelly Yang
8bf00e1a5c fix: resolve post-migration TODO comments and skipped test
Remove stale TODO comments from 3 litegraph test files whose tests were
already passing after the migration.

Fix the skipped 'should parse text outputs' test in queueStore by
correcting two bugs in resultItemParsing.ts:
- Remove 'text' from METADATA_KEYS: text media-type outputs were being
  silently discarded; isResultItem already filters non-object values
- Remove the filename requirement from isResultItem: filename is
  optional in the zResultItem schema, and text outputs carry content
  instead of filename

Fixes #11078
2026-04-15 13:20:44 -07:00
pythongosssss
a8e1fa8bef test: add regression test for WEBP RIFF padding (#8527) (#11267)
## Summary

Add a regression test for #8527 (handle RIFF padding for odd-sized WEBP
chunks). The fix added + (chunk_length % 2) to the chunk-stride
calculation in getWebpMetadata so EXIF chunks following an odd-sized
chunk are still located correctly. There was no existing unit test
covering getWebpMetadata, so without a regression test the fix could
silently break in a future refactor.

## Changes

- **What**: 
- New unit test file src/scripts/pnginfo.test.ts covering
getWebpMetadata's RIFF chunk traversal.
- Helpers build a minimal in-memory WEBP with one VP8 chunk of
configurable length followed by an EXIF chunk encoding workflow:<json>.
- Odd-length case (regression for #8527): without the % 2 padding
adjustment, the parser walks one byte short and returns {}.
- Even-length case: guards against an over-correction that always adds
1.
- Verified RED→GREEN locally.

┆Issue is synchronized with this [Notion
page](https://www.notion.so/PR-11267-test-add-regression-test-for-WEBP-RIFF-padding-8527-3436d73d36508117a66edf3cb108ded0)
by [Unito](https://www.unito.io)
2026-04-15 18:14:49 +00:00
pythongosssss
83ceef8cb3 test: add regression test for non-string serverLogs (#8460) (#11268)
## Summary

Add a regression test for #8460 (handle non-string `serverLogs` in error
report). The fix added `typeof error.serverLogs === 'string' ? ... :
JSON.stringify(...)` in `errorReportUtil.ts` so object-shaped logs no
longer render as `[object Object]`. There was no existing unit test for
`generateErrorReport`, so this regression could silently return.

## Changes

- **What**: New unit test file `src/utils/errorReportUtil.test.ts`
covering `generateErrorReport`'s `serverLogs` rendering.
- String case: verifies plain-string logs still appear verbatim and
`[object Object]` is absent.
- Object case (regression for #8460): verifies object logs are
JSON-stringified instead of coerced to `[object Object]`.
- Verified RED→GREEN locally.

┆Issue is synchronized with this [Notion
page](https://www.notion.so/PR-11268-test-add-regression-test-for-non-string-serverLogs-8460-3436d73d36508195a32fc559ab7ce5bb)
by [Unito](https://www.unito.io)
2026-04-15 18:14:17 +00:00
8 changed files with 129 additions and 22 deletions

View File

@@ -1,5 +1,4 @@
// oxlint-disable no-empty-pattern
// TODO: Fix these tests after migration
import { afterEach, describe, expect, vi } from 'vitest'
import type {

View File

@@ -1,4 +1,3 @@
// TODO: Fix these tests after migration
import { beforeEach, describe, expect, it, vi } from 'vitest'
import {

View File

@@ -1,4 +1,3 @@
// TODO: Fix these tests after migration
import { test as baseTest } from 'vitest'
import type { Point, Rect } from '@/lib/litegraph/src/interfaces'

View File

@@ -0,0 +1,67 @@
import { describe, expect, it } from 'vitest'
import { getWebpMetadata } from './pnginfo'
/**
 * Builds a minimal little-endian TIFF/EXIF payload whose single IFD entry
 * is an ASCII field carrying `workflow:<json>` (NUL-terminated) — the shape
 * getWebpMetadata is expected to read out of a WEBP EXIF chunk.
 */
function buildExifPayload(workflowJson: string): Uint8Array {
  const encoded = new TextEncoder().encode(`workflow:${workflowJson}\0`)
  // 8-byte TIFF header + 2-byte entry count + one 12-byte IFD entry
  const HEADER_BYTES = 22
  const payload = new Uint8Array(HEADER_BYTES + encoded.length)
  const view = new DataView(payload.buffer)
  // TIFF header: 'II' byte-order mark, magic 42, offset of the first IFD
  payload[0] = 0x49
  payload[1] = 0x49
  view.setUint16(2, 0x002a, true)
  view.setUint32(4, 8, true)
  // IFD: one entry — tag 0, type 2 (ASCII), count = string length,
  // value stored immediately after the header
  view.setUint16(8, 1, true)
  view.setUint16(10, 0, true)
  view.setUint16(12, 2, true)
  view.setUint32(14, encoded.length, true)
  view.setUint32(18, HEADER_BYTES, true)
  payload.set(encoded, HEADER_BYTES)
  return payload
}
function buildWebp(precedingChunkLength: number, workflowJson: string): File {
const exifPayload = buildExifPayload(workflowJson)
const precedingPadded = precedingChunkLength + (precedingChunkLength % 2)
const totalSize = 12 + (8 + precedingPadded) + (8 + exifPayload.length)
const buffer = new Uint8Array(totalSize)
const dv = new DataView(buffer.buffer)
buffer.set([0x52, 0x49, 0x46, 0x46], 0)
dv.setUint32(4, totalSize - 8, true)
buffer.set([0x57, 0x45, 0x42, 0x50], 8)
buffer.set([0x56, 0x50, 0x38, 0x20], 12)
dv.setUint32(16, precedingChunkLength, true)
const exifStart = 20 + precedingPadded
buffer.set([0x45, 0x58, 0x49, 0x46], exifStart)
dv.setUint32(exifStart + 4, exifPayload.length, true)
buffer.set(exifPayload, exifStart + 8)
return new File([buffer], 'test.webp', { type: 'image/webp' })
}
describe('getWebpMetadata', () => {
it('finds workflow when a preceding chunk has odd length (RIFF padding)', async () => {
const workflow = '{"nodes":[]}'
const file = buildWebp(3, workflow)
const metadata = await getWebpMetadata(file)
expect(metadata.workflow).toBe(workflow)
})
it('finds workflow when preceding chunk has even length (no padding)', async () => {
const workflow = '{"nodes":[1]}'
const file = buildWebp(4, workflow)
const metadata = await getWebpMetadata(file)
expect(metadata.workflow).toBe(workflow)
})
})

View File

@@ -189,7 +189,7 @@ describe('TaskItemImpl', () => {
})
})
it.skip('should parse text outputs', () => {
it('should parse text outputs', () => {
const job: JobListItem = {
...createHistoryJob(0, 'text-job'),
preview_output: {

View File

@@ -256,26 +256,23 @@ export class TaskItemImpl {
flatOutputs?: ReadonlyArray<ResultItemImpl>
) {
this.job = job
// If no outputs provided but job has preview_output, create synthetic outputs
// using the real nodeId and mediaType from the backend response
const preview = job.preview_output
const effectiveOutputs =
outputs ??
(job.preview_output
? {
[job.preview_output.nodeId]: {
[job.preview_output.mediaType]: [job.preview_output]
}
}
(preview && preview.mediaType !== 'text'
? { [preview.nodeId]: { [preview.mediaType]: [preview] } }
: {})
this.outputs = effectiveOutputs
this.flatOutputs = flatOutputs ?? this.calculateFlatOutputs()
}
calculateFlatOutputs(): ReadonlyArray<ResultItemImpl> {
if (!this.outputs) {
return []
const mediaOutputs = parseTaskOutput(this.outputs)
const preview = this.job.preview_output
if (preview?.mediaType === 'text') {
return [new ResultItemImpl(preview as ResultItemInit), ...mediaOutputs]
}
return parseTaskOutput(this.outputs)
return mediaOutputs
}
/** All outputs that support preview (images, videos, audio, 3D, text) */

View File

@@ -2,14 +2,8 @@ import type { NodeExecutionOutput, ResultItem } from '@/schemas/apiSchema'
import { resultItemType } from '@/schemas/apiSchema'
import { ResultItemImpl } from '@/stores/queueStore'
const METADATA_KEYS = new Set(['animated', 'text'])
const METADATA_KEYS = new Set(['animated'])
/**
* Validates that an unknown value is a well-formed ResultItem.
*
* Requires `filename` (string) since ResultItemImpl needs it for a valid URL.
* `subfolder` is optional here — ResultItemImpl constructor falls back to ''.
*/
function isResultItem(item: unknown): item is ResultItem {
if (!item || typeof item !== 'object' || Array.isArray(item)) return false

View File

@@ -0,0 +1,52 @@
import { describe, expect, it } from 'vitest'
import type { ISerialisedGraph } from '@/lib/litegraph/src/litegraph'
import type { SystemStats } from '@/schemas/apiSchema'
import type { ErrorReportData } from './errorReportUtil'
import { generateErrorReport } from './errorReportUtil'
// Minimal SystemStats fixture shared by every test case; values are
// placeholders (presumably echoed into the report header — verify against
// generateErrorReport before relying on specific fields).
const baseSystemStats: SystemStats = {
  system: {
    os: 'linux',
    comfyui_version: '1.0.0',
    python_version: '3.11',
    pytorch_version: '2.0',
    embedded_python: false,
    argv: ['main.py'],
    ram_total: 0,
    ram_free: 0
  },
  devices: []
}
// Empty serialized graph; the double cast sidesteps constructing a full
// ISerialisedGraph since these tests only exercise serverLogs rendering.
const baseWorkflow = { nodes: [], links: [] } as unknown as ISerialisedGraph
/**
 * Wraps an arbitrary `serverLogs` value into a complete ErrorReportData,
 * reusing the shared stats/workflow fixtures. The `as string` cast is
 * deliberate: it lets tests push non-string logs through a field typed as
 * string, which is exactly the regression scenario under test.
 */
function buildError(serverLogs: unknown): ErrorReportData {
  const data: ErrorReportData = {
    exceptionType: 'RuntimeError',
    exceptionMessage: 'boom',
    systemStats: baseSystemStats,
    serverLogs: serverLogs as string,
    workflow: baseWorkflow
  }
  return data
}
describe('generateErrorReport', () => {
  it('embeds string serverLogs verbatim', () => {
    const logs = 'line one\nline two'
    const report = generateErrorReport(buildError(logs))
    expect(report).toContain(logs)
    expect(report).not.toContain('[object Object]')
  })
  // Regression for #8460: object-shaped logs must be JSON-stringified rather
  // than string-coerced into '[object Object]'.
  it('stringifies object serverLogs instead of rendering [object Object]', () => {
    const objectLogs = { entries: [{ msg: 'hello' }] }
    const report = generateErrorReport(buildError(objectLogs))
    expect(report).not.toContain('[object Object]')
    expect(report).toContain('"entries"')
    expect(report).toContain('"msg": "hello"')
  })
})