fix: prevent cache poisoning on aborted requests in useCachedRequest

This commit is contained in:
Jin Yi
2026-03-04 10:53:55 +09:00
parent 0fde4d7c50
commit 7097918650
6 changed files with 42 additions and 8 deletions

View File

@@ -239,4 +239,32 @@ describe('useCachedRequest', () => {
await cachedRequest.call(123)
expect(mockRequestFn).toHaveBeenCalledTimes(1)
})
// Aborted requests must not poison the cache: an AbortError thrown by the
// request function surfaces as null, and a later call for the same key
// issues a fresh request instead of replaying a cached null.
it('should not cache aborted requests', async () => {
  vi.unstubAllGlobals()

  let hasAborted = false
  const abortFn = vi.fn(
    async (_params: unknown, _signal?: AbortSignal) => {
      if (!hasAborted) {
        // Abort exactly once, on the first invocation.
        hasAborted = true
        throw new DOMException('The operation was aborted.', 'AbortError')
      }
      return { data: 'success' }
    }
  )

  const cachedRequest = useCachedRequest(abortFn)

  // The initial call rejects with AbortError, which surfaces as null
  // without writing a cache entry.
  expect(await cachedRequest.call('key')).toBeNull()

  // Because nothing was cached, the same key triggers a second request,
  // which now succeeds.
  expect(await cachedRequest.call('key')).toEqual({ data: 'success' })
  expect(abortFn).toHaveBeenCalledTimes(2)
})
})

View File

@@ -48,6 +48,10 @@ export function useCachedRequest<TParams, TResult>(
return result
} catch (err) {
// Don't cache aborted requests — they should be retried
if (err instanceof DOMException && err.name === 'AbortError') {
return null
}
// Set cache on error to prevent retrying bad requests
cache.set(cacheKey, null)
return null

View File

@@ -11,7 +11,7 @@ export function useUngroupedAssets(
assets: Ref<AssetItem[]>,
groupByJob: Ref<boolean>
) {
const { call: cachedResolve, cancel } = useCachedRequest(
const { call: cachedResolve } = useCachedRequest(
(jobId: string, signal?: AbortSignal) => {
const asset = assets.value.find((a) => {
const m = getOutputAssetMetadata(a.user_metadata)
@@ -29,11 +29,9 @@ export function useUngroupedAssets(
const isResolving = ref(false)
const resolvedAssets = computedAsync(
async (onCancel) => {
async () => {
if (groupByJob.value) return []
onCancel(() => cancel())
const entries = assets.value.map((asset) => ({
asset,
metadata: getOutputAssetMetadata(asset.user_metadata)
@@ -41,7 +39,7 @@ export function useUngroupedAssets(
for (const { metadata } of entries) {
if ((metadata?.outputCount ?? 1) > 1 && metadata?.jobId) {
void cachedResolve(metadata.jobId)
void cachedResolve(metadata.jobId).catch(() => {})
}
}

View File

@@ -227,7 +227,9 @@ describe('fetchJobs', () => {
const result = await fetchJobDetail(mockFetch, 'job1')
expect(mockFetch).toHaveBeenCalledWith('/jobs/job1')
expect(mockFetch).toHaveBeenCalledWith('/jobs/job1', {
signal: undefined
})
expect(result?.id).toBe('job1')
expect(result?.outputs).toBeDefined()
})

View File

@@ -51,7 +51,9 @@ describe('fetchJobDetail', () => {
await fetchJobDetail(mockFetchApi, 'test-job-id')
expect(mockFetchApi).toHaveBeenCalledWith('/jobs/test-job-id')
expect(mockFetchApi).toHaveBeenCalledWith('/jobs/test-job-id', {
signal: undefined
})
})
it('should return job detail with workflow and outputs', async () => {

View File

@@ -323,7 +323,7 @@ describe('jobOutputCache', () => {
const result = await getJobDetail(jobId)
expect(result).toEqual(mockDetail)
expect(api.getJobDetail).toHaveBeenCalledWith(jobId)
expect(api.getJobDetail).toHaveBeenCalledWith(jobId, undefined)
})
it('returns cached job detail on subsequent calls', async () => {