Compare commits

...

5 Commits

Author SHA1 Message Date
bymyself
cd9eb15cba test+docs: harden assetsStore deletion+drift composition
Adversarial review (Skeptic, Architect, Minimalist lenses) plus an
oracle review surfaced two improvements without changing the underlying
pagination behavior:

- insertAssetSorted: tiebreak equal created_at timestamps with
  lexicographic id order so jobs that share a millisecond don't reorder
  across repeated incremental merges.
- removeHistoryItems: document the historyOffset decrement invariant.
  After a confirmed delete, the refreshed first page is backfilled with
  rows that previously sat past the old page-1 boundary; those
  backfilled rows count as drift, so the cursor must shrink by
  removedCount or the next loadMoreHistory will compose
  `historyOffset + drift` and silently skip unseen rows.
- Add a regression test that combines deletion with a non-zero drift
  counter and asserts the composed offset
  `(BATCH_SIZE - removed) + drift`. The existing tests covered each
  branch in isolation but not the composition.

Amp-Thread-ID: https://ampcode.com/threads/T-019df415-3078-74f9-9f05-c148c442293c
2026-05-04 11:17:50 -07:00
bymyself
229b11a396 fix: adjust historyOffset when removing items to prevent skipping unseen rows
Addresses review feedback: removeHistoryItems() now decrements
historyOffset by the number of removed items so the next
loadMoreHistory() call does not skip unseen history rows.

https://github.com/Comfy-Org/ComfyUI_frontend/pull/9005#discussion_r2044741070
2026-05-04 11:17:50 -07:00
bymyself
f1f3fa1456 fix: remove deleted history items from local state before incremental merge
Adds removeHistoryItems() to assetsStore that evicts items by ID from
allHistoryItems and loadedIds. The delete flow in useMediaAssetActions
now calls it for successfully deleted output assets before updateHistory(),
so deleted items no longer persist after the append-only incremental merge.

Addresses review feedback:
https://github.com/Comfy-Org/ComfyUI_frontend/pull/9005#discussion_r2951082746
2026-05-04 11:17:50 -07:00
bymyself
d0fcec81f4 fix: handle pagination re-enable and concurrent drift in assetsStore
Address code review feedback:
- Re-enable pagination (hasMoreHistory=true) when history grows past one
  page after previously being exhausted. Skip drift accumulation while
  pagination is inactive.
- Capture drift at request time in fetchHistoryNextPage and subtract only
  the captured amount on success, preserving concurrent updateHistory()
  increments during in-flight fetches.

Amp-Thread-ID: https://ampcode.com/threads/T-019c7a21-9e2b-73dc-81bf-d7c64574345c
2026-05-04 11:17:50 -07:00
bymyself
ab1f235f1e fix: preserve paginated assets during incremental history updates
updateHistory() reset all pagination state (offset=0, cleared items) on
every job event, causing assets loaded beyond the first batch to vanish
during infinite scroll.

Refactor to incremental prepend approach: subsequent updateHistory()
calls fetch page 1 and merge only new items. Track prepended count
between pagination calls so loadMoreHistory() adjusts offset to avoid
duplicates from shifted data.

Amp-Thread-ID: https://ampcode.com/threads/T-019c7a21-9e2b-73dc-81bf-d7c64574345c
2026-05-04 11:17:50 -07:00
3 changed files with 348 additions and 59 deletions

View File

@@ -633,6 +633,14 @@ export function useMediaAssetActions() {
)
if (hasOutputAssets) {
const succeededOutputIds = assetArray
.filter(
(a, i) =>
getAssetType(a) === 'output' &&
results[i].status === 'fulfilled'
)
.map((a) => a.id)
assetsStore.removeHistoryItems(succeededOutputIds)
await assetsStore.updateHistory()
}
if (hasInputAssets) {

View File

@@ -426,6 +426,93 @@ describe('assetsStore - Refactored (Option A)', () => {
})
})
describe('Deletion', () => {
it('should remove items by id from history', async () => {
const mockHistory = Array.from({ length: 5 }, (_, i) =>
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce(mockHistory)
await store.updateHistory()
expect(store.historyAssets).toHaveLength(5)
store.removeHistoryItems(['prompt_1', 'prompt_3'])
expect(store.historyAssets).toHaveLength(3)
const ids = store.historyAssets.map((a) => a.id)
expect(ids).not.toContain('prompt_1')
expect(ids).not.toContain('prompt_3')
})
it('should adjust pagination offset after deletion', async () => {
const mockHistory = Array.from({ length: 200 }, (_, i) =>
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce(mockHistory)
await store.updateHistory()
// Delete 3 items — offset shifts from 200 to 197 so the next page
// request lines up with the server's post-deletion cursor and we don't
// skip the rows that backfilled into the old page-1 boundary.
store.removeHistoryItems(['prompt_1', 'prompt_3', 'prompt_5'])
expect(store.historyAssets).toHaveLength(197)
const nextBatch = Array.from({ length: 200 }, (_, i) =>
createMockJobItem(200 + i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce(nextBatch)
await store.loadMoreHistory()
expect(api.getHistory).toHaveBeenLastCalledWith(200, { offset: 197 })
})
it('should compose deletion offset with drift on next loadMore', async () => {
// Initial page 1 (200 items: prompt_0 .. prompt_199)
const page1 = Array.from({ length: 200 }, (_, i) => createMockJobItem(i))
vi.mocked(api.getHistory).mockResolvedValueOnce(page1)
await store.updateHistory()
// 3 net-new items merge in via a refresh — drift becomes 3.
const newJobs = Array.from({ length: 3 }, (_, i) =>
createMockJobItem(7000 + i)
)
const refreshedPage1 = [...newJobs, ...page1.slice(0, 197)]
vi.mocked(api.getHistory).mockResolvedValueOnce(refreshedPage1)
await store.updateHistory()
// Delete 2 loaded items — historyOffset should drop from 200 to 198.
store.removeHistoryItems(['prompt_10', 'prompt_20'])
// Next loadMore offset must compose deletion + drift:
// adjustedOffset = (200 - 2) + 3 = 201
// If historyOffset is not decremented the request becomes 203 and the
// server rows at the new positions 201–202 are silently skipped.
const page2 = Array.from({ length: 200 }, (_, i) =>
createMockJobItem(200 + i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce(page2)
await store.loadMoreHistory()
expect(api.getHistory).toHaveBeenLastCalledWith(200, { offset: 201 })
})
it('should allow re-inserting a removed item on next updateHistory', async () => {
const mockHistory = Array.from({ length: 3 }, (_, i) =>
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce(mockHistory)
await store.updateHistory()
store.removeHistoryItems(['prompt_1'])
expect(store.historyAssets).toHaveLength(2)
// Simulate the item reappearing from the server (e.g. delete failed)
vi.mocked(api.getHistory).mockResolvedValueOnce(mockHistory)
await store.updateHistory()
expect(store.historyAssets).toHaveLength(3)
})
})
describe('Error Handling', () => {
it('should preserve existing data when loadMore fails', async () => {
// First successful load - full batch
@@ -542,6 +629,142 @@ describe('assetsStore - Refactored (Option A)', () => {
})
})
describe('Incremental Update', () => {
it('should preserve loaded items when updateHistory is called after pagination', async () => {
// Use indices 100-299 for page1 so there's room for a "newer" item
const page1 = Array.from({ length: 200 }, (_, i) =>
createMockJobItem(100 + i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce(page1)
await store.updateHistory()
const page2 = Array.from({ length: 200 }, (_, i) =>
createMockJobItem(300 + i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce(page2)
await store.loadMoreHistory()
expect(store.historyAssets).toHaveLength(400)
// Index 0 → Date.now() - 0ms, which is newer than index 100+
const newJob = createMockJobItem(0)
const refreshedPage1 = [newJob, ...page1.slice(0, 199)]
vi.mocked(api.getHistory).mockResolvedValueOnce(refreshedPage1)
await store.updateHistory()
expect(store.historyAssets).toHaveLength(401)
expect(store.historyAssets[0].id).toBe('prompt_0')
})
it('should not duplicate items when updateHistory fetches overlapping data', async () => {
const page1 = Array.from({ length: 200 }, (_, i) => createMockJobItem(i))
vi.mocked(api.getHistory).mockResolvedValueOnce(page1)
await store.updateHistory()
vi.mocked(api.getHistory).mockResolvedValueOnce([...page1])
await store.updateHistory()
expect(store.historyAssets).toHaveLength(200)
})
it('should adjust offset for loadMore after new items are prepended', async () => {
const page1 = Array.from({ length: 200 }, (_, i) => createMockJobItem(i))
vi.mocked(api.getHistory).mockResolvedValueOnce(page1)
await store.updateHistory()
const newJobs = Array.from({ length: 5 }, (_, i) =>
createMockJobItem(5000 + i)
)
const refreshedPage1 = [...newJobs, ...page1.slice(0, 195)]
vi.mocked(api.getHistory).mockResolvedValueOnce(refreshedPage1)
await store.updateHistory()
const page2 = Array.from({ length: 200 }, (_, i) =>
createMockJobItem(200 + i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce(page2)
await store.loadMoreHistory()
expect(api.getHistory).toHaveBeenLastCalledWith(200, { offset: 205 })
})
it('should preserve drift counter when loadMore fails', async () => {
const page1 = Array.from({ length: 200 }, (_, i) => createMockJobItem(i))
vi.mocked(api.getHistory).mockResolvedValueOnce(page1)
await store.updateHistory()
const newJobs = Array.from({ length: 3 }, (_, i) =>
createMockJobItem(6000 + i)
)
const refreshedPage1 = [...newJobs, ...page1.slice(0, 197)]
vi.mocked(api.getHistory).mockResolvedValueOnce(refreshedPage1)
await store.updateHistory()
// loadMore fails — drift counter should be preserved
vi.mocked(api.getHistory).mockRejectedValueOnce(new Error('fail'))
await store.loadMoreHistory()
// Retry loadMore — should still include the drift adjustment
const page2 = Array.from({ length: 200 }, (_, i) =>
createMockJobItem(200 + i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce(page2)
await store.loadMoreHistory()
expect(api.getHistory).toHaveBeenLastCalledWith(200, { offset: 203 })
})
it('should re-enable pagination when history grows past one page', async () => {
// Initial load with fewer than BATCH_SIZE items — pagination exhausted
const smallPage = Array.from({ length: 50 }, (_, i) =>
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce(smallPage)
await store.updateHistory()
expect(store.hasMoreHistory).toBe(false)
// Many new jobs arrive — first page is now full (BATCH_SIZE items)
const fullPage = Array.from({ length: 200 }, (_, i) =>
createMockJobItem(7000 + i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce(fullPage)
await store.updateHistory()
expect(store.hasMoreHistory).toBe(true)
})
it('should not accumulate drift when pagination is exhausted', async () => {
const smallPage = Array.from({ length: 50 }, (_, i) =>
createMockJobItem(i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce(smallPage)
await store.updateHistory()
// Subsequent update with a few new items, still under BATCH_SIZE
const newJobs = Array.from({ length: 3 }, (_, i) =>
createMockJobItem(8000 + i)
)
const refreshed = [...newJobs, ...smallPage.slice(0, 47)]
vi.mocked(api.getHistory).mockResolvedValueOnce(refreshed)
await store.updateHistory()
// Re-enable pagination with a full page
const fullPage = Array.from({ length: 200 }, (_, i) =>
createMockJobItem(9000 + i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce(fullPage)
await store.updateHistory()
// loadMore should use offset 200 (no drift accumulated while exhausted)
const page2 = Array.from({ length: 200 }, (_, i) =>
createMockJobItem(200 + i)
)
vi.mocked(api.getHistory).mockResolvedValueOnce(page2)
await store.loadMoreHistory()
expect(api.getHistory).toHaveBeenLastCalledWith(200, { offset: 200 })
})
})
describe('jobDetailView Support', () => {
it('should include outputCount and allOutputs in user_metadata', async () => {
const mockHistory = Array.from({ length: 5 }, (_, i) =>

View File

@@ -94,7 +94,7 @@ export const useAssetsStore = defineStore('assets', () => {
// Track assets currently being deleted (for loading overlay)
const deletingAssetIds = shallowReactive(new Set<string>())
const setAssetDeleting = (assetId: string, isDeleting: boolean) => {
function setAssetDeleting(assetId: string, isDeleting: boolean) {
if (isDeleting) {
deletingAssetIds.add(assetId)
} else {
@@ -102,7 +102,7 @@ export const useAssetsStore = defineStore('assets', () => {
}
}
const isAssetDeleting = (assetId: string): boolean => {
function isAssetDeleting(assetId: string): boolean {
return deletingAssetIds.has(assetId)
}
@@ -110,6 +110,7 @@ export const useAssetsStore = defineStore('assets', () => {
const historyOffset = ref(0)
const hasMoreHistory = ref(true)
const isLoadingMore = ref(false)
const prependedSinceLastLoad = ref(0)
const allHistoryItems = ref<AssetItem[]>([])
@@ -139,67 +140,124 @@ export const useAssetsStore = defineStore('assets', () => {
}
/**
* Fetch history assets with pagination support
* @param loadMore - true for pagination (append), false for initial load (replace)
* Insert an asset into the sorted list at the correct position (newest first).
* Skips duplicates already tracked in loadedIds.
* @returns true if the asset was inserted, false if it was a duplicate.
*/
const fetchHistoryAssets = async (loadMore = false): Promise<AssetItem[]> => {
// Reset state for initial load
if (!loadMore) {
historyOffset.value = 0
hasMoreHistory.value = true
allHistoryItems.value = []
loadedIds.clear()
}
function insertAssetSorted(asset: AssetItem): boolean {
if (loadedIds.has(asset.id)) return false
loadedIds.add(asset.id)
// Fetch from server with offset
const history = await api.getHistory(BATCH_SIZE, {
offset: historyOffset.value
const assetTime = new Date(asset.created_at ?? 0).getTime()
// Sort: newer first; ties broken by lexicographically larger id first
// so insertion order is stable across repeated merges.
const insertIndex = allHistoryItems.value.findIndex((item) => {
const itemTime = new Date(item.created_at ?? 0).getTime()
if (itemTime !== assetTime) return itemTime < assetTime
return item.id < asset.id
})
// Convert JobListItems to AssetItems
const newAssets = mapHistoryToAssets(history)
if (loadMore) {
// Filter out duplicates and insert in sorted order
for (const asset of newAssets) {
if (loadedIds.has(asset.id)) {
continue // Skip duplicates
}
loadedIds.add(asset.id)
// Find insertion index to maintain sorted order (newest first)
const assetTime = new Date(asset.created_at ?? 0).getTime()
const insertIndex = allHistoryItems.value.findIndex(
(item) => new Date(item.created_at ?? 0).getTime() < assetTime
)
if (insertIndex === -1) {
// Asset is oldest, append to end
allHistoryItems.value.push(asset)
} else {
// Insert at the correct position
allHistoryItems.value.splice(insertIndex, 0, asset)
}
}
if (insertIndex === -1) {
allHistoryItems.value.push(asset)
} else {
// Initial load: replace all
allHistoryItems.value = newAssets
newAssets.forEach((asset) => loadedIds.add(asset.id))
allHistoryItems.value.splice(insertIndex, 0, asset)
}
return true
}
// Update pagination state
historyOffset.value += BATCH_SIZE
hasMoreHistory.value = history.length === BATCH_SIZE
/**
* Remove items from the local view (used after the server confirmed the
* delete). Decrements historyOffset by the count of removed loaded items so
* the canonical caller flow — `removeHistoryItems(ids)` then
* `await updateHistory()` then `loadMoreHistory()` — produces the correct
* server offset:
*
* After deletion, the refreshed first page that `updateHistory()` fetches
* gets backfilled with items that previously sat just past the page-1
* boundary. Those backfilled items legitimately count as drift. If we
* leave `historyOffset` untouched, `loadMoreHistory()` then computes
* `historyOffset + drift` which double-counts the deletion: it adds the
* backfilled items on top of the original cursor and skips that many
* unseen rows on the next page.
*/
function removeHistoryItems(ids: string[]) {
const idSet = new Set(ids)
const removedCount = allHistoryItems.value.filter((item) =>
idSet.has(item.id)
).length
allHistoryItems.value = allHistoryItems.value.filter(
(item) => !idSet.has(item.id)
)
ids.forEach((id) => loadedIds.delete(id))
historyOffset.value = Math.max(0, historyOffset.value - removedCount)
historyAssets.value = allHistoryItems.value
}
function trimToMaxItems() {
if (allHistoryItems.value.length > MAX_HISTORY_ITEMS) {
const removed = allHistoryItems.value.slice(MAX_HISTORY_ITEMS)
allHistoryItems.value = allHistoryItems.value.slice(0, MAX_HISTORY_ITEMS)
// Clean up Set
const removed = allHistoryItems.value.splice(MAX_HISTORY_ITEMS)
removed.forEach((item) => loadedIds.delete(item.id))
}
}
return allHistoryItems.value
/**
* Fetch the first page of history. On initial load (empty state), replaces
* all items. On subsequent calls, incrementally merges new items without
* clearing existing pagination state — fixing the "disappearing assets" bug.
*/
async function fetchHistoryIncremental(): Promise<void> {
const history = await api.getHistory(BATCH_SIZE, { offset: 0 })
const newAssets = mapHistoryToAssets(history)
if (allHistoryItems.value.length === 0) {
allHistoryItems.value = newAssets
newAssets.forEach((asset) => loadedIds.add(asset.id))
historyOffset.value = BATCH_SIZE
hasMoreHistory.value = history.length === BATCH_SIZE
} else {
let newCount = 0
for (const asset of newAssets) {
if (insertAssetSorted(asset)) newCount++
}
if (hasMoreHistory.value) {
prependedSinceLastLoad.value += newCount
} else if (history.length === BATCH_SIZE) {
hasMoreHistory.value = true
prependedSinceLastLoad.value = 0
}
}
trimToMaxItems()
}
/**
* Fetch the next page of history for infinite scroll pagination.
* Adjusts offset to account for items prepended since the last page load.
*/
async function fetchHistoryNextPage(): Promise<void> {
const driftAtRequest = prependedSinceLastLoad.value
const adjustedOffset = historyOffset.value + driftAtRequest
const history = await api.getHistory(BATCH_SIZE, {
offset: adjustedOffset
})
const newAssets = mapHistoryToAssets(history)
// Subtract only the drift captured at request time so concurrent
// updateHistory() increments during the in-flight fetch are preserved.
prependedSinceLastLoad.value = Math.max(
0,
prependedSinceLastLoad.value - driftAtRequest
)
for (const asset of newAssets) {
insertAssetSorted(asset)
}
historyOffset.value = adjustedOffset + BATCH_SIZE
hasMoreHistory.value = history.length === BATCH_SIZE
trimToMaxItems()
}
const historyAssets = ref<AssetItem[]>([])
@@ -207,18 +265,19 @@ export const useAssetsStore = defineStore('assets', () => {
const historyError = ref<unknown>(null)
/**
* Initial load of history assets
* Load or refresh history assets. On first call, performs a full load.
* On subsequent calls (e.g. after job completion), incrementally merges
* new items without clearing existing pagination state.
*/
const updateHistory = async () => {
async function updateHistory() {
historyLoading.value = true
historyError.value = null
try {
await fetchHistoryAssets(false)
await fetchHistoryIncremental()
historyAssets.value = allHistoryItems.value
} catch (err) {
console.error('Error fetching history assets:', err)
historyError.value = err
// Keep existing data when error occurs
if (!historyAssets.value.length) {
historyAssets.value = []
}
@@ -230,20 +289,18 @@ export const useAssetsStore = defineStore('assets', () => {
/**
* Load more history items (infinite scroll)
*/
const loadMoreHistory = async () => {
// Guard: prevent concurrent loads and check if more items available
async function loadMoreHistory() {
if (!hasMoreHistory.value || isLoadingMore.value) return
isLoadingMore.value = true
historyError.value = null
try {
await fetchHistoryAssets(true)
await fetchHistoryNextPage()
historyAssets.value = allHistoryItems.value
} catch (err) {
console.error('Error loading more history:', err)
historyError.value = err
// Keep existing data when error occurs (consistent with updateHistory)
if (!historyAssets.value.length) {
historyAssets.value = []
}
@@ -747,6 +804,7 @@ export const useAssetsStore = defineStore('assets', () => {
updateInputs,
updateHistory,
loadMoreHistory,
removeHistoryItems,
// Input mapping helpers
inputAssetsByFilename,