mirror of
https://github.com/Comfy-Org/ComfyUI_frontend.git
synced 2026-03-13 17:10:06 +00:00
Cache execution id to node locator id mappings (#9244)
## Summary
nodeLocationProgressStates runs executionIdToNodeLocatorId for every
ancestor prefix of every node's display_node_id on every WebSocket
progress update. Each call traverses the subgraph hierarchy via
getNodeById. For nested subgraphs with many executing nodes, this
results in O(N × D²) graph lookups per progress tick, where N is
the number of executing nodes and D is the nesting depth.
Add a plain Map cache inside the execution store that memoizes
executionIdToNodeLocatorId results by execution ID string. The
cache persists across computed re-evaluations within a single
execution run, reducing subsequent progress updates to O(1)
lookups per execution ID.
Cache invalidation:
- Cleared at execution start (handleExecutionStart) to ensure
fresh graph state for each new run
- Cleared at execution end (resetExecutionState) to prevent
stale entries leaking across runs
The cache stores strings only (no graph node references), with
typical size of ~50-200 entries per run, so memory impact is
negligible.
- **What**: Caches a mapping of execution ids to node locator ids,
preventing a redundant O(N × D²) re-scan of subgraphs on every progress update
- **Breaking**: Nothing
- **Dependencies**: None
You will need to set the feature flag
`ff:expose_executionId_to_node_locator_id_cache_counters` from the
underlying counter PR if you want to measure the impact.
```js
localStorage.setItem(
'ff:expose_executionId_to_node_locator_id_cache_counters',
'true'
)
```
## Review Focus
Before pulling this PR, pull
https://github.com/Comfy-Org/ComfyUI_frontend/pull/9243 , set the
feature flag, and run a workflow with nested subgraphs, then look in
console to see a cache measurement.
Next, pull this PR and load the same workflow. Note the massive
reduction in visits.
## Screenshots
Login problems due to cross-opener policy are preventing me from taking
screenshots from a local dev build at this time
## Thread
There isn't one. I don't have access to AmpCode or Unito.
┆Issue is synchronized with this [Notion
page](https://www.notion.so/PR-9244-Cache-execution-id-to-node-locator-id-mappings-3136d73d365081e680eae3c891480ee7)
by [Unito](https://www.unito.io)
Co-authored-by: bymyself <cbyrne@comfy.org>
This commit is contained in:
@@ -136,6 +136,140 @@ describe('useExecutionStore - NodeLocatorId conversions', () => {
|
||||
})
|
||||
})
|
||||
|
||||
// Verifies the execution-id → node-locator-id cache added to useExecutionStore:
// results must resolve correctly, be memoized across progress updates (no
// repeated graph traversal), and be shared between sibling nodes in the
// same subgraph.
describe('useExecutionStore - nodeLocationProgressStates caching', () => {
  let store: ReturnType<typeof useExecutionStore>

  beforeEach(() => {
    // Reset every conversion mock so per-test call counts start at zero.
    vi.clearAllMocks()
    mockNodeExecutionIdToNodeLocatorId.mockReset()
    mockNodeIdToNodeLocatorId.mockReset()
    mockNodeLocatorIdToNodeExecutionId.mockReset()

    // stubActions: false so the store's real actions (and the cache) execute.
    setActivePinia(createTestingPinia({ stubActions: false }))
    store = useExecutionStore()
  })

  it('should resolve execution IDs to locator IDs for subgraph nodes', () => {
    const mockSubgraph = {
      id: 'a1b2c3d4-e5f6-7890-abcd-ef1234567890',
      nodes: []
    }
    const mockNode = createMockLGraphNode({
      id: 123,
      isSubgraphNode: () => true,
      subgraph: mockSubgraph
    })
    vi.mocked(app.rootGraph.getNodeById).mockReturnValue(mockNode)

    // display_node_id '123:456' = node 456 inside the subgraph wrapped by node 123.
    store.nodeProgressStates = {
      node1: {
        display_node_id: '123:456',
        state: 'running',
        value: 50,
        max: 100,
        prompt_id: 'test',
        node_id: 'node1'
      }
    }

    const result = store.nodeLocationProgressStates

    // Both the root-level prefix ('123') and the fully resolved
    // subgraph locator id ('<subgraph-uuid>:456') should be present.
    expect(result['123']).toBeDefined()
    expect(result['a1b2c3d4-e5f6-7890-abcd-ef1234567890:456']).toBeDefined()
  })

  it('should not re-traverse graph for same execution IDs across progress updates', () => {
    const mockSubgraph = {
      id: 'a1b2c3d4-e5f6-7890-abcd-ef1234567890',
      nodes: []
    }
    const mockNode = createMockLGraphNode({
      id: 123,
      isSubgraphNode: () => true,
      subgraph: mockSubgraph
    })
    vi.mocked(app.rootGraph.getNodeById).mockReturnValue(mockNode)

    store.nodeProgressStates = {
      node1: {
        display_node_id: '123:456',
        state: 'running',
        value: 50,
        max: 100,
        prompt_id: 'test',
        node_id: 'node1'
      }
    }

    // First evaluation triggers graph traversal
    expect(store.nodeLocationProgressStates['123']).toBeDefined()
    const callCountAfterFirst = vi.mocked(app.rootGraph.getNodeById).mock.calls
      .length

    // Second update with same execution IDs but different progress
    store.nodeProgressStates = {
      node1: {
        display_node_id: '123:456',
        state: 'running',
        value: 75,
        max: 100,
        prompt_id: 'test',
        node_id: 'node1'
      }
    }

    expect(store.nodeLocationProgressStates['123']).toBeDefined()

    // getNodeById should NOT be called again for the same execution ID
    expect(vi.mocked(app.rootGraph.getNodeById).mock.calls.length).toBe(
      callCountAfterFirst
    )
  })

  it('should correctly resolve multiple sibling nodes in the same subgraph', () => {
    const mockSubgraph = {
      id: 'a1b2c3d4-e5f6-7890-abcd-ef1234567890',
      nodes: []
    }
    const mockNode = createMockLGraphNode({
      id: 123,
      isSubgraphNode: () => true,
      subgraph: mockSubgraph
    })
    vi.mocked(app.rootGraph.getNodeById).mockReturnValue(mockNode)

    // Two sibling nodes in the same subgraph
    store.nodeProgressStates = {
      node1: {
        display_node_id: '123:456',
        state: 'running',
        value: 50,
        max: 100,
        prompt_id: 'test',
        node_id: 'node1'
      },
      node2: {
        display_node_id: '123:789',
        state: 'running',
        value: 30,
        max: 100,
        prompt_id: 'test',
        node_id: 'node2'
      }
    }

    const result = store.nodeLocationProgressStates

    // Both sibling nodes should be resolved with the correct subgraph UUID
    expect(result['a1b2c3d4-e5f6-7890-abcd-ef1234567890:456']).toBeDefined()
    expect(result['a1b2c3d4-e5f6-7890-abcd-ef1234567890:789']).toBeDefined()

    // The shared parent "123" should also have a merged state
    expect(result['123']).toBeDefined()
    expect(result['123'].state).toBe('running')
  })
})
|
||||
|
||||
describe('useExecutionStore - reconcileInitializingJobs', () => {
|
||||
let store: ReturnType<typeof useExecutionStore>
|
||||
|
||||
|
||||
@@ -73,6 +73,24 @@ export const useExecutionStore = defineStore('execution', () => {
|
||||
|
||||
const initializingJobIds = ref<Set<string>>(new Set())
|
||||
|
||||
/**
|
||||
* Cache for executionIdToNodeLocatorId lookups.
|
||||
* Avoids redundant graph traversals during a single execution run.
|
||||
* Cleared at execution start and end to ensure fresh graph state.
|
||||
*/
|
||||
const executionIdToLocatorCache = new Map<string, NodeLocatorId | undefined>()
|
||||
|
||||
function cachedExecutionIdToLocator(
|
||||
executionId: string
|
||||
): NodeLocatorId | undefined {
|
||||
if (executionIdToLocatorCache.has(executionId)) {
|
||||
return executionIdToLocatorCache.get(executionId)
|
||||
}
|
||||
const locatorId = executionIdToNodeLocatorId(app.rootGraph, executionId)
|
||||
executionIdToLocatorCache.set(executionId, locatorId)
|
||||
return locatorId
|
||||
}
|
||||
|
||||
const mergeExecutionProgressStates = (
|
||||
currentState: NodeProgressState | undefined,
|
||||
newState: NodeProgressState
|
||||
@@ -112,7 +130,7 @@ export const useExecutionStore = defineStore('execution', () => {
|
||||
const parts = String(state.display_node_id).split(':')
|
||||
for (let i = 0; i < parts.length; i++) {
|
||||
const executionId = parts.slice(0, i + 1).join(':')
|
||||
const locatorId = executionIdToNodeLocatorId(app.rootGraph, executionId)
|
||||
const locatorId = cachedExecutionIdToLocator(executionId)
|
||||
if (!locatorId) continue
|
||||
|
||||
result[locatorId] = mergeExecutionProgressStates(
|
||||
@@ -220,6 +238,7 @@ export const useExecutionStore = defineStore('execution', () => {
|
||||
}
|
||||
|
||||
function handleExecutionStart(e: CustomEvent<ExecutionStartWsMessage>) {
|
||||
executionIdToLocatorCache.clear()
|
||||
executionErrorStore.clearAllErrors()
|
||||
activeJobId.value = e.detail.prompt_id
|
||||
queuedJobs.value[activeJobId.value] ??= { nodes: {} }
|
||||
@@ -437,6 +456,7 @@ export const useExecutionStore = defineStore('execution', () => {
|
||||
* Reset execution-related state after a run completes or is stopped.
|
||||
*/
|
||||
function resetExecutionState(jobIdParam?: string | null) {
|
||||
executionIdToLocatorCache.clear()
|
||||
nodeProgressStates.value = {}
|
||||
const jobId = jobIdParam ?? activeJobId.value ?? null
|
||||
if (jobId) {
|
||||
|
||||
Reference in New Issue
Block a user