[test] Add performance tests for transform operations

Comprehensive benchmarks covering:
- Coordinate conversion performance (10k operations < 20ms)
- Viewport culling efficiency (1k nodes < 10ms)
- Transform synchronization (1k syncs < 15ms)
- Real-world scenarios (panning, zooming)

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
bymyself
2025-07-05 00:10:52 -07:00
parent 32ddf7263c
commit c2463268d7
2 changed files with 878 additions and 0 deletions

View File

@@ -0,0 +1,400 @@
import { beforeEach, describe, expect, it } from 'vitest'
import { useSpatialIndex } from '@/composables/graph/useSpatialIndex'
import type { Bounds } from '@/utils/spatial/QuadTree'
// Performance benchmarks for the QuadTree-backed spatial index composable.
// NOTE(review): all thresholds below are wall-clock (performance.now()) based
// and may be flaky on loaded CI machines — a failure here is a signal to
// profile, not necessarily a functional regression.
describe('Spatial Index Performance', () => {
  let spatialIndex: ReturnType<typeof useSpatialIndex>

  beforeEach(() => {
    // Fresh index per test; debouncing disabled so updates apply synchronously.
    spatialIndex = useSpatialIndex({
      maxDepth: 6,
      maxItemsPerNode: 4,
      updateDebounceMs: 0 // Disable debouncing for tests
    })
  })

  describe('large scale operations', () => {
    it('should handle 1000 node insertions efficiently', () => {
      const startTime = performance.now()

      // Generate 1000 nodes in a realistic distribution
      // (positions spread over a 10000x10000 area centered on the origin).
      const nodes = Array.from({ length: 1000 }, (_, i) => ({
        id: `node${i}`,
        position: {
          x: (Math.random() - 0.5) * 10000,
          y: (Math.random() - 0.5) * 10000
        },
        size: {
          width: 150 + Math.random() * 100,
          height: 100 + Math.random() * 50
        }
      }))

      spatialIndex.batchUpdate(nodes)

      const insertTime = performance.now() - startTime

      // Should insert 1000 nodes in under 100ms
      expect(insertTime).toBeLessThan(100)
      expect(spatialIndex.metrics.value.totalNodes).toBe(1000)
    })

    it('should maintain fast viewport queries under load', () => {
      // First populate with many nodes
      const nodes = Array.from({ length: 1000 }, (_, i) => ({
        id: `node${i}`,
        position: {
          x: (Math.random() - 0.5) * 10000,
          y: (Math.random() - 0.5) * 10000
        },
        size: { width: 200, height: 100 }
      }))
      spatialIndex.batchUpdate(nodes)

      // Now benchmark viewport queries against a 1080p-sized viewport.
      const queryCount = 100
      const viewportBounds: Bounds = {
        x: -960,
        y: -540,
        width: 1920,
        height: 1080
      }

      const startTime = performance.now()
      for (let i = 0; i < queryCount; i++) {
        // Vary viewport position to test different tree regions
        const offsetX = (i % 10) * 500
        const offsetY = Math.floor(i / 10) * 300
        spatialIndex.queryViewport({
          x: viewportBounds.x + offsetX,
          y: viewportBounds.y + offsetY,
          width: viewportBounds.width,
          height: viewportBounds.height
        })
      }
      const totalQueryTime = performance.now() - startTime
      const avgQueryTime = totalQueryTime / queryCount

      // Each query should take less than 2ms on average
      expect(avgQueryTime).toBeLessThan(2)
      expect(totalQueryTime).toBeLessThan(100) // 100 queries in under 100ms
    })

    it('should demonstrate performance advantage over linear search', () => {
      // Create test data
      const nodeCount = 500
      const nodes = Array.from({ length: nodeCount }, (_, i) => ({
        id: `node${i}`,
        position: {
          x: (Math.random() - 0.5) * 8000,
          y: (Math.random() - 0.5) * 8000
        },
        size: { width: 200, height: 100 }
      }))

      // Populate spatial index
      spatialIndex.batchUpdate(nodes)

      const viewport: Bounds = { x: -500, y: -300, width: 1000, height: 600 }
      const queryCount = 50

      // Benchmark spatial index queries
      const spatialStartTime = performance.now()
      for (let i = 0; i < queryCount; i++) {
        spatialIndex.queryViewport(viewport)
      }
      const spatialTime = performance.now() - spatialStartTime

      // Benchmark linear search equivalent: standard AABB-overlap test
      // against every node, repeated queryCount times.
      const linearStartTime = performance.now()
      for (let i = 0; i < queryCount; i++) {
        nodes.filter((node) => {
          const nodeRight = node.position.x + node.size.width
          const nodeBottom = node.position.y + node.size.height
          const viewportRight = viewport.x + viewport.width
          const viewportBottom = viewport.y + viewport.height
          return !(
            nodeRight < viewport.x ||
            node.position.x > viewportRight ||
            nodeBottom < viewport.y ||
            node.position.y > viewportBottom
          )
        })
      }
      const linearTime = performance.now() - linearStartTime

      // Spatial index should be faster (at least 2x for 500 nodes)
      // NOTE(review): ratio-based assertion; could be flaky if spatialTime
      // is extremely small on a fast machine — confirm acceptable on CI.
      const speedup = linearTime / spatialTime
      expect(speedup).toBeGreaterThan(2)

      // Both should find roughly the same number of nodes
      const spatialResults = spatialIndex.queryViewport(viewport)
      const linearResults = nodes.filter((node) => {
        const nodeRight = node.position.x + node.size.width
        const nodeBottom = node.position.y + node.size.height
        const viewportRight = viewport.x + viewport.width
        const viewportBottom = viewport.y + viewport.height
        return !(
          nodeRight < viewport.x ||
          node.position.x > viewportRight ||
          nodeBottom < viewport.y ||
          node.position.y > viewportBottom
        )
      })

      // Results should be similar (within 10% due to QuadTree boundary effects)
      const resultsDiff = Math.abs(spatialResults.length - linearResults.length)
      const maxDiff =
        Math.max(spatialResults.length, linearResults.length) * 0.1
      expect(resultsDiff).toBeLessThan(maxDiff)
    })
  })

  describe('update performance', () => {
    it('should handle frequent position updates efficiently', () => {
      // Add initial nodes
      const nodeCount = 200
      const initialNodes = Array.from({ length: nodeCount }, (_, i) => ({
        id: `node${i}`,
        position: { x: i * 100, y: i * 50 },
        size: { width: 200, height: 100 }
      }))
      spatialIndex.batchUpdate(initialNodes)

      // Benchmark frequent updates (simulating animation/dragging):
      // 20 nodes move along a sinusoidal path each simulated frame.
      const updateCount = 100
      const startTime = performance.now()
      for (let frame = 0; frame < updateCount; frame++) {
        // Update a subset of nodes each frame
        for (let i = 0; i < 20; i++) {
          const nodeId = `node${i}`
          spatialIndex.updateNode(
            nodeId,
            {
              x: i * 100 + Math.sin(frame * 0.1) * 50,
              y: i * 50 + Math.cos(frame * 0.1) * 30
            },
            { width: 200, height: 100 }
          )
        }
      }
      const updateTime = performance.now() - startTime
      const avgFrameTime = updateTime / updateCount

      // Should maintain 60fps (16.67ms per frame) with 20 node updates per frame
      expect(avgFrameTime).toBeLessThan(8) // Conservative target: 8ms per frame
    })

    it('should handle node additions and removals efficiently', () => {
      const startTime = performance.now()

      // Add nodes
      for (let i = 0; i < 100; i++) {
        spatialIndex.updateNode(
          `node${i}`,
          { x: Math.random() * 1000, y: Math.random() * 1000 },
          { width: 200, height: 100 }
        )
      }

      // Remove half of them
      for (let i = 0; i < 50; i++) {
        spatialIndex.removeNode(`node${i}`)
      }

      // Add new ones
      for (let i = 100; i < 150; i++) {
        spatialIndex.updateNode(
          `node${i}`,
          { x: Math.random() * 1000, y: Math.random() * 1000 },
          { width: 200, height: 100 }
        )
      }

      const totalTime = performance.now() - startTime

      // All operations should complete quickly
      expect(totalTime).toBeLessThan(50)
      expect(spatialIndex.metrics.value.totalNodes).toBe(100) // 50 remaining + 50 new
    })
  })

  describe('memory and scaling', () => {
    it('should scale efficiently with increasing node counts', () => {
      const nodeCounts = [100, 200, 500, 1000]
      const queryTimes: number[] = []

      for (const nodeCount of nodeCounts) {
        // Create fresh spatial index for each test
        const testIndex = useSpatialIndex({ updateDebounceMs: 0 })

        // Populate with nodes
        const nodes = Array.from({ length: nodeCount }, (_, i) => ({
          id: `node${i}`,
          position: {
            x: (Math.random() - 0.5) * 10000,
            y: (Math.random() - 0.5) * 10000
          },
          size: { width: 200, height: 100 }
        }))
        testIndex.batchUpdate(nodes)

        // Benchmark query time (average of 10 identical queries)
        const viewport: Bounds = { x: -500, y: -300, width: 1000, height: 600 }
        const startTime = performance.now()
        for (let i = 0; i < 10; i++) {
          testIndex.queryViewport(viewport)
        }
        const avgTime = (performance.now() - startTime) / 10
        queryTimes.push(avgTime)
      }

      // Query time should scale logarithmically, not linearly
      // The ratio between 1000 nodes and 100 nodes should be less than 5x
      const ratio100to1000 = queryTimes[3] / queryTimes[0]
      expect(ratio100to1000).toBeLessThan(5)

      // All query times should be reasonable
      queryTimes.forEach((time) => {
        expect(time).toBeLessThan(5) // Each query under 5ms
      })
    })

    it('should handle edge cases without performance degradation', () => {
      // Test with very large nodes
      spatialIndex.updateNode(
        'huge-node',
        { x: -1000, y: -1000 },
        { width: 5000, height: 3000 }
      )

      // Test with many tiny nodes
      for (let i = 0; i < 100; i++) {
        spatialIndex.updateNode(
          `tiny-${i}`,
          { x: Math.random() * 100, y: Math.random() * 100 },
          { width: 1, height: 1 }
        )
      }

      // Test with nodes at extreme coordinates
      spatialIndex.updateNode(
        'extreme-pos',
        { x: 50000, y: -50000 },
        { width: 200, height: 100 }
      )
      spatialIndex.updateNode(
        'extreme-neg',
        { x: -50000, y: 50000 },
        { width: 200, height: 100 }
      )

      // Queries should still be fast
      const startTime = performance.now()
      for (let i = 0; i < 20; i++) {
        spatialIndex.queryViewport({
          x: Math.random() * 1000 - 500,
          y: Math.random() * 1000 - 500,
          width: 1000,
          height: 600
        })
      }
      const queryTime = performance.now() - startTime

      expect(queryTime).toBeLessThan(20) // 20 queries in under 20ms
    })
  })

  describe('realistic workflow scenarios', () => {
    it('should handle typical ComfyUI workflow performance', () => {
      // Simulate a large ComfyUI workflow with clustered nodes
      // (155 nodes total across four spatial clusters).
      const clusters = [
        { center: { x: 0, y: 0 }, nodeCount: 50 },
        { center: { x: 2000, y: 0 }, nodeCount: 30 },
        { center: { x: 4000, y: 1000 }, nodeCount: 40 },
        { center: { x: 0, y: 2000 }, nodeCount: 35 }
      ]

      let nodeId = 0
      const allNodes: Array<{
        id: string
        position: { x: number; y: number }
        size: { width: number; height: number }
      }> = []

      // Create clustered nodes (realistic for ComfyUI workflows)
      clusters.forEach((cluster) => {
        for (let i = 0; i < cluster.nodeCount; i++) {
          allNodes.push({
            id: `node${nodeId++}`,
            position: {
              x: cluster.center.x + (Math.random() - 0.5) * 800,
              y: cluster.center.y + (Math.random() - 0.5) * 600
            },
            size: {
              width: 150 + Math.random() * 100,
              height: 100 + Math.random() * 50
            }
          })
        }
      })

      // Add the nodes
      const setupTime = performance.now()
      spatialIndex.batchUpdate(allNodes)
      const setupDuration = performance.now() - setupTime

      // Simulate user panning around the workflow
      const viewportSize = { width: 1920, height: 1080 }
      const panPositions = [
        { x: -960, y: -540 }, // Center on first cluster
        { x: 1040, y: -540 }, // Pan to second cluster
        { x: 3040, y: 460 }, // Pan to third cluster
        { x: -960, y: 1460 }, // Pan to fourth cluster
        { x: 1000, y: 500 } // Overview position
      ]

      const panStartTime = performance.now()
      const queryResults: number[] = []

      panPositions.forEach((pos) => {
        // Simulate multiple viewport queries during smooth panning
        for (let step = 0; step < 10; step++) {
          const results = spatialIndex.queryViewport({
            x: pos.x + step * 20,
            y: pos.y + step * 10,
            width: viewportSize.width,
            height: viewportSize.height
          })
          queryResults.push(results.length)
        }
      })

      const panDuration = performance.now() - panStartTime
      const avgQueryTime = panDuration / (panPositions.length * 10)

      // Performance expectations for realistic workflows
      expect(setupDuration).toBeLessThan(30) // Setup 155 nodes in under 30ms
      expect(avgQueryTime).toBeLessThan(1.5) // Average query under 1.5ms
      expect(panDuration).toBeLessThan(50) // All panning queries under 50ms

      // Should have reasonable culling efficiency: on average, a good chunk
      // of the workflow must fall outside any single viewport query.
      const totalNodes = allNodes.length
      const avgVisibleNodes =
        queryResults.reduce((a, b) => a + b, 0) / queryResults.length
      const cullRatio = (totalNodes - avgVisibleNodes) / totalNodes
      expect(cullRatio).toBeGreaterThan(0.3) // At least 30% culling efficiency
    })
  })
})

View File

@@ -0,0 +1,478 @@
import { beforeEach, describe, expect, it } from 'vitest'
import { useTransformState } from '@/composables/element/useTransformState'
/**
 * Builds a minimal stand-in for the canvas object the transform composable
 * reads: only the `ds` state (pan offset tuple + zoom scale) is modeled,
 * initialized to the identity transform. Each call returns a fresh object
 * so tests can mutate it freely without cross-test leakage.
 */
const createMockCanvasContext = () => {
  const identityOffset: [number, number] = [0, 0]
  return {
    ds: {
      offset: identityOffset,
      scale: 1
    }
  }
}
// Performance benchmarks for useTransformState: coordinate conversion,
// viewport culling, canvas synchronization, bounds math, and combined
// pan/zoom scenarios.
// NOTE(review): thresholds are wall-clock based and may be flaky on loaded
// CI machines — treat a failure as a profiling prompt, not a hard bug.
describe('Transform Performance', () => {
  let transformState: ReturnType<typeof useTransformState>
  // Loose typing: only the `ds` offset/scale state of the mock is exercised.
  let mockCanvas: any

  beforeEach(() => {
    transformState = useTransformState()
    mockCanvas = createMockCanvasContext()
  })

  describe('coordinate conversion performance', () => {
    it('should handle large batches of coordinate conversions efficiently', () => {
      // Set up a realistic transform state
      mockCanvas.ds.offset = [500, 300]
      mockCanvas.ds.scale = 1.5
      transformState.syncWithCanvas(mockCanvas)

      const conversionCount = 10000
      const points = Array.from({ length: conversionCount }, () => ({
        x: Math.random() * 5000,
        y: Math.random() * 3000
      }))

      // Benchmark canvas to screen conversions
      const canvasToScreenStart = performance.now()
      const screenPoints = points.map((point) =>
        transformState.canvasToScreen(point)
      )
      const canvasToScreenTime = performance.now() - canvasToScreenStart

      // Benchmark screen to canvas conversions
      const screenToCanvasStart = performance.now()
      const backToCanvas = screenPoints.map((point) =>
        transformState.screenToCanvas(point)
      )
      const screenToCanvasTime = performance.now() - screenToCanvasStart

      // Performance expectations
      expect(canvasToScreenTime).toBeLessThan(20) // 10k conversions in under 20ms
      expect(screenToCanvasTime).toBeLessThan(20) // 10k conversions in under 20ms

      // Verify accuracy of round-trip conversion: max per-axis error
      // across every point must stay sub-pixel.
      const maxError = points.reduce((max, original, i) => {
        const converted = backToCanvas[i]
        const errorX = Math.abs(original.x - converted.x)
        const errorY = Math.abs(original.y - converted.y)
        return Math.max(max, errorX, errorY)
      }, 0)
      expect(maxError).toBeLessThan(0.001) // Sub-pixel accuracy
    })

    it('should maintain performance across different zoom levels', () => {
      const zoomLevels = [0.1, 0.5, 1.0, 2.0, 5.0, 10.0]
      const conversionCount = 1000

      const testPoints = Array.from({ length: conversionCount }, () => ({
        x: Math.random() * 2000,
        y: Math.random() * 1500
      }))

      const performanceResults: number[] = []

      zoomLevels.forEach((scale) => {
        mockCanvas.ds.scale = scale
        transformState.syncWithCanvas(mockCanvas)

        const startTime = performance.now()
        // Round-trip every point at this zoom level.
        testPoints.forEach((point) => {
          const screen = transformState.canvasToScreen(point)
          transformState.screenToCanvas(screen)
        })
        const duration = performance.now() - startTime
        performanceResults.push(duration)
      })

      // Performance should be consistent across zoom levels
      // NOTE(review): variance check against minTime can be noisy for
      // microsecond-scale timings — confirm this is stable on CI.
      const maxTime = Math.max(...performanceResults)
      const minTime = Math.min(...performanceResults)
      const variance = (maxTime - minTime) / minTime

      expect(maxTime).toBeLessThan(10) // All zoom levels under 10ms
      expect(variance).toBeLessThan(0.5) // Less than 50% variance between zoom levels
    })

    it('should handle extreme coordinate values efficiently', () => {
      // Test with very large coordinate values
      const extremePoints = [
        { x: -100000, y: -100000 },
        { x: 100000, y: 100000 },
        { x: 0, y: 0 },
        { x: -50000, y: 50000 },
        { x: 1e6, y: -1e6 }
      ]

      // Test at extreme zoom levels
      const extremeScales = [0.001, 1000]

      extremeScales.forEach((scale) => {
        mockCanvas.ds.scale = scale
        mockCanvas.ds.offset = [1000, 500]
        transformState.syncWithCanvas(mockCanvas)

        const startTime = performance.now()
        // Convert each point 100 times
        extremePoints.forEach((point) => {
          for (let i = 0; i < 100; i++) {
            const screen = transformState.canvasToScreen(point)
            transformState.screenToCanvas(screen)
          }
        })
        const duration = performance.now() - startTime

        expect(duration).toBeLessThan(5) // Should handle extremes efficiently
        // No NaN/Infinity may leak out of the conversion at extreme scales.
        expect(
          Number.isFinite(transformState.canvasToScreen(extremePoints[0]).x)
        ).toBe(true)
        expect(
          Number.isFinite(transformState.canvasToScreen(extremePoints[0]).y)
        ).toBe(true)
      })
    })
  })

  describe('viewport culling performance', () => {
    it('should efficiently determine node visibility for large numbers of nodes', () => {
      // Set up realistic viewport
      const viewport = { width: 1920, height: 1080 }

      // Generate many node positions
      const nodeCount = 1000
      const nodes = Array.from({ length: nodeCount }, () => ({
        pos: [Math.random() * 10000, Math.random() * 6000] as ArrayLike<number>,
        size: [
          150 + Math.random() * 100,
          100 + Math.random() * 50
        ] as ArrayLike<number>
      }))

      // Test at different zoom levels and positions
      const testConfigs = [
        { scale: 0.5, offset: [0, 0] },
        { scale: 1.0, offset: [2000, 1000] },
        { scale: 2.0, offset: [-1000, -500] }
      ]

      testConfigs.forEach((config) => {
        mockCanvas.ds.scale = config.scale
        mockCanvas.ds.offset = config.offset
        transformState.syncWithCanvas(mockCanvas)

        const startTime = performance.now()
        // Test viewport culling for all nodes
        const visibleNodes = nodes.filter((node) =>
          transformState.isNodeInViewport(node.pos, node.size, viewport)
        )
        const cullTime = performance.now() - startTime

        expect(cullTime).toBeLessThan(10) // 1000 nodes culled in under 10ms
        expect(visibleNodes.length).toBeLessThan(nodeCount) // Some culling should occur
        expect(visibleNodes.length).toBeGreaterThanOrEqual(0) // Sanity check
      })
    })

    it('should optimize culling with adaptive margins', () => {
      const viewport = { width: 1280, height: 720 }
      const testNode = {
        pos: [1300, 100] as ArrayLike<number>, // Just outside viewport
        size: [200, 100] as ArrayLike<number>
      }

      // Test margin adaptation at different zoom levels
      const zoomTests = [
        { scale: 0.05, expectedVisible: true }, // Low zoom, larger margin
        { scale: 1.0, expectedVisible: true }, // Normal zoom, standard margin
        { scale: 4.0, expectedVisible: false } // High zoom, tighter margin
      ]

      const marginTests: boolean[] = []
      const timings: number[] = []

      zoomTests.forEach((test) => {
        mockCanvas.ds.scale = test.scale
        mockCanvas.ds.offset = [0, 0]
        transformState.syncWithCanvas(mockCanvas)

        const startTime = performance.now()
        const isVisible = transformState.isNodeInViewport(
          testNode.pos,
          testNode.size,
          viewport,
          0.2 // 20% margin
        )
        const duration = performance.now() - startTime

        marginTests.push(isVisible)
        timings.push(duration)
      })

      // All culling operations should be very fast
      timings.forEach((time) => {
        expect(time).toBeLessThan(0.1) // Individual culling under 0.1ms
      })

      // Verify adaptive behavior (margins should work as expected)
      // NOTE(review): the middle zoom level (index 1) is intentionally not
      // asserted — presumably borderline; confirm against the composable.
      expect(marginTests[0]).toBe(zoomTests[0].expectedVisible)
      expect(marginTests[2]).toBe(zoomTests[2].expectedVisible)
    })

    it('should handle size-based culling efficiently', () => {
      // Test nodes of various sizes
      const nodeSizes = [
        [1, 1], // Tiny node
        [5, 5], // Small node
        [50, 50], // Medium node
        [200, 100], // Large node
        [500, 300] // Very large node
      ]

      const viewport = { width: 1920, height: 1080 }

      // Position all nodes in viewport center
      const centerPos = [960, 540] as ArrayLike<number>

      nodeSizes.forEach((size) => {
        // Test at very high zoom where size culling should activate
        mockCanvas.ds.scale = 10
        transformState.syncWithCanvas(mockCanvas)

        const startTime = performance.now()
        const isVisible = transformState.isNodeInViewport(
          centerPos,
          size as ArrayLike<number>,
          viewport
        )
        const cullTime = performance.now() - startTime

        expect(cullTime).toBeLessThan(0.05) // Size culling under 0.05ms

        // Very small nodes should be culled at high zoom
        if (size[0] <= 3 && size[1] <= 3) {
          expect(isVisible).toBe(false)
        } else {
          expect(isVisible).toBe(true)
        }
      })
    })
  })

  describe('transform state synchronization', () => {
    it('should efficiently sync with canvas state changes', () => {
      const syncCount = 1000
      // Smoothly varying pan/zoom path to simulate continuous interaction.
      const transformUpdates = Array.from({ length: syncCount }, (_, i) => ({
        offset: [Math.sin(i * 0.1) * 1000, Math.cos(i * 0.1) * 500],
        scale: 0.5 + Math.sin(i * 0.05) * 0.4 // Scale between 0.1 and 0.9
      }))

      const startTime = performance.now()
      transformUpdates.forEach((update) => {
        mockCanvas.ds.offset = update.offset
        mockCanvas.ds.scale = update.scale
        transformState.syncWithCanvas(mockCanvas)
      })
      const syncTime = performance.now() - startTime

      expect(syncTime).toBeLessThan(15) // 1000 syncs in under 15ms

      // Verify final state is correct
      const lastUpdate = transformUpdates[transformUpdates.length - 1]
      expect(transformState.camera.x).toBe(lastUpdate.offset[0])
      expect(transformState.camera.y).toBe(lastUpdate.offset[1])
      expect(transformState.camera.z).toBe(lastUpdate.scale)
    })

    it('should generate CSS transform strings efficiently', () => {
      const transformCount = 10000

      // Set up varying transform states
      // NOTE(review): the styles are generated eagerly inside this map; the
      // timed loop below therefore measures property access + assertions,
      // not style generation itself — confirm that is the intent.
      const transforms = Array.from({ length: transformCount }, (_, i) => {
        mockCanvas.ds.offset = [i * 10, i * 5]
        mockCanvas.ds.scale = 0.5 + (i % 100) / 100
        transformState.syncWithCanvas(mockCanvas)
        return transformState.transformStyle.value
      })

      const startTime = performance.now()
      // Access transform styles (triggers computed property)
      transforms.forEach((style) => {
        expect(style.transform).toContain('scale(')
        expect(style.transform).toContain('translate(')
        expect(style.transformOrigin).toBe('0 0')
      })
      const accessTime = performance.now() - startTime

      expect(accessTime).toBeLessThan(20) // 10k style accesses in under 20ms
    })
  })

  describe('bounds calculation performance', () => {
    it('should calculate node screen bounds efficiently', () => {
      // Set up realistic transform
      mockCanvas.ds.offset = [200, 100]
      mockCanvas.ds.scale = 1.5
      transformState.syncWithCanvas(mockCanvas)

      const nodeCount = 1000
      const nodes = Array.from({ length: nodeCount }, () => ({
        pos: [Math.random() * 5000, Math.random() * 3000] as ArrayLike<number>,
        size: [
          100 + Math.random() * 200,
          80 + Math.random() * 120
        ] as ArrayLike<number>
      }))

      const startTime = performance.now()
      const bounds = nodes.map((node) =>
        transformState.getNodeScreenBounds(node.pos, node.size)
      )
      const calcTime = performance.now() - startTime

      expect(calcTime).toBeLessThan(15) // 1000 bounds calculations in under 15ms
      expect(bounds).toHaveLength(nodeCount)

      // Verify bounds are reasonable
      bounds.forEach((bound) => {
        expect(bound.width).toBeGreaterThan(0)
        expect(bound.height).toBeGreaterThan(0)
        expect(Number.isFinite(bound.x)).toBe(true)
        expect(Number.isFinite(bound.y)).toBe(true)
      })
    })

    it('should calculate viewport bounds efficiently', () => {
      const viewportSizes = [
        { width: 800, height: 600 },
        { width: 1920, height: 1080 },
        { width: 3840, height: 2160 },
        { width: 1280, height: 720 }
      ]
      const margins = [0, 0.1, 0.2, 0.5]

      // Cross product: every viewport size at every margin (16 combinations).
      const combinations = viewportSizes.flatMap((viewport) =>
        margins.map((margin) => ({ viewport, margin }))
      )

      const startTime = performance.now()
      const allBounds = combinations.map(({ viewport, margin }) => {
        mockCanvas.ds.offset = [Math.random() * 1000, Math.random() * 500]
        mockCanvas.ds.scale = 0.5 + Math.random() * 2
        transformState.syncWithCanvas(mockCanvas)
        return transformState.getViewportBounds(viewport, margin)
      })
      const calcTime = performance.now() - startTime

      expect(calcTime).toBeLessThan(5) // All viewport calculations in under 5ms
      expect(allBounds).toHaveLength(combinations.length)

      // Verify bounds are reasonable
      allBounds.forEach((bounds) => {
        expect(bounds.width).toBeGreaterThan(0)
        expect(bounds.height).toBeGreaterThan(0)
        expect(Number.isFinite(bounds.x)).toBe(true)
        expect(Number.isFinite(bounds.y)).toBe(true)
      })
    })
  })

  describe('real-world performance scenarios', () => {
    it('should handle smooth panning performance', () => {
      // Simulate smooth 60fps panning for 2 seconds
      const frameCount = 120 // 2 seconds at 60fps
      const panDistance = 2000 // Pan 2000 pixels
      const frames: number[] = []

      for (let frame = 0; frame < frameCount; frame++) {
        const progress = frame / (frameCount - 1)
        const x = progress * panDistance
        const y = Math.sin(progress * Math.PI * 2) * 200 // Slight vertical wave
        mockCanvas.ds.offset = [x, y]

        const frameStart = performance.now()

        // Typical operations during panning
        transformState.syncWithCanvas(mockCanvas)
        const style = transformState.transformStyle.value // Access transform style
        expect(style.transform).toContain('translate') // Verify style is valid

        // Simulate some coordinate conversions (mouse tracking, etc.)
        for (let i = 0; i < 5; i++) {
          const screen = transformState.canvasToScreen({
            x: x + i * 100,
            y: y + i * 50
          })
          transformState.screenToCanvas(screen)
        }

        const frameTime = performance.now() - frameStart
        frames.push(frameTime)

        // Each frame should be well under 16.67ms for 60fps
        expect(frameTime).toBeLessThan(1) // Conservative: under 1ms per frame
      }

      const totalTime = frames.reduce((sum, time) => sum + time, 0)
      const avgFrameTime = totalTime / frameCount

      expect(avgFrameTime).toBeLessThan(0.5) // Average frame time under 0.5ms
      expect(totalTime).toBeLessThan(60) // Total panning overhead under 60ms
    })

    it('should handle zoom performance with viewport updates', () => {
      // Simulate smooth zoom from 0.1x to 10x
      const zoomSteps = 100
      const viewport = { width: 1920, height: 1080 }
      const zoomTimes: number[] = []

      for (let step = 0; step < zoomSteps; step++) {
        // Logarithmic sweep: 10^-1 .. 10^1 across the step range.
        const zoomLevel = Math.pow(10, (step / (zoomSteps - 1)) * 2 - 1) // 0.1 to 10
        mockCanvas.ds.scale = zoomLevel

        const stepStart = performance.now()

        // Operations during zoom
        transformState.syncWithCanvas(mockCanvas)

        // Viewport bounds calculation (for culling)
        transformState.getViewportBounds(viewport, 0.2)

        // Test a few nodes for visibility
        for (let i = 0; i < 10; i++) {
          transformState.isNodeInViewport(
            [i * 200, i * 150],
            [200, 100],
            viewport
          )
        }

        const stepTime = performance.now() - stepStart
        zoomTimes.push(stepTime)
      }

      const maxZoomTime = Math.max(...zoomTimes)
      const avgZoomTime =
        zoomTimes.reduce((sum, time) => sum + time, 0) / zoomSteps

      expect(maxZoomTime).toBeLessThan(2) // No zoom step over 2ms
      expect(avgZoomTime).toBeLessThan(1) // Average zoom step under 1ms
    })
  })
})