Merge remote-tracking branch 'origin/main' into bl-semantic-jellyfish

# Conflicts:
#	src/components/TopMenuSection.vue
#	src/components/queue/job/QueueJobItem.vue
This commit is contained in:
Benjamin Lu
2025-12-08 18:37:05 -08:00
88 changed files with 1258 additions and 310 deletions

View File

@@ -115,6 +115,7 @@ const cancelJobTooltipConfig = computed(() =>
// Right side panel toggle
const { isOpen: isRightSidePanelOpen } = storeToRefs(rightSidePanelStore)
const rightSidePanelTooltipConfig = computed(() =>
buildTooltipConfig(t('rightSidePanel.togglePanel'))
)

View File

@@ -10,6 +10,7 @@
@click="$emit('showFilter', $event)"
/>
<InputText
ref="inputRef"
class="search-box-input w-full"
:model-value="modelValue"
:placeholder="placeholder"
@@ -48,6 +49,7 @@ import Button from 'primevue/button'
import IconField from 'primevue/iconfield'
import InputIcon from 'primevue/inputicon'
import InputText from 'primevue/inputtext'
import { ref } from 'vue'
import type { SearchFilter } from './SearchFilterChip.vue'
import SearchFilterChip from './SearchFilterChip.vue'
@@ -77,6 +79,14 @@ const emit = defineEmits<{
(e: 'removeFilter', filter: TFilter): void
}>()
// Handle to the underlying PrimeVue InputText component
const inputRef = ref()

// Let parent components programmatically focus the search input
defineExpose({
  focus: () => inputRef.value?.$el?.focus()
})
const emitSearch = debounce((value: string) => {
emit('search', value, filters)
}, debounceTime)

View File

@@ -117,16 +117,7 @@ onBeforeUnmount(() => {
.scroll-container {
height: 100%;
overflow-y: auto;
/* Firefox */
scrollbar-width: none;
&::-webkit-scrollbar {
width: 1px;
}
&::-webkit-scrollbar-thumb {
background-color: transparent;
}
scrollbar-width: thin;
scrollbar-color: var(--dialog-surface) transparent;
}
</style>

View File

@@ -132,7 +132,7 @@ import { useCopy } from '@/composables/useCopy'
import { useGlobalLitegraph } from '@/composables/useGlobalLitegraph'
import { usePaste } from '@/composables/usePaste'
import { useVueFeatureFlags } from '@/composables/useVueFeatureFlags'
import { i18n, t } from '@/i18n'
import { mergeCustomNodesI18n, t } from '@/i18n'
import { LiteGraph } from '@/lib/litegraph/src/litegraph'
import { useLitegraphSettings } from '@/platform/settings/composables/useLitegraphSettings'
import { CORE_SETTINGS } from '@/platform/settings/constants/coreSettings'
@@ -391,9 +391,7 @@ useEventListener(
const loadCustomNodesI18n = async () => {
try {
const i18nData = await api.getCustomNodesI18n()
Object.entries(i18nData).forEach(([locale, message]) => {
i18n.global.mergeLocaleMessage(locale, message)
})
mergeCustomNodesI18n(i18nData)
} catch (error) {
console.error('Failed to load custom nodes i18n', error)
}

View File

@@ -36,12 +36,12 @@
</template>
<script setup lang="ts">
import { ref } from 'vue'
import { onBeforeUnmount, ref, watch } from 'vue'
import QueueJobItem from '@/components/queue/job/QueueJobItem.vue'
import type { JobGroup, JobListItem } from '@/composables/queue/useJobList'
defineProps<{ displayedJobGroups: JobGroup[] }>()
const props = defineProps<{ displayedJobGroups: JobGroup[] }>()
const emit = defineEmits<{
(e: 'cancelItem', item: JobListItem): void
@@ -89,4 +89,26 @@ const onDetailsLeave = (jobId: string) => {
hideTimer.value = null
}, 150)
}
// Cancels any pending show/hide timers and closes the open details popover.
const resetActiveDetails = () => {
  clearHideTimer()
  clearShowTimer()
  activeDetailsId.value = null
}

// Close the details popover when its job is no longer present in the
// currently displayed job groups (e.g. the job finished or was filtered out).
watch(
  () => props.displayedJobGroups,
  (groups) => {
    const openId = activeDetailsId.value
    if (!openId) return
    const stillListed = groups.some((group) =>
      group.items.some((item) => item.id === openId)
    )
    if (!stillListed) resetActiveDetails()
  }
)

// Avoid leaking timers or stale popover state past component teardown.
onBeforeUnmount(resetActiveDetails)
</script>

View File

@@ -361,7 +361,10 @@ const baseActions = computed<ActionConfig[]>(() => {
ariaLabel: t('g.delete'),
tooltip: deleteTooltipConfig.value,
isVisible: () => props.state === 'failed' && computedShowClear.value,
onClick: () => emit('delete')
onClick: () => {
onRowLeave()
emit('delete')
}
},
{
key: 'cancel-hover',
@@ -377,7 +380,10 @@ const baseActions = computed<ActionConfig[]>(() => {
props.state !== 'running' &&
props.state !== 'failed' &&
computedShowClear.value,
onClick: () => emit('cancel')
onClick: () => {
onRowLeave()
emit('cancel')
}
},
{
key: 'view',
@@ -401,7 +407,10 @@ const baseActions = computed<ActionConfig[]>(() => {
ariaLabel: t('g.cancel'),
tooltip: cancelTooltipConfig.value,
isVisible: () => props.state === 'running' && computedShowClear.value,
onClick: () => emit('cancel')
onClick: () => {
onRowLeave()
emit('cancel')
}
}
]
})

View File

@@ -21,6 +21,7 @@
</template>
<template #header>
<SearchBox
ref="searchBoxRef"
v-model:model-value="searchQuery"
class="model-lib-search-box p-2 2xl:p-4"
:placeholder="$t('g.searchModels') + '...'"
@@ -66,6 +67,7 @@ import { buildTree } from '@/utils/treeUtil'
const modelStore = useModelStore()
const modelToNodeStore = useModelToNodeStore()
const settingStore = useSettingStore()
const searchBoxRef = ref()
const searchQuery = ref<string>('')
const expandedKeys = ref<Record<string, boolean>>({})
const { expandNode, toggleNodeOnEvent } = useTreeExpansion(expandedKeys)
@@ -180,6 +182,7 @@ watch(
)
onMounted(async () => {
searchBoxRef.value?.focus()
if (settingStore.get('Comfy.ModelLibrary.AutoLoadAll')) {
await modelStore.loadModels()
}

View File

@@ -78,6 +78,7 @@
<template #header>
<div>
<SearchBox
ref="searchBoxRef"
v-model:model-value="searchQuery"
class="node-lib-search-box p-2 2xl:p-4"
:placeholder="$t('g.searchNodes') + '...'"
@@ -130,7 +131,7 @@ import Button from 'primevue/button'
import Divider from 'primevue/divider'
import Popover from 'primevue/popover'
import type { Ref } from 'vue'
import { computed, h, nextTick, ref, render } from 'vue'
import { computed, h, nextTick, onMounted, ref, render } from 'vue'
import SearchBox from '@/components/common/SearchBox.vue'
import type { SearchFilter } from '@/components/common/SearchFilterChip.vue'
@@ -171,6 +172,12 @@ const { expandNode, toggleNodeOnEvent } = useTreeExpansion(expandedKeys)
const nodeBookmarkTreeExplorerRef = ref<InstanceType<
typeof NodeBookmarkTreeExplorer
> | null>(null)
const searchBoxRef = ref()
onMounted(() => {
searchBoxRef.value?.focus()
})
const searchFilter = ref<InstanceType<typeof Popover> | null>(null)
const groupingPopover = ref<InstanceType<typeof Popover> | null>(null)
const sortingPopover = ref<InstanceType<typeof Popover> | null>(null)

View File

@@ -14,6 +14,7 @@
</template>
<template #header>
<SearchBox
ref="searchBoxRef"
v-model:model-value="searchQuery"
class="workflows-search-box p-2 2xl:p-4"
:placeholder="$t('g.searchWorkflows') + '...'"
@@ -158,6 +159,8 @@ const workflowTabsPosition = computed(() =>
settingStore.get('Comfy.Workflow.WorkflowTabsPosition')
)
const searchBoxRef = ref()
const searchQuery = ref('')
const isSearching = computed(() => searchQuery.value.length > 0)
const filteredWorkflows = ref<ComfyWorkflow[]>([])
@@ -295,6 +298,7 @@ const selectionKeys = computed(() => ({
const workflowBookmarkStore = useWorkflowBookmarkStore()
onMounted(async () => {
searchBoxRef.value?.focus()
await workflowBookmarkStore.loadBookmarks()
})
</script>

View File

@@ -162,7 +162,7 @@ const rightAreaClasses = computed(() => {
})
const contentContainerClasses = computed(() => {
return cn('min-h-0 px-6 pt-0 pb-10', 'overflow-y-auto scrollbar-hide')
return cn('min-h-0 px-6 pt-0 pb-10', 'overflow-y-auto')
})
const rightPanelClasses = computed(() => {

View File

@@ -402,6 +402,19 @@ export function usePanAndZoom() {
}
)
/** Start tracking a pen pointer; duplicates of an already-tracked id are ignored. */
const addPenPointerId = (pointerId: number): void => {
  const tracked = penPointerIdList.value
  if (!tracked.includes(pointerId)) tracked.push(pointerId)
}

/** Stop tracking a pen pointer; a no-op when the id is not currently tracked. */
const removePenPointerId = (pointerId: number): void => {
  const tracked = penPointerIdList.value
  const position = tracked.indexOf(pointerId)
  if (position !== -1) tracked.splice(position, 1)
}
return {
initializeCanvasPanZoom,
handlePanStart,
@@ -411,6 +424,8 @@ export function usePanAndZoom() {
handleTouchEnd,
updateCursorPosition,
zoom,
invalidatePanZoom
invalidatePanZoom,
addPenPointerId,
removePenPointerId
}
}

View File

@@ -114,6 +114,10 @@ export function useToolManager(
event.preventDefault()
if (event.pointerType === 'touch') return
if (event.pointerType === 'pen') {
panZoom.addPenPointerId(event.pointerId)
}
const isSpacePressed = keyboard.isKeyDown(' ')
if (event.buttons === 4 || (event.buttons === 1 && isSpacePressed)) {
@@ -207,6 +211,11 @@ export function useToolManager(
const handlePointerUp = async (event: PointerEvent): Promise<void> => {
store.isPanning = false
store.brushVisible = true
if (event.pointerType === 'pen') {
panZoom.removePenPointerId(event.pointerId)
}
if (event.pointerType === 'touch') return
updateCursor()
store.isAdjustingBrush = false

200
src/i18n.test.ts Normal file
View File

@@ -0,0 +1,200 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
const { i18n, loadLocale, mergeCustomNodesI18n } = await import('./i18n')
// Mock the JSON imports before importing i18n module
vi.mock('./locales/en/main.json', () => ({ default: { welcome: 'Welcome' } }))
vi.mock('./locales/en/nodeDefs.json', () => ({
default: { testNode: 'Test Node' }
}))
vi.mock('./locales/en/commands.json', () => ({
default: { save: 'Save' }
}))
vi.mock('./locales/en/settings.json', () => ({
default: { theme: 'Theme' }
}))
// Mock lazy-loaded locales
vi.mock('./locales/zh/main.json', () => ({ default: { welcome: '欢迎' } }))
vi.mock('./locales/zh/nodeDefs.json', () => ({
default: { testNode: '测试节点' }
}))
vi.mock('./locales/zh/commands.json', () => ({ default: { save: '保存' } }))
vi.mock('./locales/zh/settings.json', () => ({ default: { theme: '主题' } }))
describe('i18n', () => {
beforeEach(async () => {
vi.resetModules()
})
describe('mergeCustomNodesI18n', () => {
it('should immediately merge data for already loaded locales (en)', async () => {
// English is pre-loaded, so merge should work immediately
mergeCustomNodesI18n({
en: {
customNode: {
title: 'Custom Node Title'
}
}
})
// Verify the custom node data was merged
const messages = i18n.global.getLocaleMessage('en') as Record<
string,
unknown
>
expect(messages.customNode).toEqual({ title: 'Custom Node Title' })
})
it('should store data for not-yet-loaded locales', async () => {
const { i18n, mergeCustomNodesI18n } = await import('./i18n')
// Chinese is not pre-loaded, data should be stored but not merged yet
mergeCustomNodesI18n({
zh: {
customNode: {
title: '自定义节点标题'
}
}
})
// zh locale should not exist yet (not loaded)
const zhMessages = i18n.global.getLocaleMessage('zh') as Record<
string,
unknown
>
// Either empty or doesn't have our custom data merged directly
// (since zh wasn't loaded yet, mergeLocaleMessage on non-existent locale
// may create an empty locale or do nothing useful)
expect(zhMessages.customNode).toBeUndefined()
})
it('should merge stored data when locale is lazily loaded', async () => {
// First, store custom nodes i18n data (before locale is loaded)
mergeCustomNodesI18n({
zh: {
customNode: {
title: '自定义节点标题'
}
}
})
await loadLocale('zh')
// Verify both the base locale data and custom node data are present
const zhMessages = i18n.global.getLocaleMessage('zh') as Record<
string,
unknown
>
expect(zhMessages.welcome).toBe('欢迎')
expect(zhMessages.customNode).toEqual({ title: '自定义节点标题' })
})
it('should preserve custom node data when locale is loaded after merge', async () => {
// Simulate the real scenario:
// 1. Custom nodes i18n is loaded first
mergeCustomNodesI18n({
zh: {
customNode: {
title: '自定义节点标题'
},
settingsCategories: {
Hotkeys: '快捷键'
}
}
})
// 2. Then locale is lazily loaded (this would previously overwrite custom data)
await loadLocale('zh')
// 3. Verify custom node data is still present
const zhMessages = i18n.global.getLocaleMessage('zh') as Record<
string,
unknown
>
expect(zhMessages.customNode).toEqual({ title: '自定义节点标题' })
expect(zhMessages.settingsCategories).toEqual({ Hotkeys: '快捷键' })
// 4. Also verify base locale data is present
expect(zhMessages.welcome).toBe('欢迎')
expect(zhMessages.nodeDefs).toEqual({ testNode: '测试节点' })
})
it('should handle multiple locales in custom nodes i18n data', async () => {
// Merge data for multiple locales
mergeCustomNodesI18n({
en: {
customPlugin: { name: 'Easy Use' }
},
zh: {
customPlugin: { name: '简单使用' }
}
})
// English should be merged immediately (pre-loaded)
const enMessages = i18n.global.getLocaleMessage('en') as Record<
string,
unknown
>
expect(enMessages.customPlugin).toEqual({ name: 'Easy Use' })
await loadLocale('zh')
const zhMessages = i18n.global.getLocaleMessage('zh') as Record<
string,
unknown
>
expect(zhMessages.customPlugin).toEqual({ name: '简单使用' })
})
it('should handle calling mergeCustomNodesI18n multiple times', async () => {
// Use fresh module instance to ensure clean state
vi.resetModules()
const { i18n, loadLocale, mergeCustomNodesI18n } = await import('./i18n')
mergeCustomNodesI18n({
zh: { plugin1: { name: '插件1' } }
})
mergeCustomNodesI18n({
zh: { plugin2: { name: '插件2' } }
})
await loadLocale('zh')
const zhMessages = i18n.global.getLocaleMessage('zh') as Record<
string,
unknown
>
// Only the second call's data should be present
expect(zhMessages.plugin2).toEqual({ name: '插件2' })
// First call's data is overwritten
expect(zhMessages.plugin1).toBeUndefined()
})
})
describe('loadLocale', () => {
it('should not reload already loaded locale', async () => {
await loadLocale('zh')
await loadLocale('zh')
// Should complete without error (second call returns early)
})
it('should warn for unsupported locale', async () => {
const consoleSpy = vi.spyOn(console, 'warn').mockImplementation(() => {})
await loadLocale('unsupported-locale')
expect(consoleSpy).toHaveBeenCalledWith(
'Locale "unsupported-locale" is not supported'
)
consoleSpy.mockRestore()
})
it('should handle concurrent load requests for same locale', async () => {
// Start multiple loads concurrently
const promises = [loadLocale('zh'), loadLocale('zh'), loadLocale('zh')]
await Promise.all(promises)
})
})
})

View File

@@ -94,6 +94,9 @@ const loadedLocales = new Set<string>(['en'])
// Track locales currently being loaded to prevent race conditions
const loadingLocales = new Map<string, Promise<void>>()
// Store custom nodes i18n data for merging when locales are lazily loaded
const customNodesI18nData: Record<string, unknown> = {}
/**
* Dynamically load a locale and its associated files (nodeDefs, commands, settings)
*/
@@ -137,6 +140,10 @@ export async function loadLocale(locale: string): Promise<void> {
i18n.global.setLocaleMessage(locale, messages as LocaleMessages)
loadedLocales.add(locale)
if (customNodesI18nData[locale]) {
i18n.global.mergeLocaleMessage(locale, customNodesI18nData[locale])
}
} catch (error) {
console.error(`Failed to load locale "${locale}":`, error)
throw error
@@ -150,6 +157,24 @@ export async function loadLocale(locale: string): Promise<void> {
return loadPromise
}
/**
 * Registers translations contributed by custom nodes.
 *
 * Each call REPLACES any previously registered custom-node translations
 * (it does not accumulate across calls). The payload is retained so it can be
 * re-applied when a locale finishes lazy loading; for locales that are already
 * loaded the messages are merged into vue-i18n immediately.
 *
 * @param i18nData - Map of locale code to that locale's message tree.
 */
export function mergeCustomNodesI18n(i18nData: Record<string, unknown>): void {
  // Drop the previous payload entirely before storing the new one.
  Object.keys(customNodesI18nData).forEach((key) => {
    delete customNodesI18nData[key]
  })
  Object.assign(customNodesI18nData, i18nData)

  // Locales loaded before this call won't pass through loadLocale again,
  // so merge their messages right away.
  Object.entries(i18nData).forEach(([locale, message]) => {
    if (loadedLocales.has(locale)) {
      i18n.global.mergeLocaleMessage(locale, message)
    }
  })
}
// Only include English in the initial bundle
const messages = {
en: buildLocale(en, enNodes, enCommands, enSettings)

View File

@@ -63,6 +63,9 @@ export class ToInputFromIoNodeLink implements RenderLink {
if (existingLink) {
// Moving an existing link
const { input, inputNode } = existingLink.resolve(this.network)
if (inputNode && input)
this.node._disconnectNodeInput(inputNode, input, existingLink)
events.dispatch('input-moved', this)
} else {
// Creating a new link

View File

@@ -206,8 +206,7 @@
"tooltip": "الإطار الأخير الذي سيتم استخدامه للفيديو."
},
"model": {
"name": "النموذج",
"tooltip": "اسم النموذج"
"name": "النموذج"
},
"prompt": {
"name": "المطالبة النصية",
@@ -248,8 +247,7 @@
"tooltip": "الصورة الأساسية للتحرير"
},
"model": {
"name": "النموذج",
"tooltip": "اسم النموذج"
"name": "النموذج"
},
"prompt": {
"name": "المطالبة النصية",
@@ -286,8 +284,7 @@
"tooltip": "الارتفاع المخصص للصورة. القيمة تعمل فقط إذا تم ضبط `size_preset` على `Custom`"
},
"model": {
"name": "النموذج",
"tooltip": "اسم النموذج"
"name": "النموذج"
},
"prompt": {
"name": "النص الموجه",
@@ -336,8 +333,7 @@
"tooltip": "من صورة إلى أربع صور."
},
"model": {
"name": "النموذج",
"tooltip": "اسم النموذج"
"name": "النموذج"
},
"prompt": {
"name": "prompt",
@@ -386,8 +382,7 @@
"tooltip": "الإطار الأول الذي سيتم استخدامه للفيديو."
},
"model": {
"name": "model",
"tooltip": "اسم النموذج"
"name": "model"
},
"prompt": {
"name": "prompt",
@@ -490,8 +485,7 @@
"tooltip": "مدة الفيديو الناتج بالثواني."
},
"model": {
"name": "النموذج",
"tooltip": "اسم النموذج"
"name": "النموذج"
},
"prompt": {
"name": "النص الموجه",

View File

@@ -1360,6 +1360,7 @@
"chroma_radiance": "chroma_radiance",
"attention_experiments": "attention_experiments",
"flux": "flux",
"kandinsky5": "kandinsky5",
"hooks": "hooks",
"combine": "combine",
"cond single": "cond single",
@@ -2275,4 +2276,4 @@
"inputsNoneTooltip": "Node has no inputs",
"nodeState": "Node state"
}
}
}

View File

@@ -151,6 +151,11 @@
"name": "volume",
"tooltip": "Volume adjustment in decibels (dB). 0 = no change, +6 = double, -6 = half, etc"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"AudioConcat": {
@@ -167,6 +172,11 @@
"name": "direction",
"tooltip": "Whether to append audio2 after or before audio1."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"AudioEncoderEncode": {
@@ -212,6 +222,11 @@
"name": "merge_method",
"tooltip": "The method used to combine the audio waveforms."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"BasicGuider": {
@@ -279,8 +294,7 @@
"description": "Generate video using prompt and first and last frames.",
"inputs": {
"model": {
"name": "model",
"tooltip": "Model name"
"name": "model"
},
"prompt": {
"name": "prompt",
@@ -333,8 +347,7 @@
"description": "Edit images using ByteDance models via api based on prompt",
"inputs": {
"model": {
"name": "model",
"tooltip": "Model name"
"name": "model"
},
"image": {
"name": "image",
@@ -371,8 +384,7 @@
"description": "Generate images using ByteDance models via api based on prompt",
"inputs": {
"model": {
"name": "model",
"tooltip": "Model name"
"name": "model"
},
"prompt": {
"name": "prompt",
@@ -417,8 +429,7 @@
"description": "Generate video using prompt and reference images.",
"inputs": {
"model": {
"name": "model",
"tooltip": "Model name"
"name": "model"
},
"prompt": {
"name": "prompt",
@@ -463,8 +474,7 @@
"description": "Generate video using ByteDance models via api based on image and prompt",
"inputs": {
"model": {
"name": "model",
"tooltip": "Model name"
"name": "model"
},
"prompt": {
"name": "prompt",
@@ -571,8 +581,7 @@
"description": "Generate video using ByteDance models via api based on prompt",
"inputs": {
"model": {
"name": "model",
"tooltip": "Model name"
"name": "model"
},
"prompt": {
"name": "prompt",
@@ -1007,6 +1016,25 @@
}
}
},
"CLIPTextEncodeKandinsky5": {
"display_name": "CLIPTextEncodeKandinsky5",
"inputs": {
"clip": {
"name": "clip"
},
"clip_l": {
"name": "clip_l"
},
"qwen25_7b": {
"name": "qwen25_7b"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"CLIPTextEncodeLumina2": {
"display_name": "CLIP Text Encode for Lumina2",
"description": "Encodes a system prompt and a user prompt using a CLIP model into an embedding that can be used to guide the diffusion model towards generating specific images.",
@@ -1453,10 +1481,12 @@
},
"outputs": {
"0": {
"name": "positive"
"name": "positive",
"tooltip": null
},
"1": {
"name": "negative"
"name": "negative",
"tooltip": null
}
}
},
@@ -1522,6 +1552,10 @@
"dim": {
"name": "dim",
"tooltip": "The dimension to apply the context windows to."
},
"freenoise": {
"name": "freenoise",
"tooltip": "Whether to apply FreeNoise noise shuffling, improves window blending."
}
},
"outputs": {
@@ -1915,6 +1949,11 @@
"height": {
"name": "height"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"DiffControlNetLoader": {
@@ -2072,6 +2111,11 @@
"name": "channels",
"tooltip": "Number of audio channels (1 for mono, 2 for stereo)."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"EmptyChromaRadianceLatentImage": {
@@ -2224,6 +2268,11 @@
"name": "batch_size",
"tooltip": "The number of latent images in the batch."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"EmptyLatentHunyuan3Dv2": {
@@ -2407,6 +2456,11 @@
"bottom": {
"name": "bottom"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"FlipSigmas": {
@@ -2771,6 +2825,11 @@
"s2": {
"name": "s2"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"FreeU_V2": {
@@ -2791,6 +2850,11 @@
"s2": {
"name": "s2"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"FreSca": {
@@ -2854,6 +2918,10 @@
"name": "files",
"tooltip": "Optional file(s) to use as context for the model. Accepts inputs from the Gemini Generate Content Input Files node."
},
"system_prompt": {
"name": "system_prompt",
"tooltip": "Foundational instructions that dictate an AI's behavior."
},
"control_after_generate": {
"name": "control after generate"
}
@@ -2899,6 +2967,10 @@
"name": "response_modalities",
"tooltip": "Choose 'IMAGE' for image-only output, or 'IMAGE+TEXT' to return both the generated image and a text response."
},
"system_prompt": {
"name": "system_prompt",
"tooltip": "Foundational instructions that dictate an AI's behavior."
},
"control_after_generate": {
"name": "control after generate"
}
@@ -2963,6 +3035,10 @@
"name": "files",
"tooltip": "Optional file(s) to use as context for the model. Accepts inputs from the Gemini Generate Content Input Files node."
},
"system_prompt": {
"name": "system_prompt",
"tooltip": "Foundational instructions that dictate an AI's behavior."
},
"control_after_generate": {
"name": "control after generate"
}
@@ -3074,7 +3150,7 @@
}
},
"GrowMask": {
"display_name": "GrowMask",
"display_name": "Grow Mask",
"inputs": {
"mask": {
"name": "mask"
@@ -3085,6 +3161,11 @@
"tapered_corners": {
"name": "tapered_corners"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"Hunyuan3Dv2Conditioning": {
@@ -3583,6 +3664,11 @@
"color": {
"name": "color"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"ImageCompositeMasked": {
@@ -3606,6 +3692,11 @@
"mask": {
"name": "mask"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"ImageCrop": {
@@ -3933,6 +4024,11 @@
"channel": {
"name": "channel"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"ImageUpscaleWithModel": {
@@ -4042,6 +4138,11 @@
"mask": {
"name": "mask"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"JoinImageWithAlpha": {
@@ -4060,6 +4161,53 @@
}
}
},
"Kandinsky5ImageToVideo": {
"display_name": "Kandinsky5ImageToVideo",
"inputs": {
"positive": {
"name": "positive"
},
"negative": {
"name": "negative"
},
"vae": {
"name": "vae"
},
"width": {
"name": "width"
},
"height": {
"name": "height"
},
"length": {
"name": "length"
},
"batch_size": {
"name": "batch_size"
},
"start_image": {
"name": "start_image"
}
},
"outputs": {
"0": {
"name": "positive",
"tooltip": null
},
"1": {
"name": "negative",
"tooltip": null
},
"2": {
"name": "latent",
"tooltip": "Empty video latent"
},
"3": {
"name": "cond_latent",
"tooltip": "Clean encoded start images, used to replace the noisy start of the model output latents"
}
}
},
"KarrasScheduler": {
"display_name": "KarrasScheduler",
"inputs": {
@@ -4974,6 +5122,11 @@
"mask": {
"name": "mask"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"LatentConcat": {
@@ -5255,22 +5408,28 @@
},
"outputs": {
"0": {
"name": "image"
"name": "image",
"tooltip": null
},
"1": {
"name": "mask"
"name": "mask",
"tooltip": null
},
"2": {
"name": "mesh_path"
"name": "mesh_path",
"tooltip": null
},
"3": {
"name": "normal"
"name": "normal",
"tooltip": null
},
"4": {
"name": "camera_info"
"name": "camera_info",
"tooltip": null
},
"5": {
"name": "recording_video"
"name": "recording_video",
"tooltip": null
}
}
},
@@ -5286,6 +5445,11 @@
"upload": {
"name": "choose file to upload"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"LoadImage": {
@@ -6058,10 +6222,15 @@
"operation": {
"name": "operation"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"MaskPreview": {
"display_name": "MaskPreview",
"display_name": "Preview Mask",
"description": "Saves the input images to your ComfyUI output directory.",
"inputs": {
"mask": {
@@ -6075,6 +6244,11 @@
"mask": {
"name": "mask"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"MergeImageLists": {
@@ -8571,6 +8745,29 @@
}
}
},
"NormalizeVideoLatentStart": {
"display_name": "NormalizeVideoLatentStart",
"description": "Normalizes the initial frames of a video latent to match the mean and standard deviation of subsequent reference frames. Helps reduce differences between the starting frames and the rest of the video.",
"inputs": {
"latent": {
"name": "latent"
},
"start_frame_count": {
"name": "start_frame_count",
"tooltip": "Number of latent frames to normalize, counted from the start"
},
"reference_frame_count": {
"name": "reference_frame_count",
"tooltip": "Number of latent frames after the start frames to use as reference"
}
},
"outputs": {
"0": {
"name": "latent",
"tooltip": null
}
}
},
"OpenAIChatConfig": {
"display_name": "OpenAI ChatGPT Advanced Options",
"description": "Allows specifying advanced configuration options for the OpenAI Chat Nodes.",
@@ -9771,6 +9968,11 @@
"audio": {
"name": "audio"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"RecraftColorRGB": {
@@ -10311,6 +10513,28 @@
}
}
},
"ReplaceVideoLatentFrames": {
"display_name": "ReplaceVideoLatentFrames",
"inputs": {
"destination": {
"name": "destination",
"tooltip": "The destination latent where frames will be replaced."
},
"index": {
"name": "index",
"tooltip": "The starting latent frame index in the destination latent where the source latent frames will be placed. Negative values count from the end."
},
"source": {
"name": "source",
"tooltip": "The source latent providing frames to insert into the destination latent. If not provided, the destination latent is returned unchanged."
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"RescaleCFG": {
"display_name": "RescaleCFG",
"inputs": {
@@ -11561,6 +11785,11 @@
"height": {
"name": "height"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"SplitAudioChannels": {
@@ -11573,10 +11802,12 @@
},
"outputs": {
"0": {
"name": "left"
"name": "left",
"tooltip": null
},
"1": {
"name": "right"
"name": "right",
"tooltip": null
}
}
},
@@ -12555,6 +12786,11 @@
"value": {
"name": "value"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"TomePatchModel": {
@@ -12806,6 +13042,11 @@
"name": "duration",
"tooltip": "Duration in seconds"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"TrimVideoLatent": {
@@ -13305,6 +13546,11 @@
"vae": {
"name": "vae"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"VAEDecodeHunyuan3D": {
@@ -13374,6 +13620,11 @@
"vae": {
"name": "vae"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"VAEEncodeForInpaint": {
@@ -14146,6 +14397,10 @@
"fuse_method": {
"name": "fuse_method",
"tooltip": "The method to use to fuse the context windows."
},
"freenoise": {
"name": "freenoise",
"tooltip": "Whether to apply FreeNoise noise shuffling, improves window blending."
}
},
"outputs": {

View File

@@ -206,8 +206,7 @@
"tooltip": "Último fotograma que se utilizará para el video."
},
"model": {
"name": "modelo",
"tooltip": "Nombre del modelo"
"name": "modelo"
},
"prompt": {
"name": "prompt",
@@ -248,8 +247,7 @@
"tooltip": "La imagen base para editar"
},
"model": {
"name": "modelo",
"tooltip": "Nombre del modelo"
"name": "modelo"
},
"prompt": {
"name": "prompt",
@@ -286,8 +284,7 @@
"tooltip": "Alto personalizado para la imagen. El valor solo funciona si `tamaño_predefinido` está establecido en `Personalizado`"
},
"model": {
"name": "modelo",
"tooltip": "Nombre del modelo"
"name": "modelo"
},
"prompt": {
"name": "prompt",
@@ -336,8 +333,7 @@
"tooltip": "De una a cuatro imágenes."
},
"model": {
"name": "modelo",
"tooltip": "Nombre del modelo"
"name": "modelo"
},
"prompt": {
"name": "prompt",
@@ -386,8 +382,7 @@
"tooltip": "Primer fotograma a usar para el video."
},
"model": {
"name": "modelo",
"tooltip": "Nombre del modelo"
"name": "modelo"
},
"prompt": {
"name": "prompt",
@@ -490,8 +485,7 @@
"tooltip": "La duración del video de salida en segundos."
},
"model": {
"name": "modelo",
"tooltip": "Nombre del modelo"
"name": "modelo"
},
"prompt": {
"name": "prompt",

View File

@@ -206,8 +206,7 @@
"tooltip": "Dernière image à utiliser pour la vidéo."
},
"model": {
"name": "modèle",
"tooltip": "Nom du modèle"
"name": "modèle"
},
"prompt": {
"name": "invite",
@@ -248,8 +247,7 @@
"tooltip": "L'image de base à modifier"
},
"model": {
"name": "model",
"tooltip": "Nom du modèle"
"name": "model"
},
"prompt": {
"name": "prompt",
@@ -286,8 +284,7 @@
"tooltip": "Hauteur personnalisée pour l'image. La valeur fonctionne uniquement si `size_preset` est défini sur `Personnalisé`"
},
"model": {
"name": "model",
"tooltip": "Nom du modèle"
"name": "model"
},
"prompt": {
"name": "prompt",
@@ -336,8 +333,7 @@
"tooltip": "Une à quatre images."
},
"model": {
"name": "modèle",
"tooltip": "Nom du modèle"
"name": "modèle"
},
"prompt": {
"name": "prompt",
@@ -386,8 +382,7 @@
"tooltip": "Première image à utiliser pour la vidéo."
},
"model": {
"name": "modèle",
"tooltip": "Nom du modèle"
"name": "modèle"
},
"prompt": {
"name": "prompt",
@@ -490,8 +485,7 @@
"tooltip": "La durée de la vidéo de sortie en secondes."
},
"model": {
"name": "model",
"tooltip": "Nom du modèle"
"name": "model"
},
"prompt": {
"name": "prompt",

View File

@@ -206,8 +206,7 @@
"tooltip": "動画に使用する最後のフレーム。"
},
"model": {
"name": "モデル",
"tooltip": "モデル名"
"name": "モデル"
},
"prompt": {
"name": "プロンプト",
@@ -248,8 +247,7 @@
"tooltip": "編集するベース画像"
},
"model": {
"name": "モデル",
"tooltip": "モデル名"
"name": "モデル"
},
"prompt": {
"name": "プロンプト",
@@ -286,8 +284,7 @@
"tooltip": "画像のカスタム高さ。`size_preset`が`Custom`に設定されている場合のみ有効"
},
"model": {
"name": "モデル",
"tooltip": "モデル名"
"name": "モデル"
},
"prompt": {
"name": "プロンプト",
@@ -336,8 +333,7 @@
"tooltip": "1〜4枚の画像"
},
"model": {
"name": "モデル",
"tooltip": "モデル名"
"name": "モデル"
},
"prompt": {
"name": "プロンプト",
@@ -386,8 +382,7 @@
"tooltip": "動画の最初のフレームとして使用する画像。"
},
"model": {
"name": "model",
"tooltip": "モデル名"
"name": "model"
},
"prompt": {
"name": "prompt",
@@ -490,8 +485,7 @@
"tooltip": "出力動画の長さ(秒単位)。"
},
"model": {
"name": "モデル",
"tooltip": "モデル名"
"name": "モデル"
},
"prompt": {
"name": "プロンプト",

View File

@@ -206,8 +206,7 @@
"tooltip": "비디오에 사용될 마지막 프레임입니다."
},
"model": {
"name": "model",
"tooltip": "모델 이름"
"name": "model"
},
"prompt": {
"name": "prompt",
@@ -248,8 +247,7 @@
"tooltip": "편집할 기본 이미지"
},
"model": {
"name": "모델",
"tooltip": "모델 이름"
"name": "모델"
},
"prompt": {
"name": "프롬프트",
@@ -286,8 +284,7 @@
"tooltip": "이미지의 사용자 지정 높이. `size_preset`이 `Custom`으로 설정된 경우에만 값이 적용됨"
},
"model": {
"name": "모델",
"tooltip": "모델 이름"
"name": "모델"
},
"prompt": {
"name": "프롬프트",
@@ -336,8 +333,7 @@
"tooltip": "1개에서 4개의 이미지"
},
"model": {
"name": "모델",
"tooltip": "모델 이름"
"name": "모델"
},
"prompt": {
"name": "프롬프트",
@@ -386,8 +382,7 @@
"tooltip": "비디오에 사용할 첫 번째 프레임입니다."
},
"model": {
"name": "모델",
"tooltip": "모델 이름"
"name": "모델"
},
"prompt": {
"name": "프롬프트",
@@ -490,8 +485,7 @@
"tooltip": "출력 비디오의 지속 시간(초)입니다."
},
"model": {
"name": "model",
"tooltip": "모델 이름"
"name": "model"
},
"prompt": {
"name": "prompt",

View File

@@ -206,8 +206,7 @@
"tooltip": "Последний кадр, который будет использоваться для видео."
},
"model": {
"name": "модель",
"tooltip": "Название модели"
"name": "модель"
},
"prompt": {
"name": "промпт",
@@ -248,8 +247,7 @@
"tooltip": "Базовое изображение для редактирования"
},
"model": {
"name": "model",
"tooltip": "Название модели"
"name": "model"
},
"prompt": {
"name": "prompt",
@@ -286,8 +284,7 @@
"tooltip": "Пользовательская высота изображения. Значение работает только если `размер_пресет` установлен в `Пользовательский`"
},
"model": {
"name": "model",
"tooltip": "Название модели"
"name": "model"
},
"prompt": {
"name": "промпт",
@@ -336,8 +333,7 @@
"tooltip": "От одного до четырёх изображений."
},
"model": {
"name": "модель",
"tooltip": "Название модели"
"name": "модель"
},
"prompt": {
"name": "промпт",
@@ -386,8 +382,7 @@
"tooltip": "Первый кадр, который будет использоваться для видео."
},
"model": {
"name": "model",
"tooltip": "Название модели"
"name": "model"
},
"prompt": {
"name": "prompt",
@@ -490,8 +485,7 @@
"tooltip": "Длительность выходного видео в секундах."
},
"model": {
"name": "модель",
"tooltip": "Название модели"
"name": "модель"
},
"prompt": {
"name": "промпт",

View File

@@ -206,8 +206,7 @@
"tooltip": "Video için kullanılacak son kare."
},
"model": {
"name": "model",
"tooltip": "Model adı"
"name": "model"
},
"prompt": {
"name": "prompt",
@@ -248,8 +247,7 @@
"tooltip": "Düzenlenecek temel görüntü"
},
"model": {
"name": "model",
"tooltip": "Model adı"
"name": "model"
},
"prompt": {
"name": "istek",
@@ -286,8 +284,7 @@
"tooltip": "Görsel için özel yükseklik. Bu değer yalnızca `size_preset` `Custom` olarak ayarlandığında çalışır"
},
"model": {
"name": "model",
"tooltip": "Model adı"
"name": "model"
},
"prompt": {
"name": "prompt",
@@ -336,8 +333,7 @@
"tooltip": "Bir ila dört görsel."
},
"model": {
"name": "model",
"tooltip": "Model adı"
"name": "model"
},
"prompt": {
"name": "prompt",
@@ -386,8 +382,7 @@
"tooltip": "Video için kullanılacak ilk kare."
},
"model": {
"name": "model",
"tooltip": "Model adı"
"name": "model"
},
"prompt": {
"name": "prompt",
@@ -490,8 +485,7 @@
"tooltip": ıktı videosunun saniye cinsinden süresi."
},
"model": {
"name": "model",
"tooltip": "Model adı"
"name": "model"
},
"prompt": {
"name": "prompt",

View File

@@ -206,8 +206,7 @@
"tooltip": "用於影片的尾幀。"
},
"model": {
"name": "模型",
"tooltip": "模型名稱"
"name": "模型"
},
"prompt": {
"name": "提示詞",
@@ -248,8 +247,7 @@
"tooltip": "要編輯的基礎圖片"
},
"model": {
"name": "模型",
"tooltip": "模型名稱"
"name": "模型"
},
"prompt": {
"name": "提示詞",
@@ -286,8 +284,7 @@
"tooltip": "圖像的自訂高度。僅在 `size_preset` 設為 `Custom` 時生效"
},
"model": {
"name": "model",
"tooltip": "模型名稱"
"name": "model"
},
"prompt": {
"name": "prompt",
@@ -336,8 +333,7 @@
"tooltip": "一至四張圖片。"
},
"model": {
"name": "model",
"tooltip": "模型名稱"
"name": "model"
},
"prompt": {
"name": "prompt",
@@ -386,8 +382,7 @@
"tooltip": "用於影片的第一幀圖片。"
},
"model": {
"name": "model",
"tooltip": "模型名稱"
"name": "model"
},
"prompt": {
"name": "prompt",
@@ -490,8 +485,7 @@
"tooltip": "輸出影片的持續時間(秒)。"
},
"model": {
"name": "模型",
"tooltip": "模型名稱"
"name": "模型"
},
"prompt": {
"name": "提示詞",

View File

@@ -206,8 +206,7 @@
"tooltip": "用于视频的最后一帧。"
},
"model": {
"name": "模型",
"tooltip": "模型名称"
"name": "模型"
},
"prompt": {
"name": "提示",
@@ -248,8 +247,7 @@
"tooltip": "要编辑的基础图像"
},
"model": {
"name": "模型",
"tooltip": "模型名称"
"name": "模型"
},
"prompt": {
"name": "提示",
@@ -286,8 +284,7 @@
"tooltip": "图像的自定义高度。仅当 `size_preset` 设置为 `Custom` 时该值才生效"
},
"model": {
"name": "模型",
"tooltip": "模型名称"
"name": "模型"
},
"prompt": {
"name": "提示",
@@ -336,8 +333,7 @@
"tooltip": "一到四张图片。"
},
"model": {
"name": "模型",
"tooltip": "模型名称"
"name": "模型"
},
"prompt": {
"name": "提示",
@@ -386,8 +382,7 @@
"tooltip": "用于视频的第一帧。"
},
"model": {
"name": "模型",
"tooltip": "模型名称"
"name": "模型"
},
"prompt": {
"name": "提示",
@@ -490,8 +485,7 @@
"tooltip": "输出视频的时长(秒)。"
},
"model": {
"name": "模型",
"tooltip": "模型名称"
"name": "模型"
},
"prompt": {
"name": "提示",

View File

@@ -5,10 +5,8 @@
<p class="m-0">
{{ $t('assetBrowser.modelAssociatedWithLink') }}
</p>
<p
class="mt-0 bg-modal-card-background text-base-foreground p-3 rounded-lg"
>
{{ metadata?.name || metadata?.filename }}
<p class="mt-0 text-base-foreground rounded-lg">
{{ metadata?.filename || metadata?.name }}
</p>
</div>

View File

@@ -29,7 +29,7 @@
>
<div class="flex flex-col justify-center items-start gap-1 flex-1">
<p class="text-base-foreground m-0">
{{ metadata?.name || metadata?.filename }}
{{ metadata?.filename || metadata?.name }}
</p>
<p class="text-sm text-muted m-0">
<!-- Going to want to add another translation here to get a nice display name. -->

View File

@@ -4,6 +4,8 @@ import { computed, ref, watch } from 'vue'
import { st } from '@/i18n'
import type { AssetMetadata } from '@/platform/assets/schemas/assetSchema'
import { assetService } from '@/platform/assets/services/assetService'
import { useAssetsStore } from '@/stores/assetsStore'
import { useModelToNodeStore } from '@/stores/modelToNodeStore'
interface WizardData {
url: string
@@ -18,6 +20,8 @@ interface ModelTypeOption {
}
export function useUploadModelWizard(modelTypes: Ref<ModelTypeOption[]>) {
const assetsStore = useAssetsStore()
const modelToNodeStore = useModelToNodeStore()
const currentStep = ref(1)
const isFetchingMetadata = ref(false)
const isUploading = ref(false)
@@ -143,6 +147,19 @@ export function useUploadModelWizard(modelTypes: Ref<ModelTypeOption[]>) {
uploadStatus.value = 'success'
currentStep.value = 3
// Refresh model caches for all node types that use this model category
if (selectedModelType.value) {
const providers = modelToNodeStore.getAllNodeProviders(
selectedModelType.value
)
await Promise.all(
providers.map((provider) =>
assetsStore.updateModelsForNodeType(provider.nodeDef.name)
)
)
}
return true
} catch (error) {
console.error('Failed to upload asset:', error)

View File

@@ -199,8 +199,11 @@ export function createModelNodeFromAsset(
}
}
targetGraph.add(node)
// Set widget value BEFORE adding to graph so the node is created with correct value
widget.value = filename
// Now add the node to the graph with the correct widget value already set
targetGraph.add(node)
return { success: true, value: node }
}

View File

@@ -956,6 +956,15 @@ class LayoutStoreImpl implements LayoutStore {
return this.currentActor
}
/**
* Clean up refs and triggers for a node when its Vue component unmounts.
* This should be called from the component's onUnmounted hook.
*/
cleanupNodeRef(nodeId: NodeId): void {
this.nodeRefs.delete(nodeId)
this.nodeTriggers.delete(nodeId)
}
/**
* Initialize store with existing nodes
*/
@@ -964,8 +973,10 @@ class LayoutStoreImpl implements LayoutStore {
): void {
this.ydoc.transact(() => {
this.ynodes.clear()
this.nodeRefs.clear()
this.nodeTriggers.clear()
// Note: We intentionally do NOT clear nodeRefs and nodeTriggers here.
// Vue components may already hold references to these refs, and clearing
// them would break the reactivity chain. The refs will be reused when
// nodes are recreated, and stale refs will be cleaned up over time.
this.spatialIndex.clear()
this.linkSegmentSpatialIndex.clear()
this.slotSpatialIndex.clear()
@@ -995,6 +1006,9 @@ class LayoutStoreImpl implements LayoutStore {
// Add to spatial index
this.spatialIndex.insert(layout.id, layout.bounds)
})
// Trigger all existing refs to notify Vue of the new data
this.nodeTriggers.forEach((trigger) => trigger())
}, 'initialization')
}
@@ -1085,8 +1099,10 @@ class LayoutStoreImpl implements LayoutStore {
if (!this.ynodes.has(operation.nodeId)) return
this.ynodes.delete(operation.nodeId)
this.nodeRefs.delete(operation.nodeId)
this.nodeTriggers.delete(operation.nodeId)
// Note: We intentionally do NOT delete nodeRefs and nodeTriggers here.
// During undo/redo, Vue components may still hold references to the old ref.
// If we delete the trigger, Vue won't be notified when the node is re-created.
// The trigger will be called in finalizeOperation to notify Vue of the change.
// Remove from spatial index
this.spatialIndex.remove(operation.nodeId)

View File

@@ -0,0 +1,116 @@
import { afterEach, describe, expect, it, vi } from 'vitest'
import { resolvePointerTarget } from '@/renderer/extensions/vueNodes/composables/useSlotLinkInteraction'
describe('resolvePointerTarget', () => {
afterEach(() => {
vi.restoreAllMocks()
})
it('returns element from elementFromPoint when available', () => {
const targetElement = document.createElement('div')
targetElement.className = 'lg-slot'
const spy = vi
.spyOn(document, 'elementFromPoint')
.mockReturnValue(targetElement)
const fallback = document.createElement('span')
const result = resolvePointerTarget(100, 200, fallback)
expect(spy).toHaveBeenCalledWith(100, 200)
expect(result).toBe(targetElement)
})
it('returns fallback when elementFromPoint returns null', () => {
const spy = vi.spyOn(document, 'elementFromPoint').mockReturnValue(null)
const fallback = document.createElement('span')
fallback.className = 'fallback-element'
const result = resolvePointerTarget(100, 200, fallback)
expect(spy).toHaveBeenCalledWith(100, 200)
expect(result).toBe(fallback)
})
it('returns null fallback when both elementFromPoint and fallback are null', () => {
vi.spyOn(document, 'elementFromPoint').mockReturnValue(null)
const result = resolvePointerTarget(100, 200, null)
expect(result).toBeNull()
})
describe('touch/mobile pointer capture simulation', () => {
it('resolves correct target when touch moves over different element', () => {
// Simulate the touch scenario:
// - User touches slot A (event.target = slotA)
// - User drags over slot B (elementFromPoint returns slotB)
// - resolvePointerTarget should return slotB, not slotA
const slotA = document.createElement('div')
slotA.className = 'lg-slot slot-a'
slotA.setAttribute('data-slot-key', 'node1-0-input')
const slotB = document.createElement('div')
slotB.className = 'lg-slot slot-b'
slotB.setAttribute('data-slot-key', 'node2-0-input')
// When pointer is over slotB, elementFromPoint returns slotB
vi.spyOn(document, 'elementFromPoint').mockReturnValue(slotB)
// But the fallback (event.target on touch) is still slotA
const result = resolvePointerTarget(150, 250, slotA)
// Should return slotB (the actual element under pointer), not slotA
expect(result).toBe(slotB)
expect(result).not.toBe(slotA)
})
it('falls back to original target when pointer is outside viewport', () => {
// When pointer is outside the document (e.g., dragged off screen),
// elementFromPoint returns null
const slotA = document.createElement('div')
slotA.className = 'lg-slot slot-a'
vi.spyOn(document, 'elementFromPoint').mockReturnValue(null)
const result = resolvePointerTarget(-100, -100, slotA)
// Should fall back to the original target
expect(result).toBe(slotA)
})
})
describe('integration with slot detection', () => {
it('returned element can be used with closest() for slot detection', () => {
// Create a nested structure like the real DOM
const nodeContainer = document.createElement('div')
nodeContainer.setAttribute('data-node-id', 'node123')
const slotWrapper = document.createElement('div')
slotWrapper.className = 'lg-slot'
const slotDot = document.createElement('div')
slotDot.className = 'slot-dot'
slotDot.setAttribute('data-slot-key', 'node123-0-input')
slotWrapper.appendChild(slotDot)
nodeContainer.appendChild(slotWrapper)
// elementFromPoint returns the innermost element (slot dot)
vi.spyOn(document, 'elementFromPoint').mockReturnValue(slotDot)
const result = resolvePointerTarget(100, 100, null)
// Verify we can use closest() to find parent slot and node
expect(result).toBeInstanceOf(HTMLElement)
const htmlResult = result as HTMLElement
expect(htmlResult.closest('.lg-slot')).toBe(slotWrapper)
expect(htmlResult.closest('[data-node-id]')).toBe(nodeContainer)
expect(htmlResult.getAttribute('data-slot-key')).toBe('node123-0-input')
})
})
})

View File

@@ -85,6 +85,28 @@ function createPointerSession(): PointerSession {
return { begin, register, matches, isActive, clear }
}
/**
* Resolves the actual DOM element under the pointer position.
*
* On touch/mobile devices, pointer events have "implicit pointer capture" -
* event.target stays as the element where the touch started, not the element
* currently under the pointer. This helper uses document.elementFromPoint()
* to get the actual element under the pointer, falling back to the provided
* fallback target if elementFromPoint returns null.
*
* @param clientX - The client X coordinate of the pointer
* @param clientY - The client Y coordinate of the pointer
* @param fallback - Fallback target to use if elementFromPoint returns null
* @returns The resolved target element
*/
export function resolvePointerTarget(
clientX: number,
clientY: number,
fallback: EventTarget | null
): EventTarget | null {
return document.elementFromPoint(clientX, clientY) ?? fallback
}
export function useSlotLinkInteraction({
nodeId,
index,
@@ -299,7 +321,7 @@ export function useSlotLinkInteraction({
let hoveredSlotKey: string | null = null
let hoveredNodeId: NodeId | null = null
const target = data.target
const target = resolvePointerTarget(data.clientX, data.clientY, data.target)
if (target === dragContext.lastPointerEventTarget) {
hoveredSlotKey = dragContext.lastPointerTargetSlotKey
hoveredNodeId = dragContext.lastPointerTargetNodeId
@@ -501,9 +523,14 @@ export function useSlotLinkInteraction({
? state.candidate
: null
const hasConnected = connectByPriority(canvasEvent.target, snappedCandidate)
const dropTarget = resolvePointerTarget(
event.clientX,
event.clientY,
canvasEvent.target
)
const hasConnected = connectByPriority(dropTarget, snappedCandidate)
if (!hasConnected && event.target === app.canvas?.canvas) {
if (!hasConnected && dropTarget === app.canvas?.canvas) {
activeAdapter?.dropOnCanvas(canvasEvent)
}

View File

@@ -1,4 +1,4 @@
import { computed, toValue } from 'vue'
import { computed, onUnmounted, toValue } from 'vue'
import type { MaybeRefOrGetter } from 'vue'
import { useLayoutMutations } from '@/renderer/core/layout/operations/layoutMutations'
@@ -17,6 +17,11 @@ export function useNodeLayout(nodeIdMaybe: MaybeRefOrGetter<string>) {
// Get the customRef for this node (shared write access)
const layoutRef = layoutStore.getNodeLayoutRef(nodeId)
// Clean up refs and triggers when Vue component unmounts
onUnmounted(() => {
layoutStore.cleanupNodeRef(nodeId)
})
// Computed properties for easy access
const position = computed(() => {
const layout = layoutRef.value

View File

@@ -4,6 +4,7 @@
v-model="modelValue"
:invalid
:filter="selectOptions.length > 4"
:auto-filter-focus="selectOptions.length > 4"
:options="selectOptions"
v-bind="combinedProps"
:class="cn(WidgetInputBaseClass, 'w-full text-xs')"

View File

@@ -119,11 +119,6 @@ export const useLitegraphService = () => {
return { color: '#0f0' }
}
}
node.strokeStyles['nodeError'] = function (this: LGraphNode) {
if (app.lastNodeErrors?.[this.id]?.errors) {
return { color: 'red' }
}
}
node.strokeStyles['dragOver'] = function (this: LGraphNode) {
if (app.dragOverNode?.id == this.id) {
return { color: 'dodgerblue' }

View File

@@ -1,5 +1,5 @@
import { defineStore } from 'pinia'
import { computed, ref } from 'vue'
import { computed, ref, watch } from 'vue'
import { useNodeProgressText } from '@/composables/node/useNodeProgressText'
import type { LGraph, Subgraph } from '@/lib/litegraph/src/litegraph'
@@ -32,6 +32,7 @@ import { app } from '@/scripts/app'
import { useNodeOutputStore } from '@/stores/imagePreviewStore'
import type { NodeLocatorId } from '@/types/nodeIdentification'
import { createNodeLocatorId } from '@/types/nodeIdentification'
import { forEachNode, getNodeByExecutionId } from '@/utils/graphTraversalUtil'
interface QueuedPrompt {
/**
@@ -534,6 +535,97 @@ export const useExecutionStore = defineStore('execution', () => {
() => runningPromptIds.value.length
)
/** Map of node errors indexed by locator ID. */
const nodeErrorsByLocatorId = computed<Record<NodeLocatorId, NodeError>>(
() => {
if (!lastNodeErrors.value) return {}
const map: Record<NodeLocatorId, NodeError> = {}
for (const [executionId, nodeError] of Object.entries(
lastNodeErrors.value
)) {
const locatorId = executionIdToNodeLocatorId(executionId)
if (locatorId) {
map[locatorId] = nodeError
}
}
return map
}
)
/** Get node errors by locator ID. */
const getNodeErrors = (
nodeLocatorId: NodeLocatorId
): NodeError | undefined => {
return nodeErrorsByLocatorId.value[nodeLocatorId]
}
/** Check if a specific slot has validation errors. */
const slotHasError = (
nodeLocatorId: NodeLocatorId,
slotName: string
): boolean => {
const nodeError = getNodeErrors(nodeLocatorId)
if (!nodeError) return false
return nodeError.errors.some((e) => e.extra_info?.input_name === slotName)
}
/**
* Update node and slot error flags when validation errors change.
* Propagates errors up subgraph chains.
*/
watch(lastNodeErrors, () => {
if (!app.graph || !app.graph.nodes) return
// Clear all error flags
forEachNode(app.graph, (node) => {
node.has_errors = false
if (node.inputs) {
for (const slot of node.inputs) {
slot.hasErrors = false
}
}
})
if (!lastNodeErrors.value) return
// Set error flags on nodes and slots
for (const [executionId, nodeError] of Object.entries(
lastNodeErrors.value
)) {
const node = getNodeByExecutionId(app.graph, executionId)
if (!node) continue
node.has_errors = true
// Mark input slots with errors
if (node.inputs) {
for (const error of nodeError.errors) {
const slotName = error.extra_info?.input_name
if (!slotName) continue
const slot = node.inputs.find((s) => s.name === slotName)
if (slot) {
slot.hasErrors = true
}
}
}
// Propagate errors to parent subgraph nodes
const parts = executionId.split(':')
for (let i = parts.length - 1; i > 0; i--) {
const parentExecutionId = parts.slice(0, i).join(':')
const parentNode = getNodeByExecutionId(app.graph, parentExecutionId)
if (parentNode) {
parentNode.has_errors = true
}
}
}
})
return {
isIdle,
clientId,
@@ -567,6 +659,9 @@ export const useExecutionStore = defineStore('execution', () => {
// NodeLocatorId conversion helpers
executionIdToNodeLocatorId,
nodeLocatorIdToExecutionId,
promptIdToWorkflowId
promptIdToWorkflowId,
// Node error lookup helpers
getNodeErrors,
slotHasError
}
})

View File

@@ -2,14 +2,38 @@ import type { ReadOnlyRect } from '@/lib/litegraph/src/interfaces'
import type { Bounds } from '@/renderer/core/layout/types'
/**
* Finds the greatest common divisor (GCD) for two numbers.
* Finds the greatest common divisor (GCD) for two numbers using iterative
* Euclidean algorithm. Uses iteration instead of recursion to avoid stack
* overflow with large inputs or small floating-point step values.
*
* For floating-point numbers, uses a tolerance-based approach to handle
* precision issues and limits iterations to prevent hangs.
*
* @param a - The first number.
* @param b - The second number.
* @returns The GCD of the two numbers.
*/
export const gcd = (a: number, b: number): number => {
return b === 0 ? a : gcd(b, a % b)
// Use absolute values to handle negative numbers
let x = Math.abs(a)
let y = Math.abs(b)
// Handle edge cases
if (x === 0) return y
if (y === 0) return x
// For floating-point numbers, use tolerance-based comparison
// This prevents infinite loops due to floating-point precision issues
const epsilon = 1e-10
const maxIterations = 100
let iterations = 0
while (y > epsilon && iterations < maxIterations) {
;[x, y] = [y, x % y]
iterations++
}
return x
}
/**