Compare commits

..

1 Commit

Author SHA1 Message Date
Claude
d930514bea fix: incorporate Fuse search scores into template sorting
When searching templates, the Fuse.js relevance scores were being
discarded when any sort option other than 'default' was selected.
This caused templates with better search matches to be ranked lower
than templates with higher usage/popularity but worse search relevance.

Changes:
- Store Fuse search scores in a Map for use during sorting
- For 'recommended' sort with active search: weight search relevance
  at 60% and base recommendation score at 40%
- For 'popular' sort with active search: weight both equally at 50%
- For VRAM/size sorts: use search relevance as tiebreaker
- 'default' sort preserves Fuse's original relevance order
2026-01-08 20:01:41 +00:00
7 changed files with 240 additions and 230 deletions

View File

@@ -1,7 +1,7 @@
{
"name": "@comfyorg/comfyui-frontend",
"private": true,
"version": "1.37.6",
"version": "1.37.5",
"type": "module",
"repository": "https://github.com/Comfy-Org/ComfyUI_frontend",
"homepage": "https://comfy.org",

View File

@@ -20,14 +20,9 @@
variant="secondary"
size="icon"
:aria-label="t('menu.customNodesManager')"
class="relative"
@click="openCustomNodeManager"
>
<i class="icon-[lucide--puzzle] size-4" />
<span
v-if="shouldShowRedDot"
class="absolute top-0.5 right-1 size-2 rounded-full bg-red-500"
/>
</Button>
</div>
@@ -96,14 +91,12 @@ import Button from '@/components/ui/button/Button.vue'
import { useCurrentUser } from '@/composables/auth/useCurrentUser'
import { useErrorHandling } from '@/composables/useErrorHandling'
import { buildTooltipConfig } from '@/composables/useTooltipConfig'
import { useReleaseStore } from '@/platform/updates/common/releaseStore'
import { app } from '@/scripts/app'
import { useCommandStore } from '@/stores/commandStore'
import { useQueueStore, useQueueUIStore } from '@/stores/queueStore'
import { useRightSidePanelStore } from '@/stores/workspace/rightSidePanelStore'
import { useWorkspaceStore } from '@/stores/workspaceStore'
import { isElectron } from '@/utils/envUtil'
import { useConflictAcknowledgment } from '@/workbench/extensions/manager/composables/useConflictAcknowledgment'
import { useManagerState } from '@/workbench/extensions/manager/composables/useManagerState'
import { ManagerTab } from '@/workbench/extensions/manager/types/comfyManagerTypes'
@@ -118,10 +111,6 @@ const commandStore = useCommandStore()
const queueStore = useQueueStore()
const queueUIStore = useQueueUIStore()
const { isOverlayExpanded: isQueueOverlayExpanded } = storeToRefs(queueUIStore)
const releaseStore = useReleaseStore()
const { shouldShowRedDot: showReleaseRedDot } = storeToRefs(releaseStore)
const { shouldShowRedDot: shouldShowConflictRedDot } =
useConflictAcknowledgment()
const isTopMenuHovered = ref(false)
const queuedCount = computed(() => queueStore.pendingTasks.length)
const queueHistoryTooltipConfig = computed(() =>
@@ -131,12 +120,6 @@ const customNodesManagerTooltipConfig = computed(() =>
buildTooltipConfig(t('menu.customNodesManager'))
)
// Use either release red dot or conflict red dot
const shouldShowRedDot = computed((): boolean => {
const releaseRedDot = showReleaseRedDot.value
return releaseRedDot || shouldShowConflictRedDot.value
})
// Right side panel toggle
const { isOpen: isRightSidePanelOpen } = storeToRefs(rightSidePanelStore)
const rightSidePanelTooltipConfig = computed(() =>

View File

@@ -1664,41 +1664,31 @@ describe('useNodePricing', () => {
{
model: 'gemini-2.5-pro-preview-05-06',
expected: creditsListLabel([0.00125, 0.01], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
},
{
model: 'gemini-2.5-pro',
expected: creditsListLabel([0.00125, 0.01], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
},
{
model: 'gemini-3-pro-preview',
expected: creditsListLabel([0.002, 0.012], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
},
{
model: 'gemini-2.5-flash-preview-04-17',
expected: creditsListLabel([0.0003, 0.0025], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
},
{
model: 'gemini-2.5-flash',
expected: creditsListLabel([0.0003, 0.0025], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
},
{ model: 'unknown-gemini-model', expected: 'Token-based' }
@@ -1712,6 +1702,16 @@ describe('useNodePricing', () => {
})
})
it('should return per-second pricing for Gemini Veo models', () => {
const { getNodeDisplayPrice } = useNodePricing()
const node = createMockNode('GeminiNode', [
{ name: 'model', value: 'veo-2.0-generate-001' }
])
const price = getNodeDisplayPrice(node)
expect(price).toBe(creditsLabel(0.5, { suffix: '/second' }))
})
it('should return fallback for GeminiNode without model widget', () => {
const { getNodeDisplayPrice } = useNodePricing()
const node = createMockNode('GeminiNode', [])
@@ -1737,97 +1737,73 @@ describe('useNodePricing', () => {
{
model: 'o4-mini',
expected: creditsListLabel([0.0011, 0.0044], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
},
{
model: 'o1-pro',
expected: creditsListLabel([0.15, 0.6], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
},
{
model: 'o1',
expected: creditsListLabel([0.015, 0.06], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
},
{
model: 'o3-mini',
expected: creditsListLabel([0.0011, 0.0044], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
},
{
model: 'o3',
expected: creditsListLabel([0.01, 0.04], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
},
{
model: 'gpt-4o',
expected: creditsListLabel([0.0025, 0.01], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
},
{
model: 'gpt-4.1-nano',
expected: creditsListLabel([0.0001, 0.0004], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
},
{
model: 'gpt-4.1-mini',
expected: creditsListLabel([0.0004, 0.0016], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
},
{
model: 'gpt-4.1',
expected: creditsListLabel([0.002, 0.008], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
},
{
model: 'gpt-5-nano',
expected: creditsListLabel([0.00005, 0.0004], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
},
{
model: 'gpt-5-mini',
expected: creditsListLabel([0.00025, 0.002], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
},
{
model: 'gpt-5',
expected: creditsListLabel([0.00125, 0.01], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
}
]
@@ -1848,49 +1824,37 @@ describe('useNodePricing', () => {
{
model: 'gpt-4.1-nano-test',
expected: creditsListLabel([0.0001, 0.0004], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
},
{
model: 'gpt-4.1-mini-test',
expected: creditsListLabel([0.0004, 0.0016], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
},
{
model: 'gpt-4.1-test',
expected: creditsListLabel([0.002, 0.008], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
},
{
model: 'o1-pro-test',
expected: creditsListLabel([0.15, 0.6], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
},
{
model: 'o1-test',
expected: creditsListLabel([0.015, 0.06], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
},
{
model: 'o3-mini-test',
expected: creditsListLabel([0.0011, 0.0044], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
},
{ model: 'unknown-model', expected: 'Token-based' }

View File

@@ -1823,35 +1823,28 @@ const apiNodeCosts: Record<string, { displayPrice: string | PricingFunction }> =
const model = String(modelWidget.value)
if (model.includes('gemini-2.5-flash-preview-04-17')) {
// Google Veo video generation
if (model.includes('veo-2.0')) {
return formatCreditsLabel(0.5, { suffix: '/second' })
} else if (model.includes('gemini-2.5-flash-preview-04-17')) {
return formatCreditsListLabel([0.0003, 0.0025], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
} else if (model.includes('gemini-2.5-flash')) {
return formatCreditsListLabel([0.0003, 0.0025], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
} else if (model.includes('gemini-2.5-pro-preview-05-06')) {
return formatCreditsListLabel([0.00125, 0.01], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
} else if (model.includes('gemini-2.5-pro')) {
return formatCreditsListLabel([0.00125, 0.01], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
} else if (model.includes('gemini-3-pro-preview')) {
return formatCreditsListLabel([0.002, 0.012], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
}
// For other Gemini models, show token-based pricing info
@@ -1906,75 +1899,51 @@ const apiNodeCosts: Record<string, { displayPrice: string | PricingFunction }> =
// Specific pricing for exposed models based on official pricing data (converted to per 1K tokens)
if (model.includes('o4-mini')) {
return formatCreditsListLabel([0.0011, 0.0044], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
} else if (model.includes('o1-pro')) {
return formatCreditsListLabel([0.15, 0.6], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
} else if (model.includes('o1')) {
return formatCreditsListLabel([0.015, 0.06], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
} else if (model.includes('o3-mini')) {
return formatCreditsListLabel([0.0011, 0.0044], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
} else if (model.includes('o3')) {
return formatCreditsListLabel([0.01, 0.04], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
} else if (model.includes('gpt-4o')) {
return formatCreditsListLabel([0.0025, 0.01], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
} else if (model.includes('gpt-4.1-nano')) {
return formatCreditsListLabel([0.0001, 0.0004], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
} else if (model.includes('gpt-4.1-mini')) {
return formatCreditsListLabel([0.0004, 0.0016], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
} else if (model.includes('gpt-4.1')) {
return formatCreditsListLabel([0.002, 0.008], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
} else if (model.includes('gpt-5-nano')) {
return formatCreditsListLabel([0.00005, 0.0004], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
} else if (model.includes('gpt-5-mini')) {
return formatCreditsListLabel([0.00025, 0.002], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
} else if (model.includes('gpt-5')) {
return formatCreditsListLabel([0.00125, 0.01], {
suffix: ' per 1K tokens',
approximate: true,
separator: '-'
suffix: ' per 1K tokens'
})
}
return 'Token-based'
@@ -2132,35 +2101,6 @@ const apiNodeCosts: Record<string, { displayPrice: string | PricingFunction }> =
},
LtxvApiImageToVideo: {
displayPrice: ltxvPricingCalculator
},
WanReferenceVideoApi: {
displayPrice: (node: LGraphNode): string => {
const durationWidget = node.widgets?.find(
(w) => w.name === 'duration'
) as IComboWidget
const sizeWidget = node.widgets?.find(
(w) => w.name === 'size'
) as IComboWidget
if (!durationWidget || !sizeWidget) {
return formatCreditsRangeLabel(0.7, 1.5, {
note: '(varies with size & duration)'
})
}
const seconds = parseFloat(String(durationWidget.value))
const sizeStr = String(sizeWidget.value).toLowerCase()
const rate = sizeStr.includes('1080p') ? 0.15 : 0.1
const inputMin = 2 * rate
const inputMax = 5 * rate
const outputPrice = seconds * rate
const minTotal = inputMin + outputPrice
const maxTotal = inputMax + outputPrice
return formatCreditsRangeLabel(minTotal, maxTotal)
}
}
}
@@ -2314,7 +2254,6 @@ export const useNodePricing = () => {
ByteDanceImageReferenceNode: ['model', 'duration', 'resolution'],
WanTextToVideoApi: ['duration', 'size'],
WanImageToVideoApi: ['duration', 'resolution'],
WanReferenceVideoApi: ['duration', 'size'],
LtxvApiTextToVideo: ['model', 'duration', 'resolution'],
LtxvApiImageToVideo: ['model', 'duration', 'resolution']
}

View File

@@ -272,4 +272,108 @@ describe('useTemplateFiltering', () => {
'beta-pro'
])
})
it('incorporates search relevance into recommended sorting', async () => {
vi.useFakeTimers()
const templates = ref<TemplateInfo[]>([
{
name: 'wan-video-exact',
title: 'Wan Video Template',
description: 'A template with Wan in title',
mediaType: 'image',
mediaSubtype: 'png',
date: '2024-01-01',
usage: 10
},
{
name: 'qwen-image-partial',
title: 'Qwen Image Editor',
description: 'A template that contains w, a, n scattered',
mediaType: 'image',
mediaSubtype: 'png',
date: '2024-01-01',
usage: 1000 // Higher usage but worse search match
},
{
name: 'wan-text-exact',
title: 'Wan2.5: Text to Image',
description: 'Another exact match for Wan',
mediaType: 'image',
mediaSubtype: 'png',
date: '2024-01-01',
usage: 50
}
])
const { searchQuery, sortBy, filteredTemplates } =
useTemplateFiltering(templates)
// Search for "Wan"
searchQuery.value = 'Wan'
sortBy.value = 'recommended'
await nextTick()
await vi.runOnlyPendingTimersAsync()
await nextTick()
// Templates with "Wan" in title should rank higher than Qwen despite lower usage
// because search relevance is now factored into the recommended sort
const results = filteredTemplates.value.map((t) => t.name)
// Verify exact matches appear (Qwen might be filtered out by threshold)
expect(results).toContain('wan-video-exact')
expect(results).toContain('wan-text-exact')
// If Qwen appears, it should be ranked lower than exact matches
if (results.includes('qwen-image-partial')) {
const wanIndex = results.indexOf('wan-video-exact')
const qwenIndex = results.indexOf('qwen-image-partial')
expect(wanIndex).toBeLessThan(qwenIndex)
}
vi.useRealTimers()
})
it('preserves Fuse search order when using default sort', async () => {
vi.useFakeTimers()
const templates = ref<TemplateInfo[]>([
{
name: 'portrait-basic',
title: 'Basic Portrait',
description: 'A basic template',
mediaType: 'image',
mediaSubtype: 'png'
},
{
name: 'portrait-pro',
title: 'Portrait Pro Edition',
description: 'Advanced portrait features',
mediaType: 'image',
mediaSubtype: 'png'
},
{
name: 'landscape-view',
title: 'Landscape Generator',
description: 'Generate landscapes',
mediaType: 'image',
mediaSubtype: 'png'
}
])
const { searchQuery, sortBy, filteredTemplates } =
useTemplateFiltering(templates)
searchQuery.value = 'Portrait Pro'
sortBy.value = 'default'
await nextTick()
await vi.runOnlyPendingTimersAsync()
await nextTick()
const results = filteredTemplates.value.map((t) => t.name)
// With default sort, Fuse's relevance ordering is preserved
// "Portrait Pro Edition" should be first as it's the best match
expect(results[0]).toBe('portrait-pro')
})
})

View File

@@ -82,13 +82,31 @@ export function useTemplateFiltering(
const debouncedSearchQuery = refDebounced(searchQuery, 50)
const filteredBySearch = computed(() => {
// Store Fuse search results with scores for use in sorting
const fuseSearchResults = computed(() => {
if (!debouncedSearchQuery.value.trim()) {
return null
}
return fuse.value.search(debouncedSearchQuery.value)
})
// Map of template name to search score (lower is better in Fuse, 0 = perfect match)
const searchScoreMap = computed(() => {
const map = new Map<string, number>()
if (fuseSearchResults.value) {
fuseSearchResults.value.forEach((result) => {
// Store the score (0 = perfect match, 1 = worst match)
map.set(result.item.name, result.score ?? 1)
})
}
return map
})
const filteredBySearch = computed(() => {
if (!fuseSearchResults.value) {
return templatesArray.value
}
const results = fuse.value.search(debouncedSearchQuery.value)
return results.map((result) => result.item)
return fuseSearchResults.value.map((result) => result.item)
})
const filteredByModels = computed(() => {
@@ -165,31 +183,66 @@ export function useTemplateFiltering(
{ immediate: true }
)
// Helper to get search relevance score (higher is better, 0-1 range)
// Fuse returns scores where 0 = perfect match, 1 = worst match
// We invert it so higher = better for combining with other scores
const getSearchRelevance = (template: TemplateInfo): number => {
const fuseScore = searchScoreMap.value.get(template.name)
if (fuseScore === undefined) return 0 // Not in search results or no search
return 1 - fuseScore // Invert: 0 (worst) -> 1 (best)
}
const hasActiveSearch = computed(
() => debouncedSearchQuery.value.trim() !== ''
)
const sortedTemplates = computed(() => {
const templates = [...filteredByRunsOn.value]
switch (sortBy.value) {
case 'recommended':
// Curated: usage × 0.5 + internal × 0.3 + freshness × 0.2
// When searching, heavily weight search relevance
// Formula with search: searchRelevance × 0.6 + (usage × 0.5 + internal × 0.3 + freshness × 0.2) × 0.4
// Formula without search: usage × 0.5 + internal × 0.3 + freshness × 0.2
return templates.sort((a, b) => {
const scoreA = rankingStore.computeDefaultScore(
const baseScoreA = rankingStore.computeDefaultScore(
a.date,
a.searchRank,
a.usage
)
const scoreB = rankingStore.computeDefaultScore(
const baseScoreB = rankingStore.computeDefaultScore(
b.date,
b.searchRank,
b.usage
)
return scoreB - scoreA
if (hasActiveSearch.value) {
const searchA = getSearchRelevance(a)
const searchB = getSearchRelevance(b)
const finalA = searchA * 0.6 + baseScoreA * 0.4
const finalB = searchB * 0.6 + baseScoreB * 0.4
return finalB - finalA
}
return baseScoreB - baseScoreA
})
case 'popular':
// User-driven: usage × 0.9 + freshness × 0.1
// When searching, include search relevance
// Formula with search: searchRelevance × 0.5 + (usage × 0.9 + freshness × 0.1) × 0.5
// Formula without search: usage × 0.9 + freshness × 0.1
return templates.sort((a, b) => {
const scoreA = rankingStore.computePopularScore(a.date, a.usage)
const scoreB = rankingStore.computePopularScore(b.date, b.usage)
return scoreB - scoreA
const baseScoreA = rankingStore.computePopularScore(a.date, a.usage)
const baseScoreB = rankingStore.computePopularScore(b.date, b.usage)
if (hasActiveSearch.value) {
const searchA = getSearchRelevance(a)
const searchB = getSearchRelevance(b)
const finalA = searchA * 0.5 + baseScoreA * 0.5
const finalB = searchB * 0.5 + baseScoreB * 0.5
return finalB - finalA
}
return baseScoreB - baseScoreA
})
case 'alphabetical':
return templates.sort((a, b) => {
@@ -209,6 +262,12 @@ export function useTemplateFiltering(
const vramB = getVramMetric(b)
if (vramA === vramB) {
// Use search relevance as tiebreaker when searching
if (hasActiveSearch.value) {
const searchA = getSearchRelevance(a)
const searchB = getSearchRelevance(b)
if (searchA !== searchB) return searchB - searchA
}
const nameA = a.title || a.name || ''
const nameB = b.title || b.name || ''
return nameA.localeCompare(nameB)
@@ -225,11 +284,20 @@ export function useTemplateFiltering(
typeof a.size === 'number' ? a.size : Number.POSITIVE_INFINITY
const sizeB =
typeof b.size === 'number' ? b.size : Number.POSITIVE_INFINITY
if (sizeA === sizeB) return 0
if (sizeA === sizeB) {
// Use search relevance as tiebreaker when searching
if (hasActiveSearch.value) {
const searchA = getSearchRelevance(a)
const searchB = getSearchRelevance(b)
if (searchA !== searchB) return searchB - searchA
}
return 0
}
return sizeA - sizeB
})
case 'default':
default:
// 'default' preserves Fuse's search order (already sorted by relevance)
return templates
}
})

View File

@@ -6088,9 +6088,6 @@
},
"ckpt_name": {
"name": "ckpt_name"
},
"device": {
"name": "device"
}
},
"outputs": {
@@ -15368,51 +15365,6 @@
}
}
},
"WanReferenceVideoApi": {
"display_name": "Wan Reference to Video",
"description": "Use the character and voice from input videos, combined with a prompt, to generate a new video that maintains character consistency.",
"inputs": {
"model": {
"name": "model"
},
"prompt": {
"name": "prompt",
"tooltip": "Prompt describing the elements and visual features. Supports English and Chinese. Use identifiers such as `character1` and `character2` to refer to the reference characters."
},
"negative_prompt": {
"name": "negative_prompt",
"tooltip": "Negative prompt describing what to avoid."
},
"reference_videos": {
"name": "reference_videos"
},
"size": {
"name": "size"
},
"duration": {
"name": "duration"
},
"seed": {
"name": "seed"
},
"shot_type": {
"name": "shot_type",
"tooltip": "Specifies the shot type for the generated video, that is, whether the video is a single continuous shot or multiple shots with cuts."
},
"watermark": {
"name": "watermark",
"tooltip": "Whether to add an AI-generated watermark to the result."
},
"control_after_generate": {
"name": "control after generate"
}
},
"outputs": {
"0": {
"tooltip": null
}
}
},
"WanSoundImageToVideo": {
"display_name": "WanSoundImageToVideo",
"inputs": {