Restore lost work from manager/menu-items-migration feature branch

This merge restores work that was lost during a bad rebase ~2 months ago.
Key restored functionality:
- ComfyUI Manager service with queue/task API endpoint
- Manager queue composable and WebSocket status handling
- Enhanced manager dialog commands and UI labels
- Manager websocket queue status enum types
- Server feature flag support functions

Resolves conflicts between recovery commit 730b278fa0 and
feature branch manager/menu-items-migration to combine all lost work.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
bymyself
2025-08-27 07:37:04 -07:00
39 changed files with 3403 additions and 735 deletions

.claude/commands/comprehensive-pr-review.md

@@ -0,0 +1,708 @@
# Comprehensive PR Review for ComfyUI Frontend
<task>
You are performing a comprehensive code review for PR #$1 in the ComfyUI frontend repository. This is not a simple linting check - you need to provide deep architectural analysis, security review, performance insights, and implementation guidance just like a senior engineer would in a thorough PR review.
Your review should cover:
1. Architecture and design patterns
2. Security vulnerabilities and risks
3. Performance implications
4. Code quality and maintainability
5. Integration with existing systems
6. Best practices and conventions
7. Testing considerations
8. Documentation needs
</task>
Arguments: PR number passed via PR_NUMBER environment variable
## Phase 0: Initialize Variables and Helper Functions
```bash
# Validate PR_NUMBER first thing
if [ -z "$PR_NUMBER" ]; then
echo "Error: PR_NUMBER environment variable is not set"
echo "Usage: PR_NUMBER=<number> claude run /comprehensive-pr-review"
exit 1
fi
# Record the review start time and initialize all counters at the start
REVIEW_START_TIME=$(date +%s)
CRITICAL_COUNT=0
HIGH_COUNT=0
MEDIUM_COUNT=0
LOW_COUNT=0
ARCHITECTURE_ISSUES=0
SECURITY_ISSUES=0
PERFORMANCE_ISSUES=0
QUALITY_ISSUES=0
# Helper function for posting review comments
post_review_comment() {
local file_path=$1
local line_number=$2
local severity=$3 # critical/high/medium/low
local category=$4 # architecture/security/performance/quality
local issue=$5
local context=$6
local suggestion=$7
# Update counters
case $severity in
"critical") ((CRITICAL_COUNT++)) ;;
"high") ((HIGH_COUNT++)) ;;
"medium") ((MEDIUM_COUNT++)) ;;
"low") ((LOW_COUNT++)) ;;
esac
case $category in
"architecture") ((ARCHITECTURE_ISSUES++)) ;;
"security") ((SECURITY_ISSUES++)) ;;
"performance") ((PERFORMANCE_ISSUES++)) ;;
"quality") ((QUALITY_ISSUES++)) ;;
esac
# Post inline comment via GitHub CLI
local comment=$(printf '%s\n%s\n%s' "$issue" "$context" "$suggestion")
gh pr review $PR_NUMBER --comment --body "$comment"
}
```
## Phase 1: Environment Setup and PR Context
```bash
# Pre-flight checks
check_prerequisites() {
# Check gh CLI is available
if ! command -v gh &> /dev/null; then
echo "Error: gh CLI is not installed"
exit 1
fi
# In GitHub Actions, auth is handled via GITHUB_TOKEN
if [ -n "$GITHUB_ACTIONS" ] && [ -z "$GITHUB_TOKEN" ]; then
echo "Error: GITHUB_TOKEN is not set in GitHub Actions"
exit 1
fi
# Check if we're authenticated
if ! gh auth status &> /dev/null; then
echo "Error: Not authenticated with GitHub. Run 'gh auth login'"
exit 1
fi
# Set repository if not already set
if [ -z "$REPOSITORY" ]; then
REPOSITORY="Comfy-Org/ComfyUI_frontend"
fi
# Check PR exists and is open
PR_STATE=$(gh pr view $PR_NUMBER --repo $REPOSITORY --json state -q .state 2>/dev/null || echo "NOT_FOUND")
if [ "$PR_STATE" = "NOT_FOUND" ]; then
echo "Error: PR #$PR_NUMBER not found in $REPOSITORY"
exit 1
elif [ "$PR_STATE" != "OPEN" ]; then
echo "Error: PR #$PR_NUMBER is not open (state: $PR_STATE)"
exit 1
fi
# Check API rate limits
RATE_REMAINING=$(gh api /rate_limit --jq '.rate.remaining' 2>/dev/null || echo "5000")
if [ "$RATE_REMAINING" -lt 100 ]; then
echo "Warning: Low API rate limit: $RATE_REMAINING remaining"
if [ "$RATE_REMAINING" -lt 50 ]; then
echo "Error: Insufficient API rate limit for comprehensive review"
exit 1
fi
fi
echo "Pre-flight checks passed"
}
# Run pre-flight checks
check_prerequisites
echo "Starting comprehensive review of PR #$PR_NUMBER"
# Fetch PR information with error handling
echo "Fetching PR information..."
if ! gh pr view $PR_NUMBER --repo $REPOSITORY --json files,title,body,additions,deletions,baseRefName,headRefName > pr_info.json; then
echo "Error: Failed to fetch PR information"
exit 1
fi
# Extract branch names
BASE_BRANCH=$(jq -r '.baseRefName' < pr_info.json)
HEAD_BRANCH=$(jq -r '.headRefName' < pr_info.json)
# Checkout PR branch locally for better file inspection
echo "Checking out PR branch..."
git fetch origin "pull/$PR_NUMBER/head:pr-$PR_NUMBER"
git checkout "pr-$PR_NUMBER"
# Get changed files using git locally (much faster)
git diff --name-only "origin/$BASE_BRANCH" > changed_files.txt
# Get the diff using git locally
git diff "origin/$BASE_BRANCH" > pr_diff.txt
# Get detailed file changes with line numbers
git diff --name-status "origin/$BASE_BRANCH" > file_changes.txt
# For API compatibility, create a simplified pr_files.json
echo '[]' > pr_files.json
while IFS=$'\t' read -r status file; do
if [[ "$status" != "D" ]]; then # Skip deleted files
# Get the raw patch for this file; jq --arg below handles the JSON escaping
patch=$(git diff "origin/$BASE_BRANCH" -- "$file")
# numstat prints "-" for binary files, so coerce those to 0
additions=$(git diff --numstat "origin/$BASE_BRANCH" -- "$file" | awk '{print ($1 == "-") ? 0 : $1}')
deletions=$(git diff --numstat "origin/$BASE_BRANCH" -- "$file" | awk '{print ($2 == "-") ? 0 : $2}')
jq --arg file "$file" \
--arg patch "$patch" \
--arg additions "$additions" \
--arg deletions "$deletions" \
'. += [{
"filename": $file,
"patch": $patch,
"additions": ($additions | tonumber),
"deletions": ($deletions | tonumber)
}]' pr_files.json > pr_files.json.tmp
mv pr_files.json.tmp pr_files.json
fi
done < file_changes.txt
# Setup caching directory
CACHE_DIR=".claude-review-cache"
mkdir -p "$CACHE_DIR"
# Function to cache analysis results
cache_analysis() {
local file_path=$1
local analysis_result=$2
local file_hash=$(git hash-object "$file_path" 2>/dev/null || echo "no-hash")
if [ "$file_hash" != "no-hash" ]; then
echo "$analysis_result" > "$CACHE_DIR/${file_hash}.cache"
fi
}
# Function to get cached analysis
get_cached_analysis() {
local file_path=$1
local file_hash=$(git hash-object "$file_path" 2>/dev/null || echo "no-hash")
if [ "$file_hash" != "no-hash" ] && [ -f "$CACHE_DIR/${file_hash}.cache" ]; then
cat "$CACHE_DIR/${file_hash}.cache"
return 0
fi
return 1
}
# Clean old cache entries (older than 7 days)
find "$CACHE_DIR" -name "*.cache" -mtime +7 -delete 2>/dev/null || true
```
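The cache helpers above are defined but not yet invoked; a minimal sketch of how they could wrap a per-file analysis step (the analysis itself is a placeholder):
```bash
# Hypothetical wiring of the cache helpers around a per-file analysis step
while IFS= read -r file; do
  if cached=$(get_cached_analysis "$file"); then
    echo "Reusing cached analysis for $file"
    analysis=$cached
  else
    analysis="(analysis of $file goes here)"  # placeholder for the actual review work
    cache_analysis "$file" "$analysis"
  fi
done < changed_files.txt
```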
## Phase 2: Load Comprehensive Knowledge Base
```bash
# Don't create knowledge directory until we know we need it
KNOWLEDGE_FOUND=false
# Use local cache for knowledge base to avoid repeated downloads
KNOWLEDGE_CACHE_DIR=".claude-knowledge-cache"
mkdir -p "$KNOWLEDGE_CACHE_DIR"
# Option to use cloned prompt library for better performance
PROMPT_LIBRARY_PATH="../comfy-claude-prompt-library"
if [ -d "$PROMPT_LIBRARY_PATH" ]; then
echo "Using local prompt library at $PROMPT_LIBRARY_PATH"
USE_LOCAL_PROMPT_LIBRARY=true
else
echo "No local prompt library found, will use GitHub API"
USE_LOCAL_PROMPT_LIBRARY=false
fi
# Function to fetch with cache
fetch_with_cache() {
local url=$1
local output_file=$2
local cache_file="$KNOWLEDGE_CACHE_DIR/$(echo "$url" | sed 's/[^a-zA-Z0-9]/_/g')"
# Check if cached version exists and is less than 1 day old
if [ -f "$cache_file" ] && [ $(find "$cache_file" -mtime -1 2>/dev/null | wc -l) -gt 0 ]; then
# Create knowledge directory only when we actually have content
if [ "$KNOWLEDGE_FOUND" = "false" ]; then
mkdir -p review_knowledge
KNOWLEDGE_FOUND=true
fi
cp "$cache_file" "$output_file"
echo "Using cached version of $(basename "$output_file")"
return 0
fi
# Try to fetch a fresh version; download into the cache dir first,
# since the review_knowledge directory may not exist yet
if curl -s -f "$url" > "$cache_file.tmp"; then
# Create knowledge directory only when we actually have content
if [ "$KNOWLEDGE_FOUND" = "false" ]; then
mkdir -p review_knowledge
KNOWLEDGE_FOUND=true
fi
mv "$cache_file.tmp" "$cache_file"
cp "$cache_file" "$output_file"
echo "Downloaded fresh version of $(basename "$output_file")"
return 0
else
# If fetch failed but we have a cache, use it
if [ -f "$cache_file" ]; then
if [ "$KNOWLEDGE_FOUND" = "false" ]; then
mkdir -p review_knowledge
KNOWLEDGE_FOUND=true
fi
cp "$cache_file" "$output_file"
echo "Using stale cache for $(basename "$output_file") (download failed)"
return 0
fi
echo "Failed to load $(basename "$output_file")"
return 1
fi
}
# Load REPOSITORY_GUIDE.md for deep architectural understanding
echo "Loading ComfyUI Frontend repository guide..."
if [ "$USE_LOCAL_PROMPT_LIBRARY" = "true" ] && [ -f "$PROMPT_LIBRARY_PATH/project-summaries-for-agents/ComfyUI_frontend/REPOSITORY_GUIDE.md" ]; then
if [ "$KNOWLEDGE_FOUND" = "false" ]; then
mkdir -p review_knowledge
KNOWLEDGE_FOUND=true
fi
cp "$PROMPT_LIBRARY_PATH/project-summaries-for-agents/ComfyUI_frontend/REPOSITORY_GUIDE.md" "review_knowledge/repository_guide.md"
echo "Loaded repository guide from local prompt library"
else
fetch_with_cache "https://raw.githubusercontent.com/Comfy-Org/comfy-claude-prompt-library/master/project-summaries-for-agents/ComfyUI_frontend/REPOSITORY_GUIDE.md" "review_knowledge/repository_guide.md"
fi
# 3. Discover and load relevant knowledge folders from GitHub API
echo "Discovering available knowledge folders..."
KNOWLEDGE_API_URL="https://api.github.com/repos/Comfy-Org/comfy-claude-prompt-library/contents/.claude/knowledge"
if KNOWLEDGE_FOLDERS=$(curl -s "$KNOWLEDGE_API_URL" | jq -r '.[] | select(.type=="dir") | .name' 2>/dev/null); then
echo "Available knowledge folders: $KNOWLEDGE_FOLDERS"
# Analyze changed files to determine which knowledge folders might be relevant
CHANGED_FILES=$(cat changed_files.txt)
PR_TITLE=$(jq -r '.title' < pr_info.json)
PR_BODY=$(jq -r '.body // ""' < pr_info.json)
# For each knowledge folder, check if it might be relevant to the PR
for folder in $KNOWLEDGE_FOLDERS; do
# Simple heuristic: if folder name appears in changed file paths or PR context
if echo "$CHANGED_FILES $PR_TITLE $PR_BODY" | grep -qi "$folder"; then
echo "Loading knowledge folder: $folder"
# Fetch all files in that knowledge folder
FOLDER_API_URL="https://api.github.com/repos/Comfy-Org/comfy-claude-prompt-library/contents/.claude/knowledge/$folder"
curl -s "$FOLDER_API_URL" | jq -r '.[] | select(.type=="file") | .download_url' 2>/dev/null | \
while read url; do
if [ -n "$url" ]; then
filename=$(basename "$url")
fetch_with_cache "$url" "review_knowledge/${folder}_${filename}"
fi
done
fi
done
else
echo "Could not discover knowledge folders"
fi
# 4. Load validation rules from the repository
echo "Loading validation rules..."
VALIDATION_API_URL="https://api.github.com/repos/Comfy-Org/comfy-claude-prompt-library/contents/.claude/commands/validation"
if VALIDATION_FILES=$(curl -s "$VALIDATION_API_URL" | jq -r '.[] | select(.name | contains("frontend") or contains("security") or contains("performance")) | .download_url' 2>/dev/null); then
for url in $VALIDATION_FILES; do
if [ -n "$url" ]; then
filename=$(basename "$url")
fetch_with_cache "$url" "review_knowledge/validation_${filename}"
fi
done
else
echo "Could not load validation rules"
fi
# 5. Load local project guidelines
if [ -f "CLAUDE.md" ]; then
if [ "$KNOWLEDGE_FOUND" = "false" ]; then
mkdir -p review_knowledge
KNOWLEDGE_FOUND=true
fi
cp CLAUDE.md review_knowledge/local_claude.md
fi
if [ -f ".github/CLAUDE.md" ]; then
if [ "$KNOWLEDGE_FOUND" = "false" ]; then
mkdir -p review_knowledge
KNOWLEDGE_FOUND=true
fi
cp .github/CLAUDE.md review_knowledge/github_claude.md
fi
```
## Phase 3: Deep Analysis Instructions
Perform a comprehensive analysis covering these areas:
### 3.1 Architectural Analysis
Based on the repository guide and project summary, evaluate:
- Does this change align with the established architecture patterns?
- Are domain boundaries respected?
- Is the extension system used appropriately?
- Are components properly organized by feature?
- Does it follow the established service/composable/store patterns?
### 3.2 Code Quality Beyond Linting
- Cyclomatic complexity and cognitive load
- SOLID principles adherence
- DRY violations that aren't caught by simple duplication checks
- Proper abstraction levels
- Interface design and API clarity
- No leftover debug code (console.log, commented code, TODO comments)
### 3.3 Library Usage Enforcement
CRITICAL: Never re-implement functionality that exists in our standard libraries (a short sketch of the preferred direction follows this list):
- **Tailwind CSS**: Use utility classes instead of custom CSS or style attributes
- **PrimeVue**: Never re-implement components that exist in PrimeVue (buttons, modals, dropdowns, etc.)
- **VueUse**: Never re-implement composables that exist in VueUse (useLocalStorage, useDebounceFn, etc.)
- **Lodash**: Never re-implement utility functions (debounce, throttle, cloneDeep, etc.)
- **Common components**: Reuse components from src/components/common/
- **DOMPurify**: Always use for HTML sanitization
- **Fuse.js**: Use for fuzzy search functionality
- **Marked**: Use for markdown parsing
- **Pinia**: Use for global state management, not custom solutions
- **Zod**: Use for form validation with zodResolver pattern
- **Tiptap**: Use for rich text/markdown editing
- **Xterm.js**: Use for terminal emulation
- **Axios**: Use for HTTP client initialization
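For example, a hand-rolled debounce or a manual localStorage wrapper in a diff should be flagged in favor of the existing utilities. A minimal sketch of the preferred direction (illustrative only; the actual code under review will differ):
```typescript
// Illustrative only: prefer existing utilities over re-implementations
import { useDebounceFn, useLocalStorage } from '@vueuse/core'
import { cloneDeep } from 'lodash'

// Instead of a custom setTimeout-based debounce:
const onSearch = useDebounceFn((query: string) => {
  // ...trigger the search here
}, 300)

// Instead of manual localStorage get/set plumbing:
const recentSearches = useLocalStorage<string[]>('recent-searches', [])

// Instead of a hand-written recursive copy:
const snapshot = cloneDeep(recentSearches.value)
```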
### 3.4 Security Deep Dive
Beyond obvious vulnerabilities:
- Authentication/authorization implications
- Data validation completeness
- State management security
- Cross-origin concerns
- Extension security boundaries
### 3.5 Performance Analysis
- Render performance implications
- Layout thrashing prevention
- Memory leak potential (see the sketch after this list)
- Network request optimization
- State management efficiency
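One concrete pattern to look for under memory leaks is raw event listeners without matching cleanup; a minimal sketch of the preferred alternative (illustrative only):
```typescript
// Illustrative only: a pattern to look for under "memory leak potential"
import { useEventListener } from '@vueuse/core'

function handleResize() {
  // ...recompute layout here
}

// Leak-prone inside a component: window.addEventListener('resize', handleResize)
// with no matching removeEventListener when the component unmounts.

// Preferred: when called in a component's setup, VueUse removes the listener on unmount
useEventListener(window, 'resize', handleResize)
```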
### 3.6 Integration Concerns
- Breaking changes to internal APIs
- Extension compatibility
- Backward compatibility
- Migration requirements
## Phase 4: Create Detailed Review Comments
CRITICAL: Keep comments extremely concise and effective. Use only as many words as absolutely necessary.
- NO markdown formatting (no #, ##, ###, **, etc.)
- NO emojis
- Get to the point immediately
- Burden the reader as little as possible
For each issue found, create a concise inline comment with:
1. What's wrong (one line)
2. Why it matters (one line)
3. How to fix it (one line)
4. Code example only if essential
```bash
# Helper function for comprehensive comments
post_review_comment() {
local file_path=$1
local line_number=$2
local severity=$3 # critical/high/medium/low
local category=$4 # architecture/security/performance/quality
local issue=$5
local context=$6
local suggestion=$7
local example=$8
local body="### [$category] $severity Priority
**Issue**: $issue
**Context**: $context
**Suggestion**: $suggestion"
if [ -n "$example" ]; then
body="$body
**Example**:
\`\`\`typescript
$example
\`\`\`"
fi
body="$body
*Related: See [repository guide](https://github.com/Comfy-Org/comfy-claude-prompt-library/blob/master/project-summaries-for-agents/ComfyUI_frontend/REPOSITORY_GUIDE.md) for patterns*"
gh api -X POST /repos/$REPOSITORY/pulls/$PR_NUMBER/comments \
-f path="$file_path" \
-F line=$line_number \
-f body="$body" \
-f commit_id="$COMMIT_SHA" \
-f side='RIGHT' || echo "Failed to post comment at $file_path:$line_number"
}
```
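A hypothetical invocation, to make the argument order concrete (the file, line, and texts are placeholders, not findings from any real PR):
```bash
# Hypothetical call: every value below is a placeholder
post_review_comment \
  "src/components/Example.vue" 42 "high" "security" \
  "User-provided HTML is rendered without sanitization" \
  "The string comes from workflow metadata, which is user-controlled" \
  "Run the value through DOMPurify.sanitize() before binding it" \
  "const safe = DOMPurify.sanitize(rawHtml)"
```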
## Phase 5: Validation Rules Application
Apply ALL validation rules from the loaded knowledge, but focus on the changed lines:
### From Frontend Standards
- Vue 3 Composition API patterns
- Component communication patterns
- Proper use of composables
- TypeScript strict mode compliance
- Bundle optimization
### From Security Audit
- Input validation
- XSS prevention
- CSRF protection
- Secure state management
- API security
### From Performance Check
- Render optimization
- Memory management
- Network efficiency
- Bundle size impact
## Phase 6: Contextual Review Based on PR Type
Analyze the PR description and changes to determine the type:
```bash
# Extract PR metadata with error handling
if [ ! -f pr_info.json ]; then
echo "Error: pr_info.json not found"
exit 1
fi
PR_TITLE=$(jq -r '.title // "Unknown"' < pr_info.json)
PR_BODY=$(jq -r '.body // ""' < pr_info.json)
FILE_COUNT=$(wc -l < changed_files.txt)
ADDITIONS=$(jq -r '.additions // 0' < pr_info.json)
DELETIONS=$(jq -r '.deletions // 0' < pr_info.json)
# Determine PR type and apply specific review criteria
if echo "$PR_TITLE $PR_BODY" | grep -qiE "(feature|feat)"; then
echo "Detected feature PR - applying feature review criteria"
# Check for tests, documentation, backward compatibility
elif echo "$PR_TITLE $PR_BODY" | grep -qiE "(fix|bug)"; then
echo "Detected bug fix - checking root cause and regression tests"
# Verify fix addresses root cause, includes tests
elif echo "$PR_TITLE $PR_BODY" | grep -qiE "(refactor)"; then
echo "Detected refactoring - ensuring behavior preservation"
# Check that tests still pass, no behavior changes
fi
```
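One way the per-type criteria could be made concrete is a quick check for accompanying test changes; a sketch (the test paths are assumptions about this repository's layout):
```bash
# Sketch: flag feature/fix PRs that do not touch any test files (paths are assumptions)
TEST_CHANGES=$(grep -E '^browser_tests/|\.(spec|test)\.ts$' changed_files.txt || true)
if echo "$PR_TITLE $PR_BODY" | grep -qiE "(feature|feat|fix|bug)" && [ -z "$TEST_CHANGES" ]; then
  echo "Note: no test files changed - verify whether new or regression tests are needed"
fi
```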
## Phase 7: Generate Comprehensive Summary
After all inline comments, create a detailed summary:
```bash
# REVIEW_START_TIME was captured in Phase 0, so the duration computed in Phase 8 covers the full review
# Create the comprehensive summary
gh pr review $PR_NUMBER --comment --body "# Comprehensive PR Review
This review is generated by Claude and, like any human review, may not always be accurate. If you believe a comment is invalid or incorrect, please explain why; otherwise, please address it in some form.
## Review Summary
**PR**: $PR_TITLE (#$PR_NUMBER)
**Impact**: $ADDITIONS additions, $DELETIONS deletions across $FILE_COUNT files
### Issue Distribution
- Critical: $CRITICAL_COUNT
- High: $HIGH_COUNT
- Medium: $MEDIUM_COUNT
- Low: $LOW_COUNT
### Category Breakdown
- Architecture: $ARCHITECTURE_ISSUES issues
- Security: $SECURITY_ISSUES issues
- Performance: $PERFORMANCE_ISSUES issues
- Code Quality: $QUALITY_ISSUES issues
## Key Findings
### Architecture & Design
[Detailed architectural analysis based on repository patterns]
### Security Considerations
[Security implications beyond basic vulnerabilities]
### Performance Impact
[Performance analysis including bundle size, render impact]
### Integration Points
[How this affects other systems, extensions, etc.]
## Positive Observations
[What was done well, good patterns followed]
## References
- [Repository Architecture Guide](https://github.com/Comfy-Org/comfy-claude-prompt-library/blob/master/project-summaries-for-agents/ComfyUI_frontend/REPOSITORY_GUIDE.md)
- [Frontend Standards](https://github.com/Comfy-Org/comfy-claude-prompt-library/blob/master/.claude/commands/validation/frontend-code-standards.md)
- [Security Guidelines](https://github.com/Comfy-Org/comfy-claude-prompt-library/blob/master/.claude/commands/validation/security-audit.md)
## Next Steps
1. Address critical issues before merge
2. Consider architectural feedback for long-term maintainability
3. Add tests for uncovered scenarios
4. Update documentation if needed
---
*This is a comprehensive automated review. For architectural decisions requiring human judgment, please request additional manual review.*"
```
## Important: Think Deeply
When reviewing:
1. **Think hard** about architectural implications
2. Consider how changes affect the entire system
3. Look for subtle bugs and edge cases
4. Evaluate maintainability over time
5. Consider extension developer experience
6. Think about migration paths
This is a COMPREHENSIVE review, not a linting pass. Provide the same quality feedback a senior engineer would give after careful consideration.
## Phase 8: Track Review Metrics
After completing the review, save metrics for analysis:
```bash
# Calculate review duration
REVIEW_END_TIME=$(date +%s)
REVIEW_DURATION=$((REVIEW_END_TIME - REVIEW_START_TIME))
# Calculate total issues
TOTAL_ISSUES=$((CRITICAL_COUNT + HIGH_COUNT + MEDIUM_COUNT + LOW_COUNT))
# Create metrics directory if it doesn't exist
METRICS_DIR=".claude/review-metrics"
mkdir -p "$METRICS_DIR"
# Generate metrics file
METRICS_FILE="$METRICS_DIR/metrics-$(date +%Y%m).json"
# Create or update monthly metrics file
if [ -f "$METRICS_FILE" ]; then
# Append to existing file
jq -n \
--arg pr "$PR_NUMBER" \
--arg title "$PR_TITLE" \
--arg timestamp "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
--arg duration "$REVIEW_DURATION" \
--arg files "$FILE_COUNT" \
--arg additions "$ADDITIONS" \
--arg deletions "$DELETIONS" \
--arg total "$TOTAL_ISSUES" \
--arg critical "$CRITICAL_COUNT" \
--arg high "$HIGH_COUNT" \
--arg medium "$MEDIUM_COUNT" \
--arg low "$LOW_COUNT" \
--arg architecture "$ARCHITECTURE_ISSUES" \
--arg security "$SECURITY_ISSUES" \
--arg performance "$PERFORMANCE_ISSUES" \
--arg quality "$QUALITY_ISSUES" \
'{
pr_number: $pr,
pr_title: $title,
timestamp: $timestamp,
review_duration_seconds: ($duration | tonumber),
files_reviewed: ($files | tonumber),
lines_added: ($additions | tonumber),
lines_deleted: ($deletions | tonumber),
issues: {
total: ($total | tonumber),
by_severity: {
critical: ($critical | tonumber),
high: ($high | tonumber),
medium: ($medium | tonumber),
low: ($low | tonumber)
},
by_category: {
architecture: ($architecture | tonumber),
security: ($security | tonumber),
performance: ($performance | tonumber),
quality: ($quality | tonumber)
}
}
}' > "$METRICS_FILE.new"
# Merge with existing data
jq -s '.[0] + [.[1]]' "$METRICS_FILE" "$METRICS_FILE.new" > "$METRICS_FILE.tmp"
mv "$METRICS_FILE.tmp" "$METRICS_FILE"
rm "$METRICS_FILE.new"
else
# Create new file
jq -n \
--arg pr "$PR_NUMBER" \
--arg title "$PR_TITLE" \
--arg timestamp "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
--arg duration "$REVIEW_DURATION" \
--arg files "$FILE_COUNT" \
--arg additions "$ADDITIONS" \
--arg deletions "$DELETIONS" \
--arg total "$TOTAL_ISSUES" \
--arg critical "$CRITICAL_COUNT" \
--arg high "$HIGH_COUNT" \
--arg medium "$MEDIUM_COUNT" \
--arg low "$LOW_COUNT" \
--arg architecture "$ARCHITECTURE_ISSUES" \
--arg security "$SECURITY_ISSUES" \
--arg performance "$PERFORMANCE_ISSUES" \
--arg quality "$QUALITY_ISSUES" \
'[{
pr_number: $pr,
pr_title: $title,
timestamp: $timestamp,
review_duration_seconds: ($duration | tonumber),
files_reviewed: ($files | tonumber),
lines_added: ($additions | tonumber),
lines_deleted: ($deletions | tonumber),
issues: {
total: ($total | tonumber),
by_severity: {
critical: ($critical | tonumber),
high: ($high | tonumber),
medium: ($medium | tonumber),
low: ($low | tonumber)
},
by_category: {
architecture: ($architecture | tonumber),
security: ($security | tonumber),
performance: ($performance | tonumber),
quality: ($quality | tonumber)
}
}
}]' > "$METRICS_FILE"
fi
echo "Review metrics saved to $METRICS_FILE"
```
This creates monthly metrics files (e.g., `metrics-202407.json`) that track:
- Which PRs were reviewed
- How long reviews took
- Types and severity of issues found
- Trends over time
You can later analyze these to see patterns and improve your development process.
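For example, a single jq query can summarize one month's file (a sketch; adjust the path to the month you want):
```bash
# Sketch: summarize one month's metrics file
jq '{
  reviews: length,
  total_issues: (map(.issues.total) | add),
  avg_duration_seconds: ((map(.review_duration_seconds) | add) / length)
}' .claude/review-metrics/metrics-202407.json
```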

.github/CLAUDE.md

@@ -0,0 +1,36 @@
# ComfyUI Frontend - Claude Review Context
This file provides additional context for the automated PR review system.
## Quick Reference
### PrimeVue Component Migrations
When reviewing, flag these deprecated components (a minimal migration sketch follows the list):
- `Dropdown` → Use `Select` from 'primevue/select'
- `OverlayPanel` → Use `Popover` from 'primevue/popover'
- `Calendar` → Use `DatePicker` from 'primevue/datepicker'
- `InputSwitch` → Use `ToggleSwitch` from 'primevue/toggleswitch'
- `Sidebar` → Use `Drawer` from 'primevue/drawer'
- `Chips` → Use `AutoComplete` with multiple enabled and typeahead disabled
- `TabMenu` → Use `Tabs` without panels
- `Steps` → Use `Stepper` without panels
- `InlineMessage` → Use `Message` component
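A minimal before/after sketch for one such migration (the surrounding template code is illustrative, not taken from any specific component):
```typescript
// Before (deprecated, should be flagged):
// import Dropdown from 'primevue/dropdown'
// <Dropdown v-model="selected" :options="options" optionLabel="name" />

// After (preferred replacement):
import Select from 'primevue/select'
// <Select v-model="selected" :options="options" optionLabel="name" />
```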
### API Utilities Reference
- `api.apiURL()` - Backend API calls (/prompt, /queue, /view, etc.)
- `api.fileURL()` - Static file access (templates, extensions)
- `$t()` / `i18n.global.t()` - Internationalization
- `DOMPurify.sanitize()` - HTML sanitization (usage sketch below)
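A short illustrative snippet combining these utilities (the module paths and i18n key are assumptions, not verified against the current codebase):
```typescript
import DOMPurify from 'dompurify'
// Module paths below are assumptions about this repository's layout
import { api } from '@/scripts/api'
import { i18n } from '@/i18n'

// Backend calls go through api.apiURL(), never a hard-coded host or path prefix
const queueUrl = api.apiURL('/queue')

// Untrusted HTML is sanitized before it reaches the DOM
const safeHtml = DOMPurify.sanitize('<p>untrusted <img src=x onerror=alert(1)></p>')

// User-facing text goes through i18n rather than hard-coded strings ('g.loading' is a placeholder key)
const label = i18n.global.t('g.loading')
```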
## Review Scope
This automated review performs comprehensive analysis including:
- Architecture and design patterns
- Security vulnerabilities
- Performance implications
- Code quality and maintainability
- Integration concerns
For implementation details, see `.claude/commands/comprehensive-pr-review.md`.

.github/workflows/claude-pr-review.yml

@@ -0,0 +1,75 @@
name: Claude PR Review
permissions:
contents: read
pull-requests: write
issues: write
on:
pull_request:
types: [labeled]
jobs:
wait-for-ci:
runs-on: ubuntu-latest
if: github.event.label.name == 'claude-review'
outputs:
should-proceed: ${{ steps.check-status.outputs.proceed }}
steps:
- name: Wait for other CI checks
uses: lewagon/wait-on-check-action@v1.3.1
with:
ref: ${{ github.event.pull_request.head.sha }}
check-regexp: '^(ESLint|Prettier Check|Tests CI|Vitest Tests)'
wait-interval: 30
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Check if we should proceed
id: check-status
run: |
# Get all check runs for this commit
CHECK_RUNS=$(gh api repos/${{ github.repository }}/commits/${{ github.event.pull_request.head.sha }}/check-runs --jq '.check_runs[] | select(.name | test("ESLint|Prettier Check|Tests CI|Vitest Tests")) | {name, conclusion}')
# Check if any required checks failed
if echo "$CHECK_RUNS" | grep -q '"conclusion": "failure"'; then
echo "Some CI checks failed - skipping Claude review"
echo "proceed=false" >> $GITHUB_OUTPUT
else
echo "All CI checks passed - proceeding with Claude review"
echo "proceed=true" >> $GITHUB_OUTPUT
fi
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
claude-review:
needs: wait-for-ci
if: needs.wait-for-ci.outputs.should-proceed == 'true'
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
- name: Install dependencies for analysis tools
run: |
npm install -g typescript @vue/compiler-sfc
- name: Run Claude PR Review
uses: anthropics/claude-code-action@main
with:
prompt_file: .claude/commands/comprehensive-pr-review.md
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
max_turns: 1
timeout_minutes: 30
env:
PR_NUMBER: ${{ github.event.pull_request.number }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
COMMIT_SHA: ${{ github.event.pull_request.head.sha }}
BASE_SHA: ${{ github.event.pull_request.base.sha }}
REPOSITORY: ${{ github.repository }}

.github/workflows/pr-checks.yml

@@ -0,0 +1,154 @@
name: PR Checks
on:
pull_request:
types: [opened, edited, synchronize, reopened]
permissions:
contents: read
pull-requests: read
jobs:
analyze:
runs-on: ubuntu-latest
outputs:
should_run: ${{ steps.check-changes.outputs.should_run }}
has_browser_tests: ${{ steps.check-coverage.outputs.has_browser_tests }}
has_screen_recording: ${{ steps.check-recording.outputs.has_recording }}
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Ensure base branch is available
run: |
# Fetch the specific base commit to ensure it's available for git diff
git fetch origin ${{ github.event.pull_request.base.sha }}
- name: Check if significant changes exist
id: check-changes
run: |
# Get list of changed files
CHANGED_FILES=$(git diff --name-only ${{ github.event.pull_request.base.sha }}...${{ github.event.pull_request.head.sha }})
# Filter for src/ files
SRC_FILES=$(echo "$CHANGED_FILES" | grep '^src/' || true)
if [ -z "$SRC_FILES" ]; then
echo "No src/ files changed"
echo "should_run=false" >> "$GITHUB_OUTPUT"
exit 0
fi
# Count lines changed in src files
TOTAL_LINES=0
for file in $SRC_FILES; do
if [ -f "$file" ]; then
# Count added lines (non-empty)
ADDED=$(git diff ${{ github.event.pull_request.base.sha }}...${{ github.event.pull_request.head.sha }} -- "$file" | grep '^+' | grep -v '^+++' | grep -v '^+$' | wc -l)
# Count removed lines (non-empty)
REMOVED=$(git diff ${{ github.event.pull_request.base.sha }}...${{ github.event.pull_request.head.sha }} -- "$file" | grep '^-' | grep -v '^---' | grep -v '^-$' | wc -l)
TOTAL_LINES=$((TOTAL_LINES + ADDED + REMOVED))
fi
done
echo "Total lines changed in src/: $TOTAL_LINES"
if [ $TOTAL_LINES -gt 3 ]; then
echo "should_run=true" >> "$GITHUB_OUTPUT"
else
echo "should_run=false" >> "$GITHUB_OUTPUT"
fi
- name: Check browser test coverage
id: check-coverage
if: steps.check-changes.outputs.should_run == 'true'
run: |
# Check if browser tests were updated
BROWSER_TEST_CHANGES=$(git diff --name-only ${{ github.event.pull_request.base.sha }}...${{ github.event.pull_request.head.sha }} | grep '^browser_tests/.*\.ts$' || true)
if [ -n "$BROWSER_TEST_CHANGES" ]; then
echo "has_browser_tests=true" >> "$GITHUB_OUTPUT"
else
echo "has_browser_tests=false" >> "$GITHUB_OUTPUT"
fi
- name: Check for screen recording
id: check-recording
if: steps.check-changes.outputs.should_run == 'true'
env:
PR_BODY: ${{ github.event.pull_request.body }}
run: |
# Check PR body for screen recording (read from env to avoid shell injection from untrusted input)
# Check for GitHub user attachments or YouTube links
if echo "$PR_BODY" | grep -qiE 'github\.com/user-attachments/assets/[a-f0-9-]+|youtube\.com/watch|youtu\.be/'; then
echo "has_recording=true" >> "$GITHUB_OUTPUT"
else
echo "has_recording=false" >> "$GITHUB_OUTPUT"
fi
- name: Final check and create results
id: final-check
if: always()
run: |
# Initialize results
WARNINGS_JSON=""
# Only run checks if should_run is true
if [ "${{ steps.check-changes.outputs.should_run }}" == "true" ]; then
# Check browser test coverage
if [ "${{ steps.check-coverage.outputs.has_browser_tests }}" != "true" ]; then
if [ -n "$WARNINGS_JSON" ]; then
WARNINGS_JSON="${WARNINGS_JSON},"
fi
WARNINGS_JSON="${WARNINGS_JSON}{\"message\":\"⚠️ **Warning: E2E Test Coverage Missing**\\n\\nIf this PR modifies behavior that can be covered by browser-based E2E tests, those tests are required. PRs lacking applicable test coverage may not be reviewed until added. Please add or update browser tests to ensure code quality and prevent regressions.\"}"
fi
# Check screen recording
if [ "${{ steps.check-recording.outputs.has_recording }}" != "true" ]; then
if [ -n "$WARNINGS_JSON" ]; then
WARNINGS_JSON="${WARNINGS_JSON},"
fi
WARNINGS_JSON="${WARNINGS_JSON}{\"message\":\"⚠️ **Warning: Visual Documentation Missing**\\n\\nIf this PR changes user-facing behavior, visual proof (screen recording or screenshot) is required. PRs without applicable visual documentation may not be reviewed until provided.\\nYou can add it by:\\n\\n- GitHub: Drag & drop media directly into the PR description\\n\\n- YouTube: Include a link to a short demo\"}"
fi
fi
# Create results JSON
if [ -n "$WARNINGS_JSON" ]; then
# Create JSON with warnings
cat > pr-check-results.json << EOF
{
"fails": [],
"warnings": [$WARNINGS_JSON],
"messages": [],
"markdowns": []
}
EOF
echo "failed=false" >> "$GITHUB_OUTPUT"
else
# Create JSON with success
cat > pr-check-results.json << 'EOF'
{
"fails": [],
"warnings": [],
"messages": [],
"markdowns": []
}
EOF
echo "failed=false" >> "$GITHUB_OUTPUT"
fi
# Write PR metadata
echo "${{ github.event.pull_request.number }}" > pr-number.txt
echo "${{ github.event.pull_request.head.sha }}" > pr-sha.txt
- name: Upload results
uses: actions/upload-artifact@v4
if: always()
with:
name: pr-check-results-${{ github.run_id }}
path: |
pr-check-results.json
pr-number.txt
pr-sha.txt
retention-days: 1

.github/workflows/pr-comment.yml

@@ -0,0 +1,149 @@
name: PR Comment
on:
workflow_run:
workflows: ["PR Checks"]
types: [completed]
permissions:
pull-requests: write
issues: write
statuses: write
jobs:
comment:
if: github.event.workflow_run.event == 'pull_request'
runs-on: ubuntu-latest
steps:
- name: Download artifacts
uses: actions/download-artifact@v4
with:
name: pr-check-results-${{ github.event.workflow_run.id }}
path: /tmp/pr-artifacts
github-token: ${{ secrets.GITHUB_TOKEN }}
run-id: ${{ github.event.workflow_run.id }}
- name: Post results
uses: actions/github-script@v7
with:
script: |
const fs = require('fs');
const path = require('path');
// Helper function to safely read files
function safeReadFile(filePath) {
try {
if (!fs.existsSync(filePath)) return null;
return fs.readFileSync(filePath, 'utf8').trim();
} catch (e) {
console.error(`Error reading ${filePath}:`, e);
return null;
}
}
// Read artifact files
const artifactDir = '/tmp/pr-artifacts';
const prNumber = safeReadFile(path.join(artifactDir, 'pr-number.txt'));
const prSha = safeReadFile(path.join(artifactDir, 'pr-sha.txt'));
const resultsJson = safeReadFile(path.join(artifactDir, 'pr-check-results.json'));
// Validate PR number
if (!prNumber || isNaN(parseInt(prNumber))) {
throw new Error('Invalid or missing PR number');
}
// Parse and validate results
let results;
try {
results = JSON.parse(resultsJson || '{}');
} catch (e) {
console.error('Failed to parse check results:', e);
// Post error comment
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: parseInt(prNumber),
body: `⚠️ PR checks failed to complete properly. Error parsing results: ${e.message}`
});
return;
}
// Format check messages
const messages = [];
if (results.fails && results.fails.length > 0) {
messages.push('### ❌ Failures\n' + results.fails.map(f => f.message).join('\n\n'));
}
if (results.warnings && results.warnings.length > 0) {
messages.push('### ⚠️ Warnings\n' + results.warnings.map(w => w.message).join('\n\n'));
}
if (results.messages && results.messages.length > 0) {
messages.push('### 💬 Messages\n' + results.messages.map(m => m.message).join('\n\n'));
}
if (results.markdowns && results.markdowns.length > 0) {
messages.push(...results.markdowns.map(m => m.message));
}
// Find existing bot comment
const comments = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: parseInt(prNumber)
});
const botComment = comments.data.find(comment =>
comment.user.type === 'Bot' &&
comment.body.includes('<!-- pr-checks-comment -->')
);
// Post comment if there are any messages
if (messages.length > 0) {
const body = messages.join('\n\n');
const commentBody = `<!-- pr-checks-comment -->\n${body}`;
if (botComment) {
await github.rest.issues.updateComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: botComment.id,
body: commentBody
});
} else {
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: parseInt(prNumber),
body: commentBody
});
}
} else {
// No messages - delete existing comment if present
if (botComment) {
await github.rest.issues.deleteComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: botComment.id
});
}
}
// Set commit status based on failures
if (prSha) {
const hasFailures = results.fails && results.fails.length > 0;
const hasWarnings = results.warnings && results.warnings.length > 0;
await github.rest.repos.createCommitStatus({
owner: context.repo.owner,
repo: context.repo.repo,
sha: prSha,
state: hasFailures ? 'failure' : 'success',
context: 'pr-checks',
description: hasFailures
? `${results.fails.length} check(s) failed`
: hasWarnings
? `${results.warnings.length} warning(s)`
: 'All checks passed'
});
}

View File

@@ -0,0 +1,716 @@
{
"id": "976d6e9a-927d-42db-abd4-96bfc0ecf8d9",
"revision": 0,
"last_node_id": 10,
"last_link_id": 11,
"nodes": [
{
"id": 10,
"type": "8beb610f-ddd1-4489-ae0d-2f732a4042ae",
"pos": [
532,
412.5
],
"size": [
140,
46
],
"flags": {},
"order": 0,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"links": [
10
]
},
{
"name": "VAE",
"type": "VAE",
"links": [
11
]
}
],
"title": "subgraph 2",
"properties": {},
"widgets_values": []
},
{
"id": 8,
"type": "VAEDecode",
"pos": [
758.2109985351562,
398.3681335449219
],
"size": [
210,
46
],
"flags": {},
"order": 1,
"mode": 0,
"inputs": [
{
"name": "samples",
"type": "LATENT",
"link": 10
},
{
"name": "vae",
"type": "VAE",
"link": 11
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"slot_index": 0,
"links": [
9
]
}
],
"properties": {
"Node name for S&R": "VAEDecode"
},
"widgets_values": []
},
{
"id": 9,
"type": "SaveImage",
"pos": [
1028.9615478515625,
381.83746337890625
],
"size": [
210,
270
],
"flags": {},
"order": 2,
"mode": 0,
"inputs": [
{
"name": "images",
"type": "IMAGE",
"link": 9
}
],
"outputs": [],
"properties": {},
"widgets_values": [
"ComfyUI"
]
}
],
"links": [
[
9,
8,
0,
9,
0,
"IMAGE"
],
[
10,
10,
0,
8,
0,
"LATENT"
],
[
11,
10,
1,
8,
1,
"VAE"
]
],
"groups": [],
"definitions": {
"subgraphs": [
{
"id": "8beb610f-ddd1-4489-ae0d-2f732a4042ae",
"version": 1,
"state": {
"lastGroupId": 0,
"lastNodeId": 10,
"lastLinkId": 14,
"lastRerouteId": 0
},
"revision": 0,
"config": {},
"name": "subgraph 2",
"inputNode": {
"id": -10,
"bounding": [
-154,
415.5,
120,
40
]
},
"outputNode": {
"id": -20,
"bounding": [
1238,
395.5,
120,
80
]
},
"inputs": [],
"outputs": [
{
"id": "4d6c7e4e-971e-4f78-9218-9a604db53a4b",
"name": "LATENT",
"type": "LATENT",
"linkIds": [
7
],
"localized_name": "LATENT",
"pos": {
"0": 1258,
"1": 415.5
}
},
{
"id": "f8201d4f-7fc6-4a1b-b8c9-9f0716d9c09a",
"name": "VAE",
"type": "VAE",
"linkIds": [
14
],
"localized_name": "VAE",
"pos": {
"0": 1258,
"1": 435.5
}
}
],
"widgets": [],
"nodes": [
{
"id": 6,
"type": "CLIPTextEncode",
"pos": [
415,
186
],
"size": [
422.84503173828125,
164.31304931640625
],
"flags": {},
"order": 0,
"mode": 0,
"inputs": [
{
"localized_name": "clip",
"name": "clip",
"type": "CLIP",
"link": 13
}
],
"outputs": [
{
"localized_name": "CONDITIONING",
"name": "CONDITIONING",
"type": "CONDITIONING",
"slot_index": 0,
"links": [
4
]
}
],
"properties": {
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
"beautiful scenery nature glass bottle landscape, , purple galaxy bottle,"
]
},
{
"id": 3,
"type": "KSampler",
"pos": [
863,
186
],
"size": [
315,
262
],
"flags": {},
"order": 2,
"mode": 0,
"inputs": [
{
"localized_name": "model",
"name": "model",
"type": "MODEL",
"link": 12
},
{
"localized_name": "positive",
"name": "positive",
"type": "CONDITIONING",
"link": 4
},
{
"localized_name": "negative",
"name": "negative",
"type": "CONDITIONING",
"link": 10
},
{
"localized_name": "latent_image",
"name": "latent_image",
"type": "LATENT",
"link": 11
}
],
"outputs": [
{
"localized_name": "LATENT",
"name": "LATENT",
"type": "LATENT",
"slot_index": 0,
"links": [
7
]
}
],
"properties": {
"Node name for S&R": "KSampler"
},
"widgets_values": [
32115495257102,
"randomize",
20,
8,
"euler",
"normal",
1
]
},
{
"id": 10,
"type": "dbe5763f-440b-47b4-82ac-454f1f98b0e3",
"pos": [
194.13900756835938,
657.3333740234375
],
"size": [
140,
106
],
"flags": {},
"order": 1,
"mode": 0,
"inputs": [],
"outputs": [
{
"localized_name": "CONDITIONING",
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [
10
]
},
{
"localized_name": "LATENT",
"name": "LATENT",
"type": "LATENT",
"links": [
11
]
},
{
"localized_name": "MODEL",
"name": "MODEL",
"type": "MODEL",
"links": [
12
]
},
{
"localized_name": "CLIP",
"name": "CLIP",
"type": "CLIP",
"links": [
13
]
},
{
"localized_name": "VAE",
"name": "VAE",
"type": "VAE",
"links": [
14
]
}
],
"title": "subgraph 3",
"properties": {},
"widgets_values": []
}
],
"groups": [],
"links": [
{
"id": 4,
"origin_id": 6,
"origin_slot": 0,
"target_id": 3,
"target_slot": 1,
"type": "CONDITIONING"
},
{
"id": 7,
"origin_id": 3,
"origin_slot": 0,
"target_id": -20,
"target_slot": 0,
"type": "LATENT"
},
{
"id": 10,
"origin_id": 10,
"origin_slot": 0,
"target_id": 3,
"target_slot": 2,
"type": "CONDITIONING"
},
{
"id": 11,
"origin_id": 10,
"origin_slot": 1,
"target_id": 3,
"target_slot": 3,
"type": "LATENT"
},
{
"id": 12,
"origin_id": 10,
"origin_slot": 2,
"target_id": 3,
"target_slot": 0,
"type": "MODEL"
},
{
"id": 13,
"origin_id": 10,
"origin_slot": 3,
"target_id": 6,
"target_slot": 0,
"type": "CLIP"
},
{
"id": 14,
"origin_id": 10,
"origin_slot": 4,
"target_id": -20,
"target_slot": 1,
"type": "VAE"
}
],
"extra": {}
},
{
"id": "dbe5763f-440b-47b4-82ac-454f1f98b0e3",
"version": 1,
"state": {
"lastGroupId": 0,
"lastNodeId": 9,
"lastLinkId": 9,
"lastRerouteId": 0
},
"revision": 0,
"config": {},
"name": "subgraph 3",
"inputNode": {
"id": -10,
"bounding": [
-154,
517,
120,
40
]
},
"outputNode": {
"id": -20,
"bounding": [
898.2780151367188,
467,
128.6640625,
140
]
},
"inputs": [],
"outputs": [
{
"id": "b4882169-329b-43f6-a373-81abfbdea55b",
"name": "CONDITIONING",
"type": "CONDITIONING",
"linkIds": [
6
],
"localized_name": "CONDITIONING",
"pos": {
"0": 918.2780151367188,
"1": 487
}
},
{
"id": "01f51f96-a741-428e-8772-9557ee50b609",
"name": "LATENT",
"type": "LATENT",
"linkIds": [
2
],
"localized_name": "LATENT",
"pos": {
"0": 918.2780151367188,
"1": 507
}
},
{
"id": "47fa906e-d80b-45c3-a596-211a0e59d4a1",
"name": "MODEL",
"type": "MODEL",
"linkIds": [
1
],
"localized_name": "MODEL",
"pos": {
"0": 918.2780151367188,
"1": 527
}
},
{
"id": "f03dccd7-10e8-4513-9994-15854a92d192",
"name": "CLIP",
"type": "CLIP",
"linkIds": [
3
],
"localized_name": "CLIP",
"pos": {
"0": 918.2780151367188,
"1": 547
}
},
{
"id": "a666877f-e34f-49bc-8a78-b26156656b83",
"name": "VAE",
"type": "VAE",
"linkIds": [
8
],
"localized_name": "VAE",
"pos": {
"0": 918.2780151367188,
"1": 567
}
}
],
"widgets": [],
"nodes": [
{
"id": 7,
"type": "CLIPTextEncode",
"pos": [
413,
389
],
"size": [
425.27801513671875,
180.6060791015625
],
"flags": {},
"order": 2,
"mode": 0,
"inputs": [
{
"localized_name": "clip",
"name": "clip",
"type": "CLIP",
"link": 5
}
],
"outputs": [
{
"localized_name": "CONDITIONING",
"name": "CONDITIONING",
"type": "CONDITIONING",
"slot_index": 0,
"links": [
6
]
}
],
"properties": {
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
"text, watermark"
]
},
{
"id": 5,
"type": "EmptyLatentImage",
"pos": [
473,
609
],
"size": [
315,
106
],
"flags": {},
"order": 0,
"mode": 0,
"inputs": [],
"outputs": [
{
"localized_name": "LATENT",
"name": "LATENT",
"type": "LATENT",
"slot_index": 0,
"links": [
2
]
}
],
"properties": {
"Node name for S&R": "EmptyLatentImage"
},
"widgets_values": [
512,
512,
1
]
},
{
"id": 4,
"type": "CheckpointLoaderSimple",
"pos": [
26,
474
],
"size": [
315,
98
],
"flags": {},
"order": 1,
"mode": 0,
"inputs": [],
"outputs": [
{
"localized_name": "MODEL",
"name": "MODEL",
"type": "MODEL",
"slot_index": 0,
"links": [
1
]
},
{
"localized_name": "CLIP",
"name": "CLIP",
"type": "CLIP",
"slot_index": 1,
"links": [
3,
5
]
},
{
"localized_name": "VAE",
"name": "VAE",
"type": "VAE",
"slot_index": 2,
"links": [
8
]
}
],
"properties": {
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": [
"v1-5-pruned-emaonly-fp16.safetensors"
]
}
],
"groups": [],
"links": [
{
"id": 5,
"origin_id": 4,
"origin_slot": 1,
"target_id": 7,
"target_slot": 0,
"type": "CLIP"
},
{
"id": 6,
"origin_id": 7,
"origin_slot": 0,
"target_id": -20,
"target_slot": 0,
"type": "CONDITIONING"
},
{
"id": 2,
"origin_id": 5,
"origin_slot": 0,
"target_id": -20,
"target_slot": 1,
"type": "LATENT"
},
{
"id": 1,
"origin_id": 4,
"origin_slot": 0,
"target_id": -20,
"target_slot": 2,
"type": "MODEL"
},
{
"id": 3,
"origin_id": 4,
"origin_slot": 1,
"target_id": -20,
"target_slot": 3,
"type": "CLIP"
},
{
"id": 8,
"origin_id": 4,
"origin_slot": 2,
"target_id": -20,
"target_slot": 4,
"type": "VAE"
}
],
"extra": {}
}
]
},
"config": {},
"extra": {
"frontendVersion": "1.24.0-1"
},
"version": 0.4
}

View File

@@ -0,0 +1,382 @@
import { expect } from '@playwright/test'
import { comfyPageFixture as test } from '../fixtures/ComfyPage'
test.describe('Feature Flags', () => {
test('Client and server exchange feature flags on connection', async ({
comfyPage
}) => {
// Navigate to a new page to capture the initial WebSocket connection
const newPage = await comfyPage.page.context().newPage()
// Set up monitoring before navigation
await newPage.addInitScript(() => {
// This runs before any page scripts
window.__capturedMessages = {
clientFeatureFlags: null,
serverFeatureFlags: null
}
// Capture outgoing client messages
const originalSend = WebSocket.prototype.send
WebSocket.prototype.send = function (data) {
try {
const parsed = JSON.parse(data)
if (parsed.type === 'feature_flags') {
window.__capturedMessages.clientFeatureFlags = parsed
}
} catch (e) {
// Not JSON, ignore
}
return originalSend.call(this, data)
}
// Monitor for server feature flags
const checkInterval = setInterval(() => {
if (
window['app']?.api?.serverFeatureFlags &&
Object.keys(window['app'].api.serverFeatureFlags).length > 0
) {
window.__capturedMessages.serverFeatureFlags =
window['app'].api.serverFeatureFlags
clearInterval(checkInterval)
}
}, 100)
// Clear after 10 seconds
setTimeout(() => clearInterval(checkInterval), 10000)
})
// Navigate to the app
await newPage.goto(comfyPage.url)
// Wait for both client and server feature flags
await newPage.waitForFunction(
() =>
window.__capturedMessages.clientFeatureFlags !== null &&
window.__capturedMessages.serverFeatureFlags !== null,
{ timeout: 10000 }
)
// Get the captured messages
const messages = await newPage.evaluate(() => window.__capturedMessages)
// Verify client sent feature flags
expect(messages.clientFeatureFlags).toBeTruthy()
expect(messages.clientFeatureFlags).toHaveProperty('type', 'feature_flags')
expect(messages.clientFeatureFlags).toHaveProperty('data')
expect(messages.clientFeatureFlags.data).toHaveProperty(
'supports_preview_metadata'
)
expect(
typeof messages.clientFeatureFlags.data.supports_preview_metadata
).toBe('boolean')
// Verify server sent feature flags back
expect(messages.serverFeatureFlags).toBeTruthy()
expect(messages.serverFeatureFlags).toHaveProperty(
'supports_preview_metadata'
)
expect(typeof messages.serverFeatureFlags.supports_preview_metadata).toBe(
'boolean'
)
expect(messages.serverFeatureFlags).toHaveProperty('max_upload_size')
expect(typeof messages.serverFeatureFlags.max_upload_size).toBe('number')
expect(Object.keys(messages.serverFeatureFlags).length).toBeGreaterThan(0)
await newPage.close()
})
test('Server feature flags are received and accessible', async ({
comfyPage
}) => {
// Wait for connection to establish
await comfyPage.page.waitForTimeout(1000)
// Get the actual server feature flags from the backend
const serverFlags = await comfyPage.page.evaluate(() => {
return window['app'].api.serverFeatureFlags
})
// Verify we received real feature flags from the backend
expect(serverFlags).toBeTruthy()
expect(Object.keys(serverFlags).length).toBeGreaterThan(0)
// The backend should send feature flags
expect(serverFlags).toHaveProperty('supports_preview_metadata')
expect(typeof serverFlags.supports_preview_metadata).toBe('boolean')
expect(serverFlags).toHaveProperty('max_upload_size')
expect(typeof serverFlags.max_upload_size).toBe('number')
})
test('serverSupportsFeature method works with real backend flags', async ({
comfyPage
}) => {
// Wait for connection
await comfyPage.page.waitForTimeout(1000)
// Test serverSupportsFeature with real backend flags
const supportsPreviewMetadata = await comfyPage.page.evaluate(() => {
return window['app'].api.serverSupportsFeature(
'supports_preview_metadata'
)
})
// The method should return a boolean based on the backend's value
expect(typeof supportsPreviewMetadata).toBe('boolean')
// Test non-existent feature - should always return false
const supportsNonExistent = await comfyPage.page.evaluate(() => {
return window['app'].api.serverSupportsFeature('non_existent_feature_xyz')
})
expect(supportsNonExistent).toBe(false)
// Test that the method only returns true for boolean true values
const testResults = await comfyPage.page.evaluate(() => {
// Temporarily modify serverFeatureFlags to test behavior
const original = window['app'].api.serverFeatureFlags
window['app'].api.serverFeatureFlags = {
bool_true: true,
bool_false: false,
string_value: 'yes',
number_value: 1,
null_value: null
}
const results = {
bool_true: window['app'].api.serverSupportsFeature('bool_true'),
bool_false: window['app'].api.serverSupportsFeature('bool_false'),
string_value: window['app'].api.serverSupportsFeature('string_value'),
number_value: window['app'].api.serverSupportsFeature('number_value'),
null_value: window['app'].api.serverSupportsFeature('null_value')
}
// Restore original
window['app'].api.serverFeatureFlags = original
return results
})
// serverSupportsFeature should only return true for boolean true values
expect(testResults.bool_true).toBe(true)
expect(testResults.bool_false).toBe(false)
expect(testResults.string_value).toBe(false)
expect(testResults.number_value).toBe(false)
expect(testResults.null_value).toBe(false)
})
test('getServerFeature method works with real backend data', async ({
comfyPage
}) => {
// Wait for connection
await comfyPage.page.waitForTimeout(1000)
// Test getServerFeature method
const previewMetadataValue = await comfyPage.page.evaluate(() => {
return window['app'].api.getServerFeature('supports_preview_metadata')
})
expect(typeof previewMetadataValue).toBe('boolean')
// Test getting max_upload_size
const maxUploadSize = await comfyPage.page.evaluate(() => {
return window['app'].api.getServerFeature('max_upload_size')
})
expect(typeof maxUploadSize).toBe('number')
expect(maxUploadSize).toBeGreaterThan(0)
// Test getServerFeature with default value for non-existent feature
const defaultValue = await comfyPage.page.evaluate(() => {
return window['app'].api.getServerFeature(
'non_existent_feature_xyz',
'default'
)
})
expect(defaultValue).toBe('default')
})
test('getServerFeatures returns all backend feature flags', async ({
comfyPage
}) => {
// Wait for connection
await comfyPage.page.waitForTimeout(1000)
// Test getServerFeatures returns all flags
const allFeatures = await comfyPage.page.evaluate(() => {
return window['app'].api.getServerFeatures()
})
expect(allFeatures).toBeTruthy()
expect(allFeatures).toHaveProperty('supports_preview_metadata')
expect(typeof allFeatures.supports_preview_metadata).toBe('boolean')
expect(allFeatures).toHaveProperty('max_upload_size')
expect(Object.keys(allFeatures).length).toBeGreaterThan(0)
})
test('Client feature flags are immutable', async ({ comfyPage }) => {
// Test that getClientFeatureFlags returns a copy
const immutabilityTest = await comfyPage.page.evaluate(() => {
const flags1 = window['app'].api.getClientFeatureFlags()
const flags2 = window['app'].api.getClientFeatureFlags()
// Modify the first object
flags1.test_modification = true
// Get flags again to check if original was modified
const flags3 = window['app'].api.getClientFeatureFlags()
return {
areEqual: flags1 === flags2,
hasModification: flags3.test_modification !== undefined,
hasSupportsPreview: flags1.supports_preview_metadata !== undefined,
supportsPreviewValue: flags1.supports_preview_metadata
}
})
// Verify they are different objects (not the same reference)
expect(immutabilityTest.areEqual).toBe(false)
// Verify modification didn't affect the original
expect(immutabilityTest.hasModification).toBe(false)
// Verify the flags contain expected properties
expect(immutabilityTest.hasSupportsPreview).toBe(true)
expect(typeof immutabilityTest.supportsPreviewValue).toBe('boolean') // From clientFeatureFlags.json
})
test('Server features are immutable when accessed via getServerFeatures', async ({
comfyPage
}) => {
// Wait for connection to establish
await comfyPage.page.waitForTimeout(1000)
const immutabilityTest = await comfyPage.page.evaluate(() => {
// Get a copy of server features
const features1 = window['app'].api.getServerFeatures()
// Try to modify it
features1.supports_preview_metadata = false
features1.new_feature = 'added'
// Get another copy
const features2 = window['app'].api.getServerFeatures()
return {
modifiedValue: features1.supports_preview_metadata,
originalValue: features2.supports_preview_metadata,
hasNewFeature: features2.new_feature !== undefined,
hasSupportsPreview: features2.supports_preview_metadata !== undefined
}
})
// The modification should only affect the copy
expect(immutabilityTest.modifiedValue).toBe(false)
expect(typeof immutabilityTest.originalValue).toBe('boolean') // Backend sends boolean for supports_preview_metadata
expect(immutabilityTest.hasNewFeature).toBe(false)
expect(immutabilityTest.hasSupportsPreview).toBe(true)
})
test('Feature flags are negotiated early in connection lifecycle', async ({
comfyPage
}) => {
// This test verifies that feature flags are available early in the app lifecycle
// which is important for protocol negotiation
// Create a new page to ensure clean state
const newPage = await comfyPage.page.context().newPage()
// Set up monitoring before navigation
await newPage.addInitScript(() => {
// Track when various app components are ready
;(window as any).__appReadiness = {
featureFlagsReceived: false,
apiInitialized: false,
appInitialized: false
}
// Monitor when feature flags arrive by checking periodically
const checkFeatureFlags = setInterval(() => {
if (
window['app']?.api?.serverFeatureFlags?.supports_preview_metadata !==
undefined
) {
;(window as any).__appReadiness.featureFlagsReceived = true
clearInterval(checkFeatureFlags)
}
}, 10)
// Monitor API initialization
const checkApi = setInterval(() => {
if (window['app']?.api) {
;(window as any).__appReadiness.apiInitialized = true
clearInterval(checkApi)
}
}, 10)
// Monitor app initialization
const checkApp = setInterval(() => {
if (window['app']?.graph) {
;(window as any).__appReadiness.appInitialized = true
clearInterval(checkApp)
}
}, 10)
// Clean up after 10 seconds
setTimeout(() => {
clearInterval(checkFeatureFlags)
clearInterval(checkApi)
clearInterval(checkApp)
}, 10000)
})
// Navigate to the app
await newPage.goto(comfyPage.url)
// Wait for feature flags to be received
await newPage.waitForFunction(
() =>
window['app']?.api?.serverFeatureFlags?.supports_preview_metadata !==
undefined,
{
timeout: 10000
}
)
// Get readiness state
const readiness = await newPage.evaluate(() => {
return {
...(window as any).__appReadiness,
currentFlags: window['app'].api.serverFeatureFlags
}
})
// Verify feature flags are available
expect(readiness.currentFlags).toHaveProperty('supports_preview_metadata')
expect(typeof readiness.currentFlags.supports_preview_metadata).toBe(
'boolean'
)
expect(readiness.currentFlags).toHaveProperty('max_upload_size')
// Verify feature flags were received (we detected them via polling)
expect(readiness.featureFlagsReceived).toBe(true)
// Verify API was initialized (feature flags require API)
expect(readiness.apiInitialized).toBe(true)
await newPage.close()
})
test('Backend /features endpoint returns feature flags', async ({
comfyPage
}) => {
// Test the HTTP endpoint directly
const response = await comfyPage.page.request.get(
`${comfyPage.url}/api/features`
)
expect(response.ok()).toBe(true)
const features = await response.json()
expect(features).toBeTruthy()
expect(features).toHaveProperty('supports_preview_metadata')
expect(typeof features.supports_preview_metadata).toBe('boolean')
expect(features).toHaveProperty('max_upload_size')
expect(Object.keys(features).length).toBeGreaterThan(0)
})
})

View File

@@ -0,0 +1,82 @@
import { expect } from '@playwright/test'
import { comfyPageFixture as test } from '../fixtures/ComfyPage'
test.describe('Subgraph Breadcrumb Title Sync', () => {
test.beforeEach(async ({ comfyPage }) => {
await comfyPage.setSetting('Comfy.UseNewMenu', 'Top')
})
test('Breadcrumb updates when subgraph node title is changed', async ({
comfyPage
}) => {
// Load a workflow with subgraphs
await comfyPage.loadWorkflow('nested-subgraph')
await comfyPage.nextFrame()
// Get the subgraph node by ID (node 10 is the subgraph)
const subgraphNode = await comfyPage.getNodeRefById('10')
// Get node position and double-click on it to enter the subgraph
const nodePos = await subgraphNode.getPosition()
const nodeSize = await subgraphNode.getSize()
await comfyPage.canvas.dblclick({
position: {
x: nodePos.x + nodeSize.width / 2,
y: nodePos.y + nodeSize.height / 2 + 10
}
})
await comfyPage.nextFrame()
// Wait for breadcrumb to appear
await comfyPage.page.waitForSelector('.subgraph-breadcrumb', {
state: 'visible',
timeout: 20000
})
// Get initial breadcrumb text
const breadcrumb = comfyPage.page.locator('.subgraph-breadcrumb')
const initialBreadcrumbText = await breadcrumb.textContent()
// Go back to main graph
await comfyPage.page.keyboard.press('Escape')
// Double-click on the title area of the subgraph node to edit
await comfyPage.canvas.dblclick({
position: {
x: nodePos.x + nodeSize.width / 2,
y: nodePos.y - 10 // Title area is above the node body
},
delay: 5
})
// Wait for title editor to appear
await expect(comfyPage.page.locator('.node-title-editor')).toBeVisible()
// Clear existing text and type new title
await comfyPage.page.keyboard.press('Control+a')
const newTitle = 'Updated Subgraph Title'
await comfyPage.page.keyboard.type(newTitle)
await comfyPage.page.keyboard.press('Enter')
// Wait a frame for the update to complete
await comfyPage.nextFrame()
// Enter the subgraph again
await comfyPage.canvas.dblclick({
position: {
x: nodePos.x + nodeSize.width / 2,
y: nodePos.y + nodeSize.height / 2
},
delay: 5
})
// Wait for breadcrumb
await comfyPage.page.waitForSelector('.subgraph-breadcrumb')
// Check that breadcrumb now shows the new title
const updatedBreadcrumbText = await breadcrumb.textContent()
expect(updatedBreadcrumbText).toContain(newTitle)
expect(updatedBreadcrumbText).not.toBe(initialBreadcrumbText)
})
})

package-lock.json generated
View File

@@ -1,18 +1,18 @@
{
"name": "@comfyorg/comfyui-frontend",
"version": "1.24.0",
"version": "1.24.1",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@comfyorg/comfyui-frontend",
"version": "1.24.0",
"version": "1.24.1",
"license": "GPL-3.0-only",
"dependencies": {
"@alloc/quick-lru": "^5.2.0",
"@atlaskit/pragmatic-drag-and-drop": "^1.3.1",
"@comfyorg/comfyui-electron-types": "^0.4.43",
"@comfyorg/litegraph": "^0.16.7",
"@comfyorg/litegraph": "^0.16.9",
"@primevue/forms": "^4.2.5",
"@primevue/themes": "^4.2.5",
"@sentry/vue": "^8.48.0",
@@ -949,9 +949,9 @@
"license": "GPL-3.0-only"
},
"node_modules/@comfyorg/litegraph": {
"version": "0.16.7",
"resolved": "https://registry.npmjs.org/@comfyorg/litegraph/-/litegraph-0.16.7.tgz",
"integrity": "sha512-ua2nwnOpCzKxihLZOvvckz77wB6cMEZ3XeQtTTsg3nR5qLndZEbN8GpWXHWp0Cmf9XgT3pEWVPBBKLHvbn5S7g==",
"version": "0.16.9",
"resolved": "https://registry.npmjs.org/@comfyorg/litegraph/-/litegraph-0.16.9.tgz",
"integrity": "sha512-ZsvqkLqdG65e2UyM8oTOUTv/7VFEyGbG/C9dCZnhxdNq30UaE+F0iLaKq/17u6w4yewyZuqIn5MoOtjpxPqLDQ==",
"license": "MIT"
},
"node_modules/@cspotcode/source-map-support": {

View File

@@ -1,7 +1,7 @@
{
"name": "@comfyorg/comfyui-frontend",
"private": true,
"version": "1.24.0",
"version": "1.24.1",
"type": "module",
"repository": "https://github.com/Comfy-Org/ComfyUI_frontend",
"homepage": "https://comfy.org",
@@ -77,7 +77,7 @@
"@alloc/quick-lru": "^5.2.0",
"@atlaskit/pragmatic-drag-and-drop": "^1.3.1",
"@comfyorg/comfyui-electron-types": "^0.4.43",
"@comfyorg/litegraph": "^0.16.7",
"@comfyorg/litegraph": "^0.16.9",
"@primevue/forms": "^4.2.5",
"@primevue/themes": "^4.2.5",
"@sentry/vue": "^8.48.0",

View File

@@ -48,8 +48,6 @@ import ListBox from 'primevue/listbox'
import { computed, onMounted, ref } from 'vue'
import NoResultsPlaceholder from '@/components/common/NoResultsPlaceholder.vue'
import MissingCoreNodesMessage from '@/components/dialog/content/MissingCoreNodesMessage.vue'
import PackInstallButton from '@/components/dialog/content/manager/button/PackInstallButton.vue'
import { useMissingNodes } from '@/composables/nodePack/useMissingNodes'
import { useComfyManagerService } from '@/services/comfyManagerService'
import { useDialogService } from '@/services/dialogService'

View File

@@ -41,7 +41,15 @@ const previousCanvasDraggable = ref(true)
const onEdit = (newValue: string) => {
if (titleEditorStore.titleEditorTarget && newValue.trim() !== '') {
titleEditorStore.titleEditorTarget.title = newValue.trim()
const trimmedTitle = newValue.trim()
titleEditorStore.titleEditorTarget.title = trimmedTitle
// If this is a subgraph node, sync the runtime subgraph name for breadcrumb reactivity
const target = titleEditorStore.titleEditorTarget
if (target instanceof LGraphNode && target.isSubgraphNode?.()) {
target.subgraph.name = trimmedTitle
}
app.graph.setDirtyCanvas(true, true)
}
showInput.value = false
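
Context for the change above: the breadcrumb derives its segments from the runtime `subgraph.name`, not from the node title, so renaming the node alone would leave the breadcrumb stale. A minimal sketch of that relationship, assuming the breadcrumb simply maps over the currently open subgraphs (the real component may differ):

```ts
// Sketch only: breadcrumb segments come from subgraph names.
interface OpenSubgraph {
  name: string
}

function breadcrumbSegments(openSubgraphs: OpenSubgraph[]): string[] {
  return openSubgraphs.map((s) => s.name)
}

// After the edit above, target.subgraph.name === trimmedTitle, so the next
// breadcrumb render picks up the new title without any extra wiring.
```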

View File

@@ -217,7 +217,8 @@ defineExpose({
display: flex;
flex-direction: column;
gap: 24px;
overflow: hidden;
max-height: 80vh;
overflow-y: auto;
padding: 32px 32px 24px;
border-radius: 12px;
}

View File

@@ -663,8 +663,8 @@ export function useCoreCommands(): ComfyCommand[] {
},
{
id: 'Comfy.Manager.CustomNodesManager.ShowCustomNodesMenu',
icon: 'pi pi-puzzle',
label: 'Show the Custom Nodes Manager',
icon: 'pi pi-objects-column',
label: 'Custom Nodes Manager',
versionAdded: '1.12.10',
function: async () => {
const { is_legacy_manager_ui } =
@@ -683,10 +683,10 @@ export function useCoreCommands(): ComfyCommand[] {
detail: t('manager.legacyMenuNotAvailable'),
life: 3000
})
dialogService.toggleManagerDialog()
dialogService.showManagerDialog()
}
} else {
dialogService.toggleManagerDialog()
dialogService.showManagerDialog()
}
}
},

View File

@@ -0,0 +1,3 @@
{
"supports_preview_metadata": false
}
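
This file appears to be the default capability advertisement the frontend sends to the server: it is imported in `scripts/api.ts` (below) and transmitted as the first WebSocket message. A hedged sketch of how another flag could be added and picked up; the `supports_bulk_upload` key is purely hypothetical:

```ts
import defaultClientFeatureFlags from '@/config/clientFeatureFlags.json'

// Any key added to the JSON file becomes part of the advertisement automatically,
// e.g. { "supports_preview_metadata": false, "supports_bulk_upload": true }.
const advertised: Record<string, unknown> = { ...defaultClientFeatureFlags }
console.debug('client feature flags sent on socket open:', advertised)
```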

View File

@@ -14,6 +14,13 @@ import type { SettingParams } from '@/types/settingTypes'
* when they are no longer needed.
*/
export const CORE_SETTINGS: SettingParams[] = [
{
id: 'Comfy.Memory.AllowManualUnload',
name: 'Allow manual unload of models and execution cache via user command',
type: 'hidden',
defaultValue: true,
versionAdded: '1.18.0'
},
{
id: 'Comfy.Validation.Workflows',
name: 'Validate workflows',
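
Because the new setting is `type: 'hidden'`, it never appears in the settings dialog but can still be read by commands. A minimal sketch, assuming the usual setting store accessor used elsewhere in the codebase:

```ts
import { useSettingStore } from '@/stores/settingStore'

// Sketch: gate the manual unload command on the hidden flag.
const settingStore = useSettingStore()
const allowManualUnload = settingStore.get('Comfy.Memory.AllowManualUnload')

if (allowManualUnload) {
  // expose / execute the "Unload Models" command
}
```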

View File

@@ -153,19 +153,13 @@
"label": "Load Default Workflow"
},
"Comfy_Manager_CustomNodesManager_ShowCustomNodesMenu": {
"label": "Show the Custom Nodes Manager"
},
"Comfy_Manager_CustomNodesManager_ShowLegacyCustomNodesMenu": {
"label": "Custom Nodes (Legacy)"
},
"Comfy_Manager_ShowLegacyManagerMenu": {
"label": "Manager Menu (Legacy)"
"label": "Custom Nodes Manager"
},
"Comfy_Manager_ShowMissingPacks": {
"label": "Install Missing Custom Nodes"
"label": "Install Missing"
},
"Comfy_Manager_ShowUpdateAvailablePacks": {
"label": "Check for Custom Node Updates"
"label": "Check for Updates"
},
"Comfy_Manager_ToggleManagerProgressDialog": {
"label": "Toggle the Custom Nodes Manager Progress Bar"

View File

@@ -141,7 +141,7 @@
"legacyMenuNotAvailable": "Legacy manager menu is not available, defaulting to the new manager menu.",
"legacyManagerUI": "Use Legacy UI",
"legacyManagerUIDescription": "To use the legacy Manager UI, start ComfyUI with --enable-manager-legacy-ui",
"failed": "Failed",
"failed": "Failed ({count})",
"noNodesFound": "No nodes found",
"noNodesFoundDescription": "The pack's nodes either could not be parsed, or the pack is a frontend extension only and doesn't have any nodes.",
"installationQueue": "Installation Queue",
@@ -149,21 +149,15 @@
"dependencies": "Dependencies",
"inWorkflow": "In Workflow",
"infoPanelEmpty": "Click an item to see the info",
"applyChanges": "Apply Changes",
"restartToApplyChanges": "Click 'Apply Changes' to finish setup",
"restartingBackend": "Restarting backend to apply changes...",
"extensionsSuccessfullyInstalled": "Extension(s) successfully installed and are ready to use!",
"restartToApplyChanges": "To apply changes, please restart ComfyUI",
"loadingVersions": "Loading versions...",
"selectVersion": "Select Version",
"downloads": "Downloads",
"repository": "Repository",
"installingDependencies": "Installing dependencies...",
"uninstall": "Uninstall",
"uninstalling": "Uninstalling",
"update": "Update",
"uninstallSelected": "Uninstall Selected",
"updateSelected": "Update Selected",
"updateAll": "Update All",
"updatingAllPacks": "Updating all packages",
"license": "License",
"nightlyVersion": "Nightly",
@@ -976,13 +970,15 @@
"ComfyUI Issues": "ComfyUI Issues",
"Interrupt": "Interrupt",
"Load Default Workflow": "Load Default Workflow",
"Show the Custom Nodes Manager": "Show the Custom Nodes Manager",
"Custom Nodes (Legacy)": "Custom Nodes (Legacy)",
"Manager Menu (Legacy)": "Manager Menu (Legacy)",
"Install Missing Custom Nodes": "Install Missing Custom Nodes",
"Check for Custom Node Updates": "Check for Custom Node Updates",
"Toggle the Custom Nodes Manager": "Toggle the Custom Nodes Manager",
"Toggle the Custom Nodes Manager Progress Bar": "Toggle the Custom Nodes Manager Progress Bar",
"Open Mask Editor for Selected Node": "Open Mask Editor for Selected Node",
"Custom Nodes (Beta)": "Custom Nodes (Beta)",
"Custom Nodes (Legacy)": "Custom Nodes (Legacy)",
"Manager Menu (Legacy)": "Manager Menu (Legacy)",
"Custom Nodes Manager": "Custom Nodes Manager",
"Install Missing": "Install Missing",
"Toggle Progress Dialog": "Toggle Progress Dialog",
"Unload Models": "Unload Models",
"Unload Models and Execution Cache": "Unload Models and Execution Cache",
"New": "New",

View File

@@ -155,12 +155,6 @@
"Comfy_Manager_CustomNodesManager_ShowCustomNodesMenu": {
"label": "Nodos personalizados (Beta)"
},
"Comfy_Manager_CustomNodesManager_ShowLegacyCustomNodesMenu": {
"label": "Nodos personalizados (heredados)"
},
"Comfy_Manager_ShowLegacyManagerMenu": {
"label": "Menú del gestor (heredado)"
},
"Comfy_Manager_ShowMissingPacks": {
"label": "Instalar faltantes"
},

View File

@@ -610,14 +610,12 @@
"title": "Mantenimiento"
},
"manager": {
"applyChanges": "Aplicar cambios",
"changingVersion": "Cambiando versión de {from} a {to}",
"createdBy": "Creado Por",
"dependencies": "Dependencias",
"discoverCommunityContent": "Descubre paquetes de nodos, extensiones y más creados por la comunidad...",
"downloads": "Descargas",
"errorConnecting": "Error al conectar con el Registro de Nodos Comfy.",
"extensionsSuccessfullyInstalled": "¡Extensión(es) instalada(s) correctamente y lista(s) para usar!",
"failed": "Falló ({count})",
"filter": {
"disabled": "Deshabilitado",
@@ -629,7 +627,6 @@
"installAllMissingNodes": "Instalar todos los nodos faltantes",
"installSelected": "Instalar Seleccionado",
"installationQueue": "Cola de Instalación",
"installingDependencies": "Instalando dependencias...",
"lastUpdated": "Última Actualización",
"latestVersion": "Última",
"legacyManagerUI": "Usar UI antigua",
@@ -646,7 +643,6 @@
"packsSelected": "Paquetes Seleccionados",
"repository": "Repositorio",
"restartToApplyChanges": "Para aplicar los cambios, por favor reinicia ComfyUI",
"restartingBackend": "Reiniciando el backend para aplicar los cambios...",
"searchPlaceholder": "Buscar",
"selectVersion": "Seleccionar Versión",
"sort": {
@@ -671,8 +667,6 @@
"uninstallSelected": "Desinstalar Seleccionado",
"uninstalling": "Desinstalando",
"update": "Actualizar",
"updateAll": "Actualizar todo",
"updateSelected": "Actualizar seleccionados",
"updatingAllPacks": "Actualizando todos los paquetes",
"version": "Versión"
},
@@ -731,7 +725,6 @@
"Bypass/Unbypass Selected Nodes": "Evitar/No evitar nodos seleccionados",
"Canvas Toggle Link Visibility": "Alternar visibilidad de enlace en lienzo",
"Canvas Toggle Lock": "Alternar bloqueo en lienzo",
"Check for Custom Node Updates": "Buscar Actualizaciones de Nodos Personalizados",
"Check for Updates": "Buscar actualizaciones",
"Clear Pending Tasks": "Borrar tareas pendientes",
"Clear Workflow": "Borrar flujo de trabajo",
@@ -745,7 +738,7 @@
"Contact Support": "Contactar soporte",
"Convert Selection to Subgraph": "Convertir selección en subgrafo",
"Convert selected nodes to group node": "Convertir nodos seleccionados en nodo de grupo",
"Custom Nodes (Legacy)": "Nodos Personalizados (Legado)",
"Custom Nodes Manager": "Administrador de Nodos Personalizados",
"Delete Selected Items": "Eliminar elementos seleccionados",
"Desktop User Guide": "Guía de usuario de escritorio",
"Duplicate Current Workflow": "Duplicar flujo de trabajo actual",
@@ -757,16 +750,15 @@
"Give Feedback": "Dar retroalimentación",
"Group Selected Nodes": "Agrupar nodos seleccionados",
"Help": "Ayuda",
"Install Missing Custom Nodes": "Instalar Nodos Personalizados Faltantes",
"Install Missing": "Instalar Faltantes",
"Interrupt": "Interrumpir",
"Load Default Workflow": "Cargar flujo de trabajo predeterminado",
"Manage group nodes": "Gestionar nodos de grupo",
"Manager": "Administrador",
"Manager Menu (Legacy)": "Menú de Gestión (Legado)",
"Move Selected Nodes Down": "Mover nodos seleccionados hacia abajo",
"Move Selected Nodes Left": "Mover nodos seleccionados hacia la izquierda",
"Move Selected Nodes Right": "Mover nodos seleccionados hacia la derecha",
"Move Selected Nodes Up": "Mover nodos seleccionados hacia arriba",
"Manager": "Administrador",
"Mute/Unmute Selected Nodes": "Silenciar/Activar sonido de nodos seleccionados",
"New": "Nuevo",
"Next Opened Workflow": "Siguiente flujo de trabajo abierto",
@@ -796,7 +788,6 @@
"Save": "Guardar",
"Save As": "Guardar como",
"Show Settings Dialog": "Mostrar diálogo de configuración",
"Show the Custom Nodes Manager": "Mostrar el Administrador de Nodos Personalizados",
"Sign Out": "Cerrar sesión",
"Toggle Bottom Panel": "Alternar panel inferior",
"Toggle Focus Mode": "Alternar modo de enfoque",
@@ -808,6 +799,7 @@
"Toggle Terminal Bottom Panel": "Alternar panel inferior de terminal",
"Toggle Theme (Dark/Light)": "Alternar tema (Oscuro/Claro)",
"Toggle Workflows Sidebar": "Alternar barra lateral de los flujos de trabajo",
"Toggle the Custom Nodes Manager": "Alternar el Administrador de Nodos Personalizados",
"Toggle the Custom Nodes Manager Progress Bar": "Alternar la Barra de Progreso del Administrador de Nodos Personalizados",
"Undo": "Deshacer",
"Ungroup selected group nodes": "Desagrupar nodos de grupo seleccionados",

View File

@@ -155,12 +155,6 @@
"Comfy_Manager_CustomNodesManager_ShowCustomNodesMenu": {
"label": "Nœuds personnalisés (Beta)"
},
"Comfy_Manager_CustomNodesManager_ShowLegacyCustomNodesMenu": {
"label": "Nœuds personnalisés (héritage)"
},
"Comfy_Manager_ShowLegacyManagerMenu": {
"label": "Menu du gestionnaire (hérité)"
},
"Comfy_Manager_ShowMissingPacks": {
"label": "Installer manquants"
},

View File

@@ -610,14 +610,12 @@
"title": "Maintenance"
},
"manager": {
"applyChanges": "Appliquer les modifications",
"changingVersion": "Changement de version de {from} à {to}",
"createdBy": "Créé par",
"dependencies": "Dépendances",
"discoverCommunityContent": "Découvrez les packs de nœuds, extensions et plus encore créés par la communauté...",
"downloads": "Téléchargements",
"errorConnecting": "Erreur de connexion au registre de nœuds Comfy.",
"extensionsSuccessfullyInstalled": "Extension(s) installée(s) avec succès et prêtes à l'emploi !",
"failed": "Échoué ({count})",
"filter": {
"disabled": "Désactivé",
@@ -629,7 +627,6 @@
"installAllMissingNodes": "Installer tous les nœuds manquants",
"installSelected": "Installer sélectionné",
"installationQueue": "File d'attente d'installation",
"installingDependencies": "Installation des dépendances...",
"lastUpdated": "Dernière mise à jour",
"latestVersion": "Dernière",
"legacyManagerUI": "Utiliser l'interface utilisateur héritée",
@@ -646,7 +643,6 @@
"packsSelected": "Packs sélectionnés",
"repository": "Référentiel",
"restartToApplyChanges": "Pour appliquer les modifications, veuillez redémarrer ComfyUI",
"restartingBackend": "Redémarrage du backend pour appliquer les modifications...",
"searchPlaceholder": "Recherche",
"selectVersion": "Sélectionner la version",
"sort": {
@@ -671,8 +667,6 @@
"uninstallSelected": "Désinstaller sélectionné",
"uninstalling": "Désinstallation",
"update": "Mettre à jour",
"updateAll": "Tout mettre à jour",
"updateSelected": "Mettre à jour la sélection",
"updatingAllPacks": "Mise à jour de tous les paquets",
"version": "Version"
},
@@ -731,7 +725,6 @@
"Bypass/Unbypass Selected Nodes": "Contourner/Ne pas contourner les nœuds sélectionnés",
"Canvas Toggle Link Visibility": "Basculer la visibilité du lien de la toile",
"Canvas Toggle Lock": "Basculer le verrouillage de la toile",
"Check for Custom Node Updates": "Vérifier les mises à jour des nœuds personnalisés",
"Check for Updates": "Vérifier les Mises à Jour",
"Clear Pending Tasks": "Effacer les tâches en attente",
"Clear Workflow": "Effacer le flux de travail",
@@ -745,7 +738,7 @@
"Contact Support": "Contacter le support",
"Convert Selection to Subgraph": "Convertir la sélection en sous-graphe",
"Convert selected nodes to group node": "Convertir les nœuds sélectionnés en nœud de groupe",
"Custom Nodes (Legacy)": "Nœuds personnalisés (héritage)",
"Custom Nodes Manager": "Gestionnaire de Nœuds Personnalisés",
"Delete Selected Items": "Supprimer les éléments sélectionnés",
"Desktop User Guide": "Guide de l'utilisateur de bureau",
"Duplicate Current Workflow": "Dupliquer le flux de travail actuel",
@@ -757,16 +750,15 @@
"Give Feedback": "Donnez votre avis",
"Group Selected Nodes": "Grouper les nœuds sélectionnés",
"Help": "Aide",
"Install Missing Custom Nodes": "Installer les nœuds personnalisés manquants",
"Install Missing": "Installer Manquants",
"Interrupt": "Interrompre",
"Load Default Workflow": "Charger le flux de travail par défaut",
"Manage group nodes": "Gérer les nœuds de groupe",
"Manager": "Gestionnaire",
"Manager Menu (Legacy)": "Menu du gestionnaire (héritage)",
"Move Selected Nodes Down": "Déplacer les nœuds sélectionnés vers le bas",
"Move Selected Nodes Left": "Déplacer les nœuds sélectionnés vers la gauche",
"Move Selected Nodes Right": "Déplacer les nœuds sélectionnés vers la droite",
"Move Selected Nodes Up": "Déplacer les nœuds sélectionnés vers le haut",
"Manager": "Gestionnaire",
"Mute/Unmute Selected Nodes": "Mettre en sourdine/Activer le son des nœuds sélectionnés",
"New": "Nouveau",
"Next Opened Workflow": "Prochain flux de travail ouvert",
@@ -796,7 +788,6 @@
"Save": "Enregistrer",
"Save As": "Enregistrer sous",
"Show Settings Dialog": "Afficher la boîte de dialogue des paramètres",
"Show the Custom Nodes Manager": "Afficher le gestionnaire de nœuds personnalisés",
"Sign Out": "Se déconnecter",
"Toggle Bottom Panel": "Basculer le panneau inférieur",
"Toggle Focus Mode": "Basculer le mode focus",
@@ -808,6 +799,7 @@
"Toggle Terminal Bottom Panel": "Basculer le panneau inférieur du terminal",
"Toggle Theme (Dark/Light)": "Basculer le thème (Sombre/Clair)",
"Toggle Workflows Sidebar": "Afficher/Masquer la barre latérale des workflows",
"Toggle the Custom Nodes Manager": "Basculer le gestionnaire de nœuds personnalisés",
"Toggle the Custom Nodes Manager Progress Bar": "Basculer la barre de progression du gestionnaire de nœuds personnalisés",
"Undo": "Annuler",
"Ungroup selected group nodes": "Dégrouper les nœuds de groupe sélectionnés",

View File

@@ -155,12 +155,6 @@
"Comfy_Manager_CustomNodesManager_ShowCustomNodesMenu": {
"label": "カスタムノード(ベータ版)"
},
"Comfy_Manager_CustomNodesManager_ShowLegacyCustomNodesMenu": {
"label": "カスタムノード(レガシー)"
},
"Comfy_Manager_ShowLegacyManagerMenu": {
"label": "マネージャーメニュー(レガシー)"
},
"Comfy_Manager_ShowMissingPacks": {
"label": "不足しているパックをインストール"
},

View File

@@ -610,14 +610,12 @@
"title": "メンテナンス"
},
"manager": {
"applyChanges": "変更を適用",
"changingVersion": "バージョンを {from} から {to} に変更",
"createdBy": "作成者",
"dependencies": "依存関係",
"discoverCommunityContent": "コミュニティが作成したノードパック、拡張機能などを探す...",
"downloads": "ダウンロード",
"errorConnecting": "Comfy Node Registryへの接続エラー。",
"extensionsSuccessfullyInstalled": "拡張機能のインストールが完了し、使用可能になりました!",
"failed": "失敗しました ({count})",
"filter": {
"disabled": "無効",
@@ -629,7 +627,6 @@
"installAllMissingNodes": "すべての不足しているノードをインストール",
"installSelected": "選択したものをインストール",
"installationQueue": "インストールキュー",
"installingDependencies": "依存関係をインストールしています...",
"lastUpdated": "最終更新日",
"latestVersion": "最新",
"legacyManagerUI": "レガシーUIを使用する",
@@ -646,7 +643,6 @@
"packsSelected": "選択したパック",
"repository": "リポジトリ",
"restartToApplyChanges": "変更を適用するには、ComfyUIを再起動してください",
"restartingBackend": "変更を適用するためにバックエンドを再起動しています...",
"searchPlaceholder": "検索",
"selectVersion": "バージョンを選択",
"sort": {
@@ -671,8 +667,6 @@
"uninstallSelected": "選択したものをアンインストール",
"uninstalling": "アンインストール中",
"update": "更新",
"updateAll": "すべて更新",
"updateSelected": "選択を更新",
"updatingAllPacks": "すべてのパッケージを更新中",
"version": "バージョン"
},
@@ -731,7 +725,6 @@
"Bypass/Unbypass Selected Nodes": "選択したノードのバイパス/バイパス解除",
"Canvas Toggle Link Visibility": "キャンバスのリンク表示を切り替え",
"Canvas Toggle Lock": "キャンバスのロックを切り替え",
"Check for Custom Node Updates": "カスタムノードのアップデートを確認",
"Check for Updates": "更新を確認",
"Clear Pending Tasks": "保留中のタスクをクリア",
"Clear Workflow": "ワークフローをクリア",
@@ -745,7 +738,7 @@
"Contact Support": "サポートに連絡",
"Convert Selection to Subgraph": "選択範囲をサブグラフに変換",
"Convert selected nodes to group node": "選択したノードをグループノードに変換",
"Custom Nodes (Legacy)": "カスタムノード(レガシー)",
"Custom Nodes Manager": "カスタムノードマネージャ",
"Delete Selected Items": "選択したアイテムを削除",
"Desktop User Guide": "デスクトップユーザーガイド",
"Duplicate Current Workflow": "現在のワークフローを複製",
@@ -757,16 +750,15 @@
"Give Feedback": "フィードバックを送る",
"Group Selected Nodes": "選択したノードをグループ化",
"Help": "ヘルプ",
"Install Missing Custom Nodes": "不足しているカスタムノードをインストール",
"Install Missing": "不足しているものをインストール",
"Interrupt": "中断",
"Load Default Workflow": "デフォルトワークフローを読み込む",
"Manage group nodes": "グループノードを管理",
"Manager": "マネージャー",
"Manager Menu (Legacy)": "マネージャーメニュー(レガシー)",
"Move Selected Nodes Down": "選択したノードを下へ移動",
"Move Selected Nodes Left": "選択したノードを左へ移動",
"Move Selected Nodes Right": "選択したノードを右へ移動",
"Move Selected Nodes Up": "選択したノードを上へ移動",
"Manager": "マネージャー",
"Mute/Unmute Selected Nodes": "選択したノードのミュート/ミュート解除",
"New": "新規",
"Next Opened Workflow": "次に開いたワークフロー",
@@ -796,7 +788,6 @@
"Save": "保存",
"Save As": "名前を付けて保存",
"Show Settings Dialog": "設定ダイアログを表示",
"Show the Custom Nodes Manager": "カスタムノードマネージャーを表示",
"Sign Out": "サインアウト",
"Toggle Bottom Panel": "下部パネルの切り替え",
"Toggle Focus Mode": "フォーカスモードの切り替え",
@@ -808,6 +799,7 @@
"Toggle Terminal Bottom Panel": "ターミナルパネル下部を切り替え",
"Toggle Theme (Dark/Light)": "テーマを切り替え(ダーク/ライト)",
"Toggle Workflows Sidebar": "ワークフローサイドバーを切り替え",
"Toggle the Custom Nodes Manager": "カスタムノードマネージャーを切り替え",
"Toggle the Custom Nodes Manager Progress Bar": "カスタムノードマネージャーの進行状況バーを切り替え",
"Undo": "元に戻す",
"Ungroup selected group nodes": "選択したグループノードのグループ解除",

View File

@@ -155,12 +155,6 @@
"Comfy_Manager_CustomNodesManager_ShowCustomNodesMenu": {
"label": "사용자 정의 노드 (베타)"
},
"Comfy_Manager_CustomNodesManager_ShowLegacyCustomNodesMenu": {
"label": "커스텀 노드 (레거시)"
},
"Comfy_Manager_ShowLegacyManagerMenu": {
"label": "매니저 메뉴 (레거시)"
},
"Comfy_Manager_ShowMissingPacks": {
"label": "누락된 팩 설치"
},

View File

@@ -610,14 +610,12 @@
"title": "유지 보수"
},
"manager": {
"applyChanges": "변경 사항 적용",
"changingVersion": "{from}에서 {to}(으)로 버전 변경 중",
"createdBy": "작성자",
"dependencies": "의존성",
"discoverCommunityContent": "커뮤니티에서 만든 노드 팩 및 확장 프로그램을 찾아보세요...",
"downloads": "다운로드",
"errorConnecting": "Comfy Node Registry에 연결하는 중 오류가 발생했습니다.",
"extensionsSuccessfullyInstalled": "확장 프로그램이 성공적으로 설치되어 사용할 준비가 되었습니다!",
"failed": "실패 ({count})",
"filter": {
"disabled": "비활성화",
@@ -629,7 +627,6 @@
"installAllMissingNodes": "모든 누락된 노드 설치",
"installSelected": "선택한 항목 설치",
"installationQueue": "설치 대기열",
"installingDependencies": "의존성 설치 중...",
"lastUpdated": "마지막 업데이트",
"latestVersion": "최신",
"legacyManagerUI": "레거시 UI 사용",
@@ -646,7 +643,6 @@
"packsSelected": "선택한 노드 팩",
"repository": "저장소",
"restartToApplyChanges": "변경 사항을 적용하려면 ComfyUI를 재시작해 주세요",
"restartingBackend": "변경 사항을 적용하기 위해 백엔드를 재시작하는 중...",
"searchPlaceholder": "검색",
"selectVersion": "버전 선택",
"sort": {
@@ -671,8 +667,6 @@
"uninstallSelected": "선택 항목 제거",
"uninstalling": "제거 중",
"update": "업데이트",
"updateAll": "전체 업데이트",
"updateSelected": "선택 항목 업데이트",
"updatingAllPacks": "모든 패키지 업데이트 중",
"version": "버전"
},
@@ -731,7 +725,6 @@
"Bypass/Unbypass Selected Nodes": "선택한 노드 우회/우회 해제",
"Canvas Toggle Link Visibility": "캔버스 토글 링크 가시성",
"Canvas Toggle Lock": "캔버스 토글 잠금",
"Check for Custom Node Updates": "커스텀 노드 업데이트 확인",
"Check for Updates": "업데이트 확인",
"Clear Pending Tasks": "보류 중인 작업 제거하기",
"Clear Workflow": "워크플로 지우기",
@@ -745,7 +738,7 @@
"Contact Support": "고객 지원 문의",
"Convert Selection to Subgraph": "선택 영역을 서브그래프로 변환",
"Convert selected nodes to group node": "선택한 노드를 그룹 노드로 변환",
"Custom Nodes (Legacy)": "커스텀 노드 (레거시)",
"Custom Nodes Manager": "사용자 정의 노드 관리자",
"Delete Selected Items": "선택한 항목 삭제",
"Desktop User Guide": "데스크톱 사용자 가이드",
"Duplicate Current Workflow": "현재 워크플로 복제",
@@ -757,16 +750,15 @@
"Give Feedback": "피드백 제공",
"Group Selected Nodes": "선택한 노드 그룹화",
"Help": "도움말",
"Install Missing Custom Nodes": "누락된 커스텀 노드 설치",
"Install Missing": "누락된 설치",
"Interrupt": "중단",
"Load Default Workflow": "기본 워크플로 불러오기",
"Manage group nodes": "그룹 노드 관리",
"Manager": "매니저",
"Manager Menu (Legacy)": "매니저 메뉴 (레거시)",
"Move Selected Nodes Down": "선택한 노드 아래로 이동",
"Move Selected Nodes Left": "선택한 노드 왼쪽으로 이동",
"Move Selected Nodes Right": "선택한 노드 오른쪽으로 이동",
"Move Selected Nodes Up": "선택한 노드 위로 이동",
"Manager": "매니저",
"Mute/Unmute Selected Nodes": "선택한 노드 활성화/비활성화",
"New": "새로 만들기",
"Next Opened Workflow": "다음 열린 워크플로",
@@ -796,7 +788,6 @@
"Save": "저장",
"Save As": "다른 이름으로 저장",
"Show Settings Dialog": "설정 대화상자 표시",
"Show the Custom Nodes Manager": "커스텀 노드 관리자 표시",
"Sign Out": "로그아웃",
"Toggle Bottom Panel": "하단 패널 전환",
"Toggle Focus Mode": "포커스 모드 전환",
@@ -808,6 +799,7 @@
"Toggle Terminal Bottom Panel": "터미널 하단 패널 전환",
"Toggle Theme (Dark/Light)": "테마 전환 (어두운/밝은)",
"Toggle Workflows Sidebar": "워크플로우 사이드바 전환",
"Toggle the Custom Nodes Manager": "커스텀 노드 매니저 전환",
"Toggle the Custom Nodes Manager Progress Bar": "커스텀 노드 매니저 진행률 표시줄 전환",
"Undo": "실행 취소",
"Ungroup selected group nodes": "선택한 그룹 노드 그룹 해제",

View File

@@ -155,12 +155,6 @@
"Comfy_Manager_CustomNodesManager_ShowCustomNodesMenu": {
"label": "Пользовательские узлы (Бета)"
},
"Comfy_Manager_CustomNodesManager_ShowLegacyCustomNodesMenu": {
"label": "Пользовательские узлы (устаревшие)"
},
"Comfy_Manager_ShowLegacyManagerMenu": {
"label": "Меню менеджера (устаревшее)"
},
"Comfy_Manager_ShowMissingPacks": {
"label": "Установить отсутствующие"
},

View File

@@ -610,14 +610,12 @@
"title": "Обслуживание"
},
"manager": {
"applyChanges": "Применить изменения",
"changingVersion": "Изменение версии с {from} на {to}",
"createdBy": "Создано",
"dependencies": "Зависимости",
"discoverCommunityContent": "Откройте для себя пакеты узлов, расширения и многое другое, созданные сообществом...",
"downloads": "Загрузки",
"errorConnecting": "Ошибка подключения к реестру Comfy Node.",
"extensionsSuccessfullyInstalled": "Расширение(я) успешно установлены и готовы к использованию!",
"failed": "Не удалось ({count})",
"filter": {
"disabled": "Отключено",
@@ -629,7 +627,6 @@
"installAllMissingNodes": "Установить все отсутствующие узлы",
"installSelected": "Установить выбранное",
"installationQueue": "Очередь установки",
"installingDependencies": "Установка зависимостей...",
"lastUpdated": "Последнее обновление",
"latestVersion": "Последняя",
"legacyManagerUI": "Использовать устаревший UI",
@@ -646,7 +643,6 @@
"packsSelected": "Выбрано пакетов",
"repository": "Репозиторий",
"restartToApplyChanges": "Чтобы применить изменения, пожалуйста, перезапустите ComfyUI",
"restartingBackend": "Перезапуск бэкенда для применения изменений...",
"searchPlaceholder": "Поиск",
"selectVersion": "Выберите версию",
"sort": {
@@ -671,8 +667,6 @@
"uninstallSelected": "Удалить выбранное",
"uninstalling": "Удаление",
"update": "Обновить",
"updateAll": "Обновить все",
"updateSelected": "Обновить выбранное",
"updatingAllPacks": "Обновление всех пакетов",
"version": "Версия"
},
@@ -731,7 +725,6 @@
"Bypass/Unbypass Selected Nodes": "Обойти/восстановить выбранные ноды",
"Canvas Toggle Link Visibility": "Переключение видимости ссылки на холст",
"Canvas Toggle Lock": "Переключение блокировки холста",
"Check for Custom Node Updates": "Проверить обновления пользовательских узлов",
"Check for Updates": "Проверить Обновления",
"Clear Pending Tasks": "Очистить ожидающие задачи",
"Clear Workflow": "Очистить рабочий процесс",
@@ -745,7 +738,7 @@
"Contact Support": "Связаться с поддержкой",
"Convert Selection to Subgraph": "Преобразовать выделенное в подграф",
"Convert selected nodes to group node": "Преобразовать выбранные ноды в групповую ноду",
"Custom Nodes (Legacy)": "Пользовательские узлы (устаревшие)",
"Custom Nodes Manager": "Менеджер Пользовательских Узлов",
"Delete Selected Items": "Удалить выбранные элементы",
"Desktop User Guide": "Руководство пользователя для настольных ПК",
"Duplicate Current Workflow": "Дублировать текущий рабочий процесс",
@@ -757,16 +750,15 @@
"Give Feedback": "Оставить отзыв",
"Group Selected Nodes": "Сгруппировать выбранные ноды",
"Help": "Помощь",
"Install Missing Custom Nodes": "Установить отсутствующие пользовательские узлы",
"Install Missing": "Установить Отсутствующие",
"Interrupt": "Прервать",
"Load Default Workflow": "Загрузить стандартный рабочий процесс",
"Manage group nodes": "Управление групповыми нодами",
"Manager": "Менеджер",
"Manager Menu (Legacy)": "Меню управления (устаревшее)",
"Move Selected Nodes Down": "Переместить выбранные узлы вниз",
"Move Selected Nodes Left": "Переместить выбранные узлы влево",
"Move Selected Nodes Right": "Переместить выбранные узлы вправо",
"Move Selected Nodes Up": "Переместить выбранные узлы вверх",
"Manager": "Менеджер",
"Mute/Unmute Selected Nodes": "Отключить/включить звук для выбранных нод",
"New": "Новый",
"Next Opened Workflow": "Следующий открытый рабочий процесс",
@@ -796,7 +788,6 @@
"Save": "Сохранить",
"Save As": "Сохранить как",
"Show Settings Dialog": "Показать диалог настроек",
"Show the Custom Nodes Manager": "Показать менеджер пользовательских узлов",
"Sign Out": "Выйти",
"Toggle Bottom Panel": "Переключить нижнюю панель",
"Toggle Focus Mode": "Переключить режим фокуса",
@@ -808,6 +799,7 @@
"Toggle Terminal Bottom Panel": "Переключение нижней панели терминала",
"Toggle Theme (Dark/Light)": "Переключение темы (Тёмная/Светлая)",
"Toggle Workflows Sidebar": "Показать/скрыть боковую панель рабочих процессов",
"Toggle the Custom Nodes Manager": "Переключить менеджер пользовательских узлов",
"Toggle the Custom Nodes Manager Progress Bar": "Переключить индикатор выполнения менеджера пользовательских узлов",
"Undo": "Отменить",
"Ungroup selected group nodes": "Разгруппировать выбранные групповые ноды",

View File

@@ -155,12 +155,6 @@
"Comfy_Manager_CustomNodesManager_ShowCustomNodesMenu": {
"label": "自定义节点(测试版)"
},
"Comfy_Manager_CustomNodesManager_ShowLegacyCustomNodesMenu": {
"label": "自定义节点(旧版)"
},
"Comfy_Manager_ShowLegacyManagerMenu": {
"label": "管理器菜单(旧版)"
},
"Comfy_Manager_ShowMissingPacks": {
"label": "安装缺失的包"
},

View File

@@ -610,14 +610,12 @@
"title": "维护"
},
"manager": {
"applyChanges": "套用變更",
"changingVersion": "将版本从 {from} 更改为 {to}",
"createdBy": "创建者",
"dependencies": "依赖关系",
"discoverCommunityContent": "发现社区制作的节点包,扩展等等...",
"downloads": "下载",
"errorConnecting": "连接到Comfy节点注册表时出错。",
"extensionsSuccessfullyInstalled": "擴充功能安裝成功,已可使用!",
"failed": "失败 ({count})",
"filter": {
"disabled": "已禁用",
@@ -629,7 +627,6 @@
"installAllMissingNodes": "安装所有缺失节点",
"installSelected": "安装选定",
"installationQueue": "安装队列",
"installingDependencies": "正在安裝相依套件……",
"lastUpdated": "最后更新",
"latestVersion": "最新",
"legacyManagerUI": "使用旧版UI",
@@ -646,7 +643,6 @@
"packsSelected": "选定的包",
"repository": "仓库",
"restartToApplyChanges": "要应用更改请重新启动ComfyUI",
"restartingBackend": "正在重新啟動後端以套用變更……",
"searchPlaceholder": "搜索",
"selectVersion": "选择版本",
"sort": {
@@ -671,8 +667,6 @@
"uninstallSelected": "卸载所选",
"uninstalling": "正在卸载",
"update": "更新",
"updateAll": "全部更新",
"updateSelected": "更新所选",
"updatingAllPacks": "更新所有包",
"version": "版本"
},
@@ -731,7 +725,6 @@
"Bypass/Unbypass Selected Nodes": "忽略/取消忽略选定节点",
"Canvas Toggle Link Visibility": "切换连线可见性",
"Canvas Toggle Lock": "切换视图锁定",
"Check for Custom Node Updates": "检查自定义节点更新",
"Check for Updates": "检查更新",
"Clear Pending Tasks": "清除待处理任务",
"Clear Workflow": "清除工作流",
@@ -745,7 +738,7 @@
"Contact Support": "联系支持",
"Convert Selection to Subgraph": "将选中内容转换为子图",
"Convert selected nodes to group node": "将选中节点转换为组节点",
"Custom Nodes (Legacy)": "自定义节点(旧版)",
"Custom Nodes Manager": "自定义节点管理器",
"Delete Selected Items": "删除选定的项目",
"Desktop User Guide": "桌面端用户指南",
"Duplicate Current Workflow": "复制当前工作流",
@@ -757,16 +750,15 @@
"Give Feedback": "提供反馈",
"Group Selected Nodes": "将选中节点转换为组节点",
"Help": "帮助",
"Install Missing Custom Nodes": "安装缺失的自定义节点",
"Install Missing": "安装缺失",
"Interrupt": "中断",
"Load Default Workflow": "加载默认工作流",
"Manage group nodes": "管理组节点",
"Manager": "管理器",
"Manager Menu (Legacy)": "管理菜单(旧版)",
"Move Selected Nodes Down": "下移所选节点",
"Move Selected Nodes Left": "左移所选节点",
"Move Selected Nodes Right": "右移所选节点",
"Move Selected Nodes Up": "上移所选节点",
"Manager": "管理器",
"Mute/Unmute Selected Nodes": "静音/取消静音选定节点",
"New": "新建",
"Next Opened Workflow": "下一个打开的工作流",
@@ -796,7 +788,6 @@
"Save": "保存",
"Save As": "另存为",
"Show Settings Dialog": "显示设置对话框",
"Show the Custom Nodes Manager": "显示自定义节点管理器",
"Sign Out": "退出登录",
"Toggle Bottom Panel": "切换底部面板",
"Toggle Focus Mode": "切换专注模式",
@@ -808,6 +799,7 @@
"Toggle Terminal Bottom Panel": "切换终端底部面板",
"Toggle Theme (Dark/Light)": "切换主题(暗/亮)",
"Toggle Workflows Sidebar": "切换工作流侧边栏",
"Toggle the Custom Nodes Manager": "切换自定义节点管理器",
"Toggle the Custom Nodes Manager Progress Bar": "切换自定义节点管理器进度条",
"Undo": "撤销",
"Ungroup selected group nodes": "解散选中组节点",

View File

@@ -113,6 +113,8 @@ const zLogRawResponse = z.object({
entries: z.array(zLogEntry)
})
const zFeatureFlagsWsMessage = z.record(z.string(), z.any())
export type StatusWsMessageStatus = z.infer<typeof zStatusWsMessageStatus>
export type StatusWsMessage = z.infer<typeof zStatusWsMessage>
export type ProgressWsMessage = z.infer<typeof zProgressWsMessage>
@@ -132,6 +134,7 @@ export type ProgressTextWsMessage = z.infer<typeof zProgressTextWsMessage>
export type DisplayComponentWsMessage = z.infer<
typeof zDisplayComponentWsMessage
>
export type FeatureFlagsWsMessage = z.infer<typeof zFeatureFlagsWsMessage>
// End of ws messages
const zPromptInputItem = z.object({

View File

@@ -1,5 +1,6 @@
import axios from 'axios'
import defaultClientFeatureFlags from '@/config/clientFeatureFlags.json'
import type {
DisplayComponentWsMessage,
EmbeddingsResponse,
@@ -11,6 +12,7 @@ import type {
ExecutionStartWsMessage,
ExecutionSuccessWsMessage,
ExtensionsResponse,
FeatureFlagsWsMessage,
HistoryTaskItem,
LogsRawResponse,
LogsWsMessage,
@@ -106,6 +108,7 @@ interface BackendApiCalls {
b_preview: Blob
progress_text: ProgressTextWsMessage
display_component: DisplayComponentWsMessage
feature_flags: FeatureFlagsWsMessage
}
/** Dictionary of all api calls */
@@ -235,6 +238,19 @@ export class ComfyApi extends EventTarget {
reportedUnknownMessageTypes = new Set<string>()
/**
* Get feature flags supported by this frontend client.
* Returns a copy to prevent external modification.
*/
getClientFeatureFlags(): Record<string, unknown> {
return { ...defaultClientFeatureFlags }
}
/**
* Feature flags received from the backend server.
*/
serverFeatureFlags: Record<string, unknown> = {}
/**
* The auth token for the comfy org account if the user is logged in.
* This is only used for {@link queuePrompt} now. It is not directly
@@ -376,6 +392,15 @@ export class ComfyApi extends EventTarget {
this.socket.addEventListener('open', () => {
opened = true
// Send feature flags as the first message
this.socket!.send(
JSON.stringify({
type: 'feature_flags',
data: this.getClientFeatureFlags()
})
)
if (isReconnect) {
this.dispatchCustomEvent('reconnected')
}
@@ -469,6 +494,14 @@ export class ComfyApi extends EventTarget {
case 'b_preview':
this.dispatchCustomEvent(msg.type, msg.data)
break
case 'feature_flags':
// Store server feature flags
this.serverFeatureFlags = msg.data
console.log(
'Server feature flags received:',
this.serverFeatureFlags
)
break
default:
if (this.#registered.has(msg.type)) {
// Fallback for custom types - calls super direct.
@@ -955,17 +988,7 @@ export class ComfyApi extends EventTarget {
return (await axios.get(this.internalURL('/folder_paths'))).data
}
/**
* Gets the custom nodes i18n data from the server.
*
* @returns The custom nodes i18n data
*/
async getCustomNodesI18n(): Promise<Record<string, any>> {
return (await axios.get(this.apiURL('/i18n'))).data
}
/**
* Frees memory by unloading models and optionally freeing execution cache
/* Frees memory by unloading models and optionally freeing execution cache
* @param {Object} options - The options object
* @param {boolean} options.freeExecutionCache - If true, also frees execution cache
*/
@@ -1014,6 +1037,42 @@ export class ComfyApi extends EventTarget {
})
}
}
/**
* Gets the custom nodes i18n data from the server.
*
* @returns The custom nodes i18n data
*/
async getCustomNodesI18n(): Promise<Record<string, any>> {
return (await axios.get(this.apiURL('/i18n'))).data
}
/**
* Checks if the server supports a specific feature.
* @param featureName The name of the feature to check
* @returns true if the feature is supported, false otherwise
*/
serverSupportsFeature(featureName: string): boolean {
return this.serverFeatureFlags[featureName] === true
}
/**
* Gets a server feature flag value.
* @param featureName The name of the feature to get
* @param defaultValue The default value if the feature is not found
* @returns The feature value or default
*/
getServerFeature<T = unknown>(featureName: string, defaultValue?: T): T {
return (this.serverFeatureFlags[featureName] ?? defaultValue) as T
}
/**
* Gets all server feature flags.
* @returns Copy of all server feature flags
*/
getServerFeatures(): Record<string, unknown> {
return { ...this.serverFeatureFlags }
}
}
export const api = new ComfyApi()
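
For reviewers, a short usage sketch of the new helpers at a call site. This is illustrative only: `supports_preview_metadata` and `max_upload_size` are the keys exercised by the tests in this PR, and the fallback passed to `getServerFeature` is an arbitrary assumption.

```ts
import { api } from '@/scripts/api'

// Gate behaviour on a boolean capability advertised by the server.
if (api.serverSupportsFeature('supports_preview_metadata')) {
  // take the richer preview path
}

// Read a non-boolean flag with a fallback (50 MB here is an arbitrary default).
const maxUploadSize = api.getServerFeature<number>(
  'max_upload_size',
  50 * 1024 * 1024
)

// Snapshot of every flag the server sent; returned as a copy, safe to mutate.
const allFlags = api.getServerFeatures()
console.debug('server features', allFlags, maxUploadSize)
```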

View File

@@ -23,9 +23,15 @@ enum ManagerRoute {
START_QUEUE = 'v2/manager/queue/start',
RESET_QUEUE = 'v2/manager/queue/reset',
QUEUE_STATUS = 'v2/manager/queue/status',
INSTALL = 'v2/manager/queue/install',
UPDATE = 'v2/manager/queue/update',
UPDATE_ALL = 'v2/manager/queue/update_all',
UNINSTALL = 'v2/manager/queue/uninstall',
DISABLE = 'v2/manager/queue/disable',
FIX_NODE = 'v2/manager/queue/fix',
LIST_INSTALLED = 'v2/customnode/installed',
GET_NODES = 'v2/customnode/getmappings',
GET_PACKS = 'v2/customnode/getlist',
IMPORT_FAIL_INFO = 'v2/customnode/import_fail_info',
REBOOT = 'v2/manager/reboot',
IS_LEGACY_MANAGER_UI = 'v2/manager/is_legacy_manager_ui',
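
The added routes extend the manager's job-queue API. A rough sketch of how a client call might look; the payload fields and the leading-slash handling are assumptions, not taken from this diff (the real request shapes live in `comfyManagerService`).

```ts
import { api } from '@/scripts/api'

// Sketch only: enqueue an install job, then check the queue status route.
// Field names in the body are hypothetical.
async function queueInstall(packId: string, version: string) {
  await api.fetchApi('/v2/manager/queue/install', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ id: packId, version })
  })

  const res = await api.fetchApi('/v2/manager/queue/status')
  return res.json()
}
```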

View File

@@ -6,6 +6,11 @@ import type { components } from '@/types/comfyRegistryTypes'
export const IsInstallingKey: InjectionKey<Ref<boolean>> =
Symbol('isInstalling')
export enum ManagerWsQueueStatus {
DONE = 'all-done',
IN_PROGRESS = 'in_progress'
}
export enum ManagerTab {
All = 'all',
Installed = 'installed',
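
The two enum values correspond to the `cm-queue-status` WebSocket payloads consumed by `useManagerQueue` (see the updated tests below). A minimal listener sketch, assuming the event is dispatched on the api event target with a `{ status }` detail as in those tests; the import path for the enum is an assumption.

```ts
import { api } from '@/scripts/api'
// Assumed path; the enum is declared alongside ManagerTab in the manager types module.
import { ManagerWsQueueStatus } from '@/types/comfyManagerTypes'

api.addEventListener('cm-queue-status', (event) => {
  const { status } = (event as CustomEvent<{ status: ManagerWsQueueStatus }>)
    .detail
  if (status === ManagerWsQueueStatus.DONE) {
    // backend finished all queued manager tasks ('all-done')
  } else if (status === ManagerWsQueueStatus.IN_PROGRESS) {
    // backend is still processing the queue
  }
})
```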

View File

@@ -699,6 +699,27 @@ export interface paths {
patch?: never
trace?: never
}
'/publishers/{publisherId}/nodes/{nodeId}/claim-my-node': {
parameters: {
query?: never
header?: never
path?: never
cookie?: never
}
get?: never
put?: never
/**
* Claim nodeId into publisherId for the authenticated publisher
     * @description This endpoint allows a publisher to claim an unclaimed node, identified by the nodeId, whose repository they own. The unclaimed node's repository must be owned by the authenticated user.
*
*/
post: operations['claimMyNode']
delete?: never
options?: never
head?: never
patch?: never
trace?: never
}
'/publishers/{publisherId}/nodes/v2': {
parameters: {
query?: never
@@ -1061,6 +1082,23 @@ export interface paths {
patch?: never
trace?: never
}
'/bulk/nodes/versions': {
parameters: {
query?: never
header?: never
path?: never
cookie?: never
}
get?: never
put?: never
/** Retrieve multiple node versions in a single request */
post: operations['getBulkNodeVersions']
delete?: never
options?: never
head?: never
patch?: never
trace?: never
}
'/versions': {
parameters: {
query?: never
@@ -1095,6 +1133,26 @@ export interface paths {
patch?: never
trace?: never
}
'/admin/nodes/{nodeId}': {
parameters: {
query?: never
header?: never
path?: never
cookie?: never
}
get?: never
/**
* Admin Update Node
* @description Only admins can update a node with admin privileges.
*/
put: operations['adminUpdateNode']
post?: never
delete?: never
options?: never
head?: never
patch?: never
trace?: never
}
'/admin/nodes/{nodeId}/versions/{versionNumber}': {
parameters: {
query?: never
@@ -2951,7 +3009,7 @@ export interface paths {
patch?: never
trace?: never
}
'/proxy/moonvalley/text-to-video': {
'/proxy/moonvalley/prompts/text-to-video': {
parameters: {
query?: never
header?: never
@@ -2968,7 +3026,7 @@ export interface paths {
patch?: never
trace?: never
}
'/proxy/moonvalley/text-to-image': {
'/proxy/moonvalley/prompts/text-to-image': {
parameters: {
query?: never
header?: never
@@ -3057,6 +3115,37 @@ export interface paths {
export type webhooks = Record<string, never>
export interface components {
schemas: {
ClaimMyNodeRequest: {
/** @description GitHub token to verify if the user owns the repo of the node */
GH_TOKEN: string
}
BulkNodeVersionsRequest: {
/** @description List of node ID and version pairs to retrieve */
node_versions: components['schemas']['NodeVersionIdentifier'][]
}
NodeVersionIdentifier: {
/** @description The unique identifier of the node */
node_id: string
/** @description The version of the node */
version: string
}
BulkNodeVersionsResponse: {
/** @description List of retrieved node versions with their status */
node_versions: components['schemas']['BulkNodeVersionResult'][]
}
BulkNodeVersionResult: {
/** @description The node and version identifier */
identifier: components['schemas']['NodeVersionIdentifier']
/**
* @description Status of the retrieval operation
* @enum {string}
*/
status: 'success' | 'not_found' | 'error'
/** @description The retrieved node version data (only present if status is success) */
node_version?: components['schemas']['NodeVersion']
/** @description Error message if retrieval failed (only present if status is error) */
error_message?: string
}
PersonalAccessToken: {
/**
* Format: uuid
@@ -8713,71 +8802,212 @@ export interface components {
| 'computer-use-preview'
| 'computer-use-preview-2025-03-11'
| 'chatgpt-4o-latest'
MoonvalleyInferenceParams: {
/** @default 1080 */
MoonvalleyTextToVideoInferenceParams: {
/**
* @description Height of the generated video in pixels
* @default 1080
*/
height: number
/** @default 1920 */
/**
* @description Width of the generated video in pixels
* @default 1920
*/
width: number
/** @default 64 */
/**
* @description Number of frames to generate
* @default 64
*/
num_frames: number
/** @default 24 */
/**
* @description Frames per second of the generated video
* @default 24
*/
fps: number
/**
* Format: float
* @default 12.5
* @description Guidance scale for generation control
* @default 10
*/
guidance_scale: number
/** @description Random seed for generation (default: random) */
seed?: number
/** @default 80 */
/**
* @description Number of denoising steps
* @default 80
*/
steps: number
/** @default true */
/**
* @description Whether to use timestep transformation
* @default true
*/
use_timestep_transform: boolean
/**
* Format: float
* @description Shift value for generation control
* @default 3
*/
shift_value: number
/** @default true */
/**
* @description Whether to use guidance scheduling
* @default true
*/
use_guidance_schedule: boolean
/** @default true */
/**
* @description Whether to add quality guidance
* @default true
*/
add_quality_guidance: boolean
/**
* Format: float
* @description CLIP value for generation control
* @default 3
*/
clip_value: number
/** @default false */
/**
* @description Whether to use negative prompts
* @default false
*/
use_negative_prompts: boolean
/** @description Negative prompt text */
negative_prompt?: string
warmup_steps?: number
cooldown_steps?: number
/**
* @description Number of warmup steps (calculated based on num_frames)
* @default 0
*/
warmup_steps: number
/**
* @description Number of cooldown steps (calculated based on num_frames)
* @default 75
*/
cooldown_steps: number
/**
* Format: float
* @description Caching coefficient for optimization
* @default 0.3
*/
caching_coefficient: number
/** @default 3 */
/**
* @description Number of caching warmup steps
* @default 3
*/
caching_warmup: number
/** @default 3 */
/**
* @description Number of caching cooldown steps
* @default 3
*/
caching_cooldown: number
/** @default 0 */
/**
* @description Index of the conditioning frame
* @default 0
*/
conditioning_frame_index: number
}
MoonvalleyVideoToVideoInferenceParams: {
/**
* Format: float
* @description Guidance scale for generation control
* @default 15
*/
guidance_scale: number
/** @description Random seed for generation (default: random) */
seed?: number
/**
* @description Number of denoising steps
* @default 80
*/
steps: number
/**
* @description Whether to use timestep transformation
* @default true
*/
use_timestep_transform: boolean
/**
* Format: float
* @description Shift value for generation control
* @default 3
*/
shift_value: number
/**
* @description Whether to use guidance scheduling
* @default true
*/
use_guidance_schedule: boolean
/**
* @description Whether to add quality guidance
* @default true
*/
add_quality_guidance: boolean
/**
* Format: float
* @description CLIP value for generation control
* @default 3
*/
clip_value: number
/**
* @description Whether to use negative prompts
* @default false
*/
use_negative_prompts: boolean
/** @description Negative prompt text */
negative_prompt?: string
/**
* @description Number of warmup steps (calculated based on num_frames)
* @default 24
*/
warmup_steps: number
/**
* @description Number of cooldown steps (calculated based on num_frames)
* @default 36
*/
cooldown_steps: number
/**
* Format: float
* @description Caching coefficient for optimization
* @default 0.3
*/
caching_coefficient: number
/**
* @description Number of caching warmup steps
* @default 3
*/
caching_warmup: number
/**
* @description Number of caching cooldown steps
* @default 3
*/
caching_cooldown: number
/**
* @description Index of the conditioning frame
* @default 0
*/
conditioning_frame_index: number
}
MoonvalleyTextToImageRequest: {
prompt_text?: string
image_url?: string
inference_params?: components['schemas']['MoonvalleyInferenceParams']
inference_params?: components['schemas']['MoonvalleyTextToVideoInferenceParams']
webhook_url?: string
}
MoonvalleyTextToVideoRequest: {
prompt_text?: string
image_url?: string
inference_params?: components['schemas']['MoonvalleyInferenceParams']
inference_params?: components['schemas']['MoonvalleyTextToVideoInferenceParams']
webhook_url?: string
}
MoonvalleyVideoToVideoRequest: components['schemas']['MoonvalleyTextToVideoRequest'] & {
MoonvalleyVideoToVideoRequest: {
/** @description Describes the video to generate */
prompt_text: string
/** @description Url to control video */
video_url: string
control_type: string
/**
* @description Supported types for video control
* @enum {string}
*/
control_type: 'motion_control' | 'pose_control'
/** @description Parameters for video-to-video generation inference */
inference_params?: components['schemas']['MoonvalleyVideoToVideoInferenceParams']
/** @description Optional webhook URL for notifications */
webhook_url?: string
}
MoonvalleyPromptResponse: {
id?: string
@@ -10421,6 +10651,89 @@ export interface operations {
}
}
}
claimMyNode: {
parameters: {
query?: never
header?: never
path: {
publisherId: string
nodeId: string
}
cookie?: never
}
requestBody: {
content: {
'application/json': components['schemas']['ClaimMyNodeRequest']
}
}
responses: {
/** @description Node claimed successfully */
204: {
headers: {
[name: string]: unknown
}
content?: never
}
/** @description Bad request, invalid input data */
400: {
headers: {
[name: string]: unknown
}
content: {
'application/json': components['schemas']['ErrorResponse']
}
}
/** @description Unauthorized */
401: {
headers: {
[name: string]: unknown
}
content?: never
}
/** @description Forbidden - various authorization and permission issues
* Includes:
* - The authenticated user does not have permission to claim the node
* - The node is already claimed by another publisher
* - The GH_TOKEN is invalid
* - The repository is not owned by the authenticated GitHub user
* */
403: {
headers: {
[name: string]: unknown
}
content: {
'application/json': components['schemas']['ErrorResponse']
}
}
/** @description Too many requests - GitHub API rate limit exceeded */
429: {
headers: {
[name: string]: unknown
}
content: {
'application/json': components['schemas']['ErrorResponse']
}
}
/** @description Internal server error */
500: {
headers: {
[name: string]: unknown
}
content: {
'application/json': components['schemas']['ErrorResponse']
}
}
/** @description Service unavailable - GitHub API is currently unavailable */
503: {
headers: {
[name: string]: unknown
}
content: {
'application/json': components['schemas']['ErrorResponse']
}
}
}
}
listNodesForPublisherV2: {
parameters: {
query?: {
@@ -11709,6 +12022,48 @@ export interface operations {
}
}
}
getBulkNodeVersions: {
parameters: {
query?: never
header?: never
path?: never
cookie?: never
}
requestBody: {
content: {
'application/json': components['schemas']['BulkNodeVersionsRequest']
}
}
responses: {
/** @description Successfully retrieved node versions */
200: {
headers: {
[name: string]: unknown
}
content: {
'application/json': components['schemas']['BulkNodeVersionsResponse']
}
}
/** @description Bad request, invalid input */
400: {
headers: {
[name: string]: unknown
}
content: {
'application/json': components['schemas']['ErrorResponse']
}
}
/** @description Internal server error */
500: {
headers: {
[name: string]: unknown
}
content: {
'application/json': components['schemas']['ErrorResponse']
}
}
}
}
listAllNodeVersions: {
parameters: {
query?: {
@@ -11834,6 +12189,75 @@ export interface operations {
}
}
}
adminUpdateNode: {
parameters: {
query?: never
header?: never
path: {
nodeId: string
}
cookie?: never
}
requestBody: {
content: {
'application/json': components['schemas']['Node']
}
}
responses: {
/** @description Node updated successfully */
200: {
headers: {
[name: string]: unknown
}
content: {
'application/json': components['schemas']['Node']
}
}
/** @description Bad request, invalid input data. */
400: {
headers: {
[name: string]: unknown
}
content: {
'application/json': components['schemas']['ErrorResponse']
}
}
/** @description Unauthorized */
401: {
headers: {
[name: string]: unknown
}
content?: never
}
/** @description Forbidden */
403: {
headers: {
[name: string]: unknown
}
content: {
'application/json': components['schemas']['ErrorResponse']
}
}
/** @description Node not found */
404: {
headers: {
[name: string]: unknown
}
content: {
'application/json': components['schemas']['ErrorResponse']
}
}
/** @description Internal server error */
500: {
headers: {
[name: string]: unknown
}
content: {
'application/json': components['schemas']['ErrorResponse']
}
}
}
}
adminUpdateNodeVersion: {
parameters: {
query?: never

View File

@@ -46,7 +46,7 @@ export class ExecutableGroupNodeChildDTO extends ExecutableNodeDTO {
return {
node: inputNodeDto,
origin_id: inputNode.id,
origin_id: String(inputNode.id),
origin_slot: link.origin_slot
}
}

View File

@@ -145,6 +145,13 @@ export const graphToPrompt = async (
const resolvedInput = node.resolveInput(i)
if (!resolvedInput) continue
// Resolved to an actual widget value rather than a node connection
if (resolvedInput.widgetInfo) {
const { value } = resolvedInput.widgetInfo
inputs[input.name] = Array.isArray(value) ? { __value__: value } : value
continue
}
inputs[input.name] = [
String(resolvedInput.origin_id),
// @ts-expect-error link.origin_slot is already number.
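
Net effect of the new branch: an input that resolves to a concrete widget value is serialized as a literal instead of a `[origin_id, origin_slot]` link, with array values wrapped so they cannot be confused with links. Illustrative shapes only; the ids and values below are made up.

```ts
// Hypothetical resulting prompt inputs after resolution:
const inputs: Record<string, unknown> = {
  // Resolved to a node connection: [origin_id, origin_slot]
  model: ['4', 0],
  // Resolved to a scalar widget value: stored as the literal
  steps: 20,
  // Resolved to an array widget value: wrapped to distinguish it from a link
  size: { __value__: [512, 512] }
}
```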

View File

@@ -0,0 +1,208 @@
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { api } from '@/scripts/api'
describe('API Feature Flags', () => {
let mockWebSocket: any
const wsEventHandlers: { [key: string]: (event: any) => void } = {}
beforeEach(() => {
// Use fake timers
vi.useFakeTimers()
// Mock WebSocket
mockWebSocket = {
readyState: 1, // WebSocket.OPEN
send: vi.fn(),
close: vi.fn(),
addEventListener: vi.fn(
(event: string, handler: (event: any) => void) => {
wsEventHandlers[event] = handler
}
),
removeEventListener: vi.fn()
}
// Mock WebSocket constructor
global.WebSocket = vi.fn().mockImplementation(() => mockWebSocket) as any
// Reset API state
api.serverFeatureFlags = {}
// Mock getClientFeatureFlags to return test feature flags
vi.spyOn(api, 'getClientFeatureFlags').mockReturnValue({
supports_preview_metadata: true,
api_version: '1.0.0',
capabilities: ['bulk_operations', 'async_nodes']
})
})
afterEach(() => {
vi.useRealTimers()
vi.restoreAllMocks()
})
describe('Feature flags negotiation', () => {
it('should send client feature flags as first message on connection', async () => {
// Initialize API connection
const initPromise = api.init()
// Simulate connection open
wsEventHandlers['open'](new Event('open'))
// Check that feature flags were sent as first message
expect(mockWebSocket.send).toHaveBeenCalledTimes(1)
const sentMessage = JSON.parse(mockWebSocket.send.mock.calls[0][0])
expect(sentMessage).toEqual({
type: 'feature_flags',
data: {
supports_preview_metadata: true,
api_version: '1.0.0',
capabilities: ['bulk_operations', 'async_nodes']
}
})
// Simulate server response with status message
wsEventHandlers['message']({
data: JSON.stringify({
type: 'status',
data: {
status: { exec_info: { queue_remaining: 0 } },
sid: 'test-sid'
}
})
})
// Simulate server feature flags response
wsEventHandlers['message']({
data: JSON.stringify({
type: 'feature_flags',
data: {
supports_preview_metadata: true,
async_execution: true,
supported_formats: ['webp', 'jpeg', 'png'],
api_version: '1.0.0',
max_upload_size: 104857600,
capabilities: ['isolated_nodes', 'dynamic_models']
}
})
})
await initPromise
// Check that server features were stored
expect(api.serverFeatureFlags).toEqual({
supports_preview_metadata: true,
async_execution: true,
supported_formats: ['webp', 'jpeg', 'png'],
api_version: '1.0.0',
max_upload_size: 104857600,
capabilities: ['isolated_nodes', 'dynamic_models']
})
})
it('should handle server without feature flags support', async () => {
// Initialize API connection
const initPromise = api.init()
// Simulate connection open
wsEventHandlers['open'](new Event('open'))
// Clear the send mock to reset
mockWebSocket.send.mockClear()
// Simulate server response with status but no feature flags
wsEventHandlers['message']({
data: JSON.stringify({
type: 'status',
data: {
status: { exec_info: { queue_remaining: 0 } },
sid: 'test-sid'
}
})
})
// Simulate some other message (not feature flags)
wsEventHandlers['message']({
data: JSON.stringify({
type: 'execution_start',
data: {}
})
})
await initPromise
// Server features should remain empty
expect(api.serverFeatureFlags).toEqual({})
})
})
describe('Feature checking methods', () => {
beforeEach(() => {
// Set up some test features
api.serverFeatureFlags = {
supports_preview_metadata: true,
async_execution: false,
capabilities: ['isolated_nodes', 'dynamic_models']
}
})
it('should check if server supports a boolean feature', () => {
expect(api.serverSupportsFeature('supports_preview_metadata')).toBe(true)
expect(api.serverSupportsFeature('async_execution')).toBe(false)
expect(api.serverSupportsFeature('non_existent_feature')).toBe(false)
})
it('should get server feature value', () => {
expect(api.getServerFeature('supports_preview_metadata')).toBe(true)
expect(api.getServerFeature('capabilities')).toEqual([
'isolated_nodes',
'dynamic_models'
])
expect(api.getServerFeature('non_existent_feature')).toBeUndefined()
})
})
describe('Client feature flags configuration', () => {
it('should use mocked client feature flags', () => {
// Verify mocked flags are returned
const clientFlags = api.getClientFeatureFlags()
expect(clientFlags).toEqual({
supports_preview_metadata: true,
api_version: '1.0.0',
capabilities: ['bulk_operations', 'async_nodes']
})
})
it('should return a copy of client feature flags', () => {
// Temporarily restore the real implementation for this test
vi.mocked(api.getClientFeatureFlags).mockRestore()
// Verify that modifications to returned object don't affect original
const clientFlags1 = api.getClientFeatureFlags()
const clientFlags2 = api.getClientFeatureFlags()
// Should be different objects
expect(clientFlags1).not.toBe(clientFlags2)
// But with same content
expect(clientFlags1).toEqual(clientFlags2)
// Modifying one should not affect the other
clientFlags1.test_flag = true
expect(api.getClientFeatureFlags()).not.toHaveProperty('test_flag')
})
})
describe('Integration with preview messages', () => {
it('should affect preview message handling based on feature support', () => {
// Test with metadata support
api.serverFeatureFlags = { supports_preview_metadata: true }
expect(api.serverSupportsFeature('supports_preview_metadata')).toBe(true)
// Test without metadata support
api.serverFeatureFlags = {}
expect(api.serverSupportsFeature('supports_preview_metadata')).toBe(false)
})
})
})

View File

@@ -1,598 +1,329 @@
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { nextTick, ref } from 'vue'
import { nextTick } from 'vue'
import { useManagerQueue } from '@/composables/useManagerQueue'
import { app } from '@/scripts/app'
import { api } from '@/scripts/api'
// Mock VueUse's useEventListener
const mockEventListeners = new Map()
const mockWheneverCallback = vi.fn()
vi.mock('@vueuse/core', async () => {
const actual = await vi.importActual('@vueuse/core')
return {
...actual,
useEventListener: vi.fn((target, event, handler) => {
if (!mockEventListeners.has(event)) {
mockEventListeners.set(event, [])
}
mockEventListeners.get(event).push(handler)
// Mock the addEventListener behavior
if (target && target.addEventListener) {
target.addEventListener(event, handler)
}
// Return cleanup function
return () => {
if (target && target.removeEventListener) {
target.removeEventListener(event, handler)
}
}
}),
whenever: vi.fn((_source, cb) => {
mockWheneverCallback.mockImplementation(cb)
})
vi.mock('@/scripts/api', () => ({
api: {
addEventListener: vi.fn(),
removeEventListener: vi.fn(),
dispatchEvent: vi.fn()
}
})
vi.mock('@/scripts/app', () => ({
app: {
api: {
clientId: 'test-client-id',
addEventListener: vi.fn(),
removeEventListener: vi.fn(),
dispatchEvent: vi.fn()
}
}
}))
vi.mock('@/services/comfyManagerService', () => ({
useComfyManagerService: vi.fn(() => ({
getTaskQueue: vi.fn().mockResolvedValue({
queue_running: [],
queue_pending: []
}),
getTaskHistory: vi.fn().mockResolvedValue({}),
clearTaskHistory: vi.fn().mockResolvedValue(null),
deleteTaskHistoryItems: vi.fn().mockResolvedValue(null)
}))
}))
const mockShowManagerProgressDialog = vi.fn()
vi.mock('@/services/dialogService', () => ({
useDialogService: vi.fn(() => ({
showManagerProgressDialog: mockShowManagerProgressDialog
}))
}))
describe('useManagerQueue', () => {
let taskHistory: any
let taskQueue: any
let installedPacks: any
// Helper functions
const createMockTask = (
id: string,
clientId = 'test-client-id',
additional = {}
) => ({
id,
client_id: clientId,
...additional
const createMockTask = (result: any = 'result') => ({
task: vi.fn().mockResolvedValue(result),
onComplete: vi.fn()
})
const createMockHistoryItem = (
clientId = 'test-client-id',
result = 'success',
additional = {}
) => ({
client_id: clientId,
result,
...additional
})
const createMockState = (overrides = {}) => ({
running_queue: [],
pending_queue: [],
history: {},
installed_packs: {},
...overrides
})
const triggerWebSocketEvent = (eventType: string, state: any) => {
const mockEventListener = app.api.addEventListener as any
const eventCall = mockEventListener.mock.calls.find(
(call: any) => call[0] === eventType
)
if (eventCall) {
const handler = eventCall[1]
handler({
type: eventType,
detail: { state }
})
}
const createQueueWithMockTask = () => {
const queue = useManagerQueue()
const mockTask = createMockTask()
queue.enqueueTask(mockTask)
return { queue, mockTask }
}
const getEventHandler = (eventType: string) => {
const mockEventListener = app.api.addEventListener as any
const eventCall = mockEventListener.mock.calls.find(
(call: any) => call[0] === eventType
)
return eventCall ? eventCall[1] : null
const getEventListenerCallback = () =>
vi.mocked(api.addEventListener).mock.calls[0][1]
const simulateServerStatus = async (status: 'all-done' | 'in_progress') => {
const event = new CustomEvent('cm-queue-status', {
detail: { status }
})
getEventListenerCallback()!(event as any)
await nextTick()
}
beforeEach(() => {
vi.clearAllMocks()
mockEventListeners.clear()
taskHistory = ref({})
taskQueue = ref({
history: {},
running_queue: [],
pending_queue: [],
installed_packs: {}
})
installedPacks = ref({})
})
afterEach(() => {
vi.clearAllMocks()
mockEventListeners.clear()
})
describe('initialization', () => {
it('should initialize with empty queue and DONE status', () => {
const queue = useManagerQueue(taskHistory, taskQueue, installedPacks)
const queue = useManagerQueue()
expect(queue.queueLength.value).toBe(0)
expect(queue.statusMessage.value).toBe('all-done')
expect(queue.allTasksDone.value).toBe(true)
})
it('should set up event listeners on creation', () => {
useManagerQueue(taskHistory, taskQueue, installedPacks)
expect(app.api.addEventListener).toHaveBeenCalled()
})
})
describe('processing state handling', () => {
it('should update processing state based on queue length', async () => {
const queue = useManagerQueue(taskHistory, taskQueue, installedPacks)
describe('queue management', () => {
it('should add tasks to the queue', () => {
const queue = useManagerQueue()
const mockTask = createMockTask()
// Initially empty queue
expect(queue.isProcessing.value).toBe(false)
expect(queue.allTasksDone.value).toBe(true)
queue.enqueueTask(mockTask)
// Add tasks to queue
taskQueue.value.running_queue = [createMockTask('task1')]
taskQueue.value.pending_queue = [createMockTask('task2')]
// Force reactivity update
await nextTick()
expect(queue.queueLength.value).toBe(2)
})
it('should trigger progress dialog when queue length changes', async () => {
useManagerQueue(taskHistory, taskQueue, installedPacks)
// Trigger the whenever callback
mockWheneverCallback()
expect(mockShowManagerProgressDialog).toHaveBeenCalled()
})
})
describe('task state management', () => {
it('should reflect task queue state changes', async () => {
const queue = useManagerQueue(taskHistory, taskQueue, installedPacks)
// Add running tasks
taskQueue.value.running_queue = [createMockTask('task1')]
taskQueue.value.pending_queue = [createMockTask('task2')]
await nextTick()
expect(queue.queueLength.value).toBe(2)
expect(queue.allTasksDone.value).toBe(false)
})
it('should handle empty queue state', async () => {
const queue = useManagerQueue(taskHistory, taskQueue, installedPacks)
taskQueue.value.running_queue = []
taskQueue.value.pending_queue = []
await nextTick()
expect(queue.queueLength.value).toBe(0)
expect(queue.allTasksDone.value).toBe(true)
})
it('should handle large queue sizes', async () => {
const queue = useManagerQueue(taskHistory, taskQueue, installedPacks)
// Create large queues
taskQueue.value.running_queue = Array(50)
.fill(null)
.map((_, i) => createMockTask(`running-${i}`))
taskQueue.value.pending_queue = Array(100)
.fill(null)
.map((_, i) => createMockTask(`pending-${i}`))
await nextTick()
expect(queue.queueLength.value).toBe(150)
expect(queue.allTasksDone.value).toBe(false)
})
})
describe('queue data management', () => {
it('should provide access to task queue state', async () => {
const runningTasks = [createMockTask('task1')]
const pendingTasks = [createMockTask('task2'), createMockTask('task3')]
taskQueue.value.running_queue = runningTasks
taskQueue.value.pending_queue = pendingTasks
const queue = useManagerQueue(taskHistory, taskQueue, installedPacks)
await nextTick()
expect(queue.taskQueue.value.running_queue).toEqual(runningTasks)
expect(queue.taskQueue.value.pending_queue).toEqual(pendingTasks)
expect(queue.queueLength.value).toBe(3)
})
it('should provide access to task history', async () => {
const mockHistory = {
task1: createMockHistoryItem(),
task2: createMockHistoryItem('test-client-id', 'error')
}
taskHistory.value = mockHistory
const queue = useManagerQueue(taskHistory, taskQueue, installedPacks)
await nextTick()
expect(queue.taskHistory.value).toEqual(mockHistory)
expect(queue.historyCount.value).toBe(2)
})
it('should handle empty state gracefully', async () => {
taskQueue.value.running_queue = []
taskQueue.value.pending_queue = []
taskHistory.value = {}
const queue = useManagerQueue(taskHistory, taskQueue, installedPacks)
await nextTick()
expect(queue.queueLength.value).toBe(0)
expect(queue.historyCount.value).toBe(0)
})
})
describe('state management', () => {
it('should provide reactive task history', async () => {
const queue = useManagerQueue(taskHistory, taskQueue, installedPacks)
taskHistory.value = {
task1: createMockHistoryItem(),
task2: createMockHistoryItem('test-client-id', 'error')
}
await nextTick()
expect(queue.taskHistory.value).toEqual(taskHistory.value)
expect(queue.historyCount.value).toBe(2)
})
it('should provide reactive installed packs', async () => {
installedPacks.value = {
pack1: { version: '1.0' },
pack2: { version: '2.0' }
}
await nextTick()
// The composable should have access to installedPacks through the parameter
expect(installedPacks.value).toEqual({
pack1: { version: '1.0' },
pack2: { version: '2.0' }
})
})
})
describe('computed properties', () => {
it('should correctly compute allTasksDone', async () => {
const queue = useManagerQueue(taskHistory, taskQueue, installedPacks)
// Empty queue = all done
expect(queue.allTasksDone.value).toBe(true)
// Add pending tasks
taskQueue.value.pending_queue = [createMockTask('task1')]
await nextTick()
expect(queue.allTasksDone.value).toBe(false)
// Clear queue
taskQueue.value.running_queue = []
taskQueue.value.pending_queue = []
await nextTick()
expect(queue.allTasksDone.value).toBe(true)
})
it('should correctly compute queueLength', async () => {
const queue = useManagerQueue(taskHistory, taskQueue, installedPacks)
expect(queue.queueLength.value).toBe(0)
taskQueue.value.running_queue = [createMockTask('task1')]
taskQueue.value.pending_queue = [
createMockTask('task2'),
createMockTask('task3')
]
await nextTick()
expect(queue.queueLength.value).toBe(3)
})
})
describe('client filtering functionality', () => {
it('should filter tasks by client ID in WebSocket events', async () => {
const queue = useManagerQueue(taskHistory, taskQueue, installedPacks)
const mockState = createMockState({
running_queue: [
createMockTask('task1'),
createMockTask('task2', 'other-client-id')
],
pending_queue: [createMockTask('task3')]
})
triggerWebSocketEvent('cm-task-completed', mockState)
await nextTick()
// Should only include tasks from this client
expect(taskQueue.value.running_queue).toEqual([createMockTask('task1')])
expect(taskQueue.value.pending_queue).toEqual([createMockTask('task3')])
expect(queue.queueLength.value).toBe(2)
})
it('should filter history by client ID in WebSocket events', async () => {
const queue = useManagerQueue(taskHistory, taskQueue, installedPacks)
const mockState = createMockState({
history: {
task1: createMockHistoryItem(),
task2: createMockHistoryItem('other-client-id'),
task3: createMockHistoryItem()
}
})
triggerWebSocketEvent('cm-task-completed', mockState)
await nextTick()
// Should only include history items from this client
expect(Object.keys(taskHistory.value)).toHaveLength(2)
expect(taskHistory.value).toHaveProperty('task1')
expect(taskHistory.value).toHaveProperty('task3')
expect(taskHistory.value).not.toHaveProperty('task2')
expect(queue.historyCount.value).toBe(2)
})
it('should handle all tasks being from other clients', async () => {
const queue = useManagerQueue(taskHistory, taskQueue, installedPacks)
const mockState = createMockState({
running_queue: [
createMockTask('task1', 'other-client-1'),
createMockTask('task2', 'other-client-2')
],
pending_queue: [createMockTask('task3', 'other-client-1')],
history: {
task4: createMockHistoryItem('other-client-1'),
task5: createMockHistoryItem('other-client-2')
}
})
triggerWebSocketEvent('cm-task-completed', mockState)
await nextTick()
// Should have no tasks or history
expect(taskQueue.value.running_queue).toEqual([])
expect(taskQueue.value.pending_queue).toEqual([])
expect(taskHistory.value).toEqual({})
expect(queue.queueLength.value).toBe(0)
expect(queue.historyCount.value).toBe(0)
})
})
describe('WebSocket event handling', () => {
it('should handle task done events', async () => {
useManagerQueue(taskHistory, taskQueue, installedPacks)
const mockState = createMockState({
running_queue: [createMockTask('task1')],
history: {
task1: createMockHistoryItem()
},
installed_packs: { pack1: { version: '1.0' } }
})
triggerWebSocketEvent('cm-task-completed', mockState)
await nextTick()
expect(taskQueue.value.running_queue).toEqual([createMockTask('task1')])
expect(taskQueue.value.pending_queue).toEqual([])
expect(taskHistory.value).toEqual({
task1: createMockHistoryItem()
})
expect(installedPacks.value).toEqual({ pack1: { version: '1.0' } })
})
it('should handle task started events', async () => {
useManagerQueue(taskHistory, taskQueue, installedPacks)
const mockState = createMockState({
running_queue: [createMockTask('task1')],
pending_queue: [createMockTask('task2')],
installed_packs: { pack1: { version: '1.0' } }
})
triggerWebSocketEvent('cm-task-started', mockState)
await nextTick()
expect(taskQueue.value.running_queue).toEqual([createMockTask('task1')])
expect(taskQueue.value.pending_queue).toEqual([createMockTask('task2')])
expect(installedPacks.value).toEqual({ pack1: { version: '1.0' } })
})
it('should filter out tasks from other clients in WebSocket events', async () => {
useManagerQueue(taskHistory, taskQueue, installedPacks)
const mockState = createMockState({
running_queue: [
createMockTask('task1'),
createMockTask('task2', 'other-client-id')
],
pending_queue: [createMockTask('task3', 'other-client-id')],
history: {
task1: createMockHistoryItem(),
task2: createMockHistoryItem('other-client-id')
}
})
triggerWebSocketEvent('cm-task-completed', mockState)
await nextTick()
// Should only include tasks from this client
expect(taskQueue.value.running_queue).toEqual([createMockTask('task1')])
expect(taskQueue.value.pending_queue).toEqual([])
expect(taskHistory.value).toEqual({
task1: createMockHistoryItem()
})
})
it('should ignore events with wrong type', async () => {
useManagerQueue(taskHistory, taskQueue, installedPacks)
const handler = getEventHandler('cm-task-completed')
// Send event with wrong type
handler({
type: 'wrong-event-type',
detail: {
state: createMockState({ running_queue: [createMockTask('task1')] })
}
})
await nextTick()
// Should not update state
expect(taskQueue.value.running_queue).toEqual([])
})
})
describe('cleanup functionality', () => {
it('should clean up event listeners on stopListening', () => {
const queue = useManagerQueue(taskHistory, taskQueue, installedPacks)
const mockRemoveEventListener = app.api.removeEventListener as any
queue.stopListening()
expect(mockRemoveEventListener).toHaveBeenCalledTimes(2)
// Check that removeEventListener was called with both event names
const calls = mockRemoveEventListener.mock.calls
const eventTypes = calls.map((call: any) => call[0])
expect(eventTypes).toContain('cm-task-completed')
expect(eventTypes).toContain('cm-task-started')
// Check that a handler function was passed as the second argument of each call
calls.forEach((call: any) => {
expect(typeof call[1]).toBe('function')
})
})
it('should handle multiple stopListening calls gracefully', () => {
const queue = useManagerQueue(taskHistory, taskQueue, installedPacks)
const mockRemoveEventListener = app.api.removeEventListener as any
queue.stopListening()
queue.stopListening()
// removeEventListener is invoked for both event types on each call, so four calls in total
expect(mockRemoveEventListener).toHaveBeenCalledTimes(4)
})
})
describe('edge cases', () => {
it('should handle undefined installed_packs in state update', async () => {
useManagerQueue(taskHistory, taskQueue, installedPacks)
const mockState = createMockState({
running_queue: [createMockTask('task1')],
installed_packs: undefined
})
triggerWebSocketEvent('cm-task-completed', mockState)
await nextTick()
// Should not update installedPacks when undefined
expect(installedPacks.value).toEqual({})
})
it('should handle rapid successive events', async () => {
const queue = useManagerQueue(taskHistory, taskQueue, installedPacks)
// Send multiple events rapidly
for (let i = 0; i < 10; i++) {
triggerWebSocketEvent(
'cm-task-completed',
createMockState({
running_queue: [createMockTask(`task${i}`)],
history: { [`task${i}`]: createMockHistoryItem() }
})
)
}
await nextTick()
// Should have the last state
expect(taskQueue.value.running_queue).toEqual([createMockTask('task9')])
expect(queue.queueLength.value).toBe(1)
expect(queue.allTasksDone.value).toBe(false)
})
it('should maintain consistency when mixing event types', async () => {
useManagerQueue(taskHistory, taskQueue, installedPacks)
it('should clear the queue when clearQueue is called', () => {
const queue = useManagerQueue()
// Send alternating event types
triggerWebSocketEvent(
'cm-task-started',
createMockState({
running_queue: [createMockTask('task1')],
pending_queue: [createMockTask('task2')]
})
)
// Add some tasks
queue.enqueueTask(createMockTask())
queue.enqueueTask(createMockTask())
triggerWebSocketEvent(
'cm-task-completed',
createMockState({
running_queue: [],
pending_queue: [createMockTask('task2')],
history: { task1: createMockHistoryItem() }
})
)
expect(queue.queueLength.value).toBe(2)
// Clear the queue
queue.clearQueue()
expect(queue.queueLength.value).toBe(0)
expect(queue.allTasksDone.value).toBe(true)
})
})
describe('server status handling', () => {
it('should update server status when receiving websocket events', async () => {
const queue = useManagerQueue()
await simulateServerStatus('in_progress')
expect(queue.statusMessage.value).toBe('in_progress')
expect(queue.allTasksDone.value).toBe(false)
})
it('should handle invalid status values gracefully', async () => {
const queue = useManagerQueue()
// Simulate an invalid status
const event = new CustomEvent('cm-queue-status', {
detail: null as any
})
getEventListenerCallback()!(event)
await nextTick()
expect(taskQueue.value.running_queue).toEqual([])
expect(taskQueue.value.pending_queue).toEqual([createMockTask('task2')])
expect(taskHistory.value).toHaveProperty('task1')
// Should maintain the default status
expect(queue.statusMessage.value).toBe('all-done')
})
it('should handle missing status property gracefully', async () => {
const queue = useManagerQueue()
// Simulate a detail object without status property
const event = new CustomEvent('cm-queue-status', {
detail: { someOtherProperty: 'value' } as any
})
getEventListenerCallback()!(event)
await nextTick()
// Should maintain the default status
expect(queue.statusMessage.value).toBe('all-done')
})
})
describe('task execution', () => {
it('should start the next task when server is idle and queue has items', async () => {
const { queue, mockTask } = createQueueWithMockTask()
await simulateServerStatus('all-done')
// Task should have been started
expect(mockTask.task).toHaveBeenCalled()
expect(queue.queueLength.value).toBe(0)
})
it('should execute onComplete callback when task completes and server becomes idle', async () => {
const { mockTask } = createQueueWithMockTask()
// Start the task
await simulateServerStatus('all-done')
expect(mockTask.task).toHaveBeenCalled()
// Simulate task completion
await mockTask.task.mock.results[0].value
// Simulate server cycle (in_progress -> all-done)
await simulateServerStatus('in_progress')
expect(mockTask.onComplete).not.toHaveBeenCalled()
await simulateServerStatus('all-done')
expect(mockTask.onComplete).toHaveBeenCalled()
})
it('should handle tasks without onComplete callback', async () => {
const queue = useManagerQueue()
const mockTask = { task: vi.fn().mockResolvedValue('result') }
queue.enqueueTask(mockTask)
// Start the task
await simulateServerStatus('all-done')
expect(mockTask.task).toHaveBeenCalled()
// Simulate task completion
await mockTask.task.mock.results[0].value
// Simulate server cycle
await simulateServerStatus('in_progress')
await simulateServerStatus('all-done')
// Should not throw errors even without onComplete
expect(queue.allTasksDone.value).toBe(true)
})
it('should process multiple tasks in sequence', async () => {
const queue = useManagerQueue()
const mockTask1 = createMockTask('result1')
const mockTask2 = createMockTask('result2')
// Add tasks to the queue
queue.enqueueTask(mockTask1)
queue.enqueueTask(mockTask2)
expect(queue.queueLength.value).toBe(2)
// Process first task
await simulateServerStatus('all-done')
expect(mockTask1.task).toHaveBeenCalled()
expect(queue.queueLength.value).toBe(1)
// Complete first task
await mockTask1.task.mock.results[0].value
await simulateServerStatus('in_progress')
await simulateServerStatus('all-done')
expect(mockTask1.onComplete).toHaveBeenCalled()
// Process second task
expect(mockTask2.task).toHaveBeenCalled()
expect(queue.queueLength.value).toBe(0)
// Complete second task
await mockTask2.task.mock.results[0].value
await simulateServerStatus('in_progress')
await simulateServerStatus('all-done')
expect(mockTask2.onComplete).toHaveBeenCalled()
// Queue should be empty and all tasks done
expect(queue.queueLength.value).toBe(0)
expect(queue.allTasksDone.value).toBe(true)
})
it('should handle task that returns rejected promise', async () => {
const queue = useManagerQueue()
const mockTask = {
task: vi.fn().mockRejectedValue(new Error('Task failed')),
onComplete: vi.fn()
}
queue.enqueueTask(mockTask)
// Start the task
await simulateServerStatus('all-done')
expect(mockTask.task).toHaveBeenCalled()
// Let the promise rejection happen
try {
await mockTask.task()
} catch (e) {
// Ignore the error
}
// Simulate server cycle
await simulateServerStatus('in_progress')
await simulateServerStatus('all-done')
// onComplete should still be called for failed tasks
expect(mockTask.onComplete).toHaveBeenCalled()
})
it('should handle multiple tasks enqueued at once while the server is busy', async () => {
const queue = useManagerQueue()
const mockTask1 = createMockTask()
const mockTask2 = createMockTask()
const mockTask3 = createMockTask()
// Three tasks enqueued at once
await simulateServerStatus('in_progress')
await Promise.all([
queue.enqueueTask(mockTask1),
queue.enqueueTask(mockTask2),
queue.enqueueTask(mockTask3)
])
// Task 1
await simulateServerStatus('all-done')
expect(mockTask1.task).toHaveBeenCalled()
// Verify state of onComplete callbacks
expect(mockTask1.onComplete).toHaveBeenCalled()
expect(mockTask2.onComplete).not.toHaveBeenCalled()
expect(mockTask3.onComplete).not.toHaveBeenCalled()
// Verify state of queue
expect(queue.queueLength.value).toBe(2)
expect(queue.allTasksDone.value).toBe(false)
// Task 2
await simulateServerStatus('in_progress')
await simulateServerStatus('all-done')
expect(mockTask2.task).toHaveBeenCalled()
// Verify state of onComplete callbacks
expect(mockTask2.onComplete).toHaveBeenCalled()
expect(mockTask3.onComplete).not.toHaveBeenCalled()
// Verify state of queue
expect(queue.queueLength.value).toBe(1)
expect(queue.allTasksDone.value).toBe(false)
// Task 3
await simulateServerStatus('in_progress')
await simulateServerStatus('all-done')
// Verify state of onComplete callbacks
expect(mockTask3.task).toHaveBeenCalled()
expect(mockTask3.onComplete).toHaveBeenCalled()
// Verify state of queue
expect(queue.queueLength.value).toBe(0)
expect(queue.allTasksDone.value).toBe(true)
})
it('should handle adding tasks while processing is in progress', async () => {
const queue = useManagerQueue()
const mockTask1 = createMockTask()
const mockTask2 = createMockTask()
// Add first task and start processing
queue.enqueueTask(mockTask1)
await simulateServerStatus('all-done')
expect(mockTask1.task).toHaveBeenCalled()
// Add second task while first is processing
queue.enqueueTask(mockTask2)
expect(queue.queueLength.value).toBe(1)
// Complete first task
await mockTask1.task.mock.results[0].value
await simulateServerStatus('in_progress')
await simulateServerStatus('all-done')
// Second task should now be processed
expect(mockTask2.task).toHaveBeenCalled()
})
it('should handle server status changes without tasks in queue', async () => {
const queue = useManagerQueue()
// Cycle server status without any tasks
await simulateServerStatus('in_progress')
await simulateServerStatus('all-done')
await simulateServerStatus('in_progress')
await simulateServerStatus('all-done')
// Should not cause any errors
expect(queue.allTasksDone.value).toBe(true)
})
})
})