Merge branch 'main' into warning-when-frontend-version-mismatches
708
.claude/commands/comprehensive-pr-review.md
Normal file
@@ -0,0 +1,708 @@
|
||||
# Comprehensive PR Review for ComfyUI Frontend
|
||||
|
||||
<task>
|
||||
You are performing a comprehensive code review for the PR identified by the PR_NUMBER environment variable in the ComfyUI frontend repository. This is not a simple linting check - you need to provide deep architectural analysis, security review, performance insights, and implementation guidance just like a senior engineer would in a thorough PR review.
|
||||
|
||||
Your review should cover:
|
||||
1. Architecture and design patterns
|
||||
2. Security vulnerabilities and risks
|
||||
3. Performance implications
|
||||
4. Code quality and maintainability
|
||||
5. Integration with existing systems
|
||||
6. Best practices and conventions
|
||||
7. Testing considerations
|
||||
8. Documentation needs
|
||||
</task>
|
||||
|
||||
Arguments: PR number passed via PR_NUMBER environment variable
|
||||
|
||||
## Phase 0: Initialize Variables and Helper Functions
|
||||
|
||||
```bash
|
||||
# Validate PR_NUMBER first thing
if [ -z "$PR_NUMBER" ]; then
  echo "Error: PR_NUMBER environment variable is not set"
  echo "Usage: PR_NUMBER=<number> claude run /comprehensive-pr-review"
  exit 1
fi

# Initialize all counters at the start
CRITICAL_COUNT=0
HIGH_COUNT=0
MEDIUM_COUNT=0
LOW_COUNT=0
ARCHITECTURE_ISSUES=0
SECURITY_ISSUES=0
PERFORMANCE_ISSUES=0
QUALITY_ISSUES=0

# Helper function for posting review comments.
# Args: file_path line_number severity category issue context suggestion
# (file_path/line_number are accepted for interface compatibility even
# though this simple variant posts a top-level comment.)
post_review_comment() {
  local file_path=$1
  local line_number=$2
  local severity=$3    # critical/high/medium/low
  local category=$4    # architecture/security/performance/quality
  local issue=$5
  local context=$6
  local suggestion=$7

  # Update counters. Plain arithmetic assignment is used instead of
  # ((VAR++)): the post-increment form evaluates to the old value, so the
  # very first increment (0 -> 1) returns exit status 1 and would abort
  # the script under `set -e`.
  case $severity in
    "critical") CRITICAL_COUNT=$((CRITICAL_COUNT + 1)) ;;
    "high")     HIGH_COUNT=$((HIGH_COUNT + 1)) ;;
    "medium")   MEDIUM_COUNT=$((MEDIUM_COUNT + 1)) ;;
    "low")      LOW_COUNT=$((LOW_COUNT + 1)) ;;
  esac

  case $category in
    "architecture") ARCHITECTURE_ISSUES=$((ARCHITECTURE_ISSUES + 1)) ;;
    "security")     SECURITY_ISSUES=$((SECURITY_ISSUES + 1)) ;;
    "performance")  PERFORMANCE_ISSUES=$((PERFORMANCE_ISSUES + 1)) ;;
    "quality")      QUALITY_ISSUES=$((QUALITY_ISSUES + 1)) ;;
  esac

  # Post comment via GitHub CLI. printf produces real newlines ("\n"
  # inside double quotes is two literal characters in bash), and the body
  # is passed once on stdin with --body-file -; the original combined
  # --body with -F -, which are mutually exclusive flags.
  local comment
  comment=$(printf '%s\n%s\n%s' "$issue" "$context" "$suggestion")
  gh pr review "$PR_NUMBER" --comment --body-file - <<< "$comment"
}
|
||||
```
|
||||
|
||||
## Phase 1: Environment Setup and PR Context
|
||||
|
||||
```bash
|
||||
# Pre-flight checks: verify tooling, auth, PR existence, and API quota
# before doing any review work, so failures happen early and loudly.
check_prerequisites() {
  # Check gh CLI is available
  if ! command -v gh &> /dev/null; then
    echo "Error: gh CLI is not installed"
    exit 1
  fi

  # In GitHub Actions, auth is handled via GITHUB_TOKEN
  if [ -n "$GITHUB_ACTIONS" ] && [ -z "$GITHUB_TOKEN" ]; then
    echo "Error: GITHUB_TOKEN is not set in GitHub Actions"
    exit 1
  fi

  # Check if we're authenticated
  if ! gh auth status &> /dev/null; then
    echo "Error: Not authenticated with GitHub. Run 'gh auth login'"
    exit 1
  fi

  # Guard against typos/injection: the PR number is interpolated into
  # gh/git commands below, so it must be purely numeric.
  case "$PR_NUMBER" in
    ''|*[!0-9]*)
      echo "Error: PR_NUMBER must be a positive integer (got: '$PR_NUMBER')"
      exit 1
      ;;
  esac

  # Set repository if not already set
  if [ -z "$REPOSITORY" ]; then
    REPOSITORY="Comfy-Org/ComfyUI_frontend"
  fi

  # Check PR exists and is open
  PR_STATE=$(gh pr view "$PR_NUMBER" --repo "$REPOSITORY" --json state -q .state 2>/dev/null || echo "NOT_FOUND")
  if [ "$PR_STATE" = "NOT_FOUND" ]; then
    echo "Error: PR #$PR_NUMBER not found in $REPOSITORY"
    exit 1
  elif [ "$PR_STATE" != "OPEN" ]; then
    echo "Error: PR #$PR_NUMBER is not open (state: $PR_STATE)"
    exit 1
  fi

  # Check API rate limits. Normalize a non-numeric/empty response to the
  # optimistic default so the -lt comparisons below cannot error out.
  RATE_REMAINING=$(gh api /rate_limit --jq '.rate.remaining' 2>/dev/null || echo "5000")
  case "$RATE_REMAINING" in ''|*[!0-9]*) RATE_REMAINING=5000 ;; esac
  if [ "$RATE_REMAINING" -lt 100 ]; then
    echo "Warning: Low API rate limit: $RATE_REMAINING remaining"
    if [ "$RATE_REMAINING" -lt 50 ]; then
      echo "Error: Insufficient API rate limit for comprehensive review"
      exit 1
    fi
  fi

  echo "Pre-flight checks passed"
}
|
||||
|
||||
# Run pre-flight checks
check_prerequisites

echo "Starting comprehensive review of PR #$PR_NUMBER"

# Fetch PR information with error handling
echo "Fetching PR information..."
if ! gh pr view "$PR_NUMBER" --repo "$REPOSITORY" --json files,title,body,additions,deletions,baseRefName,headRefName > pr_info.json; then
  echo "Error: Failed to fetch PR information"
  exit 1
fi

# Extract branch names
BASE_BRANCH=$(jq -r '.baseRefName' < pr_info.json)
HEAD_BRANCH=$(jq -r '.headRefName' < pr_info.json)

# Checkout PR branch locally for better file inspection
echo "Checking out PR branch..."
git fetch origin "pull/$PR_NUMBER/head:pr-$PR_NUMBER"
git checkout "pr-$PR_NUMBER"

# Get changed files using git locally (much faster)
git diff --name-only "origin/$BASE_BRANCH" > changed_files.txt

# Get the diff using git locally
git diff "origin/$BASE_BRANCH" > pr_diff.txt

# Get detailed file changes with line numbers
git diff --name-status "origin/$BASE_BRANCH" > file_changes.txt

# For API compatibility, create a simplified pr_files.json
echo '[]' > pr_files.json
while IFS=$'\t' read -r status file; do
  if [[ "$status" != "D" ]]; then # Skip deleted files
    # Raw patch text for this file. jq's --arg performs the JSON string
    # encoding itself; the original piped the diff through `jq -Rs .`
    # first AND passed it via --arg, double-encoding every patch.
    patch=$(git diff "origin/$BASE_BRANCH" -- "$file")
    # Single numstat invocation per file (fields: added, deleted, path)
    # instead of two separate git diff calls.
    read -r additions deletions _ < <(git diff --numstat "origin/$BASE_BRANCH" -- "$file")
    # Binary files report "-" for both counts; normalize so tonumber
    # below cannot fail.
    [[ "$additions" == "-" || -z "$additions" ]] && additions=0
    [[ "$deletions" == "-" || -z "$deletions" ]] && deletions=0

    jq --arg file "$file" \
       --arg patch "$patch" \
       --arg additions "$additions" \
       --arg deletions "$deletions" \
       '. += [{
         "filename": $file,
         "patch": $patch,
         "additions": ($additions | tonumber),
         "deletions": ($deletions | tonumber)
       }]' pr_files.json > pr_files.json.tmp
    mv pr_files.json.tmp pr_files.json
  fi
done < file_changes.txt
|
||||
|
||||
# Directory for per-file analysis results cached between runs
CACHE_DIR=".claude-review-cache"
mkdir -p "$CACHE_DIR"

# cache_analysis FILE RESULT
# Persist RESULT keyed by the git blob hash of FILE, so a cache entry is
# implicitly invalidated whenever the file's content changes.
cache_analysis() {
  local target=$1
  local result=$2
  local blob
  blob=$(git hash-object "$target" 2>/dev/null) || blob=""

  if [ -n "$blob" ]; then
    echo "$result" > "$CACHE_DIR/${blob}.cache"
  fi
}

# get_cached_analysis FILE
# Print the cached result for FILE and return 0; return 1 on a cache miss
# (or when the file cannot be hashed).
get_cached_analysis() {
  local target=$1
  local blob
  blob=$(git hash-object "$target" 2>/dev/null) || blob=""

  if [ -n "$blob" ] && [ -f "$CACHE_DIR/${blob}.cache" ]; then
    cat "$CACHE_DIR/${blob}.cache"
    return 0
  fi
  return 1
}

# Expire cache entries older than 7 days
find "$CACHE_DIR" -name "*.cache" -mtime +7 -delete 2>/dev/null || true
|
||||
```
|
||||
|
||||
## Phase 2: Load Comprehensive Knowledge Base
|
||||
|
||||
```bash
|
||||
# Defer creating the knowledge directory until we actually have content
KNOWLEDGE_FOUND=false

# Local cache for the knowledge base avoids re-downloading on every run
KNOWLEDGE_CACHE_DIR=".claude-knowledge-cache"
mkdir -p "$KNOWLEDGE_CACHE_DIR"

# Prefer a locally cloned prompt library when present (faster than the API)
PROMPT_LIBRARY_PATH="../comfy-claude-prompt-library"
if [ -d "$PROMPT_LIBRARY_PATH" ]; then
  USE_LOCAL_PROMPT_LIBRARY=true
  echo "Using local prompt library at $PROMPT_LIBRARY_PATH"
else
  USE_LOCAL_PROMPT_LIBRARY=false
  echo "No local prompt library found, will use GitHub API"
fi
|
||||
|
||||
# Create review_knowledge/ lazily, the first time any content is available.
# (Factored out: the original repeated this three-line block in every branch.)
ensure_knowledge_dir() {
  if [ "$KNOWLEDGE_FOUND" = "false" ]; then
    mkdir -p review_knowledge
    KNOWLEDGE_FOUND=true
  fi
}

# fetch_with_cache URL OUTPUT_FILE
# Copy URL's content to OUTPUT_FILE, serving from a day-fresh local cache
# when possible; on download failure fall back to a stale cache entry.
# Returns 1 only when neither the network nor the cache can supply the file.
fetch_with_cache() {
  local url=$1
  local output_file=$2
  local cache_file="$KNOWLEDGE_CACHE_DIR/$(echo "$url" | sed 's/[^a-zA-Z0-9]/_/g')"

  # Check if cached version exists and is less than 1 day old
  if [ -f "$cache_file" ] && [ "$(find "$cache_file" -mtime -1 2>/dev/null | wc -l)" -gt 0 ]; then
    ensure_knowledge_dir
    cp "$cache_file" "$output_file"
    echo "Using cached version of $(basename "$output_file")"
    return 0
  fi

  # Try to fetch a fresh version. Download into the cache directory (which
  # always exists) rather than next to $output_file: the original wrote
  # "$output_file.tmp" into review_knowledge/ before that directory was
  # created, so the first fresh fetch always failed on the redirection.
  local tmp_file="$cache_file.tmp"
  if curl -s -f "$url" > "$tmp_file"; then
    ensure_knowledge_dir
    mv "$tmp_file" "$cache_file"
    cp "$cache_file" "$output_file"
    echo "Downloaded fresh version of $(basename "$output_file")"
    return 0
  fi

  # Download failed: drop the partial file, then fall back to stale cache
  rm -f "$tmp_file"
  if [ -f "$cache_file" ]; then
    ensure_knowledge_dir
    cp "$cache_file" "$output_file"
    echo "Using stale cache for $(basename "$output_file") (download failed)"
    return 0
  fi
  echo "Failed to load $(basename "$output_file")"
  return 1
}
|
||||
|
||||
# Load REPOSITORY_GUIDE.md for deep architectural understanding.
# Prefer the local prompt-library clone; otherwise fetch via the network
# helper (which handles its own caching and directory creation).
echo "Loading ComfyUI Frontend repository guide..."
GUIDE_SRC="$PROMPT_LIBRARY_PATH/project-summaries-for-agents/ComfyUI_frontend/REPOSITORY_GUIDE.md"
if [ "$USE_LOCAL_PROMPT_LIBRARY" = "true" ] && [ -f "$GUIDE_SRC" ]; then
  if [ "$KNOWLEDGE_FOUND" = "false" ]; then
    mkdir -p review_knowledge
    KNOWLEDGE_FOUND=true
  fi
  cp "$GUIDE_SRC" "review_knowledge/repository_guide.md"
  echo "Loaded repository guide from local prompt library"
else
  fetch_with_cache "https://raw.githubusercontent.com/Comfy-Org/comfy-claude-prompt-library/master/project-summaries-for-agents/ComfyUI_frontend/REPOSITORY_GUIDE.md" "review_knowledge/repository_guide.md"
fi
|
||||
|
||||
# 3. Discover and load relevant knowledge folders from GitHub API
echo "Discovering available knowledge folders..."
KNOWLEDGE_API_URL="https://api.github.com/repos/Comfy-Org/comfy-claude-prompt-library/contents/.claude/knowledge"
if KNOWLEDGE_FOLDERS=$(curl -s "$KNOWLEDGE_API_URL" | jq -r '.[] | select(.type=="dir") | .name' 2>/dev/null); then
  echo "Available knowledge folders: $KNOWLEDGE_FOLDERS"

  # Analyze changed files to determine which knowledge folders might be relevant
  CHANGED_FILES=$(cat changed_files.txt)
  PR_TITLE=$(jq -r '.title' < pr_info.json)
  PR_BODY=$(jq -r '.body // ""' < pr_info.json)

  # For each knowledge folder, check if it might be relevant to the PR
  for folder in $KNOWLEDGE_FOLDERS; do
    # Simple heuristic: if folder name appears in changed file paths or PR context
    if echo "$CHANGED_FILES $PR_TITLE $PR_BODY" | grep -qi "$folder"; then
      echo "Loading knowledge folder: $folder"
      # Fetch all files in that knowledge folder
      FOLDER_API_URL="https://api.github.com/repos/Comfy-Org/comfy-claude-prompt-library/contents/.claude/knowledge/$folder"
      curl -s "$FOLDER_API_URL" | jq -r '.[] | select(.type=="file") | .download_url' 2>/dev/null | \
        while read -r url; do
          if [ -n "$url" ]; then
            # Name the local copy after the remote file. The original
            # destination ended in "$(unknown)" — a nonexistent command —
            # so the computed $filename was never used and every file in
            # a folder clobbered the same output path.
            filename=$(basename "$url")
            fetch_with_cache "$url" "review_knowledge/${folder}_${filename}"
          fi
        done
    fi
  done
else
  echo "Could not discover knowledge folders"
fi
|
||||
|
||||
# 4. Load validation rules from the repository
echo "Loading validation rules..."
VALIDATION_API_URL="https://api.github.com/repos/Comfy-Org/comfy-claude-prompt-library/contents/.claude/commands/validation"
if VALIDATION_FILES=$(curl -s "$VALIDATION_API_URL" | jq -r '.[] | select(.name | contains("frontend") or contains("security") or contains("performance")) | .download_url' 2>/dev/null); then
  for url in $VALIDATION_FILES; do
    if [ -n "$url" ]; then
      # Destination is named after the remote file. The original wrote to
      # "review_knowledge/validation_$(unknown)" — a nonexistent command —
      # so $filename was never used and all rule files overwrote one path.
      filename=$(basename "$url")
      fetch_with_cache "$url" "review_knowledge/validation_${filename}"
    fi
  done
else
  echo "Could not load validation rules"
fi
|
||||
|
||||
# 5. Load local project guidelines (repo root and .github variants),
# creating the knowledge directory lazily on first hit.
for pair in "CLAUDE.md:local_claude.md" ".github/CLAUDE.md:github_claude.md"; do
  src=${pair%%:*}
  dest=${pair#*:}
  if [ -f "$src" ]; then
    if [ "$KNOWLEDGE_FOUND" = "false" ]; then
      mkdir -p review_knowledge
      KNOWLEDGE_FOUND=true
    fi
    cp "$src" "review_knowledge/$dest"
  fi
done
|
||||
```
|
||||
|
||||
## Phase 3: Deep Analysis Instructions
|
||||
|
||||
Perform a comprehensive analysis covering these areas:
|
||||
|
||||
### 3.1 Architectural Analysis
|
||||
Based on the repository guide and project summary, evaluate:
|
||||
- Does this change align with the established architecture patterns?
|
||||
- Are domain boundaries respected?
|
||||
- Is the extension system used appropriately?
|
||||
- Are components properly organized by feature?
|
||||
- Does it follow the established service/composable/store patterns?
|
||||
|
||||
### 3.2 Code Quality Beyond Linting
|
||||
- Cyclomatic complexity and cognitive load
|
||||
- SOLID principles adherence
|
||||
- DRY violations that aren't caught by simple duplication checks
|
||||
- Proper abstraction levels
|
||||
- Interface design and API clarity
|
||||
- No leftover debug code (console.log, commented code, TODO comments)
|
||||
|
||||
### 3.3 Library Usage Enforcement
|
||||
CRITICAL: Never re-implement functionality that exists in our standard libraries:
|
||||
- **Tailwind CSS**: Use utility classes instead of custom CSS or style attributes
|
||||
- **PrimeVue**: Never re-implement components that exist in PrimeVue (buttons, modals, dropdowns, etc.)
|
||||
- **VueUse**: Never re-implement composables that exist in VueUse (useLocalStorage, useDebounceFn, etc.)
|
||||
- **Lodash**: Never re-implement utility functions (debounce, throttle, cloneDeep, etc.)
|
||||
- **Common components**: Reuse components from src/components/common/
|
||||
- **DOMPurify**: Always use for HTML sanitization
|
||||
- **Fuse.js**: Use for fuzzy search functionality
|
||||
- **Marked**: Use for markdown parsing
|
||||
- **Pinia**: Use for global state management, not custom solutions
|
||||
- **Zod**: Use for form validation with zodResolver pattern
|
||||
- **Tiptap**: Use for rich text/markdown editing
|
||||
- **Xterm.js**: Use for terminal emulation
|
||||
- **Axios**: Use for HTTP client initialization
|
||||
|
||||
### 3.4 Security Deep Dive
|
||||
Beyond obvious vulnerabilities:
|
||||
- Authentication/authorization implications
|
||||
- Data validation completeness
|
||||
- State management security
|
||||
- Cross-origin concerns
|
||||
- Extension security boundaries
|
||||
|
||||
### 3.5 Performance Analysis
|
||||
- Render performance implications
|
||||
- Layout thrashing prevention
|
||||
- Memory leak potential
|
||||
- Network request optimization
|
||||
- State management efficiency
|
||||
|
||||
### 3.6 Integration Concerns
|
||||
- Breaking changes to internal APIs
|
||||
- Extension compatibility
|
||||
- Backward compatibility
|
||||
- Migration requirements
|
||||
|
||||
## Phase 4: Create Detailed Review Comments
|
||||
|
||||
CRITICAL: Keep comments extremely concise and effective. Use only as many words as absolutely necessary.
|
||||
- NO markdown formatting (no #, ##, ###, **, etc.)
|
||||
- NO emojis
|
||||
- Get to the point immediately
|
||||
- Burden the reader as little as possible
|
||||
|
||||
For each issue found, create a concise inline comment with:
|
||||
1. What's wrong (one line)
|
||||
2. Why it matters (one line)
|
||||
3. How to fix it (one line)
|
||||
4. Code example only if essential
|
||||
|
||||
```bash
|
||||
# Helper function for inline review comments. NOTE: this intentionally
# replaces the Phase 0 definition. It keeps the same counter side effects
# (the original redefinition dropped them, leaving the Phase 7/8 totals
# stuck at zero) and posts a true inline comment at path:line.
post_review_comment() {
  local file_path=$1
  local line_number=$2
  local severity=$3    # critical/high/medium/low
  local category=$4    # architecture/security/performance/quality
  local issue=$5
  local context=$6
  local suggestion=$7
  local example=$8

  # Keep the severity/category tallies in sync with the Phase 0 variant.
  case $severity in
    "critical") CRITICAL_COUNT=$((CRITICAL_COUNT + 1)) ;;
    "high")     HIGH_COUNT=$((HIGH_COUNT + 1)) ;;
    "medium")   MEDIUM_COUNT=$((MEDIUM_COUNT + 1)) ;;
    "low")      LOW_COUNT=$((LOW_COUNT + 1)) ;;
  esac
  case $category in
    "architecture") ARCHITECTURE_ISSUES=$((ARCHITECTURE_ISSUES + 1)) ;;
    "security")     SECURITY_ISSUES=$((SECURITY_ISSUES + 1)) ;;
    "performance")  PERFORMANCE_ISSUES=$((PERFORMANCE_ISSUES + 1)) ;;
    "quality")      QUALITY_ISSUES=$((QUALITY_ISSUES + 1)) ;;
  esac

  # Plain-text body: the Phase 4 guidelines above explicitly forbid
  # markdown headers, bold, and emojis in comments, so the original
  # "### [...] **Issue**:" template is replaced with unformatted lines.
  local body="[$category/$severity] $issue
$context
$suggestion"

  if [ -n "$example" ]; then
    body="$body

Example:
\`\`\`typescript
$example
\`\`\`"
  fi

  body="$body

Related patterns: https://github.com/Comfy-Org/comfy-claude-prompt-library/blob/master/project-summaries-for-agents/ComfyUI_frontend/REPOSITORY_GUIDE.md"

  # Inline comments require a commit SHA; the original referenced
  # COMMIT_SHA without it ever being set. Derive it from the checked-out
  # PR head unless the caller exported one.
  COMMIT_SHA=${COMMIT_SHA:-$(git rev-parse HEAD)}

  # -F (typed field) is used for line so gh sends a JSON integer; the
  # REST endpoint rejects a string value for this parameter.
  gh api -X POST "/repos/$REPOSITORY/pulls/$PR_NUMBER/comments" \
    -f path="$file_path" \
    -F line="$line_number" \
    -f body="$body" \
    -f commit_id="$COMMIT_SHA" \
    -f side='RIGHT' || echo "Failed to post comment at $file_path:$line_number"
}
|
||||
```
|
||||
|
||||
## Phase 5: Validation Rules Application
|
||||
|
||||
Apply ALL validation rules from the loaded knowledge, but focus on the changed lines:
|
||||
|
||||
### From Frontend Standards
|
||||
- Vue 3 Composition API patterns
|
||||
- Component communication patterns
|
||||
- Proper use of composables
|
||||
- TypeScript strict mode compliance
|
||||
- Bundle optimization
|
||||
|
||||
### From Security Audit
|
||||
- Input validation
|
||||
- XSS prevention
|
||||
- CSRF protection
|
||||
- Secure state management
|
||||
- API security
|
||||
|
||||
### From Performance Check
|
||||
- Render optimization
|
||||
- Memory management
|
||||
- Network efficiency
|
||||
- Bundle size impact
|
||||
|
||||
## Phase 6: Contextual Review Based on PR Type
|
||||
|
||||
Analyze the PR description and changes to determine the type:
|
||||
|
||||
```bash
|
||||
# Extract PR metadata with error handling
if [ ! -f pr_info.json ]; then
  echo "Error: pr_info.json not found"
  exit 1
fi

PR_TITLE=$(jq -r '.title // "Unknown"' < pr_info.json)
PR_BODY=$(jq -r '.body // ""' < pr_info.json)
FILE_COUNT=$(wc -l < changed_files.txt)
ADDITIONS=$(jq -r '.additions // 0' < pr_info.json)
DELETIONS=$(jq -r '.deletions // 0' < pr_info.json)

# Classify the PR by matching keywords (case-insensitive) against its
# title and body, then apply type-specific review criteria.
pr_matches() {
  printf '%s %s' "$PR_TITLE" "$PR_BODY" | grep -qiE "$1"
}

if pr_matches "(feature|feat)"; then
  echo "Detected feature PR - applying feature review criteria"
  # Check for tests, documentation, backward compatibility
elif pr_matches "(fix|bug)"; then
  echo "Detected bug fix - checking root cause and regression tests"
  # Verify fix addresses root cause, includes tests
elif pr_matches "(refactor)"; then
  echo "Detected refactoring - ensuring behavior preservation"
  # Check that tests still pass, no behavior changes
fi
|
||||
```
|
||||
|
||||
## Phase 7: Generate Comprehensive Summary
|
||||
|
||||
After all inline comments, create a detailed summary:
|
||||
|
||||
```bash
|
||||
# Initialize metrics tracking.
# NOTE(review): this timestamp is captured just before the summary is
# posted, so Phase 8's REVIEW_DURATION only measures the summary/metrics
# steps, not the review itself. Capture it in Phase 0 if the full
# wall-clock duration is wanted.
REVIEW_START_TIME=$(date +%s)

# Create the comprehensive summary. PR number is quoted and --repo is
# passed explicitly, matching every other gh invocation in this command.
gh pr review "$PR_NUMBER" --repo "$REPOSITORY" --comment --body "# Comprehensive PR Review

This review is generated by Claude. It may not always be accurate, as with human reviewers. If you believe that any of the comments are invalid or incorrect, please state why for each. For others, please implement the changes in one way or another.

## Review Summary

**PR**: $PR_TITLE (#$PR_NUMBER)
**Impact**: $ADDITIONS additions, $DELETIONS deletions across $FILE_COUNT files

### Issue Distribution
- Critical: $CRITICAL_COUNT
- High: $HIGH_COUNT
- Medium: $MEDIUM_COUNT
- Low: $LOW_COUNT

### Category Breakdown
- Architecture: $ARCHITECTURE_ISSUES issues
- Security: $SECURITY_ISSUES issues
- Performance: $PERFORMANCE_ISSUES issues
- Code Quality: $QUALITY_ISSUES issues

## Key Findings

### Architecture & Design
[Detailed architectural analysis based on repository patterns]

### Security Considerations
[Security implications beyond basic vulnerabilities]

### Performance Impact
[Performance analysis including bundle size, render impact]

### Integration Points
[How this affects other systems, extensions, etc.]

## Positive Observations
[What was done well, good patterns followed]

## References
- [Repository Architecture Guide](https://github.com/Comfy-Org/comfy-claude-prompt-library/blob/master/project-summaries-for-agents/ComfyUI_frontend/REPOSITORY_GUIDE.md)
- [Frontend Standards](https://github.com/Comfy-Org/comfy-claude-prompt-library/blob/master/.claude/commands/validation/frontend-code-standards.md)
- [Security Guidelines](https://github.com/Comfy-Org/comfy-claude-prompt-library/blob/master/.claude/commands/validation/security-audit.md)

## Next Steps
1. Address critical issues before merge
2. Consider architectural feedback for long-term maintainability
3. Add tests for uncovered scenarios
4. Update documentation if needed

---
*This is a comprehensive automated review. For architectural decisions requiring human judgment, please request additional manual review.*"
|
||||
```
|
||||
|
||||
## Important: Think Deeply
|
||||
|
||||
When reviewing:
|
||||
1. **Think hard** about architectural implications
|
||||
2. Consider how changes affect the entire system
|
||||
3. Look for subtle bugs and edge cases
|
||||
4. Evaluate maintainability over time
|
||||
5. Consider extension developer experience
|
||||
6. Think about migration paths
|
||||
|
||||
This is a COMPREHENSIVE review, not a linting pass. Provide the same quality feedback a senior engineer would give after careful consideration.
|
||||
|
||||
## Phase 8: Track Review Metrics
|
||||
|
||||
After completing the review, save metrics for analysis:
|
||||
|
||||
```bash
|
||||
# Calculate review duration
REVIEW_END_TIME=$(date +%s)
REVIEW_DURATION=$((REVIEW_END_TIME - REVIEW_START_TIME))

# Calculate total issues
TOTAL_ISSUES=$((CRITICAL_COUNT + HIGH_COUNT + MEDIUM_COUNT + LOW_COUNT))

# Create metrics directory if it doesn't exist
METRICS_DIR=".claude/review-metrics"
mkdir -p "$METRICS_DIR"

# Monthly metrics file, e.g. metrics-202407.json
METRICS_FILE="$METRICS_DIR/metrics-$(date +%Y%m).json"

# Build this review's metrics entry exactly once. The original duplicated
# the same ~40-line jq program for the create and append branches, which
# is a maintenance hazard (the two copies would inevitably drift).
ENTRY=$(jq -n \
  --arg pr "$PR_NUMBER" \
  --arg title "$PR_TITLE" \
  --arg timestamp "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
  --arg duration "$REVIEW_DURATION" \
  --arg files "$FILE_COUNT" \
  --arg additions "$ADDITIONS" \
  --arg deletions "$DELETIONS" \
  --arg total "$TOTAL_ISSUES" \
  --arg critical "$CRITICAL_COUNT" \
  --arg high "$HIGH_COUNT" \
  --arg medium "$MEDIUM_COUNT" \
  --arg low "$LOW_COUNT" \
  --arg architecture "$ARCHITECTURE_ISSUES" \
  --arg security "$SECURITY_ISSUES" \
  --arg performance "$PERFORMANCE_ISSUES" \
  --arg quality "$QUALITY_ISSUES" \
  '{
    pr_number: $pr,
    pr_title: $title,
    timestamp: $timestamp,
    review_duration_seconds: ($duration | tonumber),
    files_reviewed: ($files | tonumber),
    lines_added: ($additions | tonumber),
    lines_deleted: ($deletions | tonumber),
    issues: {
      total: ($total | tonumber),
      by_severity: {
        critical: ($critical | tonumber),
        high: ($high | tonumber),
        medium: ($medium | tonumber),
        low: ($low | tonumber)
      },
      by_category: {
        architecture: ($architecture | tonumber),
        security: ($security | tonumber),
        performance: ($performance | tonumber),
        quality: ($quality | tonumber)
      }
    }
  }')

# Seed an empty array when the monthly file is missing, then append the
# entry; write via a temp file so a failed jq run cannot truncate history.
if [ ! -f "$METRICS_FILE" ]; then
  echo '[]' > "$METRICS_FILE"
fi
jq --argjson entry "$ENTRY" '. + [$entry]' "$METRICS_FILE" > "$METRICS_FILE.tmp"
mv "$METRICS_FILE.tmp" "$METRICS_FILE"

echo "Review metrics saved to $METRICS_FILE"
|
||||
```
|
||||
|
||||
This creates monthly metrics files (e.g., `metrics-202407.json`) that track:
|
||||
- Which PRs were reviewed
|
||||
- How long reviews took
|
||||
- Types and severity of issues found
|
||||
- Trends over time
|
||||
|
||||
You can later analyze these to see patterns and improve your development process.
|
||||
625
.claude/commands/create-frontend-release.md
Normal file
@@ -0,0 +1,625 @@
|
||||
# Create Frontend Release
|
||||
|
||||
This command guides you through creating a comprehensive frontend release with semantic versioning analysis, automated change detection, security scanning, and multi-stage human verification.
|
||||
|
||||
<task>
|
||||
Create a frontend release with version type: $ARGUMENTS
|
||||
|
||||
Expected format: Version increment type and optional description
|
||||
Examples:
|
||||
- `patch` - Bug fixes only
|
||||
- `minor` - New features, backward compatible
|
||||
- `major` - Breaking changes
|
||||
- `prerelease` - Alpha/beta/rc releases
|
||||
- `patch "Critical security fixes"` - With custom description
|
||||
- `minor --skip-changelog` - Skip automated changelog generation
|
||||
- `minor --dry-run` - Simulate release without executing
|
||||
|
||||
If no arguments are provided, the command defaults to a prerelease bump when the current version is already a prerelease, and a patch bump otherwise. This command will never perform minor or major releases without explicit direction.
|
||||
</task>
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before starting, ensure:
|
||||
- You have push access to the repository
|
||||
- GitHub CLI (`gh`) is authenticated
|
||||
- You're on a clean main branch working tree
|
||||
- All intended changes are merged to main
|
||||
- You understand the scope of changes being released
|
||||
|
||||
## Critical Checks Before Starting
|
||||
|
||||
### 1. Check Current Version Status
|
||||
```bash
|
||||
# Get current version and check if it's a pre-release
|
||||
CURRENT_VERSION=$(node -p "require('./package.json').version")
|
||||
if [[ "$CURRENT_VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+- ]]; then
|
||||
echo "⚠️ Current version $CURRENT_VERSION is a pre-release"
|
||||
echo "Consider releasing stable (e.g., 1.24.0-1 → 1.24.0) first"
|
||||
fi
|
||||
```
|
||||
|
||||
### 2. Find Last Stable Release
|
||||
```bash
|
||||
# Get last stable release tag (no pre-release suffix)
|
||||
LAST_STABLE=$(git tag -l "v*" | grep -v "\-" | sort -V | tail -1)
|
||||
echo "Last stable release: $LAST_STABLE"
|
||||
```
|
||||
|
||||
## Configuration Options
|
||||
|
||||
**Environment Variables:**
|
||||
- `RELEASE_SKIP_SECURITY_SCAN=true` - Skip security audit
|
||||
- `RELEASE_AUTO_APPROVE=true` - Skip some confirmation prompts
|
||||
- `RELEASE_DRY_RUN=true` - Simulate release without executing
|
||||
|
||||
## Release Process
|
||||
|
||||
### Step 1: Environment Safety Check
|
||||
|
||||
1. Verify clean working directory:
|
||||
```bash
|
||||
git status --porcelain
|
||||
```
|
||||
2. Confirm on main branch:
|
||||
```bash
|
||||
git branch --show-current
|
||||
```
|
||||
3. Pull latest changes:
|
||||
```bash
|
||||
git pull origin main
|
||||
```
|
||||
4. Check GitHub CLI authentication:
|
||||
```bash
|
||||
gh auth status
|
||||
```
|
||||
5. Verify npm/PyPI publishing access (dry run)
|
||||
6. **CONFIRMATION REQUIRED**: Environment ready for release?
|
||||
|
||||
### Step 2: Analyze Recent Changes
|
||||
|
||||
1. Get current version from package.json
|
||||
2. **IMPORTANT**: Determine correct base for comparison:
|
||||
```bash
|
||||
# If current version is pre-release, use last stable release
|
||||
if [[ "$CURRENT_VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+- ]]; then
|
||||
BASE_TAG=$LAST_STABLE
|
||||
else
|
||||
BASE_TAG=$(git describe --tags --abbrev=0)
|
||||
fi
|
||||
```
|
||||
3. Find commits since base release (CRITICAL: use --first-parent):
|
||||
```bash
|
||||
git log ${BASE_TAG}..HEAD --oneline --no-merges --first-parent
|
||||
```
|
||||
4. Count total commits:
|
||||
```bash
|
||||
COMMIT_COUNT=$(git log ${BASE_TAG}..HEAD --oneline --no-merges --first-parent | wc -l)
|
||||
echo "Found $COMMIT_COUNT commits since $BASE_TAG"
|
||||
```
|
||||
5. Analyze commits for:
|
||||
   - Breaking changes (`BREAKING CHANGE:` footer, or `!` after the type, e.g. `feat!:`)
|
||||
- New features (feat:, feature:)
|
||||
- Bug fixes (fix:, bugfix:)
|
||||
- Documentation changes (docs:)
|
||||
- Dependency updates
|
||||
6. **VERIFY PR TARGET BRANCHES**:
|
||||
```bash
|
||||
# Get merged PRs and verify they were merged to main
|
||||
gh pr list --state merged --limit 50 --json number,title,baseRefName,mergedAt | \
|
||||
jq -r '.[] | select(.baseRefName == "main") | "\(.number): \(.title)"'
|
||||
```
|
||||
7. **HUMAN ANALYSIS**: Review change summary and verify scope
|
||||
|
||||
### Step 3: Version Preview
|
||||
|
||||
**Version Preview:**
|
||||
- Current: `${CURRENT_VERSION}`
|
||||
- Proposed: Show exact version number
|
||||
- **CONFIRMATION REQUIRED**: Proceed with version `X.Y.Z`?
|
||||
|
||||
### Step 4: Security and Dependency Audit
|
||||
|
||||
1. Run security audit:
|
||||
```bash
|
||||
npm audit --audit-level moderate
|
||||
```
|
||||
2. Check for known vulnerabilities in dependencies
|
||||
3. Scan for hardcoded secrets or credentials:
|
||||
```bash
|
||||
git log -p ${BASE_TAG}..HEAD | grep -iE "(password|key|secret|token)" || echo "No sensitive data found"
|
||||
```
|
||||
4. Verify no sensitive data in recent commits
|
||||
5. **SECURITY REVIEW**: Address any critical findings before proceeding?
|
||||
|
||||
### Step 5: Pre-Release Testing
|
||||
|
||||
1. Run complete test suite:
|
||||
```bash
|
||||
npm run test:unit
|
||||
npm run test:component
|
||||
npm run test:browser
|
||||
```
|
||||
2. Run type checking:
|
||||
```bash
|
||||
npm run typecheck
|
||||
```
|
||||
3. Run linting (may have issues with missing packages):
|
||||
```bash
|
||||
npm run lint || echo "Lint issues - verify if critical"
|
||||
```
|
||||
4. Test build process:
|
||||
```bash
|
||||
npm run build
|
||||
npm run build:types
|
||||
```
|
||||
5. **QUALITY GATE**: All tests and builds passing?
|
||||
|
||||
### Step 6: Breaking Change Analysis
|
||||
|
||||
1. Analyze API changes in:
|
||||
- Public TypeScript interfaces
|
||||
- Extension APIs
|
||||
- Component props
|
||||
- CLAUDE.md guidelines
|
||||
2. Check for:
|
||||
- Removed public functions/classes
|
||||
- Changed function signatures
|
||||
- Deprecated feature removals
|
||||
- Configuration changes
|
||||
3. Generate breaking change summary
|
||||
4. **COMPATIBILITY REVIEW**: Breaking changes documented and justified?
|
||||
|
||||
### Step 7: Generate and Save Changelog
|
||||
|
||||
1. Extract commit messages since base release:
|
||||
```bash
|
||||
git log ${BASE_TAG}..HEAD --oneline --no-merges --first-parent > commits.txt
|
||||
```
|
||||
2. **CRITICAL**: Verify PR inclusion by checking merge location:
|
||||
```bash
|
||||
# For each significant PR mentioned, verify it's on main
|
||||
for PR in ${SIGNIFICANT_PRS}; do
|
||||
COMMIT=$(gh pr view $PR --json mergeCommit -q .mergeCommit.oid)
|
||||
git branch -r --contains $COMMIT | grep -q "origin/main" || \
|
||||
echo "WARNING: PR #$PR not on main branch!"
|
||||
done
|
||||
```
|
||||
3. Group by type:
|
||||
- 🚀 **Features** (feat:)
|
||||
- 🐛 **Bug Fixes** (fix:)
|
||||
- 💥 **Breaking Changes** (BREAKING CHANGE)
|
||||
- 📚 **Documentation** (docs:)
|
||||
- 🔧 **Maintenance** (chore:, refactor:)
|
||||
- ⬆️ **Dependencies** (deps:, dependency updates)
|
||||
4. Include PR numbers and links
|
||||
5. Add issue references (Fixes #123)
|
||||
6. **Save changelog locally:**
|
||||
```bash
|
||||
# Save to dated file for history
|
||||
echo "$CHANGELOG" > release-notes-${NEW_VERSION}-$(date +%Y%m%d).md
|
||||
|
||||
# Save to current for easy access
|
||||
echo "$CHANGELOG" > CURRENT_RELEASE_NOTES.md
|
||||
```
|
||||
7. **CHANGELOG REVIEW**: Verify all PRs listed are actually on main branch
|
||||
|
||||
### Step 8: Create Enhanced Release Notes
|
||||
|
||||
1. Create comprehensive user-facing release notes including:
|
||||
- **What's New**: Major features and improvements
|
||||
- **Bug Fixes**: User-visible fixes
|
||||
- **Breaking Changes**: Migration guide if applicable
|
||||
- **Dependencies**: Major dependency updates
|
||||
- **Performance**: Notable performance improvements
|
||||
- **Contributors**: Thank contributors for their work
|
||||
2. Reference related documentation updates
|
||||
3. Include screenshots for UI changes (if available)
|
||||
4. **Save release notes:**
|
||||
```bash
|
||||
# Enhanced release notes for GitHub
|
||||
echo "$RELEASE_NOTES" > github-release-notes-${NEW_VERSION}.md
|
||||
```
|
||||
5. **CONTENT REVIEW**: Release notes clear and helpful for users?
|
||||
|
||||
### Step 9: Create Version Bump PR
|
||||
|
||||
**For standard version bumps (patch/minor/major):**
|
||||
```bash
|
||||
# Trigger the workflow
|
||||
gh workflow run version-bump.yaml -f version_type=${VERSION_TYPE}
|
||||
|
||||
# Workflow runs quickly - usually creates PR within 30 seconds
|
||||
echo "Workflow triggered. Waiting for PR creation..."
|
||||
```
|
||||
|
||||
**For releasing a stable version:**
|
||||
1. Must manually create branch and update version:
|
||||
```bash
|
||||
git checkout -b version-bump-${NEW_VERSION}
|
||||
# Edit package.json to remove pre-release suffix
|
||||
git add package.json
|
||||
git commit -m "${NEW_VERSION}"
|
||||
git push origin version-bump-${NEW_VERSION}
|
||||
```
|
||||
|
||||
2. Wait for PR creation (if using workflow) or create manually:
|
||||
```bash
|
||||
# For workflow-created PRs - wait and find it
|
||||
sleep 30
|
||||
# Look for PR from comfy-pr-bot (not github-actions)
|
||||
PR_NUMBER=$(gh pr list --author comfy-pr-bot --limit 1 --json number --jq '.[0].number')
|
||||
|
||||
# Verify we got the PR
|
||||
if [ -z "$PR_NUMBER" ]; then
|
||||
echo "PR not found yet. Checking recent PRs..."
|
||||
gh pr list --limit 5 --json number,title,author
|
||||
fi
|
||||
|
||||
# For manual PRs
|
||||
gh pr create --title "${NEW_VERSION}" \
|
||||
--body-file enhanced-pr-description.md \
|
||||
--label "Release"
|
||||
```
|
||||
3. **Create enhanced PR description:**
|
||||
```bash
|
||||
cat > enhanced-pr-description.md << EOF
|
||||
# Release v${NEW_VERSION}
|
||||
|
||||
## Version Change
|
||||
\`${CURRENT_VERSION}\` → \`${NEW_VERSION}\` (${VERSION_TYPE})
|
||||
|
||||
## Changelog
|
||||
${CHANGELOG}
|
||||
|
||||
## Breaking Changes
|
||||
${BREAKING_CHANGES}
|
||||
|
||||
## Testing Performed
|
||||
- ✅ Full test suite (unit, component, browser)
|
||||
- ✅ TypeScript compilation
|
||||
- ✅ Linting checks
|
||||
- ✅ Build verification
|
||||
- ✅ Security audit
|
||||
|
||||
## Distribution Channels
|
||||
- GitHub Release (with dist.zip)
|
||||
- PyPI Package (comfyui-frontend-package)
|
||||
- npm Package (@comfyorg/comfyui-frontend-types)
|
||||
|
||||
## Post-Release Tasks
|
||||
- [ ] Verify all distribution channels
|
||||
- [ ] Update external documentation
|
||||
- [ ] Monitor for issues
|
||||
EOF
|
||||
```
|
||||
4. Update PR with enhanced description:
|
||||
```bash
|
||||
gh pr edit ${PR_NUMBER} --body-file enhanced-pr-description.md
|
||||
```
|
||||
5. Add changelog as comment for easy reference:
|
||||
```bash
|
||||
gh pr comment ${PR_NUMBER} --body-file CURRENT_RELEASE_NOTES.md
|
||||
```
|
||||
6. **PR REVIEW**: Version bump PR created and enhanced correctly?
|
||||
|
||||
### Step 11: Critical Release PR Verification
|
||||
|
||||
1. **CRITICAL**: Verify PR has "Release" label:
|
||||
```bash
|
||||
gh pr view ${PR_NUMBER} --json labels | jq -r '.labels[].name' | grep -q "Release" || \
|
||||
echo "ERROR: Release label missing! Add it immediately!"
|
||||
```
|
||||
2. Check for update-locales commits:
|
||||
```bash
|
||||
# WARNING: update-locales may add [skip ci] which blocks release workflow!
|
||||
gh pr view ${PR_NUMBER} --json commits | grep -q "skip ci" && \
|
||||
echo "WARNING: [skip ci] detected - release workflow may not trigger!"
|
||||
```
|
||||
3. Verify version number in package.json
|
||||
4. Review all changed files
|
||||
5. Ensure no unintended changes included
|
||||
6. Wait for required PR checks:
|
||||
```bash
|
||||
gh pr checks ${PR_NUMBER} --watch
|
||||
```
|
||||
7. **FINAL CODE REVIEW**: Release label present and no [skip ci]?
|
||||
|
||||
### Step 12: Pre-Merge Validation
|
||||
|
||||
1. **Review Requirements**: Release PRs require approval
|
||||
2. Monitor CI checks - watch for update-locales
|
||||
3. **CRITICAL WARNING**: If update-locales adds [skip ci], the release workflow won't trigger!
|
||||
4. Check no new commits to main since PR creation
|
||||
5. **DEPLOYMENT READINESS**: Ready to merge?
|
||||
|
||||
### Step 13: Execute Release
|
||||
|
||||
1. **FINAL CONFIRMATION**: Merge PR to trigger release?
|
||||
2. Merge the Release PR:
|
||||
```bash
|
||||
gh pr merge ${PR_NUMBER} --merge
|
||||
```
|
||||
3. **IMMEDIATELY CHECK**: Did release workflow trigger?
|
||||
```bash
|
||||
sleep 10
|
||||
gh run list --workflow=release.yaml --limit=1
|
||||
```
|
||||
4. If workflow didn't trigger due to [skip ci]:
|
||||
```bash
|
||||
echo "ERROR: Release workflow didn't trigger!"
|
||||
echo "Options:"
|
||||
echo "1. Create patch release (e.g., 1.24.1) to trigger workflow"
|
||||
echo "2. Investigate manual release options"
|
||||
```
|
||||
5. If workflow triggered, monitor execution:
|
||||
```bash
|
||||
WORKFLOW_RUN_ID=$(gh run list --workflow=release.yaml --limit=1 --json databaseId --jq '.[0].databaseId')
|
||||
gh run watch ${WORKFLOW_RUN_ID}
|
||||
```
|
||||
|
||||
### Step 14: Enhance GitHub Release
|
||||
|
||||
1. Wait for automatic release creation:
|
||||
```bash
|
||||
# Wait for release to be created
|
||||
while ! gh release view v${NEW_VERSION} >/dev/null 2>&1; do
|
||||
echo "Waiting for release creation..."
|
||||
sleep 10
|
||||
done
|
||||
```
|
||||
|
||||
2. **Enhance the GitHub release:**
|
||||
```bash
|
||||
# Update release with our enhanced notes
|
||||
gh release edit v${NEW_VERSION} \
|
||||
--title "🚀 ComfyUI Frontend v${NEW_VERSION}" \
|
||||
--notes-file github-release-notes-${NEW_VERSION}.md \
|
||||
--latest
|
||||
|
||||
# Add any additional assets if needed
|
||||
# gh release upload v${NEW_VERSION} additional-assets.zip
|
||||
```
|
||||
|
||||
3. **Verify release details:**
|
||||
```bash
|
||||
gh release view v${NEW_VERSION}
|
||||
```
|
||||
|
||||
### Step 15: Verify Multi-Channel Distribution
|
||||
|
||||
1. **GitHub Release:**
|
||||
```bash
|
||||
gh release view v${NEW_VERSION} --json assets,body,createdAt,tagName
|
||||
```
|
||||
- ✅ Check release notes
|
||||
- ✅ Verify dist.zip attachment
|
||||
- ✅ Confirm release marked as latest (for main branch)
|
||||
|
||||
2. **PyPI Package:**
|
||||
```bash
|
||||
# Check PyPI availability (may take a few minutes)
|
||||
for i in {1..10}; do
|
||||
if curl -s https://pypi.org/pypi/comfyui-frontend-package/json | jq -r '.releases | keys[]' | grep -q ${NEW_VERSION}; then
|
||||
echo "✅ PyPI package available"
|
||||
break
|
||||
fi
|
||||
echo "⏳ Waiting for PyPI package... (attempt $i/10)"
|
||||
sleep 30
|
||||
done
|
||||
```
|
||||
|
||||
3. **npm Package:**
|
||||
```bash
|
||||
# Check npm availability
|
||||
for i in {1..10}; do
|
||||
if npm view @comfyorg/comfyui-frontend-types@${NEW_VERSION} version >/dev/null 2>&1; then
|
||||
echo "✅ npm package available"
|
||||
break
|
||||
fi
|
||||
echo "⏳ Waiting for npm package... (attempt $i/10)"
|
||||
sleep 30
|
||||
done
|
||||
```
|
||||
|
||||
4. **DISTRIBUTION VERIFICATION**: All channels published successfully?
|
||||
|
||||
### Step 16: Post-Release Monitoring Setup
|
||||
|
||||
1. **Monitor immediate release health:**
|
||||
```bash
|
||||
# Check for immediate issues
|
||||
gh issue list --label "bug" --state open --limit 5 --json title,number,createdAt
|
||||
|
||||
# Monitor download metrics (if accessible)
|
||||
gh release view v${NEW_VERSION} --json assets --jq '.assets[].downloadCount'
|
||||
```
|
||||
|
||||
2. **Update documentation tracking:**
|
||||
```bash
|
||||
cat > post-release-checklist.md << EOF
|
||||
# Post-Release Checklist for v${NEW_VERSION}
|
||||
|
||||
## Immediate Tasks (Next 24 hours)
|
||||
- [ ] Monitor error rates and user feedback
|
||||
- [ ] Watch for critical issues
|
||||
- [ ] Verify documentation is up to date
|
||||
- [ ] Check community channels for questions
|
||||
|
||||
## Short-term Tasks (Next week)
|
||||
- [ ] Update external integration guides
|
||||
- [ ] Monitor adoption metrics
|
||||
- [ ] Gather user feedback
|
||||
- [ ] Plan next release cycle
|
||||
|
||||
## Long-term Tasks
|
||||
- [ ] Analyze release process improvements
|
||||
- [ ] Update release templates based on learnings
|
||||
- [ ] Document any new patterns discovered
|
||||
|
||||
## Key Metrics to Track
|
||||
- Download counts: GitHub, PyPI, npm
|
||||
- Issue reports related to v${NEW_VERSION}
|
||||
- Community feedback and adoption
|
||||
- Performance impact measurements
|
||||
EOF
|
||||
```
|
||||
|
||||
3. **Create release summary:**
|
||||
```bash
|
||||
cat > release-summary-${NEW_VERSION}.md << EOF
|
||||
# Release Summary: ComfyUI Frontend v${NEW_VERSION}
|
||||
|
||||
**Released:** $(date)
|
||||
**Type:** ${VERSION_TYPE}
|
||||
**Duration:** ~${RELEASE_DURATION} minutes
|
||||
**Release Commit:** ${RELEASE_COMMIT}
|
||||
|
||||
## Metrics
|
||||
- **Commits Included:** ${COMMITS_COUNT}
|
||||
- **Contributors:** ${CONTRIBUTORS_COUNT}
|
||||
- **Files Changed:** ${FILES_CHANGED}
|
||||
- **Lines Added/Removed:** +${LINES_ADDED}/-${LINES_REMOVED}
|
||||
|
||||
## Distribution Status
|
||||
- ✅ GitHub Release: Published
|
||||
- ✅ PyPI Package: Available
|
||||
- ✅ npm Types: Available
|
||||
|
||||
## Next Steps
|
||||
- Monitor for 24-48 hours
|
||||
- Address any critical issues immediately
|
||||
- Plan next release cycle
|
||||
|
||||
## Files Generated
|
||||
- \`release-notes-${NEW_VERSION}-$(date +%Y%m%d).md\` - Detailed changelog
|
||||
- \`github-release-notes-${NEW_VERSION}.md\` - GitHub release notes
|
||||
- \`post-release-checklist.md\` - Follow-up tasks
|
||||
EOF
|
||||
```
|
||||
|
||||
4. **RELEASE COMPLETION**: All post-release setup completed?
|
||||
|
||||
## Advanced Safety Features
|
||||
|
||||
### Rollback Procedures
|
||||
|
||||
**Pre-Merge Rollback:**
|
||||
```bash
|
||||
# Close version bump PR and reset
|
||||
gh pr close ${PR_NUMBER}
|
||||
git reset --hard origin/main
|
||||
git clean -fd
|
||||
```
|
||||
|
||||
**Post-Merge Rollback:**
|
||||
```bash
|
||||
# Create immediate patch release with reverts
|
||||
git revert ${RELEASE_COMMIT}
|
||||
# Follow this command again with patch version
|
||||
```
|
||||
|
||||
**Emergency Procedures:**
|
||||
```bash
|
||||
# Document incident
|
||||
cat > release-incident-${NEW_VERSION}.md << EOF
|
||||
# Release Incident Report
|
||||
|
||||
**Version:** ${NEW_VERSION}
|
||||
**Issue:** [Describe the problem]
|
||||
**Impact:** [Severity and scope]
|
||||
**Resolution:** [Steps taken]
|
||||
**Prevention:** [Future improvements]
|
||||
EOF
|
||||
|
||||
# Contact package registries for critical issues
|
||||
echo "For critical security issues, consider:"
|
||||
echo "- PyPI: Contact support for package yanking"
|
||||
echo "- npm: Use 'npm unpublish' within 72 hours"
|
||||
echo "- GitHub: Update release with warning notes"
|
||||
```
|
||||
|
||||
### Quality Gates Summary
|
||||
|
||||
The command implements multiple quality gates:
|
||||
|
||||
1. **🔒 Security Gate**: Vulnerability scanning, secret detection
|
||||
2. **🧪 Quality Gate**: Full test suite, linting, type checking
|
||||
3. **📋 Content Gate**: Changelog accuracy, release notes quality
|
||||
4. **🔄 Process Gate**: Release timing verification
|
||||
5. **✅ Verification Gate**: Multi-channel publishing confirmation
|
||||
6. **📊 Monitoring Gate**: Post-release health tracking
|
||||
|
||||
## Common Scenarios
|
||||
|
||||
### Scenario 1: Regular Feature Release
|
||||
```bash
|
||||
/project:create-frontend-release minor
|
||||
```
|
||||
- Analyzes features since last release
|
||||
- Generates changelog automatically
|
||||
- Creates comprehensive release notes
|
||||
|
||||
### Scenario 2: Critical Security Patch
|
||||
```bash
|
||||
/project:create-frontend-release patch "Security fixes for CVE-2024-XXXX"
|
||||
```
|
||||
- Expedited security scanning
|
||||
- Enhanced monitoring setup
|
||||
|
||||
### Scenario 3: Major Version with Breaking Changes
|
||||
```bash
|
||||
/project:create-frontend-release major
|
||||
```
|
||||
- Comprehensive breaking change analysis
|
||||
- Migration guide generation
|
||||
|
||||
### Scenario 4: Pre-release Testing
|
||||
```bash
|
||||
/project:create-frontend-release prerelease
|
||||
```
|
||||
- Creates alpha/beta/rc versions
|
||||
- Draft release status
|
||||
- Python package specs require that prereleases use alpha/beta/rc as the preid
|
||||
|
||||
## Common Issues and Solutions
|
||||
|
||||
### Issue: Pre-release Version Confusion
|
||||
**Problem**: Not sure whether to promote pre-release or create new version
|
||||
**Solution**:
|
||||
- Follow semver standards: a prerelease version is followed by a normal release. It should have the same major, minor, and patch versions as the prerelease.
|
||||
|
||||
### Issue: Wrong Commit Count
|
||||
**Problem**: Changelog includes commits from other branches
|
||||
**Solution**: Always use `--first-parent` flag with git log
|
||||
|
||||
**Note on [skip ci]**: update-locales behavior is inconsistent — it sometimes adds `[skip ci]` and sometimes does not. Always verify the HEAD commit message before merging (see the next issue).
|
||||
|
||||
### Issue: Missing PRs in Changelog
|
||||
**Problem**: PR was merged to different branch
|
||||
**Solution**: Verify PR merge target with:
|
||||
```bash
|
||||
gh pr view ${PR_NUMBER} --json baseRefName
|
||||
```
|
||||
|
||||
### Issue: Release Failed Due to [skip ci]
|
||||
**Problem**: Release workflow didn't trigger after merge
|
||||
**Prevention**: Always avoid this scenario
|
||||
- Ensure that `[skip ci]` or similar flags are NOT in the `HEAD` commit message of the PR
|
||||
- If `[skip ci]` is present in the HEAD commit, push a new, empty commit to the PR so the HEAD commit message is clean
|
||||
- Always double-check this immediately before merging
|
||||
|
||||
**Recovery Strategy**:
|
||||
1. Revert version in a new PR (e.g., 1.24.0 → 1.24.0-1)
|
||||
2. Merge the revert PR
|
||||
3. Run version bump workflow again
|
||||
4. This creates a fresh PR without [skip ci]
|
||||
Benefits: Cleaner than creating extra version numbers
|
||||
|
||||
## Key Learnings & Notes
|
||||
|
||||
1. **PR Author**: Version bump PRs are created by `comfy-pr-bot`, not `github-actions`
|
||||
2. **Workflow Speed**: Version bump workflow typically completes in ~20-30 seconds
|
||||
3. **Update-locales Behavior**: Inconsistent - sometimes adds [skip ci], sometimes doesn't
|
||||
4. **Recovery Options**: Reverting version is cleaner than creating extra versions
|
||||
|
||||
222
.claude/commands/create-hotfix-release.md
Normal file
@@ -0,0 +1,222 @@
|
||||
# Create Hotfix Release
|
||||
|
||||
This command guides you through creating a patch/hotfix release for ComfyUI Frontend with comprehensive safety checks and human confirmations at each step.
|
||||
|
||||
<task>
|
||||
Create a hotfix release by cherry-picking commits or PR commits from main to a core branch: $ARGUMENTS
|
||||
|
||||
Expected format: Comma-separated list of commits or PR numbers
|
||||
Examples:
|
||||
- `abc123,def456,ghi789` (commits)
|
||||
- `#1234,#5678` (PRs)
|
||||
- `abc123,#1234,def456` (mixed)
|
||||
|
||||
If no arguments provided, the command will help identify the correct core branch and guide you through selecting commits/PRs.
|
||||
</task>
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before starting, ensure:
|
||||
- You have push access to the repository
|
||||
- GitHub CLI (`gh`) is authenticated
|
||||
- You're on a clean working tree
|
||||
- You understand the commits/PRs you're cherry-picking
|
||||
|
||||
## Hotfix Release Process
|
||||
|
||||
### Step 1: Identify Target Core Branch
|
||||
|
||||
1. Fetch the current ComfyUI requirements.txt from master branch:
|
||||
```bash
|
||||
curl -s https://raw.githubusercontent.com/comfyanonymous/ComfyUI/master/requirements.txt | grep "comfyui-frontend-package"
|
||||
```
|
||||
2. Extract the `comfyui-frontend-package` version (e.g., `comfyui-frontend-package==1.23.4`)
|
||||
3. Parse version to get major.minor (e.g., `1.23.4` → `1.23`)
|
||||
4. Determine core branch: `core/<major>.<minor>` (e.g., `core/1.23`)
|
||||
5. Verify the core branch exists: `git ls-remote origin refs/heads/core/*`
|
||||
6. **CONFIRMATION REQUIRED**: Is `core/X.Y` the correct target branch?
|
||||
|
||||
### Step 2: Parse and Validate Arguments
|
||||
|
||||
1. Parse the comma-separated list of commits/PRs
|
||||
2. For each item:
|
||||
- If starts with `#`: Treat as PR number
|
||||
- Otherwise: Treat as commit hash
|
||||
3. For PR numbers:
|
||||
- Fetch PR details using `gh pr view <number>`
|
||||
- Extract the merge commit if PR is merged
|
||||
- If PR has multiple commits, list them all
|
||||
- **CONFIRMATION REQUIRED**: Use merge commit or cherry-pick individual commits?
|
||||
4. Validate all commit hashes exist in the repository
|
||||
|
||||
### Step 3: Analyze Target Changes
|
||||
|
||||
1. For each commit/PR to cherry-pick:
|
||||
- Display commit hash, author, date
|
||||
- Show PR title and number (if applicable)
|
||||
- Display commit message
|
||||
- Show files changed and diff statistics
|
||||
- Check if already in core branch: `git branch --contains <commit>`
|
||||
2. Identify potential conflicts by checking changed files
|
||||
3. **CONFIRMATION REQUIRED**: Proceed with these commits?
|
||||
|
||||
### Step 4: Create Hotfix Branch
|
||||
|
||||
1. Checkout the core branch (e.g., `core/1.23`)
|
||||
2. Pull latest changes: `git pull origin core/X.Y`
|
||||
3. Display current version from package.json
|
||||
4. Create hotfix branch: `hotfix/<version>-<timestamp>`
|
||||
- Example: `hotfix/1.23.4-20241120`
|
||||
5. **CONFIRMATION REQUIRED**: Created branch correctly?
|
||||
|
||||
### Step 5: Cherry-pick Changes
|
||||
|
||||
For each commit:
|
||||
1. Attempt cherry-pick: `git cherry-pick <commit>`
|
||||
2. If conflicts occur:
|
||||
- Display conflict details
|
||||
- Show conflicting sections
|
||||
- Provide resolution guidance
|
||||
- **CONFIRMATION REQUIRED**: Conflicts resolved correctly?
|
||||
3. After successful cherry-pick:
|
||||
- Show the changes: `git show HEAD`
|
||||
- Run validation: `npm run typecheck && npm run lint`
|
||||
4. **CONFIRMATION REQUIRED**: Cherry-pick successful and valid?
|
||||
|
||||
### Step 6: Create PR to Core Branch
|
||||
|
||||
1. Push the hotfix branch: `git push origin hotfix/<version>-<timestamp>`
|
||||
2. Create PR using gh CLI:
|
||||
```bash
|
||||
gh pr create --base core/X.Y --head hotfix/<version>-<timestamp> \
|
||||
--title "[Hotfix] Cherry-pick fixes to core/X.Y" \
|
||||
--body "Cherry-picked commits: ..."
|
||||
```
|
||||
3. Add appropriate labels (but NOT "Release" yet)
|
||||
4. PR body should include:
|
||||
- List of cherry-picked commits/PRs
|
||||
- Original issue references
|
||||
- Testing instructions
|
||||
- Impact assessment
|
||||
5. **CONFIRMATION REQUIRED**: PR created correctly?
|
||||
|
||||
### Step 7: Wait for Tests
|
||||
|
||||
1. Monitor PR checks: `gh pr checks`
|
||||
2. Display test results as they complete
|
||||
3. If any tests fail:
|
||||
- Show failure details
|
||||
- Analyze if related to cherry-picks
|
||||
- **DECISION REQUIRED**: Fix and continue, or abort?
|
||||
4. Wait for all required checks to pass
|
||||
5. **CONFIRMATION REQUIRED**: All tests passing?
|
||||
|
||||
### Step 8: Merge Hotfix PR
|
||||
|
||||
1. Verify all checks have passed
|
||||
2. Check for required approvals
|
||||
3. Merge the PR: `gh pr merge --merge`
|
||||
4. Delete the hotfix branch
|
||||
5. **CONFIRMATION REQUIRED**: PR merged successfully?
|
||||
|
||||
### Step 9: Create Version Bump
|
||||
|
||||
1. Checkout the core branch: `git checkout core/X.Y`
|
||||
2. Pull latest changes: `git pull origin core/X.Y`
|
||||
3. Read current version from package.json
|
||||
4. Determine patch version increment:
|
||||
- Current: `1.23.4` → New: `1.23.5`
|
||||
5. Create release branch named with new version: `release/1.23.5`
|
||||
6. Update version in package.json to `1.23.5`
|
||||
7. Commit: `git commit -m "[release] Bump version to 1.23.5"`
|
||||
8. **CONFIRMATION REQUIRED**: Version bump correct?
|
||||
|
||||
### Step 10: Create Release PR
|
||||
|
||||
1. Push release branch: `git push origin release/1.23.5`
|
||||
2. Create PR with Release label:
|
||||
```bash
|
||||
gh pr create --base core/X.Y --head release/1.23.5 \
|
||||
--title "[Release] v1.23.5" \
|
||||
--body "..." \
|
||||
--label "Release"
|
||||
```
|
||||
3. **CRITICAL**: Verify "Release" label is added
|
||||
4. PR description should include:
|
||||
- Version: `1.23.4` → `1.23.5`
|
||||
- Included fixes (link to previous PR)
|
||||
- Release notes for users
|
||||
5. **CONFIRMATION REQUIRED**: Release PR has "Release" label?
|
||||
|
||||
### Step 11: Monitor Release Process
|
||||
|
||||
1. Wait for PR checks to pass
|
||||
2. **FINAL CONFIRMATION**: Ready to trigger release by merging?
|
||||
3. Merge the PR: `gh pr merge --merge`
|
||||
4. Monitor release workflow:
|
||||
```bash
|
||||
gh run list --workflow=release.yaml --limit=1
|
||||
gh run watch
|
||||
```
|
||||
5. Track progress:
|
||||
- GitHub release draft/publication
|
||||
- PyPI upload
|
||||
- npm types publication
|
||||
|
||||
### Step 12: Post-Release Verification
|
||||
|
||||
1. Verify GitHub release:
|
||||
```bash
|
||||
gh release view v1.23.5
|
||||
```
|
||||
2. Check PyPI package:
|
||||
```bash
|
||||
pip index versions comfyui-frontend-package | grep 1.23.5
|
||||
```
|
||||
3. Verify npm package:
|
||||
```bash
|
||||
npm view @comfyorg/comfyui-frontend-types@1.23.5
|
||||
```
|
||||
4. Generate release summary with:
|
||||
- Version released
|
||||
- Commits included
|
||||
- Issues fixed
|
||||
- Distribution status
|
||||
5. **CONFIRMATION REQUIRED**: Release completed successfully?
|
||||
|
||||
## Safety Checks
|
||||
|
||||
Throughout the process:
|
||||
- Always verify core branch matches ComfyUI's requirements.txt
|
||||
- For PRs: Ensure using correct commits (merge vs individual)
|
||||
- Check version numbers follow semantic versioning
|
||||
- **Critical**: "Release" label must be on version bump PR
|
||||
- Validate cherry-picks don't break core branch stability
|
||||
- Keep audit trail of all operations
|
||||
|
||||
## Rollback Procedures
|
||||
|
||||
If something goes wrong:
|
||||
- Before push: `git reset --hard origin/core/X.Y`
|
||||
- After PR creation: Close PR and start over
|
||||
- After failed release: Create new patch version with fixes
|
||||
- Document any issues for future reference
|
||||
|
||||
## Important Notes
|
||||
|
||||
- Core branch version will be behind main - this is expected
|
||||
- The "Release" label triggers the PyPI/npm publication
|
||||
- PR numbers must include the `#` prefix
|
||||
- Mixed commits/PRs are supported but review carefully
|
||||
- Always wait for full test suite before proceeding
|
||||
|
||||
## Expected Timeline
|
||||
|
||||
- Step 1-3: ~10 minutes (analysis)
|
||||
- Steps 4-6: ~15-30 minutes (cherry-picking)
|
||||
- Step 7: ~10-20 minutes (tests)
|
||||
- Steps 8-10: ~10 minutes (version bump)
|
||||
- Step 11-12: ~15-20 minutes (release)
|
||||
- Total: ~60-90 minutes
|
||||
|
||||
This process ensures a safe, verified hotfix release with multiple confirmation points and clear tracking of what changes are being released.
|
||||
36
.github/CLAUDE.md
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
# ComfyUI Frontend - Claude Review Context
|
||||
|
||||
This file provides additional context for the automated PR review system.
|
||||
|
||||
## Quick Reference
|
||||
|
||||
### PrimeVue Component Migrations
|
||||
|
||||
When reviewing, flag these deprecated components:
|
||||
- `Dropdown` → Use `Select` from 'primevue/select'
|
||||
- `OverlayPanel` → Use `Popover` from 'primevue/popover'
|
||||
- `Calendar` → Use `DatePicker` from 'primevue/datepicker'
|
||||
- `InputSwitch` → Use `ToggleSwitch` from 'primevue/toggleswitch'
|
||||
- `Sidebar` → Use `Drawer` from 'primevue/drawer'
|
||||
- `Chips` → Use `AutoComplete` with multiple enabled and typeahead disabled
|
||||
- `TabMenu` → Use `Tabs` without panels
|
||||
- `Steps` → Use `Stepper` without panels
|
||||
- `InlineMessage` → Use `Message` component
|
||||
|
||||
### API Utilities Reference
|
||||
|
||||
- `api.apiURL()` - Backend API calls (/prompt, /queue, /view, etc.)
|
||||
- `api.fileURL()` - Static file access (templates, extensions)
|
||||
- `$t()` / `i18n.global.t()` - Internationalization
|
||||
- `DOMPurify.sanitize()` - HTML sanitization
|
||||
|
||||
## Review Scope
|
||||
|
||||
This automated review performs comprehensive analysis including:
|
||||
- Architecture and design patterns
|
||||
- Security vulnerabilities
|
||||
- Performance implications
|
||||
- Code quality and maintainability
|
||||
- Integration concerns
|
||||
|
||||
For implementation details, see `.claude/commands/comprehensive-pr-review.md`.
|
||||
75
.github/workflows/claude-pr-review.yml
vendored
Normal file
@@ -0,0 +1,75 @@
|
||||
name: Claude PR Review
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
issues: write
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [labeled]
|
||||
|
||||
jobs:
|
||||
wait-for-ci:
|
||||
runs-on: ubuntu-latest
|
||||
if: github.event.label.name == 'claude-review'
|
||||
outputs:
|
||||
should-proceed: ${{ steps.check-status.outputs.proceed }}
|
||||
steps:
|
||||
- name: Wait for other CI checks
|
||||
uses: lewagon/wait-on-check-action@v1.3.1
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
check-regexp: '^(ESLint|Prettier Check|Tests CI|Vitest Tests)'
|
||||
wait-interval: 30
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Check if we should proceed
|
||||
id: check-status
|
||||
run: |
|
||||
# Get all check runs for this commit
|
||||
CHECK_RUNS=$(gh api repos/${{ github.repository }}/commits/${{ github.event.pull_request.head.sha }}/check-runs --jq '.check_runs[] | select(.name | test("ESLint|Prettier Check|Tests CI|Vitest Tests")) | {name, conclusion}')
|
||||
|
||||
# Check if any required checks failed
|
||||
if echo "$CHECK_RUNS" | grep -q '"conclusion": "failure"'; then
|
||||
echo "Some CI checks failed - skipping Claude review"
|
||||
echo "proceed=false" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "All CI checks passed - proceeding with Claude review"
|
||||
echo "proceed=true" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
claude-review:
|
||||
needs: wait-for-ci
|
||||
if: needs.wait-for-ci.outputs.should-proceed == 'true'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: '20'
|
||||
|
||||
- name: Install dependencies for analysis tools
|
||||
run: |
|
||||
npm install -g typescript @vue/compiler-sfc
|
||||
|
||||
- name: Run Claude PR Review
|
||||
uses: anthropics/claude-code-action@main
|
||||
with:
|
||||
prompt_file: .claude/commands/comprehensive-pr-review.md
|
||||
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
max_turns: 1
|
||||
timeout_minutes: 30
|
||||
env:
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
COMMIT_SHA: ${{ github.event.pull_request.head.sha }}
|
||||
BASE_SHA: ${{ github.event.pull_request.base.sha }}
|
||||
REPOSITORY: ${{ github.repository }}
|
||||
154
.github/workflows/pr-checks.yml
vendored
Normal file
@@ -0,0 +1,154 @@
|
||||
name: PR Checks
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, edited, synchronize, reopened]
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: read
|
||||
|
||||
jobs:
|
||||
analyze:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
should_run: ${{ steps.check-changes.outputs.should_run }}
|
||||
has_browser_tests: ${{ steps.check-coverage.outputs.has_browser_tests }}
|
||||
has_screen_recording: ${{ steps.check-recording.outputs.has_recording }}
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Ensure base branch is available
|
||||
run: |
|
||||
# Fetch the specific base commit to ensure it's available for git diff
|
||||
git fetch origin ${{ github.event.pull_request.base.sha }}
|
||||
|
||||
- name: Check if significant changes exist
|
||||
id: check-changes
|
||||
run: |
|
||||
# Get list of changed files
|
||||
CHANGED_FILES=$(git diff --name-only ${{ github.event.pull_request.base.sha }}...${{ github.event.pull_request.head.sha }})
|
||||
|
||||
# Filter for src/ files
|
||||
SRC_FILES=$(echo "$CHANGED_FILES" | grep '^src/' || true)
|
||||
|
||||
if [ -z "$SRC_FILES" ]; then
|
||||
echo "No src/ files changed"
|
||||
echo "should_run=false" >> "$GITHUB_OUTPUT"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Count lines changed in src files
|
||||
TOTAL_LINES=0
|
||||
for file in $SRC_FILES; do
|
||||
if [ -f "$file" ]; then
|
||||
# Count added lines (non-empty)
|
||||
ADDED=$(git diff ${{ github.event.pull_request.base.sha }}...${{ github.event.pull_request.head.sha }} -- "$file" | grep '^+' | grep -v '^+++' | grep -v '^+$' | wc -l)
|
||||
# Count removed lines (non-empty)
|
||||
REMOVED=$(git diff ${{ github.event.pull_request.base.sha }}...${{ github.event.pull_request.head.sha }} -- "$file" | grep '^-' | grep -v '^---' | grep -v '^-$' | wc -l)
|
||||
TOTAL_LINES=$((TOTAL_LINES + ADDED + REMOVED))
|
||||
fi
|
||||
done
|
||||
|
||||
echo "Total lines changed in src/: $TOTAL_LINES"
|
||||
|
||||
if [ $TOTAL_LINES -gt 3 ]; then
|
||||
echo "should_run=true" >> "$GITHUB_OUTPUT"
|
||||
else
|
||||
echo "should_run=false" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
|
||||
- name: Check browser test coverage
|
||||
id: check-coverage
|
||||
if: steps.check-changes.outputs.should_run == 'true'
|
||||
run: |
|
||||
# Check if browser tests were updated
|
||||
BROWSER_TEST_CHANGES=$(git diff --name-only ${{ github.event.pull_request.base.sha }}...${{ github.event.pull_request.head.sha }} | grep '^browser_tests/.*\.ts$' || true)
|
||||
|
||||
if [ -n "$BROWSER_TEST_CHANGES" ]; then
|
||||
echo "has_browser_tests=true" >> "$GITHUB_OUTPUT"
|
||||
else
|
||||
echo "has_browser_tests=false" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
|
||||
- name: Check for screen recording
|
||||
id: check-recording
|
||||
if: steps.check-changes.outputs.should_run == 'true'
|
||||
run: |
|
||||
# Check PR body for screen recording
|
||||
PR_BODY="${{ github.event.pull_request.body }}"
|
||||
|
||||
# Check for GitHub user attachments or YouTube links
|
||||
if echo "$PR_BODY" | grep -qiE 'github\.com/user-attachments/assets/[a-f0-9-]+|youtube\.com/watch|youtu\.be/'; then
|
||||
echo "has_recording=true" >> "$GITHUB_OUTPUT"
|
||||
else
|
||||
echo "has_recording=false" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
|
||||
- name: Final check and create results
|
||||
id: final-check
|
||||
if: always()
|
||||
run: |
|
||||
# Initialize results
|
||||
WARNINGS_JSON=""
|
||||
|
||||
# Only run checks if should_run is true
|
||||
if [ "${{ steps.check-changes.outputs.should_run }}" == "true" ]; then
|
||||
# Check browser test coverage
|
||||
if [ "${{ steps.check-coverage.outputs.has_browser_tests }}" != "true" ]; then
|
||||
if [ -n "$WARNINGS_JSON" ]; then
|
||||
WARNINGS_JSON="${WARNINGS_JSON},"
|
||||
fi
|
||||
WARNINGS_JSON="${WARNINGS_JSON}{\"message\":\"⚠️ **Warning: E2E Test Coverage Missing**\\n\\nIf this PR modifies behavior that can be covered by browser-based E2E tests, those tests are required. PRs lacking applicable test coverage may not be reviewed until added. Please add or update browser tests to ensure code quality and prevent regressions.\"}"
|
||||
fi
|
||||
|
||||
# Check screen recording
|
||||
if [ "${{ steps.check-recording.outputs.has_recording }}" != "true" ]; then
|
||||
if [ -n "$WARNINGS_JSON" ]; then
|
||||
WARNINGS_JSON="${WARNINGS_JSON},"
|
||||
fi
|
||||
WARNINGS_JSON="${WARNINGS_JSON}{\"message\":\"⚠️ **Warning: Visual Documentation Missing**\\n\\nIf this PR changes user-facing behavior, visual proof (screen recording or screenshot) is required. PRs without applicable visual documentation may not be reviewed until provided.\\nYou can add it by:\\n\\n- GitHub: Drag & drop media directly into the PR description\\n\\n- YouTube: Include a link to a short demo\"}"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Create results JSON
|
||||
if [ -n "$WARNINGS_JSON" ]; then
|
||||
# Create JSON with warnings
|
||||
cat > pr-check-results.json << EOF
|
||||
{
|
||||
"fails": [],
|
||||
"warnings": [$WARNINGS_JSON],
|
||||
"messages": [],
|
||||
"markdowns": []
|
||||
}
|
||||
EOF
|
||||
echo "failed=false" >> "$GITHUB_OUTPUT"
|
||||
else
|
||||
# Create JSON with success
|
||||
cat > pr-check-results.json << 'EOF'
|
||||
{
|
||||
"fails": [],
|
||||
"warnings": [],
|
||||
"messages": [],
|
||||
"markdowns": []
|
||||
}
|
||||
EOF
|
||||
echo "failed=false" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
|
||||
# Write PR metadata
|
||||
echo "${{ github.event.pull_request.number }}" > pr-number.txt
|
||||
echo "${{ github.event.pull_request.head.sha }}" > pr-sha.txt
|
||||
|
||||
- name: Upload results
|
||||
uses: actions/upload-artifact@v4
|
||||
if: always()
|
||||
with:
|
||||
name: pr-check-results-${{ github.run_id }}
|
||||
path: |
|
||||
pr-check-results.json
|
||||
pr-number.txt
|
||||
pr-sha.txt
|
||||
retention-days: 1
|
||||
149
.github/workflows/pr-comment.yml
vendored
Normal file
@@ -0,0 +1,149 @@
|
||||
name: PR Comment
|
||||
on:
|
||||
workflow_run:
|
||||
workflows: ["PR Checks"]
|
||||
types: [completed]
|
||||
|
||||
permissions:
|
||||
pull-requests: write
|
||||
issues: write
|
||||
statuses: write
|
||||
|
||||
jobs:
|
||||
comment:
|
||||
if: github.event.workflow_run.event == 'pull_request'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Download artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: pr-check-results-${{ github.event.workflow_run.id }}
|
||||
path: /tmp/pr-artifacts
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
run-id: ${{ github.event.workflow_run.id }}
|
||||
|
||||
- name: Post results
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
// Helper function to safely read files
|
||||
function safeReadFile(filePath) {
|
||||
try {
|
||||
if (!fs.existsSync(filePath)) return null;
|
||||
return fs.readFileSync(filePath, 'utf8').trim();
|
||||
} catch (e) {
|
||||
console.error(`Error reading ${filePath}:`, e);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
// Read artifact files
|
||||
const artifactDir = '/tmp/pr-artifacts';
|
||||
const prNumber = safeReadFile(path.join(artifactDir, 'pr-number.txt'));
|
||||
const prSha = safeReadFile(path.join(artifactDir, 'pr-sha.txt'));
|
||||
const resultsJson = safeReadFile(path.join(artifactDir, 'pr-check-results.json'));
|
||||
|
||||
// Validate PR number
|
||||
if (!prNumber || isNaN(parseInt(prNumber))) {
|
||||
throw new Error('Invalid or missing PR number');
|
||||
}
|
||||
|
||||
// Parse and validate results
|
||||
let results;
|
||||
try {
|
||||
results = JSON.parse(resultsJson || '{}');
|
||||
} catch (e) {
|
||||
console.error('Failed to parse check results:', e);
|
||||
|
||||
// Post error comment
|
||||
await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: parseInt(prNumber),
|
||||
body: `⚠️ PR checks failed to complete properly. Error parsing results: ${e.message}`
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Format check messages
|
||||
const messages = [];
|
||||
|
||||
if (results.fails && results.fails.length > 0) {
|
||||
messages.push('### ❌ Failures\n' + results.fails.map(f => f.message).join('\n\n'));
|
||||
}
|
||||
|
||||
if (results.warnings && results.warnings.length > 0) {
|
||||
messages.push('### ⚠️ Warnings\n' + results.warnings.map(w => w.message).join('\n\n'));
|
||||
}
|
||||
|
||||
if (results.messages && results.messages.length > 0) {
|
||||
messages.push('### 💬 Messages\n' + results.messages.map(m => m.message).join('\n\n'));
|
||||
}
|
||||
|
||||
if (results.markdowns && results.markdowns.length > 0) {
|
||||
messages.push(...results.markdowns.map(m => m.message));
|
||||
}
|
||||
|
||||
// Find existing bot comment
|
||||
const comments = await github.rest.issues.listComments({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: parseInt(prNumber)
|
||||
});
|
||||
|
||||
const botComment = comments.data.find(comment =>
|
||||
comment.user.type === 'Bot' &&
|
||||
comment.body.includes('<!-- pr-checks-comment -->')
|
||||
);
|
||||
|
||||
// Post comment if there are any messages
|
||||
if (messages.length > 0) {
|
||||
const body = messages.join('\n\n');
|
||||
const commentBody = `<!-- pr-checks-comment -->\n${body}`;
|
||||
|
||||
if (botComment) {
|
||||
await github.rest.issues.updateComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
comment_id: botComment.id,
|
||||
body: commentBody
|
||||
});
|
||||
} else {
|
||||
await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: parseInt(prNumber),
|
||||
body: commentBody
|
||||
});
|
||||
}
|
||||
} else {
|
||||
// No messages - delete existing comment if present
|
||||
if (botComment) {
|
||||
await github.rest.issues.deleteComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
comment_id: botComment.id
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Set commit status based on failures
|
||||
if (prSha) {
|
||||
const hasFailures = results.fails && results.fails.length > 0;
|
||||
const hasWarnings = results.warnings && results.warnings.length > 0;
|
||||
await github.rest.repos.createCommitStatus({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
sha: prSha,
|
||||
state: hasFailures ? 'failure' : 'success',
|
||||
context: 'pr-checks',
|
||||
description: hasFailures
|
||||
? `${results.fails.length} check(s) failed`
|
||||
: hasWarnings
|
||||
? `${results.warnings.length} warning(s)`
|
||||
: 'All checks passed'
|
||||
});
|
||||
}
|
||||
@@ -9,9 +9,10 @@ module.exports = defineConfig({
|
||||
entry: 'src/locales/en',
|
||||
entryLocale: 'en',
|
||||
output: 'src/locales',
|
||||
outputLocales: ['zh', 'ru', 'ja', 'ko', 'fr', 'es'],
|
||||
outputLocales: ['zh', 'zh-TW', 'ru', 'ja', 'ko', 'fr', 'es'],
|
||||
reference: `Special names to keep untranslated: flux, photomaker, clip, vae, cfg, stable audio, stable cascade, stable zero, controlnet, lora, HiDream.
|
||||
'latent' is the short form of 'latent space'.
|
||||
'mask' is in the context of image processing.
|
||||
Note: For Traditional Chinese (Taiwan), use Taiwan-specific terminology and traditional characters.
|
||||
`
|
||||
});
|
||||
|
||||
@@ -529,7 +529,7 @@ Have another idea? Drop into Discord or open an issue, and let's chat!
|
||||
### Prerequisites & Technology Stack
|
||||
|
||||
- **Required Software**:
|
||||
- Node.js (v16 or later) and npm
|
||||
- Node.js (v16 or later; v20/v22 strongly recommended) and npm
|
||||
- Git for version control
|
||||
- A running ComfyUI backend instance
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@ Clone <https://github.com/Comfy-Org/ComfyUI_devtools> to your `custom_nodes` dir
|
||||
_ComfyUI_devtools adds additional API endpoints and nodes to ComfyUI for browser testing._
|
||||
|
||||
### Node.js & Playwright Prerequisites
|
||||
Ensure you have Node.js v20 or later installed. Then, set up the Chromium test driver:
|
||||
Ensure you have Node.js v20 or v22 installed. Then, set up the Chromium test driver:
|
||||
```bash
|
||||
npx playwright install chromium --with-deps
|
||||
```
|
||||
|
||||
716
browser_tests/assets/nested-subgraph.json
Normal file
@@ -0,0 +1,716 @@
|
||||
{
|
||||
"id": "976d6e9a-927d-42db-abd4-96bfc0ecf8d9",
|
||||
"revision": 0,
|
||||
"last_node_id": 10,
|
||||
"last_link_id": 11,
|
||||
"nodes": [
|
||||
{
|
||||
"id": 10,
|
||||
"type": "8beb610f-ddd1-4489-ae0d-2f732a4042ae",
|
||||
"pos": [
|
||||
532,
|
||||
412.5
|
||||
],
|
||||
"size": [
|
||||
140,
|
||||
46
|
||||
],
|
||||
"flags": {},
|
||||
"order": 0,
|
||||
"mode": 0,
|
||||
"inputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "LATENT",
|
||||
"type": "LATENT",
|
||||
"links": [
|
||||
10
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "VAE",
|
||||
"type": "VAE",
|
||||
"links": [
|
||||
11
|
||||
]
|
||||
}
|
||||
],
|
||||
"title": "subgraph 2",
|
||||
"properties": {},
|
||||
"widgets_values": []
|
||||
},
|
||||
{
|
||||
"id": 8,
|
||||
"type": "VAEDecode",
|
||||
"pos": [
|
||||
758.2109985351562,
|
||||
398.3681335449219
|
||||
],
|
||||
"size": [
|
||||
210,
|
||||
46
|
||||
],
|
||||
"flags": {},
|
||||
"order": 1,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "samples",
|
||||
"type": "LATENT",
|
||||
"link": 10
|
||||
},
|
||||
{
|
||||
"name": "vae",
|
||||
"type": "VAE",
|
||||
"link": 11
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "IMAGE",
|
||||
"type": "IMAGE",
|
||||
"slot_index": 0,
|
||||
"links": [
|
||||
9
|
||||
]
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "VAEDecode"
|
||||
},
|
||||
"widgets_values": []
|
||||
},
|
||||
{
|
||||
"id": 9,
|
||||
"type": "SaveImage",
|
||||
"pos": [
|
||||
1028.9615478515625,
|
||||
381.83746337890625
|
||||
],
|
||||
"size": [
|
||||
210,
|
||||
270
|
||||
],
|
||||
"flags": {},
|
||||
"order": 2,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "images",
|
||||
"type": "IMAGE",
|
||||
"link": 9
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"properties": {},
|
||||
"widgets_values": [
|
||||
"ComfyUI"
|
||||
]
|
||||
}
|
||||
],
|
||||
"links": [
|
||||
[
|
||||
9,
|
||||
8,
|
||||
0,
|
||||
9,
|
||||
0,
|
||||
"IMAGE"
|
||||
],
|
||||
[
|
||||
10,
|
||||
10,
|
||||
0,
|
||||
8,
|
||||
0,
|
||||
"LATENT"
|
||||
],
|
||||
[
|
||||
11,
|
||||
10,
|
||||
1,
|
||||
8,
|
||||
1,
|
||||
"VAE"
|
||||
]
|
||||
],
|
||||
"groups": [],
|
||||
"definitions": {
|
||||
"subgraphs": [
|
||||
{
|
||||
"id": "8beb610f-ddd1-4489-ae0d-2f732a4042ae",
|
||||
"version": 1,
|
||||
"state": {
|
||||
"lastGroupId": 0,
|
||||
"lastNodeId": 10,
|
||||
"lastLinkId": 14,
|
||||
"lastRerouteId": 0
|
||||
},
|
||||
"revision": 0,
|
||||
"config": {},
|
||||
"name": "subgraph 2",
|
||||
"inputNode": {
|
||||
"id": -10,
|
||||
"bounding": [
|
||||
-154,
|
||||
415.5,
|
||||
120,
|
||||
40
|
||||
]
|
||||
},
|
||||
"outputNode": {
|
||||
"id": -20,
|
||||
"bounding": [
|
||||
1238,
|
||||
395.5,
|
||||
120,
|
||||
80
|
||||
]
|
||||
},
|
||||
"inputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"id": "4d6c7e4e-971e-4f78-9218-9a604db53a4b",
|
||||
"name": "LATENT",
|
||||
"type": "LATENT",
|
||||
"linkIds": [
|
||||
7
|
||||
],
|
||||
"localized_name": "LATENT",
|
||||
"pos": {
|
||||
"0": 1258,
|
||||
"1": 415.5
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "f8201d4f-7fc6-4a1b-b8c9-9f0716d9c09a",
|
||||
"name": "VAE",
|
||||
"type": "VAE",
|
||||
"linkIds": [
|
||||
14
|
||||
],
|
||||
"localized_name": "VAE",
|
||||
"pos": {
|
||||
"0": 1258,
|
||||
"1": 435.5
|
||||
}
|
||||
}
|
||||
],
|
||||
"widgets": [],
|
||||
"nodes": [
|
||||
{
|
||||
"id": 6,
|
||||
"type": "CLIPTextEncode",
|
||||
"pos": [
|
||||
415,
|
||||
186
|
||||
],
|
||||
"size": [
|
||||
422.84503173828125,
|
||||
164.31304931640625
|
||||
],
|
||||
"flags": {},
|
||||
"order": 0,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"localized_name": "clip",
|
||||
"name": "clip",
|
||||
"type": "CLIP",
|
||||
"link": 13
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"localized_name": "CONDITIONING",
|
||||
"name": "CONDITIONING",
|
||||
"type": "CONDITIONING",
|
||||
"slot_index": 0,
|
||||
"links": [
|
||||
4
|
||||
]
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "CLIPTextEncode"
|
||||
},
|
||||
"widgets_values": [
|
||||
"beautiful scenery nature glass bottle landscape, , purple galaxy bottle,"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"type": "KSampler",
|
||||
"pos": [
|
||||
863,
|
||||
186
|
||||
],
|
||||
"size": [
|
||||
315,
|
||||
262
|
||||
],
|
||||
"flags": {},
|
||||
"order": 2,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"localized_name": "model",
|
||||
"name": "model",
|
||||
"type": "MODEL",
|
||||
"link": 12
|
||||
},
|
||||
{
|
||||
"localized_name": "positive",
|
||||
"name": "positive",
|
||||
"type": "CONDITIONING",
|
||||
"link": 4
|
||||
},
|
||||
{
|
||||
"localized_name": "negative",
|
||||
"name": "negative",
|
||||
"type": "CONDITIONING",
|
||||
"link": 10
|
||||
},
|
||||
{
|
||||
"localized_name": "latent_image",
|
||||
"name": "latent_image",
|
||||
"type": "LATENT",
|
||||
"link": 11
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"localized_name": "LATENT",
|
||||
"name": "LATENT",
|
||||
"type": "LATENT",
|
||||
"slot_index": 0,
|
||||
"links": [
|
||||
7
|
||||
]
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "KSampler"
|
||||
},
|
||||
"widgets_values": [
|
||||
32115495257102,
|
||||
"randomize",
|
||||
20,
|
||||
8,
|
||||
"euler",
|
||||
"normal",
|
||||
1
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 10,
|
||||
"type": "dbe5763f-440b-47b4-82ac-454f1f98b0e3",
|
||||
"pos": [
|
||||
194.13900756835938,
|
||||
657.3333740234375
|
||||
],
|
||||
"size": [
|
||||
140,
|
||||
106
|
||||
],
|
||||
"flags": {},
|
||||
"order": 1,
|
||||
"mode": 0,
|
||||
"inputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"localized_name": "CONDITIONING",
|
||||
"name": "CONDITIONING",
|
||||
"type": "CONDITIONING",
|
||||
"links": [
|
||||
10
|
||||
]
|
||||
},
|
||||
{
|
||||
"localized_name": "LATENT",
|
||||
"name": "LATENT",
|
||||
"type": "LATENT",
|
||||
"links": [
|
||||
11
|
||||
]
|
||||
},
|
||||
{
|
||||
"localized_name": "MODEL",
|
||||
"name": "MODEL",
|
||||
"type": "MODEL",
|
||||
"links": [
|
||||
12
|
||||
]
|
||||
},
|
||||
{
|
||||
"localized_name": "CLIP",
|
||||
"name": "CLIP",
|
||||
"type": "CLIP",
|
||||
"links": [
|
||||
13
|
||||
]
|
||||
},
|
||||
{
|
||||
"localized_name": "VAE",
|
||||
"name": "VAE",
|
||||
"type": "VAE",
|
||||
"links": [
|
||||
14
|
||||
]
|
||||
}
|
||||
],
|
||||
"title": "subgraph 3",
|
||||
"properties": {},
|
||||
"widgets_values": []
|
||||
}
|
||||
],
|
||||
"groups": [],
|
||||
"links": [
|
||||
{
|
||||
"id": 4,
|
||||
"origin_id": 6,
|
||||
"origin_slot": 0,
|
||||
"target_id": 3,
|
||||
"target_slot": 1,
|
||||
"type": "CONDITIONING"
|
||||
},
|
||||
{
|
||||
"id": 7,
|
||||
"origin_id": 3,
|
||||
"origin_slot": 0,
|
||||
"target_id": -20,
|
||||
"target_slot": 0,
|
||||
"type": "LATENT"
|
||||
},
|
||||
{
|
||||
"id": 10,
|
||||
"origin_id": 10,
|
||||
"origin_slot": 0,
|
||||
"target_id": 3,
|
||||
"target_slot": 2,
|
||||
"type": "CONDITIONING"
|
||||
},
|
||||
{
|
||||
"id": 11,
|
||||
"origin_id": 10,
|
||||
"origin_slot": 1,
|
||||
"target_id": 3,
|
||||
"target_slot": 3,
|
||||
"type": "LATENT"
|
||||
},
|
||||
{
|
||||
"id": 12,
|
||||
"origin_id": 10,
|
||||
"origin_slot": 2,
|
||||
"target_id": 3,
|
||||
"target_slot": 0,
|
||||
"type": "MODEL"
|
||||
},
|
||||
{
|
||||
"id": 13,
|
||||
"origin_id": 10,
|
||||
"origin_slot": 3,
|
||||
"target_id": 6,
|
||||
"target_slot": 0,
|
||||
"type": "CLIP"
|
||||
},
|
||||
{
|
||||
"id": 14,
|
||||
"origin_id": 10,
|
||||
"origin_slot": 4,
|
||||
"target_id": -20,
|
||||
"target_slot": 1,
|
||||
"type": "VAE"
|
||||
}
|
||||
],
|
||||
"extra": {}
|
||||
},
|
||||
{
|
||||
"id": "dbe5763f-440b-47b4-82ac-454f1f98b0e3",
|
||||
"version": 1,
|
||||
"state": {
|
||||
"lastGroupId": 0,
|
||||
"lastNodeId": 9,
|
||||
"lastLinkId": 9,
|
||||
"lastRerouteId": 0
|
||||
},
|
||||
"revision": 0,
|
||||
"config": {},
|
||||
"name": "subgraph 3",
|
||||
"inputNode": {
|
||||
"id": -10,
|
||||
"bounding": [
|
||||
-154,
|
||||
517,
|
||||
120,
|
||||
40
|
||||
]
|
||||
},
|
||||
"outputNode": {
|
||||
"id": -20,
|
||||
"bounding": [
|
||||
898.2780151367188,
|
||||
467,
|
||||
128.6640625,
|
||||
140
|
||||
]
|
||||
},
|
||||
"inputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"id": "b4882169-329b-43f6-a373-81abfbdea55b",
|
||||
"name": "CONDITIONING",
|
||||
"type": "CONDITIONING",
|
||||
"linkIds": [
|
||||
6
|
||||
],
|
||||
"localized_name": "CONDITIONING",
|
||||
"pos": {
|
||||
"0": 918.2780151367188,
|
||||
"1": 487
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "01f51f96-a741-428e-8772-9557ee50b609",
|
||||
"name": "LATENT",
|
||||
"type": "LATENT",
|
||||
"linkIds": [
|
||||
2
|
||||
],
|
||||
"localized_name": "LATENT",
|
||||
"pos": {
|
||||
"0": 918.2780151367188,
|
||||
"1": 507
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "47fa906e-d80b-45c3-a596-211a0e59d4a1",
|
||||
"name": "MODEL",
|
||||
"type": "MODEL",
|
||||
"linkIds": [
|
||||
1
|
||||
],
|
||||
"localized_name": "MODEL",
|
||||
"pos": {
|
||||
"0": 918.2780151367188,
|
||||
"1": 527
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "f03dccd7-10e8-4513-9994-15854a92d192",
|
||||
"name": "CLIP",
|
||||
"type": "CLIP",
|
||||
"linkIds": [
|
||||
3
|
||||
],
|
||||
"localized_name": "CLIP",
|
||||
"pos": {
|
||||
"0": 918.2780151367188,
|
||||
"1": 547
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "a666877f-e34f-49bc-8a78-b26156656b83",
|
||||
"name": "VAE",
|
||||
"type": "VAE",
|
||||
"linkIds": [
|
||||
8
|
||||
],
|
||||
"localized_name": "VAE",
|
||||
"pos": {
|
||||
"0": 918.2780151367188,
|
||||
"1": 567
|
||||
}
|
||||
}
|
||||
],
|
||||
"widgets": [],
|
||||
"nodes": [
|
||||
{
|
||||
"id": 7,
|
||||
"type": "CLIPTextEncode",
|
||||
"pos": [
|
||||
413,
|
||||
389
|
||||
],
|
||||
"size": [
|
||||
425.27801513671875,
|
||||
180.6060791015625
|
||||
],
|
||||
"flags": {},
|
||||
"order": 2,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"localized_name": "clip",
|
||||
"name": "clip",
|
||||
"type": "CLIP",
|
||||
"link": 5
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"localized_name": "CONDITIONING",
|
||||
"name": "CONDITIONING",
|
||||
"type": "CONDITIONING",
|
||||
"slot_index": 0,
|
||||
"links": [
|
||||
6
|
||||
]
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "CLIPTextEncode"
|
||||
},
|
||||
"widgets_values": [
|
||||
"text, watermark"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"type": "EmptyLatentImage",
|
||||
"pos": [
|
||||
473,
|
||||
609
|
||||
],
|
||||
"size": [
|
||||
315,
|
||||
106
|
||||
],
|
||||
"flags": {},
|
||||
"order": 0,
|
||||
"mode": 0,
|
||||
"inputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"localized_name": "LATENT",
|
||||
"name": "LATENT",
|
||||
"type": "LATENT",
|
||||
"slot_index": 0,
|
||||
"links": [
|
||||
2
|
||||
]
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "EmptyLatentImage"
|
||||
},
|
||||
"widgets_values": [
|
||||
512,
|
||||
512,
|
||||
1
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"type": "CheckpointLoaderSimple",
|
||||
"pos": [
|
||||
26,
|
||||
474
|
||||
],
|
||||
"size": [
|
||||
315,
|
||||
98
|
||||
],
|
||||
"flags": {},
|
||||
"order": 1,
|
||||
"mode": 0,
|
||||
"inputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"localized_name": "MODEL",
|
||||
"name": "MODEL",
|
||||
"type": "MODEL",
|
||||
"slot_index": 0,
|
||||
"links": [
|
||||
1
|
||||
]
|
||||
},
|
||||
{
|
||||
"localized_name": "CLIP",
|
||||
"name": "CLIP",
|
||||
"type": "CLIP",
|
||||
"slot_index": 1,
|
||||
"links": [
|
||||
3,
|
||||
5
|
||||
]
|
||||
},
|
||||
{
|
||||
"localized_name": "VAE",
|
||||
"name": "VAE",
|
||||
"type": "VAE",
|
||||
"slot_index": 2,
|
||||
"links": [
|
||||
8
|
||||
]
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "CheckpointLoaderSimple"
|
||||
},
|
||||
"widgets_values": [
|
||||
"v1-5-pruned-emaonly-fp16.safetensors"
|
||||
]
|
||||
}
|
||||
],
|
||||
"groups": [],
|
||||
"links": [
|
||||
{
|
||||
"id": 5,
|
||||
"origin_id": 4,
|
||||
"origin_slot": 1,
|
||||
"target_id": 7,
|
||||
"target_slot": 0,
|
||||
"type": "CLIP"
|
||||
},
|
||||
{
|
||||
"id": 6,
|
||||
"origin_id": 7,
|
||||
"origin_slot": 0,
|
||||
"target_id": -20,
|
||||
"target_slot": 0,
|
||||
"type": "CONDITIONING"
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"origin_id": 5,
|
||||
"origin_slot": 0,
|
||||
"target_id": -20,
|
||||
"target_slot": 1,
|
||||
"type": "LATENT"
|
||||
},
|
||||
{
|
||||
"id": 1,
|
||||
"origin_id": 4,
|
||||
"origin_slot": 0,
|
||||
"target_id": -20,
|
||||
"target_slot": 2,
|
||||
"type": "MODEL"
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"origin_id": 4,
|
||||
"origin_slot": 1,
|
||||
"target_id": -20,
|
||||
"target_slot": 3,
|
||||
"type": "CLIP"
|
||||
},
|
||||
{
|
||||
"id": 8,
|
||||
"origin_id": 4,
|
||||
"origin_slot": 2,
|
||||
"target_id": -20,
|
||||
"target_slot": 4,
|
||||
"type": "VAE"
|
||||
}
|
||||
],
|
||||
"extra": {}
|
||||
}
|
||||
]
|
||||
},
|
||||
"config": {},
|
||||
"extra": {
|
||||
"frontendVersion": "1.24.0-1"
|
||||
},
|
||||
"version": 0.4
|
||||
}
|
||||
382
browser_tests/tests/featureFlags.spec.ts
Normal file
@@ -0,0 +1,382 @@
|
||||
import { expect } from '@playwright/test'
|
||||
|
||||
import { comfyPageFixture as test } from '../fixtures/ComfyPage'
|
||||
|
||||
test.describe('Feature Flags', () => {
|
||||
test('Client and server exchange feature flags on connection', async ({
|
||||
comfyPage
|
||||
}) => {
|
||||
// Navigate to a new page to capture the initial WebSocket connection
|
||||
const newPage = await comfyPage.page.context().newPage()
|
||||
|
||||
// Set up monitoring before navigation
|
||||
await newPage.addInitScript(() => {
|
||||
// This runs before any page scripts
|
||||
window.__capturedMessages = {
|
||||
clientFeatureFlags: null,
|
||||
serverFeatureFlags: null
|
||||
}
|
||||
|
||||
// Capture outgoing client messages
|
||||
const originalSend = WebSocket.prototype.send
|
||||
WebSocket.prototype.send = function (data) {
|
||||
try {
|
||||
const parsed = JSON.parse(data)
|
||||
if (parsed.type === 'feature_flags') {
|
||||
window.__capturedMessages.clientFeatureFlags = parsed
|
||||
}
|
||||
} catch (e) {
|
||||
// Not JSON, ignore
|
||||
}
|
||||
return originalSend.call(this, data)
|
||||
}
|
||||
|
||||
// Monitor for server feature flags
|
||||
const checkInterval = setInterval(() => {
|
||||
if (
|
||||
window['app']?.api?.serverFeatureFlags &&
|
||||
Object.keys(window['app'].api.serverFeatureFlags).length > 0
|
||||
) {
|
||||
window.__capturedMessages.serverFeatureFlags =
|
||||
window['app'].api.serverFeatureFlags
|
||||
clearInterval(checkInterval)
|
||||
}
|
||||
}, 100)
|
||||
|
||||
// Clear after 10 seconds
|
||||
setTimeout(() => clearInterval(checkInterval), 10000)
|
||||
})
|
||||
|
||||
// Navigate to the app
|
||||
await newPage.goto(comfyPage.url)
|
||||
|
||||
// Wait for both client and server feature flags
|
||||
await newPage.waitForFunction(
|
||||
() =>
|
||||
window.__capturedMessages.clientFeatureFlags !== null &&
|
||||
window.__capturedMessages.serverFeatureFlags !== null,
|
||||
{ timeout: 10000 }
|
||||
)
|
||||
|
||||
// Get the captured messages
|
||||
const messages = await newPage.evaluate(() => window.__capturedMessages)
|
||||
|
||||
// Verify client sent feature flags
|
||||
expect(messages.clientFeatureFlags).toBeTruthy()
|
||||
expect(messages.clientFeatureFlags).toHaveProperty('type', 'feature_flags')
|
||||
expect(messages.clientFeatureFlags).toHaveProperty('data')
|
||||
expect(messages.clientFeatureFlags.data).toHaveProperty(
|
||||
'supports_preview_metadata'
|
||||
)
|
||||
expect(
|
||||
typeof messages.clientFeatureFlags.data.supports_preview_metadata
|
||||
).toBe('boolean')
|
||||
|
||||
// Verify server sent feature flags back
|
||||
expect(messages.serverFeatureFlags).toBeTruthy()
|
||||
expect(messages.serverFeatureFlags).toHaveProperty(
|
||||
'supports_preview_metadata'
|
||||
)
|
||||
expect(typeof messages.serverFeatureFlags.supports_preview_metadata).toBe(
|
||||
'boolean'
|
||||
)
|
||||
expect(messages.serverFeatureFlags).toHaveProperty('max_upload_size')
|
||||
expect(typeof messages.serverFeatureFlags.max_upload_size).toBe('number')
|
||||
expect(Object.keys(messages.serverFeatureFlags).length).toBeGreaterThan(0)
|
||||
|
||||
await newPage.close()
|
||||
})
|
||||
|
||||
test('Server feature flags are received and accessible', async ({
|
||||
comfyPage
|
||||
}) => {
|
||||
// Wait for connection to establish
|
||||
await comfyPage.page.waitForTimeout(1000)
|
||||
|
||||
// Get the actual server feature flags from the backend
|
||||
const serverFlags = await comfyPage.page.evaluate(() => {
|
||||
return window['app'].api.serverFeatureFlags
|
||||
})
|
||||
|
||||
// Verify we received real feature flags from the backend
|
||||
expect(serverFlags).toBeTruthy()
|
||||
expect(Object.keys(serverFlags).length).toBeGreaterThan(0)
|
||||
|
||||
// The backend should send feature flags
|
||||
expect(serverFlags).toHaveProperty('supports_preview_metadata')
|
||||
expect(typeof serverFlags.supports_preview_metadata).toBe('boolean')
|
||||
expect(serverFlags).toHaveProperty('max_upload_size')
|
||||
expect(typeof serverFlags.max_upload_size).toBe('number')
|
||||
})
|
||||
|
||||
test('serverSupportsFeature method works with real backend flags', async ({
|
||||
comfyPage
|
||||
}) => {
|
||||
// Wait for connection
|
||||
await comfyPage.page.waitForTimeout(1000)
|
||||
|
||||
// Test serverSupportsFeature with real backend flags
|
||||
const supportsPreviewMetadata = await comfyPage.page.evaluate(() => {
|
||||
return window['app'].api.serverSupportsFeature(
|
||||
'supports_preview_metadata'
|
||||
)
|
||||
})
|
||||
// The method should return a boolean based on the backend's value
|
||||
expect(typeof supportsPreviewMetadata).toBe('boolean')
|
||||
|
||||
// Test non-existent feature - should always return false
|
||||
const supportsNonExistent = await comfyPage.page.evaluate(() => {
|
||||
return window['app'].api.serverSupportsFeature('non_existent_feature_xyz')
|
||||
})
|
||||
expect(supportsNonExistent).toBe(false)
|
||||
|
||||
// Test that the method only returns true for boolean true values
|
||||
const testResults = await comfyPage.page.evaluate(() => {
|
||||
// Temporarily modify serverFeatureFlags to test behavior
|
||||
const original = window['app'].api.serverFeatureFlags
|
||||
window['app'].api.serverFeatureFlags = {
|
||||
bool_true: true,
|
||||
bool_false: false,
|
||||
string_value: 'yes',
|
||||
number_value: 1,
|
||||
null_value: null
|
||||
}
|
||||
|
||||
const results = {
|
||||
bool_true: window['app'].api.serverSupportsFeature('bool_true'),
|
||||
bool_false: window['app'].api.serverSupportsFeature('bool_false'),
|
||||
string_value: window['app'].api.serverSupportsFeature('string_value'),
|
||||
number_value: window['app'].api.serverSupportsFeature('number_value'),
|
||||
null_value: window['app'].api.serverSupportsFeature('null_value')
|
||||
}
|
||||
|
||||
// Restore original
|
||||
window['app'].api.serverFeatureFlags = original
|
||||
return results
|
||||
})
|
||||
|
||||
// serverSupportsFeature should only return true for boolean true values
|
||||
expect(testResults.bool_true).toBe(true)
|
||||
expect(testResults.bool_false).toBe(false)
|
||||
expect(testResults.string_value).toBe(false)
|
||||
expect(testResults.number_value).toBe(false)
|
||||
expect(testResults.null_value).toBe(false)
|
||||
})
|
||||
|
||||
test('getServerFeature method works with real backend data', async ({
|
||||
comfyPage
|
||||
}) => {
|
||||
// Wait for connection
|
||||
await comfyPage.page.waitForTimeout(1000)
|
||||
|
||||
// Test getServerFeature method
|
||||
const previewMetadataValue = await comfyPage.page.evaluate(() => {
|
||||
return window['app'].api.getServerFeature('supports_preview_metadata')
|
||||
})
|
||||
expect(typeof previewMetadataValue).toBe('boolean')
|
||||
|
||||
// Test getting max_upload_size
|
||||
const maxUploadSize = await comfyPage.page.evaluate(() => {
|
||||
return window['app'].api.getServerFeature('max_upload_size')
|
||||
})
|
||||
expect(typeof maxUploadSize).toBe('number')
|
||||
expect(maxUploadSize).toBeGreaterThan(0)
|
||||
|
||||
// Test getServerFeature with default value for non-existent feature
|
||||
const defaultValue = await comfyPage.page.evaluate(() => {
|
||||
return window['app'].api.getServerFeature(
|
||||
'non_existent_feature_xyz',
|
||||
'default'
|
||||
)
|
||||
})
|
||||
expect(defaultValue).toBe('default')
|
||||
})
|
||||
|
||||
test('getServerFeatures returns all backend feature flags', async ({
|
||||
comfyPage
|
||||
}) => {
|
||||
// Wait for connection
|
||||
await comfyPage.page.waitForTimeout(1000)
|
||||
|
||||
// Test getServerFeatures returns all flags
|
||||
const allFeatures = await comfyPage.page.evaluate(() => {
|
||||
return window['app'].api.getServerFeatures()
|
||||
})
|
||||
|
||||
expect(allFeatures).toBeTruthy()
|
||||
expect(allFeatures).toHaveProperty('supports_preview_metadata')
|
||||
expect(typeof allFeatures.supports_preview_metadata).toBe('boolean')
|
||||
expect(allFeatures).toHaveProperty('max_upload_size')
|
||||
expect(Object.keys(allFeatures).length).toBeGreaterThan(0)
|
||||
})
|
||||
|
||||
test('Client feature flags are immutable', async ({ comfyPage }) => {
|
||||
// Test that getClientFeatureFlags returns a copy
|
||||
const immutabilityTest = await comfyPage.page.evaluate(() => {
|
||||
const flags1 = window['app'].api.getClientFeatureFlags()
|
||||
const flags2 = window['app'].api.getClientFeatureFlags()
|
||||
|
||||
// Modify the first object
|
||||
flags1.test_modification = true
|
||||
|
||||
// Get flags again to check if original was modified
|
||||
const flags3 = window['app'].api.getClientFeatureFlags()
|
||||
|
||||
return {
|
||||
areEqual: flags1 === flags2,
|
||||
hasModification: flags3.test_modification !== undefined,
|
||||
hasSupportsPreview: flags1.supports_preview_metadata !== undefined,
|
||||
supportsPreviewValue: flags1.supports_preview_metadata
|
||||
}
|
||||
})
|
||||
|
||||
// Verify they are different objects (not the same reference)
|
||||
expect(immutabilityTest.areEqual).toBe(false)
|
||||
|
||||
// Verify modification didn't affect the original
|
||||
expect(immutabilityTest.hasModification).toBe(false)
|
||||
|
||||
// Verify the flags contain expected properties
|
||||
expect(immutabilityTest.hasSupportsPreview).toBe(true)
|
||||
expect(typeof immutabilityTest.supportsPreviewValue).toBe('boolean') // From clientFeatureFlags.json
|
||||
})
|
||||
|
||||
test('Server features are immutable when accessed via getServerFeatures', async ({
|
||||
comfyPage
|
||||
}) => {
|
||||
// Wait for connection to establish
|
||||
await comfyPage.page.waitForTimeout(1000)
|
||||
|
||||
const immutabilityTest = await comfyPage.page.evaluate(() => {
|
||||
// Get a copy of server features
|
||||
const features1 = window['app'].api.getServerFeatures()
|
||||
|
||||
// Try to modify it
|
||||
features1.supports_preview_metadata = false
|
||||
features1.new_feature = 'added'
|
||||
|
||||
// Get another copy
|
||||
const features2 = window['app'].api.getServerFeatures()
|
||||
|
||||
return {
|
||||
modifiedValue: features1.supports_preview_metadata,
|
||||
originalValue: features2.supports_preview_metadata,
|
||||
hasNewFeature: features2.new_feature !== undefined,
|
||||
hasSupportsPreview: features2.supports_preview_metadata !== undefined
|
||||
}
|
||||
})
|
||||
|
||||
// The modification should only affect the copy
|
||||
expect(immutabilityTest.modifiedValue).toBe(false)
|
||||
expect(typeof immutabilityTest.originalValue).toBe('boolean') // Backend sends boolean for supports_preview_metadata
|
||||
expect(immutabilityTest.hasNewFeature).toBe(false)
|
||||
expect(immutabilityTest.hasSupportsPreview).toBe(true)
|
||||
})
|
||||
|
||||
test('Feature flags are negotiated early in connection lifecycle', async ({
|
||||
comfyPage
|
||||
}) => {
|
||||
// This test verifies that feature flags are available early in the app lifecycle
|
||||
// which is important for protocol negotiation
|
||||
|
||||
// Create a new page to ensure clean state
|
||||
const newPage = await comfyPage.page.context().newPage()
|
||||
|
||||
// Set up monitoring before navigation
|
||||
await newPage.addInitScript(() => {
|
||||
// Track when various app components are ready
|
||||
;(window as any).__appReadiness = {
|
||||
featureFlagsReceived: false,
|
||||
apiInitialized: false,
|
||||
appInitialized: false
|
||||
}
|
||||
|
||||
// Monitor when feature flags arrive by checking periodically
|
||||
const checkFeatureFlags = setInterval(() => {
|
||||
if (
|
||||
window['app']?.api?.serverFeatureFlags?.supports_preview_metadata !==
|
||||
undefined
|
||||
) {
|
||||
;(window as any).__appReadiness.featureFlagsReceived = true
|
||||
clearInterval(checkFeatureFlags)
|
||||
}
|
||||
}, 10)
|
||||
|
||||
// Monitor API initialization
|
||||
const checkApi = setInterval(() => {
|
||||
if (window['app']?.api) {
|
||||
;(window as any).__appReadiness.apiInitialized = true
|
||||
clearInterval(checkApi)
|
||||
}
|
||||
}, 10)
|
||||
|
||||
// Monitor app initialization
|
||||
const checkApp = setInterval(() => {
|
||||
if (window['app']?.graph) {
|
||||
;(window as any).__appReadiness.appInitialized = true
|
||||
clearInterval(checkApp)
|
||||
}
|
||||
}, 10)
|
||||
|
||||
// Clean up after 10 seconds
|
||||
setTimeout(() => {
|
||||
clearInterval(checkFeatureFlags)
|
||||
clearInterval(checkApi)
|
||||
clearInterval(checkApp)
|
||||
}, 10000)
|
||||
})
|
||||
|
||||
// Navigate to the app
|
||||
await newPage.goto(comfyPage.url)
|
||||
|
||||
// Wait for feature flags to be received
|
||||
await newPage.waitForFunction(
|
||||
() =>
|
||||
window['app']?.api?.serverFeatureFlags?.supports_preview_metadata !==
|
||||
undefined,
|
||||
{
|
||||
timeout: 10000
|
||||
}
|
||||
)
|
||||
|
||||
// Get readiness state
|
||||
const readiness = await newPage.evaluate(() => {
|
||||
return {
|
||||
...(window as any).__appReadiness,
|
||||
currentFlags: window['app'].api.serverFeatureFlags
|
||||
}
|
||||
})
|
||||
|
||||
// Verify feature flags are available
|
||||
expect(readiness.currentFlags).toHaveProperty('supports_preview_metadata')
|
||||
expect(typeof readiness.currentFlags.supports_preview_metadata).toBe(
|
||||
'boolean'
|
||||
)
|
||||
expect(readiness.currentFlags).toHaveProperty('max_upload_size')
|
||||
|
||||
// Verify feature flags were received (we detected them via polling)
|
||||
expect(readiness.featureFlagsReceived).toBe(true)
|
||||
|
||||
// Verify API was initialized (feature flags require API)
|
||||
expect(readiness.apiInitialized).toBe(true)
|
||||
|
||||
await newPage.close()
|
||||
})
|
||||
|
||||
test('Backend /features endpoint returns feature flags', async ({
|
||||
comfyPage
|
||||
}) => {
|
||||
// Test the HTTP endpoint directly
|
||||
const response = await comfyPage.page.request.get(
|
||||
`${comfyPage.url}/api/features`
|
||||
)
|
||||
expect(response.ok()).toBe(true)
|
||||
|
||||
const features = await response.json()
|
||||
expect(features).toBeTruthy()
|
||||
expect(features).toHaveProperty('supports_preview_metadata')
|
||||
expect(typeof features.supports_preview_metadata).toBe('boolean')
|
||||
expect(features).toHaveProperty('max_upload_size')
|
||||
expect(Object.keys(features).length).toBeGreaterThan(0)
|
||||
})
|
||||
})
|
||||
|
Before Width: | Height: | Size: 63 KiB After Width: | Height: | Size: 63 KiB |
|
Before Width: | Height: | Size: 64 KiB After Width: | Height: | Size: 63 KiB |
|
Before Width: | Height: | Size: 63 KiB After Width: | Height: | Size: 63 KiB |
@@ -130,4 +130,239 @@ test.describe('Release Notifications', () => {
|
||||
whatsNewSection.locator('text=No recent releases')
|
||||
).toBeVisible()
|
||||
})
|
||||
|
||||
test('should hide "What\'s New" section when notifications are disabled', async ({
|
||||
comfyPage
|
||||
}) => {
|
||||
// Disable version update notifications
|
||||
await comfyPage.setSetting('Comfy.Notification.ShowVersionUpdates', false)
|
||||
|
||||
// Mock release API with test data
|
||||
await comfyPage.page.route('**/releases**', async (route) => {
|
||||
const url = route.request().url()
|
||||
if (
|
||||
url.includes('api.comfy.org') ||
|
||||
url.includes('stagingapi.comfy.org')
|
||||
) {
|
||||
await route.fulfill({
|
||||
status: 200,
|
||||
contentType: 'application/json',
|
||||
body: JSON.stringify([
|
||||
{
|
||||
id: 1,
|
||||
project: 'comfyui',
|
||||
version: 'v0.3.44',
|
||||
attention: 'high',
|
||||
content: '## New Features\n\n- Added awesome feature',
|
||||
published_at: new Date().toISOString()
|
||||
}
|
||||
])
|
||||
})
|
||||
} else {
|
||||
await route.continue()
|
||||
}
|
||||
})
|
||||
|
||||
await comfyPage.setup({ mockReleases: false })
|
||||
|
||||
// Open help center
|
||||
const helpCenterButton = comfyPage.page.locator('.comfy-help-center-btn')
|
||||
await helpCenterButton.waitFor({ state: 'visible' })
|
||||
await helpCenterButton.click()
|
||||
|
||||
// Verify help center menu appears
|
||||
const helpMenu = comfyPage.page.locator('.help-center-menu')
|
||||
await expect(helpMenu).toBeVisible()
|
||||
|
||||
// Verify "What's New?" section is hidden
|
||||
const whatsNewSection = comfyPage.page.locator('.whats-new-section')
|
||||
await expect(whatsNewSection).not.toBeVisible()
|
||||
|
||||
// Should not show any popups or toasts
|
||||
await expect(comfyPage.page.locator('.whats-new-popup')).not.toBeVisible()
|
||||
await expect(
|
||||
comfyPage.page.locator('.release-notification-toast')
|
||||
).not.toBeVisible()
|
||||
})
|
||||
|
||||
test('should not make API calls when notifications are disabled', async ({
|
||||
comfyPage
|
||||
}) => {
|
||||
// Disable version update notifications
|
||||
await comfyPage.setSetting('Comfy.Notification.ShowVersionUpdates', false)
|
||||
|
||||
// Track API calls
|
||||
let apiCallCount = 0
|
||||
await comfyPage.page.route('**/releases**', async (route) => {
|
||||
const url = route.request().url()
|
||||
if (
|
||||
url.includes('api.comfy.org') ||
|
||||
url.includes('stagingapi.comfy.org')
|
||||
) {
|
||||
apiCallCount++
|
||||
await route.fulfill({
|
||||
status: 200,
|
||||
contentType: 'application/json',
|
||||
body: JSON.stringify([])
|
||||
})
|
||||
} else {
|
||||
await route.continue()
|
||||
}
|
||||
})
|
||||
|
||||
await comfyPage.setup({ mockReleases: false })
|
||||
|
||||
// Wait a bit to ensure any potential API calls would have been made
|
||||
await comfyPage.page.waitForTimeout(1000)
|
||||
|
||||
// Verify no API calls were made
|
||||
expect(apiCallCount).toBe(0)
|
||||
})
|
||||
|
||||
test('should show "What\'s New" section when notifications are enabled', async ({
|
||||
comfyPage
|
||||
}) => {
|
||||
// Enable version update notifications (default behavior)
|
||||
await comfyPage.setSetting('Comfy.Notification.ShowVersionUpdates', true)
|
||||
|
||||
// Mock release API with test data
|
||||
await comfyPage.page.route('**/releases**', async (route) => {
|
||||
const url = route.request().url()
|
||||
if (
|
||||
url.includes('api.comfy.org') ||
|
||||
url.includes('stagingapi.comfy.org')
|
||||
) {
|
||||
await route.fulfill({
|
||||
status: 200,
|
||||
contentType: 'application/json',
|
||||
body: JSON.stringify([
|
||||
{
|
||||
id: 1,
|
||||
project: 'comfyui',
|
||||
version: 'v0.3.44',
|
||||
attention: 'medium',
|
||||
content: '## New Features\n\n- Added awesome feature',
|
||||
published_at: new Date().toISOString()
|
||||
}
|
||||
])
|
||||
})
|
||||
} else {
|
||||
await route.continue()
|
||||
}
|
||||
})
|
||||
|
||||
await comfyPage.setup({ mockReleases: false })
|
||||
|
||||
// Open help center
|
||||
const helpCenterButton = comfyPage.page.locator('.comfy-help-center-btn')
|
||||
await helpCenterButton.waitFor({ state: 'visible' })
|
||||
await helpCenterButton.click()
|
||||
|
||||
// Verify help center menu appears
|
||||
const helpMenu = comfyPage.page.locator('.help-center-menu')
|
||||
await expect(helpMenu).toBeVisible()
|
||||
|
||||
// Verify "What's New?" section is visible
|
||||
const whatsNewSection = comfyPage.page.locator('.whats-new-section')
|
||||
await expect(whatsNewSection).toBeVisible()
|
||||
|
||||
// Should show the release
|
||||
await expect(
|
||||
whatsNewSection.locator('text=Comfy v0.3.44 Release')
|
||||
).toBeVisible()
|
||||
})
|
||||
|
||||
test('should toggle "What\'s New" section when setting changes', async ({
|
||||
comfyPage
|
||||
}) => {
|
||||
// Mock release API with test data
|
||||
await comfyPage.page.route('**/releases**', async (route) => {
|
||||
const url = route.request().url()
|
||||
if (
|
||||
url.includes('api.comfy.org') ||
|
||||
url.includes('stagingapi.comfy.org')
|
||||
) {
|
||||
await route.fulfill({
|
||||
status: 200,
|
||||
contentType: 'application/json',
|
||||
body: JSON.stringify([
|
||||
{
|
||||
id: 1,
|
||||
project: 'comfyui',
|
||||
version: 'v0.3.44',
|
||||
attention: 'low',
|
||||
content: '## Bug Fixes\n\n- Fixed minor issue',
|
||||
published_at: new Date().toISOString()
|
||||
}
|
||||
])
|
||||
})
|
||||
} else {
|
||||
await route.continue()
|
||||
}
|
||||
})
|
||||
|
||||
// Start with notifications enabled
|
||||
await comfyPage.setSetting('Comfy.Notification.ShowVersionUpdates', true)
|
||||
await comfyPage.setup({ mockReleases: false })
|
||||
|
||||
// Open help center
|
||||
const helpCenterButton = comfyPage.page.locator('.comfy-help-center-btn')
|
||||
await helpCenterButton.waitFor({ state: 'visible' })
|
||||
await helpCenterButton.click()
|
||||
|
||||
// Verify "What's New?" section is visible
|
||||
const whatsNewSection = comfyPage.page.locator('.whats-new-section')
|
||||
await expect(whatsNewSection).toBeVisible()
|
||||
|
||||
// Close help center
|
||||
await comfyPage.page.click('.help-center-backdrop')
|
||||
|
||||
// Disable notifications
|
||||
await comfyPage.setSetting('Comfy.Notification.ShowVersionUpdates', false)
|
||||
|
||||
// Reopen help center
|
||||
await helpCenterButton.click()
|
||||
|
||||
// Verify "What's New?" section is now hidden
|
||||
await expect(whatsNewSection).not.toBeVisible()
|
||||
})
|
||||
|
||||
test('should handle edge case with empty releases and disabled notifications', async ({
|
||||
comfyPage
|
||||
}) => {
|
||||
// Disable notifications
|
||||
await comfyPage.setSetting('Comfy.Notification.ShowVersionUpdates', false)
|
||||
|
||||
// Mock empty releases
|
||||
await comfyPage.page.route('**/releases**', async (route) => {
|
||||
const url = route.request().url()
|
||||
if (
|
||||
url.includes('api.comfy.org') ||
|
||||
url.includes('stagingapi.comfy.org')
|
||||
) {
|
||||
await route.fulfill({
|
||||
status: 200,
|
||||
contentType: 'application/json',
|
||||
body: JSON.stringify([])
|
||||
})
|
||||
} else {
|
||||
await route.continue()
|
||||
}
|
||||
})
|
||||
|
||||
await comfyPage.setup({ mockReleases: false })
|
||||
|
||||
// Open help center
|
||||
const helpCenterButton = comfyPage.page.locator('.comfy-help-center-btn')
|
||||
await helpCenterButton.waitFor({ state: 'visible' })
|
||||
await helpCenterButton.click()
|
||||
|
||||
// Verify help center still works
|
||||
const helpMenu = comfyPage.page.locator('.help-center-menu')
|
||||
await expect(helpMenu).toBeVisible()
|
||||
|
||||
// Section should be hidden regardless of empty releases
|
||||
const whatsNewSection = comfyPage.page.locator('.whats-new-section')
|
||||
await expect(whatsNewSection).not.toBeVisible()
|
||||
})
|
||||
})
|
||||
|
||||
82
browser_tests/tests/subgraphBreadcrumb.spec.ts
Normal file
@@ -0,0 +1,82 @@
|
||||
import { expect } from '@playwright/test'
|
||||
|
||||
import { comfyPageFixture as test } from '../fixtures/ComfyPage'
|
||||
|
||||
test.describe('Subgraph Breadcrumb Title Sync', () => {
|
||||
test.beforeEach(async ({ comfyPage }) => {
|
||||
await comfyPage.setSetting('Comfy.UseNewMenu', 'Top')
|
||||
})
|
||||
|
||||
test('Breadcrumb updates when subgraph node title is changed', async ({
|
||||
comfyPage
|
||||
}) => {
|
||||
// Load a workflow with subgraphs
|
||||
await comfyPage.loadWorkflow('nested-subgraph')
|
||||
await comfyPage.nextFrame()
|
||||
|
||||
// Get the subgraph node by ID (node 10 is the subgraph)
|
||||
const subgraphNode = await comfyPage.getNodeRefById('10')
|
||||
|
||||
// Get node position and double-click on it to enter the subgraph
|
||||
const nodePos = await subgraphNode.getPosition()
|
||||
const nodeSize = await subgraphNode.getSize()
|
||||
await comfyPage.canvas.dblclick({
|
||||
position: {
|
||||
x: nodePos.x + nodeSize.width / 2,
|
||||
y: nodePos.y + nodeSize.height / 2 + 10
|
||||
}
|
||||
})
|
||||
await comfyPage.nextFrame()
|
||||
|
||||
// Wait for breadcrumb to appear
|
||||
await comfyPage.page.waitForSelector('.subgraph-breadcrumb', {
|
||||
state: 'visible',
|
||||
timeout: 20000
|
||||
})
|
||||
|
||||
// Get initial breadcrumb text
|
||||
const breadcrumb = comfyPage.page.locator('.subgraph-breadcrumb')
|
||||
const initialBreadcrumbText = await breadcrumb.textContent()
|
||||
|
||||
// Go back to main graph
|
||||
await comfyPage.page.keyboard.press('Escape')
|
||||
|
||||
// Double-click on the title area of the subgraph node to edit
|
||||
await comfyPage.canvas.dblclick({
|
||||
position: {
|
||||
x: nodePos.x + nodeSize.width / 2,
|
||||
y: nodePos.y - 10 // Title area is above the node body
|
||||
},
|
||||
delay: 5
|
||||
})
|
||||
|
||||
// Wait for title editor to appear
|
||||
await expect(comfyPage.page.locator('.node-title-editor')).toBeVisible()
|
||||
|
||||
// Clear existing text and type new title
|
||||
await comfyPage.page.keyboard.press('Control+a')
|
||||
const newTitle = 'Updated Subgraph Title'
|
||||
await comfyPage.page.keyboard.type(newTitle)
|
||||
await comfyPage.page.keyboard.press('Enter')
|
||||
|
||||
// Wait a frame for the update to complete
|
||||
await comfyPage.nextFrame()
|
||||
|
||||
// Enter the subgraph again
|
||||
await comfyPage.canvas.dblclick({
|
||||
position: {
|
||||
x: nodePos.x + nodeSize.width / 2,
|
||||
y: nodePos.y + nodeSize.height / 2
|
||||
},
|
||||
delay: 5
|
||||
})
|
||||
|
||||
// Wait for breadcrumb
|
||||
await comfyPage.page.waitForSelector('.subgraph-breadcrumb')
|
||||
|
||||
// Check that breadcrumb now shows the new title
|
||||
const updatedBreadcrumbText = await breadcrumb.textContent()
|
||||
expect(updatedBreadcrumbText).toContain(newTitle)
|
||||
expect(updatedBreadcrumbText).not.toBe(initialBreadcrumbText)
|
||||
})
|
||||
})
|
||||
|
Before Width: | Height: | Size: 81 KiB After Width: | Height: | Size: 81 KiB |
|
Before Width: | Height: | Size: 78 KiB After Width: | Height: | Size: 77 KiB |
|
Before Width: | Height: | Size: 190 KiB After Width: | Height: | Size: 190 KiB |
|
Before Width: | Height: | Size: 116 KiB After Width: | Height: | Size: 116 KiB |
|
Before Width: | Height: | Size: 238 KiB After Width: | Height: | Size: 238 KiB |
289
browser_tests/tests/useSettingSearch.spec.ts
Normal file
@@ -0,0 +1,289 @@
|
||||
import { expect } from '@playwright/test'
|
||||
|
||||
import { comfyPageFixture as test } from '../fixtures/ComfyPage'
|
||||
|
||||
test.describe('Settings Search functionality', () => {
|
||||
test.beforeEach(async ({ comfyPage }) => {
|
||||
// Register test settings to verify hidden/deprecated filtering
|
||||
await comfyPage.page.evaluate(() => {
|
||||
window['app'].registerExtension({
|
||||
name: 'TestSettingsExtension',
|
||||
settings: [
|
||||
{
|
||||
id: 'TestHiddenSetting',
|
||||
name: 'Test Hidden Setting',
|
||||
type: 'hidden',
|
||||
defaultValue: 'hidden_value',
|
||||
category: ['Test', 'Hidden']
|
||||
},
|
||||
{
|
||||
id: 'TestDeprecatedSetting',
|
||||
name: 'Test Deprecated Setting',
|
||||
type: 'text',
|
||||
defaultValue: 'deprecated_value',
|
||||
deprecated: true,
|
||||
category: ['Test', 'Deprecated']
|
||||
},
|
||||
{
|
||||
id: 'TestVisibleSetting',
|
||||
name: 'Test Visible Setting',
|
||||
type: 'text',
|
||||
defaultValue: 'visible_value',
|
||||
category: ['Test', 'Visible']
|
||||
}
|
||||
]
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
test('can open settings dialog and use search box', async ({ comfyPage }) => {
|
||||
// Open settings dialog
|
||||
await comfyPage.page.keyboard.press('Control+,')
|
||||
const settingsDialog = comfyPage.page.locator('.settings-container')
|
||||
await expect(settingsDialog).toBeVisible()
|
||||
|
||||
// Find the search box
|
||||
const searchBox = comfyPage.page.locator('.settings-search-box input')
|
||||
await expect(searchBox).toBeVisible()
|
||||
|
||||
// Verify search box has the correct placeholder
|
||||
await expect(searchBox).toHaveAttribute(
|
||||
'placeholder',
|
||||
expect.stringContaining('Search')
|
||||
)
|
||||
})
|
||||
|
||||
test('search box is functional and accepts input', async ({ comfyPage }) => {
|
||||
// Open settings dialog
|
||||
await comfyPage.page.keyboard.press('Control+,')
|
||||
const settingsDialog = comfyPage.page.locator('.settings-container')
|
||||
await expect(settingsDialog).toBeVisible()
|
||||
|
||||
// Find and interact with the search box
|
||||
const searchBox = comfyPage.page.locator('.settings-search-box input')
|
||||
await searchBox.fill('Comfy')
|
||||
|
||||
// Verify the input was accepted
|
||||
await expect(searchBox).toHaveValue('Comfy')
|
||||
})
|
||||
|
||||
test('search box clears properly', async ({ comfyPage }) => {
|
||||
// Open settings dialog
|
||||
await comfyPage.page.keyboard.press('Control+,')
|
||||
const settingsDialog = comfyPage.page.locator('.settings-container')
|
||||
await expect(settingsDialog).toBeVisible()
|
||||
|
||||
// Find and interact with the search box
|
||||
const searchBox = comfyPage.page.locator('.settings-search-box input')
|
||||
await searchBox.fill('test')
|
||||
await expect(searchBox).toHaveValue('test')
|
||||
|
||||
// Clear the search box
|
||||
await searchBox.clear()
|
||||
await expect(searchBox).toHaveValue('')
|
||||
})
|
||||
|
||||
test('settings categories are visible in sidebar', async ({ comfyPage }) => {
|
||||
// Open settings dialog
|
||||
await comfyPage.page.keyboard.press('Control+,')
|
||||
const settingsDialog = comfyPage.page.locator('.settings-container')
|
||||
await expect(settingsDialog).toBeVisible()
|
||||
|
||||
// Check that the sidebar has categories
|
||||
const categories = comfyPage.page.locator(
|
||||
'.settings-sidebar .p-listbox-option'
|
||||
)
|
||||
expect(await categories.count()).toBeGreaterThan(0)
|
||||
|
||||
// Check that at least one category is visible
|
||||
await expect(categories.first()).toBeVisible()
|
||||
})
|
||||
|
||||
test('can select different categories in sidebar', async ({ comfyPage }) => {
|
||||
// Open settings dialog
|
||||
await comfyPage.page.keyboard.press('Control+,')
|
||||
const settingsDialog = comfyPage.page.locator('.settings-container')
|
||||
await expect(settingsDialog).toBeVisible()
|
||||
|
||||
// Get categories and click on different ones
|
||||
const categories = comfyPage.page.locator(
|
||||
'.settings-sidebar .p-listbox-option'
|
||||
)
|
||||
const categoryCount = await categories.count()
|
||||
|
||||
if (categoryCount > 1) {
|
||||
// Click on the second category
|
||||
await categories.nth(1).click()
|
||||
|
||||
// Verify the category is selected
|
||||
await expect(categories.nth(1)).toHaveClass(/p-listbox-option-selected/)
|
||||
}
|
||||
})
|
||||
|
||||
test('settings content area is visible', async ({ comfyPage }) => {
|
||||
// Open settings dialog
|
||||
await comfyPage.page.keyboard.press('Control+,')
|
||||
const settingsDialog = comfyPage.page.locator('.settings-container')
|
||||
await expect(settingsDialog).toBeVisible()
|
||||
|
||||
// Check that the content area is visible
|
||||
const contentArea = comfyPage.page.locator('.settings-content')
|
||||
await expect(contentArea).toBeVisible()
|
||||
|
||||
// Check that tab panels are visible
|
||||
const tabPanels = comfyPage.page.locator('.settings-tab-panels')
|
||||
await expect(tabPanels).toBeVisible()
|
||||
})
|
||||
|
||||
test('search functionality affects UI state', async ({ comfyPage }) => {
|
||||
// Open settings dialog
|
||||
await comfyPage.page.keyboard.press('Control+,')
|
||||
const settingsDialog = comfyPage.page.locator('.settings-container')
|
||||
await expect(settingsDialog).toBeVisible()
|
||||
|
||||
// Find the search box
|
||||
const searchBox = comfyPage.page.locator('.settings-search-box input')
|
||||
|
||||
// Type in search box
|
||||
await searchBox.fill('graph')
|
||||
await comfyPage.page.waitForTimeout(200) // Wait for debounce
|
||||
|
||||
// Verify that the search input is handled
|
||||
await expect(searchBox).toHaveValue('graph')
|
||||
})
|
||||
|
||||
test('settings dialog can be closed', async ({ comfyPage }) => {
|
||||
// Open settings dialog
|
||||
await comfyPage.page.keyboard.press('Control+,')
|
||||
const settingsDialog = comfyPage.page.locator('.settings-container')
|
||||
await expect(settingsDialog).toBeVisible()
|
||||
|
||||
// Close with escape key
|
||||
await comfyPage.page.keyboard.press('Escape')
|
||||
|
||||
// Verify dialog is closed
|
||||
await expect(settingsDialog).not.toBeVisible()
|
||||
})
|
||||
|
||||
test('search box has proper debouncing behavior', async ({ comfyPage }) => {
|
||||
// Open settings dialog
|
||||
await comfyPage.page.keyboard.press('Control+,')
|
||||
const settingsDialog = comfyPage.page.locator('.settings-container')
|
||||
await expect(settingsDialog).toBeVisible()
|
||||
|
||||
// Type rapidly in search box
|
||||
const searchBox = comfyPage.page.locator('.settings-search-box input')
|
||||
await searchBox.fill('a')
|
||||
await searchBox.fill('ab')
|
||||
await searchBox.fill('abc')
|
||||
await searchBox.fill('abcd')
|
||||
|
||||
// Wait for debounce
|
||||
await comfyPage.page.waitForTimeout(200)
|
||||
|
||||
// Verify final value
|
||||
await expect(searchBox).toHaveValue('abcd')
|
||||
})
|
||||
|
||||
test('search excludes hidden settings from results', async ({
|
||||
comfyPage
|
||||
}) => {
|
||||
// Open settings dialog
|
||||
await comfyPage.page.keyboard.press('Control+,')
|
||||
const settingsDialog = comfyPage.page.locator('.settings-container')
|
||||
await expect(settingsDialog).toBeVisible()
|
||||
|
||||
// Search for our test settings
|
||||
const searchBox = comfyPage.page.locator('.settings-search-box input')
|
||||
await searchBox.fill('Test')
|
||||
await comfyPage.page.waitForTimeout(300) // Wait for debounce
|
||||
|
||||
// Get all settings content
|
||||
const settingsContent = comfyPage.page.locator('.settings-tab-panels')
|
||||
|
||||
// Should show visible setting but not hidden setting
|
||||
await expect(settingsContent).toContainText('Test Visible Setting')
|
||||
await expect(settingsContent).not.toContainText('Test Hidden Setting')
|
||||
})
|
||||
|
||||
test('search excludes deprecated settings from results', async ({
|
||||
comfyPage
|
||||
}) => {
|
||||
// Open settings dialog
|
||||
await comfyPage.page.keyboard.press('Control+,')
|
||||
const settingsDialog = comfyPage.page.locator('.settings-container')
|
||||
await expect(settingsDialog).toBeVisible()
|
||||
|
||||
// Search for our test settings
|
||||
const searchBox = comfyPage.page.locator('.settings-search-box input')
|
||||
await searchBox.fill('Test')
|
||||
await comfyPage.page.waitForTimeout(300) // Wait for debounce
|
||||
|
||||
// Get all settings content
|
||||
const settingsContent = comfyPage.page.locator('.settings-tab-panels')
|
||||
|
||||
// Should show visible setting but not deprecated setting
|
||||
await expect(settingsContent).toContainText('Test Visible Setting')
|
||||
await expect(settingsContent).not.toContainText('Test Deprecated Setting')
|
||||
})
|
||||
|
||||
test('search shows visible settings but excludes hidden and deprecated', async ({
|
||||
comfyPage
|
||||
}) => {
|
||||
// Open settings dialog
|
||||
await comfyPage.page.keyboard.press('Control+,')
|
||||
const settingsDialog = comfyPage.page.locator('.settings-container')
|
||||
await expect(settingsDialog).toBeVisible()
|
||||
|
||||
// Search for our test settings
|
||||
const searchBox = comfyPage.page.locator('.settings-search-box input')
|
||||
await searchBox.fill('Test')
|
||||
await comfyPage.page.waitForTimeout(300) // Wait for debounce
|
||||
|
||||
// Get all settings content
|
||||
const settingsContent = comfyPage.page.locator('.settings-tab-panels')
|
||||
|
||||
// Should only show the visible setting
|
||||
await expect(settingsContent).toContainText('Test Visible Setting')
|
||||
|
||||
// Should not show hidden or deprecated settings
|
||||
await expect(settingsContent).not.toContainText('Test Hidden Setting')
|
||||
await expect(settingsContent).not.toContainText('Test Deprecated Setting')
|
||||
})
|
||||
|
||||
test('search by setting name excludes hidden and deprecated', async ({
|
||||
comfyPage
|
||||
}) => {
|
||||
// Open settings dialog
|
||||
await comfyPage.page.keyboard.press('Control+,')
|
||||
const settingsDialog = comfyPage.page.locator('.settings-container')
|
||||
await expect(settingsDialog).toBeVisible()
|
||||
|
||||
const searchBox = comfyPage.page.locator('.settings-search-box input')
|
||||
const settingsContent = comfyPage.page.locator('.settings-tab-panels')
|
||||
|
||||
// Search specifically for hidden setting by name
|
||||
await searchBox.clear()
|
||||
await searchBox.fill('Hidden')
|
||||
await comfyPage.page.waitForTimeout(300)
|
||||
|
||||
// Should not show the hidden setting even when searching by name
|
||||
await expect(settingsContent).not.toContainText('Test Hidden Setting')
|
||||
|
||||
// Search specifically for deprecated setting by name
|
||||
await searchBox.clear()
|
||||
await searchBox.fill('Deprecated')
|
||||
await comfyPage.page.waitForTimeout(300)
|
||||
|
||||
// Should not show the deprecated setting even when searching by name
|
||||
await expect(settingsContent).not.toContainText('Test Deprecated Setting')
|
||||
|
||||
// Search for visible setting by name - should work
|
||||
await searchBox.clear()
|
||||
await searchBox.fill('Visible')
|
||||
await comfyPage.page.waitForTimeout(300)
|
||||
|
||||
// Should show the visible setting
|
||||
await expect(settingsContent).toContainText('Test Visible Setting')
|
||||
})
|
||||
})
|
||||
12
package-lock.json
generated
@@ -1,18 +1,18 @@
|
||||
{
|
||||
"name": "@comfyorg/comfyui-frontend",
|
||||
"version": "1.24.0-1",
|
||||
"version": "1.24.1",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "@comfyorg/comfyui-frontend",
|
||||
"version": "1.24.0-1",
|
||||
"version": "1.24.1",
|
||||
"license": "GPL-3.0-only",
|
||||
"dependencies": {
|
||||
"@alloc/quick-lru": "^5.2.0",
|
||||
"@atlaskit/pragmatic-drag-and-drop": "^1.3.1",
|
||||
"@comfyorg/comfyui-electron-types": "^0.4.43",
|
||||
"@comfyorg/litegraph": "^0.16.4",
|
||||
"@comfyorg/litegraph": "^0.16.9",
|
||||
"@primevue/forms": "^4.2.5",
|
||||
"@primevue/themes": "^4.2.5",
|
||||
"@sentry/vue": "^8.48.0",
|
||||
@@ -949,9 +949,9 @@
|
||||
"license": "GPL-3.0-only"
|
||||
},
|
||||
"node_modules/@comfyorg/litegraph": {
|
||||
"version": "0.16.4",
|
||||
"resolved": "https://registry.npmjs.org/@comfyorg/litegraph/-/litegraph-0.16.4.tgz",
|
||||
"integrity": "sha512-g4zhMxiCoE/WMap65fMNncQoTzn8ebMojjdv/b3Pc5RYmPQrwA32SDAvT/ndpkUonkOYmT9DgVmTxQv7LDQ6tA==",
|
||||
"version": "0.16.9",
|
||||
"resolved": "https://registry.npmjs.org/@comfyorg/litegraph/-/litegraph-0.16.9.tgz",
|
||||
"integrity": "sha512-ZsvqkLqdG65e2UyM8oTOUTv/7VFEyGbG/C9dCZnhxdNq30UaE+F0iLaKq/17u6w4yewyZuqIn5MoOtjpxPqLDQ==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@cspotcode/source-map-support": {
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"name": "@comfyorg/comfyui-frontend",
|
||||
"private": true,
|
||||
"version": "1.24.0-1",
|
||||
"version": "1.24.1",
|
||||
"type": "module",
|
||||
"repository": "https://github.com/Comfy-Org/ComfyUI_frontend",
|
||||
"homepage": "https://comfy.org",
|
||||
@@ -77,7 +77,7 @@
|
||||
"@alloc/quick-lru": "^5.2.0",
|
||||
"@atlaskit/pragmatic-drag-and-drop": "^1.3.1",
|
||||
"@comfyorg/comfyui-electron-types": "^0.4.43",
|
||||
"@comfyorg/litegraph": "^0.16.4",
|
||||
"@comfyorg/litegraph": "^0.16.9",
|
||||
"@primevue/forms": "^4.2.5",
|
||||
"@primevue/themes": "^4.2.5",
|
||||
"@sentry/vue": "^8.48.0",
|
||||
|
||||
@@ -17,7 +17,15 @@ const IGNORE_PATTERNS = [
|
||||
/^templateWorkflows\./, // Template workflows are loaded dynamically
|
||||
/^dataTypes\./, // Data types might be referenced dynamically
|
||||
/^contextMenu\./, // Context menu items might be dynamic
|
||||
/^color\./ // Color names might be used dynamically
|
||||
/^color\./, // Color names might be used dynamically
|
||||
// Auto-generated categories from collect-i18n-general.ts
|
||||
/^menuLabels\./, // Menu labels generated from command labels
|
||||
/^settingsCategories\./, // Settings categories generated from setting definitions
|
||||
/^serverConfigItems\./, // Server config items generated from SERVER_CONFIG_ITEMS
|
||||
/^serverConfigCategories\./, // Server config categories generated from config categories
|
||||
/^nodeCategories\./, // Node categories generated from node definitions
|
||||
// Setting option values that are dynamically generated
|
||||
/\.options\./ // All setting options are rendered dynamically
|
||||
]
|
||||
|
||||
// Get list of staged locale files
|
||||
@@ -97,17 +105,21 @@ function shouldIgnoreKey(key: string): boolean {
|
||||
|
||||
// Search for key usage in source files
|
||||
function isKeyUsed(key: string, sourceFiles: string[]): boolean {
|
||||
// Escape special regex characters
|
||||
const escapeRegex = (str: string) =>
|
||||
str.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')
|
||||
const escapedKey = escapeRegex(key)
|
||||
const lastPart = key.split('.').pop()
|
||||
const escapedLastPart = lastPart ? escapeRegex(lastPart) : ''
|
||||
|
||||
// Common patterns for i18n key usage
|
||||
const patterns = [
|
||||
// Direct usage: $t('key'), t('key'), i18n.t('key')
|
||||
new RegExp(`[t$]\\s*\\(\\s*['"\`]${key}['"\`]`, 'g'),
|
||||
new RegExp(`[t$]\\s*\\(\\s*['"\`]${escapedKey}['"\`]`, 'g'),
|
||||
// With namespace: $t('g.key'), t('namespace.key')
|
||||
new RegExp(
|
||||
`[t$]\\s*\\(\\s*['"\`][^'"]+\\.${key.split('.').pop()}['"\`]`,
|
||||
'g'
|
||||
),
|
||||
new RegExp(`[t$]\\s*\\(\\s*['"\`][^'"]+\\.${escapedLastPart}['"\`]`, 'g'),
|
||||
// Dynamic keys might reference parts of the key
|
||||
new RegExp(`['"\`]${key}['"\`]`, 'g')
|
||||
new RegExp(`['"\`]${escapedKey}['"\`]`, 'g')
|
||||
]
|
||||
|
||||
for (const file of sourceFiles) {
|
||||
@@ -154,7 +166,7 @@ async function checkNewUnusedKeys() {
|
||||
|
||||
// Report results
|
||||
if (unusedNewKeys.length > 0) {
|
||||
console.log('\n❌ Found unused NEW i18n keys:\n')
|
||||
console.log('\n⚠️ Warning: Found unused NEW i18n keys:\n')
|
||||
|
||||
for (const key of unusedNewKeys.sort()) {
|
||||
console.log(` - ${key}`)
|
||||
@@ -164,9 +176,10 @@ async function checkNewUnusedKeys() {
|
||||
console.log(
|
||||
'\nThese keys were added but are not used anywhere in the codebase.'
|
||||
)
|
||||
console.log('Please either use them or remove them before committing.')
|
||||
console.log('Consider using them or removing them in a future update.')
|
||||
|
||||
process.exit(1)
|
||||
// Changed from process.exit(1) to process.exit(0) for warning only
|
||||
process.exit(0)
|
||||
} else {
|
||||
// Silent success - no output needed
|
||||
}
|
||||
|
||||
@@ -41,7 +41,15 @@ const previousCanvasDraggable = ref(true)
|
||||
|
||||
const onEdit = (newValue: string) => {
|
||||
if (titleEditorStore.titleEditorTarget && newValue.trim() !== '') {
|
||||
titleEditorStore.titleEditorTarget.title = newValue.trim()
|
||||
const trimmedTitle = newValue.trim()
|
||||
titleEditorStore.titleEditorTarget.title = trimmedTitle
|
||||
|
||||
// If this is a subgraph node, sync the runtime subgraph name for breadcrumb reactivity
|
||||
const target = titleEditorStore.titleEditorTarget
|
||||
if (target instanceof LGraphNode && target.isSubgraphNode?.()) {
|
||||
target.subgraph.name = trimmedTitle
|
||||
}
|
||||
|
||||
app.graph.setDirtyCanvas(true, true)
|
||||
}
|
||||
showInput.value = false
|
||||
|
||||
@@ -54,7 +54,7 @@
|
||||
</Teleport>
|
||||
|
||||
<!-- What's New Section -->
|
||||
<section class="whats-new-section">
|
||||
<section v-if="showVersionUpdates" class="whats-new-section">
|
||||
<h3 class="section-description">{{ $t('helpCenter.whatsNew') }}</h3>
|
||||
|
||||
<!-- Release Items -->
|
||||
@@ -126,6 +126,7 @@ import { useI18n } from 'vue-i18n'
|
||||
import { type ReleaseNote } from '@/services/releaseService'
|
||||
import { useCommandStore } from '@/stores/commandStore'
|
||||
import { useReleaseStore } from '@/stores/releaseStore'
|
||||
import { useSettingStore } from '@/stores/settingStore'
|
||||
import { electronAPI, isElectron } from '@/utils/envUtil'
|
||||
import { formatVersionAnchor } from '@/utils/formatUtil'
|
||||
|
||||
@@ -161,13 +162,14 @@ const TIME_UNITS = {
|
||||
const SUBMENU_CONFIG = {
|
||||
DELAY_MS: 100,
|
||||
OFFSET_PX: 8,
|
||||
Z_INDEX: 1002
|
||||
Z_INDEX: 10001
|
||||
} as const
|
||||
|
||||
// Composables
|
||||
const { t, locale } = useI18n()
|
||||
const releaseStore = useReleaseStore()
|
||||
const commandStore = useCommandStore()
|
||||
const settingStore = useSettingStore()
|
||||
|
||||
// Emits
|
||||
const emit = defineEmits<{
|
||||
@@ -182,6 +184,9 @@ let hoverTimeout: number | null = null
|
||||
|
||||
// Computed
|
||||
const hasReleases = computed(() => releaseStore.releases.length > 0)
|
||||
const showVersionUpdates = computed(() =>
|
||||
settingStore.get('Comfy.Notification.ShowVersionUpdates')
|
||||
)
|
||||
|
||||
const moreMenuItem = computed(() =>
|
||||
menuItems.value.find((item) => item.key === 'more')
|
||||
|
||||
@@ -32,28 +32,32 @@
|
||||
|
||||
<div class="whats-new-popup" @click.stop>
|
||||
<!-- Close Button -->
|
||||
<button class="close-button" aria-label="Close" @click="closePopup">
|
||||
<button
|
||||
class="close-button"
|
||||
:aria-label="$t('g.close')"
|
||||
@click="closePopup"
|
||||
>
|
||||
<div class="close-icon"></div>
|
||||
</button>
|
||||
|
||||
<!-- Release Content -->
|
||||
<div class="popup-content">
|
||||
<div class="content-text" v-html="formattedContent"></div>
|
||||
</div>
|
||||
|
||||
<!-- Actions Section -->
|
||||
<div class="popup-actions">
|
||||
<a
|
||||
class="learn-more-link"
|
||||
:href="changelogUrl"
|
||||
target="_blank"
|
||||
rel="noopener,noreferrer"
|
||||
@click="closePopup"
|
||||
>
|
||||
{{ $t('whatsNewPopup.learnMore') }}
|
||||
</a>
|
||||
<!-- TODO: CTA button -->
|
||||
<!-- <button class="cta-button" @click="handleCTA">CTA</button> -->
|
||||
<!-- Actions Section -->
|
||||
<div class="popup-actions">
|
||||
<a
|
||||
class="learn-more-link"
|
||||
:href="changelogUrl"
|
||||
target="_blank"
|
||||
rel="noopener,noreferrer"
|
||||
@click="closePopup"
|
||||
>
|
||||
{{ $t('whatsNewPopup.learnMore') }}
|
||||
</a>
|
||||
<!-- TODO: CTA button -->
|
||||
<!-- <button class="cta-button" @click="handleCTA">CTA</button> -->
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -68,7 +72,7 @@ import type { ReleaseNote } from '@/services/releaseService'
|
||||
import { useReleaseStore } from '@/stores/releaseStore'
|
||||
import { formatVersionAnchor } from '@/utils/formatUtil'
|
||||
|
||||
const { locale } = useI18n()
|
||||
const { locale, t } = useI18n()
|
||||
const releaseStore = useReleaseStore()
|
||||
|
||||
// Local state for dismissed status
|
||||
@@ -101,13 +105,12 @@ const changelogUrl = computed(() => {
|
||||
// Format release content for display using marked
|
||||
const formattedContent = computed(() => {
|
||||
if (!latestRelease.value?.content) {
|
||||
return '<p>No release notes available.</p>'
|
||||
return `<p>${t('whatsNewPopup.noReleaseNotes')}</p>`
|
||||
}
|
||||
|
||||
try {
|
||||
// Use marked to parse markdown to HTML
|
||||
return marked(latestRelease.value.content, {
|
||||
breaks: true, // Convert line breaks to <br>
|
||||
gfm: true // Enable GitHub Flavored Markdown
|
||||
})
|
||||
} catch (error) {
|
||||
@@ -199,14 +202,10 @@ defineExpose({
|
||||
}
|
||||
|
||||
.whats-new-popup {
|
||||
padding: 32px 32px 24px;
|
||||
background: #353535;
|
||||
border-radius: 12px;
|
||||
max-width: 400px;
|
||||
width: 400px;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 32px;
|
||||
outline: 1px solid #4e4e4e;
|
||||
outline-offset: -1px;
|
||||
box-shadow: 0px 8px 32px rgba(0, 0, 0, 0.3);
|
||||
@@ -217,6 +216,11 @@ defineExpose({
|
||||
.popup-content {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 24px;
|
||||
max-height: 80vh;
|
||||
overflow-y: auto;
|
||||
padding: 32px 32px 24px;
|
||||
border-radius: 12px;
|
||||
}
|
||||
|
||||
/* Close button */
|
||||
@@ -224,17 +228,17 @@ defineExpose({
|
||||
position: absolute;
|
||||
top: 0;
|
||||
right: 0;
|
||||
width: 31px;
|
||||
height: 31px;
|
||||
padding: 6px 7px;
|
||||
width: 32px;
|
||||
height: 32px;
|
||||
padding: 6px;
|
||||
background: #7c7c7c;
|
||||
border-radius: 15.5px;
|
||||
border-radius: 16px;
|
||||
border: none;
|
||||
cursor: pointer;
|
||||
display: flex;
|
||||
justify-content: center;
|
||||
align-items: center;
|
||||
transform: translate(50%, -50%);
|
||||
transform: translate(30%, -30%);
|
||||
transition:
|
||||
background-color 0.2s ease,
|
||||
transform 0.1s ease;
|
||||
@@ -247,7 +251,7 @@ defineExpose({
|
||||
|
||||
.close-button:active {
|
||||
background: #6a6a6a;
|
||||
transform: translate(50%, -50%) scale(0.95);
|
||||
transform: translate(30%, -30%) scale(0.95);
|
||||
}
|
||||
|
||||
.close-icon {
|
||||
@@ -288,73 +292,45 @@ defineExpose({
|
||||
.content-text {
|
||||
color: white;
|
||||
font-size: 14px;
|
||||
font-family: 'Inter', sans-serif;
|
||||
font-weight: 400;
|
||||
line-height: 1.5;
|
||||
word-wrap: break-word;
|
||||
}
|
||||
|
||||
/* Style the markdown content */
|
||||
/* Title */
|
||||
.content-text :deep(*) {
|
||||
box-sizing: border-box;
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
.content-text :deep(h1) {
|
||||
color: white;
|
||||
font-size: 20px;
|
||||
font-weight: 700;
|
||||
margin: 0 0 16px 0;
|
||||
line-height: 1.3;
|
||||
}
|
||||
|
||||
.content-text :deep(h2) {
|
||||
color: white;
|
||||
font-size: 18px;
|
||||
font-weight: 600;
|
||||
margin: 16px 0 12px 0;
|
||||
line-height: 1.3;
|
||||
}
|
||||
|
||||
.content-text :deep(h2:first-child) {
|
||||
margin-top: 0;
|
||||
}
|
||||
|
||||
.content-text :deep(h3) {
|
||||
color: white;
|
||||
font-size: 16px;
|
||||
font-weight: 600;
|
||||
margin: 12px 0 8px 0;
|
||||
line-height: 1.3;
|
||||
font-weight: 700;
|
||||
margin-bottom: 8px;
|
||||
}
|
||||
|
||||
.content-text :deep(h3:first-child) {
|
||||
margin-top: 0;
|
||||
}
|
||||
|
||||
.content-text :deep(h4) {
|
||||
color: white;
|
||||
font-size: 14px;
|
||||
font-weight: 600;
|
||||
margin: 8px 0 6px 0;
|
||||
}
|
||||
|
||||
.content-text :deep(h4:first-child) {
|
||||
margin-top: 0;
|
||||
/* Version subtitle - targets the first p tag after h1 */
|
||||
.content-text :deep(h1 + p) {
|
||||
color: #c0c0c0;
|
||||
font-size: 16px;
|
||||
font-weight: 500;
|
||||
margin-bottom: 16px;
|
||||
opacity: 0.8;
|
||||
}
|
||||
|
||||
/* Regular paragraphs - short description */
|
||||
.content-text :deep(p) {
|
||||
margin: 0 0 12px 0;
|
||||
line-height: 1.6;
|
||||
}
|
||||
|
||||
.content-text :deep(p:first-child) {
|
||||
margin-top: 0;
|
||||
}
|
||||
|
||||
.content-text :deep(p:last-child) {
|
||||
margin-bottom: 0;
|
||||
margin-bottom: 16px;
|
||||
color: #e0e0e0;
|
||||
}
|
||||
|
||||
/* List */
|
||||
.content-text :deep(ul),
|
||||
.content-text :deep(ol) {
|
||||
margin: 0 0 12px 0;
|
||||
padding-left: 24px;
|
||||
margin-bottom: 16px;
|
||||
padding-left: 0;
|
||||
list-style: none;
|
||||
}
|
||||
|
||||
.content-text :deep(ul:first-child),
|
||||
@@ -367,12 +343,63 @@ defineExpose({
|
||||
margin-bottom: 0;
|
||||
}
|
||||
|
||||
/* List items */
|
||||
.content-text :deep(li) {
|
||||
margin-bottom: 8px;
|
||||
position: relative;
|
||||
padding-left: 20px;
|
||||
}
|
||||
|
||||
.content-text :deep(li:last-child) {
|
||||
margin-bottom: 0;
|
||||
}
|
||||
|
||||
/* Custom bullet points */
|
||||
.content-text :deep(li::before) {
|
||||
content: '';
|
||||
position: absolute;
|
||||
left: 0;
|
||||
top: 10px;
|
||||
display: flex;
|
||||
width: 8px;
|
||||
height: 8px;
|
||||
justify-content: center;
|
||||
align-items: center;
|
||||
aspect-ratio: 1/1;
|
||||
border-radius: 100px;
|
||||
background: #60a5fa;
|
||||
}
|
||||
|
||||
/* List item strong text */
|
||||
.content-text :deep(li strong) {
|
||||
color: #fff;
|
||||
font-size: 14px;
|
||||
display: block;
|
||||
margin-bottom: 4px;
|
||||
}
|
||||
|
||||
.content-text :deep(li p) {
|
||||
font-size: 12px;
|
||||
margin-bottom: 0;
|
||||
line-height: 2;
|
||||
}
|
||||
|
||||
/* Code styling */
|
||||
.content-text :deep(code) {
|
||||
background-color: #2a2a2a;
|
||||
border: 1px solid #4a4a4a;
|
||||
border-radius: 4px;
|
||||
padding: 2px 6px;
|
||||
color: #f8f8f2;
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
/* Remove top margin for first media element */
|
||||
.content-text :deep(img:first-child),
|
||||
.content-text :deep(video:first-child),
|
||||
.content-text :deep(iframe:first-child) {
|
||||
margin-top: -32px; /* Align with the top edge of the popup content */
|
||||
margin-bottom: 12px;
|
||||
margin-bottom: 24px;
|
||||
}
|
||||
|
||||
/* Media elements */
|
||||
@@ -381,8 +408,7 @@ defineExpose({
|
||||
.content-text :deep(iframe) {
|
||||
width: calc(100% + 64px);
|
||||
height: auto;
|
||||
border-radius: 6px;
|
||||
margin: 12px -32px;
|
||||
margin: 24px -32px;
|
||||
display: block;
|
||||
}
|
||||
|
||||
@@ -397,7 +423,6 @@ defineExpose({
|
||||
.learn-more-link {
|
||||
color: #60a5fa;
|
||||
font-size: 14px;
|
||||
font-family: 'Inter', sans-serif;
|
||||
font-weight: 500;
|
||||
line-height: 18.2px;
|
||||
text-decoration: none;
|
||||
@@ -417,7 +442,6 @@ defineExpose({
|
||||
border: none;
|
||||
color: #121212;
|
||||
font-size: 14px;
|
||||
font-family: 'Inter', sans-serif;
|
||||
font-weight: 500;
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
@@ -6,7 +6,8 @@
|
||||
:key="tab.id"
|
||||
:icon="tab.icon"
|
||||
:icon-badge="tab.iconBadge"
|
||||
:tooltip="tab.tooltip + getTabTooltipSuffix(tab)"
|
||||
:tooltip="tab.tooltip"
|
||||
:tooltip-suffix="getTabTooltipSuffix(tab)"
|
||||
:selected="tab.id === selectedTab?.id"
|
||||
:class="tab.id + '-tab-button'"
|
||||
@click="onTabClick(tab)"
|
||||
|
||||
@@ -46,7 +46,7 @@
|
||||
</Teleport>
|
||||
|
||||
<!-- Backdrop to close popup when clicking outside -->
|
||||
<Teleport to="#graph-canvas-container">
|
||||
<Teleport to="body">
|
||||
<div
|
||||
v-if="isHelpCenterVisible"
|
||||
class="help-center-backdrop"
|
||||
@@ -101,14 +101,14 @@ onMounted(async () => {
|
||||
left: 0;
|
||||
right: 0;
|
||||
bottom: 0;
|
||||
z-index: 999;
|
||||
z-index: 9999;
|
||||
background: transparent;
|
||||
}
|
||||
|
||||
.help-center-popup {
|
||||
position: absolute;
|
||||
bottom: 1rem;
|
||||
z-index: 1000;
|
||||
z-index: 10000;
|
||||
animation: slideInUp 0.2s ease-out;
|
||||
pointer-events: auto;
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import PrimeVue from 'primevue/config'
|
||||
import OverlayBadge from 'primevue/overlaybadge'
|
||||
import Tooltip from 'primevue/tooltip'
|
||||
import { describe, expect, it } from 'vitest'
|
||||
import { createI18n } from 'vue-i18n'
|
||||
|
||||
import SidebarIcon from './SidebarIcon.vue'
|
||||
|
||||
@@ -15,6 +16,14 @@ type SidebarIconProps = {
|
||||
iconBadge?: string | (() => string | null)
|
||||
}
|
||||
|
||||
const i18n = createI18n({
|
||||
legacy: false,
|
||||
locale: 'en',
|
||||
messages: {
|
||||
en: {}
|
||||
}
|
||||
})
|
||||
|
||||
describe('SidebarIcon', () => {
|
||||
const exampleProps: SidebarIconProps = {
|
||||
icon: 'pi pi-cog',
|
||||
@@ -24,7 +33,7 @@ describe('SidebarIcon', () => {
|
||||
const mountSidebarIcon = (props: Partial<SidebarIconProps>, options = {}) => {
|
||||
return mount(SidebarIcon, {
|
||||
global: {
|
||||
plugins: [PrimeVue],
|
||||
plugins: [PrimeVue, i18n],
|
||||
directives: { tooltip: Tooltip },
|
||||
components: { OverlayBadge, Button }
|
||||
},
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
<template>
|
||||
<Button
|
||||
v-tooltip="{ value: tooltip, showDelay: 300, hideDelay: 300 }"
|
||||
v-tooltip="{
|
||||
value: computedTooltip,
|
||||
showDelay: 300,
|
||||
hideDelay: 300
|
||||
}"
|
||||
text
|
||||
:pt="{
|
||||
root: {
|
||||
@@ -9,7 +13,7 @@
|
||||
? 'p-button-primary side-bar-button-selected'
|
||||
: 'p-button-secondary'
|
||||
}`,
|
||||
'aria-label': tooltip
|
||||
'aria-label': computedTooltip
|
||||
}
|
||||
}"
|
||||
@click="emit('click', $event)"
|
||||
@@ -27,16 +31,20 @@
|
||||
import Button from 'primevue/button'
|
||||
import OverlayBadge from 'primevue/overlaybadge'
|
||||
import { computed } from 'vue'
|
||||
import { useI18n } from 'vue-i18n'
|
||||
|
||||
const { t } = useI18n()
|
||||
const {
|
||||
icon = '',
|
||||
selected = false,
|
||||
tooltip = '',
|
||||
tooltipSuffix = '',
|
||||
iconBadge = ''
|
||||
} = defineProps<{
|
||||
icon?: string
|
||||
selected?: boolean
|
||||
tooltip?: string
|
||||
tooltipSuffix?: string
|
||||
iconBadge?: string | (() => string | null)
|
||||
}>()
|
||||
|
||||
@@ -47,6 +55,7 @@ const overlayValue = computed(() =>
|
||||
typeof iconBadge === 'function' ? iconBadge() ?? '' : iconBadge
|
||||
)
|
||||
const shouldShowBadge = computed(() => !!overlayValue.value)
|
||||
const computedTooltip = computed(() => t(tooltip) + tooltipSuffix)
|
||||
</script>
|
||||
|
||||
<style>
|
||||
|
||||
@@ -159,7 +159,7 @@ const toggleExpanded = () => {
|
||||
|
||||
const removeTask = async (task: TaskItemImpl) => {
|
||||
if (task.isRunning) {
|
||||
await api.interrupt()
|
||||
await api.interrupt(task.promptId)
|
||||
}
|
||||
await queueStore.delete(task)
|
||||
}
|
||||
|
||||
@@ -151,7 +151,7 @@ const apiNodeCosts: Record<string, { displayPrice: string | PricingFunction }> =
|
||||
|
||||
const renderingSpeed = String(renderingSpeedWidget.value)
|
||||
if (renderingSpeed.toLowerCase().includes('quality')) {
|
||||
basePrice = 0.08
|
||||
basePrice = 0.09
|
||||
} else if (renderingSpeed.toLowerCase().includes('balanced')) {
|
||||
basePrice = 0.06
|
||||
} else if (renderingSpeed.toLowerCase().includes('turbo')) {
|
||||
@@ -275,30 +275,33 @@ const apiNodeCosts: Record<string, { displayPrice: string | PricingFunction }> =
|
||||
const modelWidget = node.widgets?.find(
|
||||
(w) => w.name === 'model_name'
|
||||
) as IComboWidget
|
||||
const nWidget = node.widgets?.find(
|
||||
(w) => w.name === 'n'
|
||||
) as IComboWidget
|
||||
|
||||
if (!modelWidget)
|
||||
return '$0.0035-0.028/Run (varies with modality & model)'
|
||||
return '$0.0035-0.028 x n/Run (varies with modality & model)'
|
||||
|
||||
const model = String(modelWidget.value)
|
||||
const n = Number(nWidget?.value) || 1
|
||||
let basePrice = 0.014 // default
|
||||
|
||||
if (modality.includes('text to image')) {
|
||||
if (model.includes('kling-v1')) {
|
||||
return '$0.0035/Run'
|
||||
} else if (
|
||||
model.includes('kling-v1-5') ||
|
||||
model.includes('kling-v2')
|
||||
) {
|
||||
return '$0.014/Run'
|
||||
if (model.includes('kling-v1-5') || model.includes('kling-v2')) {
|
||||
basePrice = 0.014
|
||||
} else if (model.includes('kling-v1')) {
|
||||
basePrice = 0.0035
|
||||
}
|
||||
} else if (modality.includes('image to image')) {
|
||||
if (model.includes('kling-v1')) {
|
||||
return '$0.0035/Run'
|
||||
} else if (model.includes('kling-v1-5')) {
|
||||
return '$0.028/Run'
|
||||
if (model.includes('kling-v1-5')) {
|
||||
basePrice = 0.028
|
||||
} else if (model.includes('kling-v1')) {
|
||||
basePrice = 0.0035
|
||||
}
|
||||
}
|
||||
|
||||
return '$0.014/Run'
|
||||
const totalCost = (basePrice * n).toFixed(4)
|
||||
return `$${totalCost}/Run`
|
||||
}
|
||||
},
|
||||
KlingLipSyncAudioToVideoNode: {
|
||||
@@ -319,15 +322,15 @@ const apiNodeCosts: Record<string, { displayPrice: string | PricingFunction }> =
|
||||
const effectScene = String(effectSceneWidget.value)
|
||||
if (
|
||||
effectScene.includes('fuzzyfuzzy') ||
|
||||
effectScene.includes('squish') ||
|
||||
effectScene.includes('expansion')
|
||||
effectScene.includes('squish')
|
||||
) {
|
||||
return '$0.28/Run'
|
||||
} else if (
|
||||
effectScene.includes('dizzydizzy') ||
|
||||
effectScene.includes('bloombloom')
|
||||
) {
|
||||
} else if (effectScene.includes('dizzydizzy')) {
|
||||
return '$0.49/Run'
|
||||
} else if (effectScene.includes('bloombloom')) {
|
||||
return '$0.49/Run'
|
||||
} else if (effectScene.includes('expansion')) {
|
||||
return '$0.28/Run'
|
||||
}
|
||||
|
||||
return '$0.28/Run'
|
||||
@@ -445,12 +448,12 @@ const apiNodeCosts: Record<string, { displayPrice: string | PricingFunction }> =
|
||||
} else if (model.includes('ray-2')) {
|
||||
if (duration.includes('5s')) {
|
||||
if (resolution.includes('4k')) return '$6.37/Run'
|
||||
if (resolution.includes('1080p')) return '$2.30/Run'
|
||||
if (resolution.includes('1080p')) return '$1.59/Run'
|
||||
if (resolution.includes('720p')) return '$0.71/Run'
|
||||
if (resolution.includes('540p')) return '$0.40/Run'
|
||||
} else if (duration.includes('9s')) {
|
||||
if (resolution.includes('4k')) return '$11.47/Run'
|
||||
if (resolution.includes('1080p')) return '$4.14/Run'
|
||||
if (resolution.includes('1080p')) return '$2.87/Run'
|
||||
if (resolution.includes('720p')) return '$1.28/Run'
|
||||
if (resolution.includes('540p')) return '$0.72/Run'
|
||||
}
|
||||
@@ -496,12 +499,12 @@ const apiNodeCosts: Record<string, { displayPrice: string | PricingFunction }> =
|
||||
} else if (model.includes('ray-2')) {
|
||||
if (duration.includes('5s')) {
|
||||
if (resolution.includes('4k')) return '$6.37/Run'
|
||||
if (resolution.includes('1080p')) return '$2.30/Run'
|
||||
if (resolution.includes('1080p')) return '$1.59/Run'
|
||||
if (resolution.includes('720p')) return '$0.71/Run'
|
||||
if (resolution.includes('540p')) return '$0.40/Run'
|
||||
} else if (duration.includes('9s')) {
|
||||
if (resolution.includes('4k')) return '$11.47/Run'
|
||||
if (resolution.includes('1080p')) return '$4.14/Run'
|
||||
if (resolution.includes('1080p')) return '$2.87/Run'
|
||||
if (resolution.includes('720p')) return '$1.28/Run'
|
||||
if (resolution.includes('540p')) return '$0.72/Run'
|
||||
}
|
||||
@@ -523,19 +526,26 @@ const apiNodeCosts: Record<string, { displayPrice: string | PricingFunction }> =
|
||||
const sizeWidget = node.widgets?.find(
|
||||
(w) => w.name === 'size'
|
||||
) as IComboWidget
|
||||
const nWidget = node.widgets?.find(
|
||||
(w) => w.name === 'n'
|
||||
) as IComboWidget
|
||||
|
||||
if (!sizeWidget) return '$0.016-0.02/Run (varies with size)'
|
||||
if (!sizeWidget) return '$0.016-0.02 x n/Run (varies with size & n)'
|
||||
|
||||
const size = String(sizeWidget.value)
|
||||
const n = Number(nWidget?.value) || 1
|
||||
let basePrice = 0.02 // default
|
||||
|
||||
if (size.includes('1024x1024')) {
|
||||
return '$0.02/Run'
|
||||
basePrice = 0.02
|
||||
} else if (size.includes('512x512')) {
|
||||
return '$0.018/Run'
|
||||
basePrice = 0.018
|
||||
} else if (size.includes('256x256')) {
|
||||
return '$0.016/Run'
|
||||
basePrice = 0.016
|
||||
}
|
||||
|
||||
return '$0.02/Run'
|
||||
const totalCost = (basePrice * n).toFixed(3)
|
||||
return `$${totalCost}/Run`
|
||||
}
|
||||
},
|
||||
OpenAIDalle3: {
|
||||
@@ -570,19 +580,30 @@ const apiNodeCosts: Record<string, { displayPrice: string | PricingFunction }> =
|
||||
const qualityWidget = node.widgets?.find(
|
||||
(w) => w.name === 'quality'
|
||||
) as IComboWidget
|
||||
const nWidget = node.widgets?.find(
|
||||
(w) => w.name === 'n'
|
||||
) as IComboWidget
|
||||
|
||||
if (!qualityWidget) return '$0.011-0.30/Run (varies with quality)'
|
||||
if (!qualityWidget)
|
||||
return '$0.011-0.30 x n/Run (varies with quality & n)'
|
||||
|
||||
const quality = String(qualityWidget.value)
|
||||
const n = Number(nWidget?.value) || 1
|
||||
let basePriceRange = '$0.046-0.07' // default medium
|
||||
|
||||
if (quality.includes('high')) {
|
||||
return '$0.167-0.30/Run'
|
||||
basePriceRange = '$0.167-0.30'
|
||||
} else if (quality.includes('medium')) {
|
||||
return '$0.046-0.07/Run'
|
||||
basePriceRange = '$0.046-0.07'
|
||||
} else if (quality.includes('low')) {
|
||||
return '$0.011-0.02/Run'
|
||||
basePriceRange = '$0.011-0.02'
|
||||
}
|
||||
|
||||
return '$0.046-0.07/Run'
|
||||
if (n === 1) {
|
||||
return `${basePriceRange}/Run`
|
||||
} else {
|
||||
return `${basePriceRange} x ${n}/Run`
|
||||
}
|
||||
}
|
||||
},
|
||||
PikaImageToVideoNode2_2: {
|
||||
@@ -717,6 +738,42 @@ const apiNodeCosts: Record<string, { displayPrice: string | PricingFunction }> =
|
||||
RecraftCrispUpscaleNode: {
|
||||
displayPrice: '$0.004/Run'
|
||||
},
|
||||
RecraftGenerateColorFromImageNode: {
|
||||
displayPrice: (node: LGraphNode): string => {
|
||||
const nWidget = node.widgets?.find(
|
||||
(w) => w.name === 'n'
|
||||
) as IComboWidget
|
||||
if (!nWidget) return '$0.04 x n/Run'
|
||||
|
||||
const n = Number(nWidget.value) || 1
|
||||
const cost = (0.04 * n).toFixed(2)
|
||||
return `$${cost}/Run`
|
||||
}
|
||||
},
|
||||
RecraftGenerateImageNode: {
|
||||
displayPrice: (node: LGraphNode): string => {
|
||||
const nWidget = node.widgets?.find(
|
||||
(w) => w.name === 'n'
|
||||
) as IComboWidget
|
||||
if (!nWidget) return '$0.04 x n/Run'
|
||||
|
||||
const n = Number(nWidget.value) || 1
|
||||
const cost = (0.04 * n).toFixed(2)
|
||||
return `$${cost}/Run`
|
||||
}
|
||||
},
|
||||
RecraftGenerateVectorImageNode: {
|
||||
displayPrice: (node: LGraphNode): string => {
|
||||
const nWidget = node.widgets?.find(
|
||||
(w) => w.name === 'n'
|
||||
) as IComboWidget
|
||||
if (!nWidget) return '$0.08 x n/Run'
|
||||
|
||||
const n = Number(nWidget.value) || 1
|
||||
const cost = (0.08 * n).toFixed(2)
|
||||
return `$${cost}/Run`
|
||||
}
|
||||
},
|
||||
RecraftImageInpaintingNode: {
|
||||
displayPrice: (node: LGraphNode): string => {
|
||||
const nWidget = node.widgets?.find(
|
||||
@@ -772,7 +829,16 @@ const apiNodeCosts: Record<string, { displayPrice: string | PricingFunction }> =
|
||||
}
|
||||
},
|
||||
RecraftVectorizeImageNode: {
|
||||
displayPrice: '$0.01/Run'
|
||||
displayPrice: (node: LGraphNode): string => {
|
||||
const nWidget = node.widgets?.find(
|
||||
(w) => w.name === 'n'
|
||||
) as IComboWidget
|
||||
if (!nWidget) return '$0.01 x n/Run'
|
||||
|
||||
const n = Number(nWidget.value) || 1
|
||||
const cost = (0.01 * n).toFixed(2)
|
||||
return `$${cost}/Run`
|
||||
}
|
||||
},
|
||||
StabilityStableImageSD_3_5Node: {
|
||||
displayPrice: (node: LGraphNode): string => {
|
||||
@@ -881,6 +947,63 @@ const apiNodeCosts: Record<string, { displayPrice: string | PricingFunction }> =
|
||||
|
||||
return '$0.0172/Run'
|
||||
}
|
||||
},
|
||||
MoonvalleyTxt2VideoNode: {
|
||||
displayPrice: (node: LGraphNode): string => {
|
||||
const lengthWidget = node.widgets?.find(
|
||||
(w) => w.name === 'length'
|
||||
) as IComboWidget
|
||||
|
||||
// If no length widget exists, default to 5s pricing
|
||||
if (!lengthWidget) return '$1.50/Run'
|
||||
|
||||
const length = String(lengthWidget.value)
|
||||
if (length === '5s') {
|
||||
return '$1.50/Run'
|
||||
} else if (length === '10s') {
|
||||
return '$3.00/Run'
|
||||
}
|
||||
|
||||
return '$1.50/Run'
|
||||
}
|
||||
},
|
||||
MoonvalleyImg2VideoNode: {
|
||||
displayPrice: (node: LGraphNode): string => {
|
||||
const lengthWidget = node.widgets?.find(
|
||||
(w) => w.name === 'length'
|
||||
) as IComboWidget
|
||||
|
||||
// If no length widget exists, default to 5s pricing
|
||||
if (!lengthWidget) return '$1.50/Run'
|
||||
|
||||
const length = String(lengthWidget.value)
|
||||
if (length === '5s') {
|
||||
return '$1.50/Run'
|
||||
} else if (length === '10s') {
|
||||
return '$3.00/Run'
|
||||
}
|
||||
|
||||
return '$1.50/Run'
|
||||
}
|
||||
},
|
||||
MoonvalleyVideo2VideoNode: {
|
||||
displayPrice: (node: LGraphNode): string => {
|
||||
const lengthWidget = node.widgets?.find(
|
||||
(w) => w.name === 'length'
|
||||
) as IComboWidget
|
||||
|
||||
// If no length widget exists, default to 5s pricing
|
||||
if (!lengthWidget) return '$2.25/Run'
|
||||
|
||||
const length = String(lengthWidget.value)
|
||||
if (length === '5s') {
|
||||
return '$2.25/Run'
|
||||
} else if (length === '10s') {
|
||||
return '$4.00/Run'
|
||||
}
|
||||
|
||||
return '$2.25/Run'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -915,13 +1038,13 @@ export const useNodePricing = () => {
|
||||
const widgetMap: Record<string, string[]> = {
|
||||
KlingTextToVideoNode: ['mode', 'model_name', 'duration'],
|
||||
KlingImage2VideoNode: ['mode', 'model_name', 'duration'],
|
||||
KlingImageGenerationNode: ['modality', 'model_name'],
|
||||
KlingImageGenerationNode: ['modality', 'model_name', 'n'],
|
||||
KlingDualCharacterVideoEffectNode: ['mode', 'model_name', 'duration'],
|
||||
KlingSingleImageVideoEffectNode: ['effect_scene'],
|
||||
KlingStartEndFrameNode: ['mode', 'model_name', 'duration'],
|
||||
OpenAIDalle3: ['size', 'quality'],
|
||||
OpenAIDalle2: ['size'],
|
||||
OpenAIGPTImage1: ['quality'],
|
||||
OpenAIDalle2: ['size', 'n'],
|
||||
OpenAIGPTImage1: ['quality', 'n'],
|
||||
IdeogramV1: ['num_images'],
|
||||
IdeogramV2: ['num_images'],
|
||||
IdeogramV3: ['rendering_speed', 'num_images'],
|
||||
@@ -945,7 +1068,14 @@ export const useNodePricing = () => {
|
||||
RecraftTextToImageNode: ['n'],
|
||||
RecraftImageToImageNode: ['n'],
|
||||
RecraftImageInpaintingNode: ['n'],
|
||||
RecraftTextToVectorNode: ['n']
|
||||
RecraftTextToVectorNode: ['n'],
|
||||
RecraftVectorizeImageNode: ['n'],
|
||||
RecraftGenerateColorFromImageNode: ['n'],
|
||||
RecraftGenerateImageNode: ['n'],
|
||||
RecraftGenerateVectorImageNode: ['n'],
|
||||
MoonvalleyTxt2VideoNode: ['length'],
|
||||
MoonvalleyImg2VideoNode: ['length'],
|
||||
MoonvalleyVideo2VideoNode: ['length']
|
||||
}
|
||||
return widgetMap[nodeType] || []
|
||||
}
|
||||
|
||||
@@ -53,6 +53,11 @@ export function useSettingSearch() {
|
||||
const queryLower = query.toLocaleLowerCase()
|
||||
const allSettings = Object.values(settingStore.settingsById)
|
||||
const filteredSettings = allSettings.filter((setting) => {
|
||||
// Filter out hidden and deprecated settings, just like in normal settings tree
|
||||
if (setting.type === 'hidden' || setting.deprecated) {
|
||||
return false
|
||||
}
|
||||
|
||||
const idLower = setting.id.toLowerCase()
|
||||
const nameLower = setting.name.toLowerCase()
|
||||
const translatedName = st(
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
import { markRaw } from 'vue'
|
||||
import { useI18n } from 'vue-i18n'
|
||||
|
||||
import ModelLibrarySidebarTab from '@/components/sidebar/tabs/ModelLibrarySidebarTab.vue'
|
||||
import { useElectronDownloadStore } from '@/stores/electronDownloadStore'
|
||||
@@ -7,13 +6,11 @@ import type { SidebarTabExtension } from '@/types/extensionTypes'
|
||||
import { isElectron } from '@/utils/envUtil'
|
||||
|
||||
export const useModelLibrarySidebarTab = (): SidebarTabExtension => {
|
||||
const { t } = useI18n()
|
||||
|
||||
return {
|
||||
id: 'model-library',
|
||||
icon: 'pi pi-box',
|
||||
title: t('sideToolbar.modelLibrary'),
|
||||
tooltip: t('sideToolbar.modelLibrary'),
|
||||
title: 'sideToolbar.modelLibrary',
|
||||
tooltip: 'sideToolbar.modelLibrary',
|
||||
component: markRaw(ModelLibrarySidebarTab),
|
||||
type: 'vue',
|
||||
iconBadge: () => {
|
||||
|
||||
@@ -1,16 +1,14 @@
|
||||
import { markRaw } from 'vue'
|
||||
import { useI18n } from 'vue-i18n'
|
||||
|
||||
import NodeLibrarySidebarTab from '@/components/sidebar/tabs/NodeLibrarySidebarTab.vue'
|
||||
import type { SidebarTabExtension } from '@/types/extensionTypes'
|
||||
|
||||
export const useNodeLibrarySidebarTab = (): SidebarTabExtension => {
|
||||
const { t } = useI18n()
|
||||
return {
|
||||
id: 'node-library',
|
||||
icon: 'pi pi-book',
|
||||
title: t('sideToolbar.nodeLibrary'),
|
||||
tooltip: t('sideToolbar.nodeLibrary'),
|
||||
title: 'sideToolbar.nodeLibrary',
|
||||
tooltip: 'sideToolbar.nodeLibrary',
|
||||
component: markRaw(NodeLibrarySidebarTab),
|
||||
type: 'vue'
|
||||
}
|
||||
|
||||
@@ -1,12 +1,10 @@
|
||||
import { markRaw } from 'vue'
|
||||
import { useI18n } from 'vue-i18n'
|
||||
|
||||
import QueueSidebarTab from '@/components/sidebar/tabs/QueueSidebarTab.vue'
|
||||
import { useQueuePendingTaskCountStore } from '@/stores/queueStore'
|
||||
import type { SidebarTabExtension } from '@/types/extensionTypes'
|
||||
|
||||
export const useQueueSidebarTab = (): SidebarTabExtension => {
|
||||
const { t } = useI18n()
|
||||
const queuePendingTaskCountStore = useQueuePendingTaskCountStore()
|
||||
return {
|
||||
id: 'queue',
|
||||
@@ -15,8 +13,8 @@ export const useQueueSidebarTab = (): SidebarTabExtension => {
|
||||
const value = queuePendingTaskCountStore.count.toString()
|
||||
return value === '0' ? null : value
|
||||
},
|
||||
title: t('sideToolbar.queue'),
|
||||
tooltip: t('sideToolbar.queue'),
|
||||
title: 'sideToolbar.queue',
|
||||
tooltip: 'sideToolbar.queue',
|
||||
component: markRaw(QueueSidebarTab),
|
||||
type: 'vue'
|
||||
}
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
import { markRaw } from 'vue'
|
||||
import { useI18n } from 'vue-i18n'
|
||||
|
||||
import WorkflowsSidebarTab from '@/components/sidebar/tabs/WorkflowsSidebarTab.vue'
|
||||
import { useSettingStore } from '@/stores/settingStore'
|
||||
@@ -7,10 +6,8 @@ import { useWorkflowStore } from '@/stores/workflowStore'
|
||||
import type { SidebarTabExtension } from '@/types/extensionTypes'
|
||||
|
||||
export const useWorkflowsSidebarTab = (): SidebarTabExtension => {
|
||||
const { t } = useI18n()
|
||||
const settingStore = useSettingStore()
|
||||
const workflowStore = useWorkflowStore()
|
||||
|
||||
return {
|
||||
id: 'workflows',
|
||||
icon: 'pi pi-folder-open',
|
||||
@@ -23,8 +20,8 @@ export const useWorkflowsSidebarTab = (): SidebarTabExtension => {
|
||||
const value = workflowStore.openWorkflows.length.toString()
|
||||
return value === '0' ? null : value
|
||||
},
|
||||
title: t('sideToolbar.workflows'),
|
||||
tooltip: t('sideToolbar.workflows'),
|
||||
title: 'sideToolbar.workflows',
|
||||
tooltip: 'sideToolbar.workflows',
|
||||
component: markRaw(WorkflowsSidebarTab),
|
||||
type: 'vue'
|
||||
}
|
||||
|
||||
@@ -19,6 +19,7 @@ import { useDialogService } from '@/services/dialogService'
|
||||
import { useLitegraphService } from '@/services/litegraphService'
|
||||
import { useWorkflowService } from '@/services/workflowService'
|
||||
import type { ComfyCommand } from '@/stores/commandStore'
|
||||
import { useExecutionStore } from '@/stores/executionStore'
|
||||
import { useCanvasStore, useTitleEditorStore } from '@/stores/graphStore'
|
||||
import { useQueueSettingsStore, useQueueStore } from '@/stores/queueStore'
|
||||
import { useSettingStore } from '@/stores/settingStore'
|
||||
@@ -39,6 +40,7 @@ export function useCoreCommands(): ComfyCommand[] {
|
||||
const firebaseAuthActions = useFirebaseAuthActions()
|
||||
const toastStore = useToastStore()
|
||||
const canvasStore = useCanvasStore()
|
||||
const executionStore = useExecutionStore()
|
||||
const getTracker = () => workflowStore.activeWorkflow?.changeTracker
|
||||
|
||||
const getSelectedNodes = (): LGraphNode[] => {
|
||||
@@ -203,7 +205,7 @@ export function useCoreCommands(): ComfyCommand[] {
|
||||
icon: 'pi pi-stop',
|
||||
label: 'Interrupt',
|
||||
function: async () => {
|
||||
await api.interrupt()
|
||||
await api.interrupt(executionStore.activePromptId)
|
||||
toastStore.add({
|
||||
severity: 'info',
|
||||
summary: t('g.interrupted'),
|
||||
|
||||
@@ -10,14 +10,19 @@ import { type ComfyWidgetConstructorV2 } from '@/scripts/widgets'
|
||||
import { useSettingStore } from '@/stores/settingStore'
|
||||
|
||||
function onFloatValueChange(this: INumericWidget, v: number) {
|
||||
this.value = this.options.round
|
||||
? _.clamp(
|
||||
Math.round((v + Number.EPSILON) / this.options.round) *
|
||||
this.options.round,
|
||||
this.options.min ?? -Infinity,
|
||||
this.options.max ?? Infinity
|
||||
)
|
||||
: v
|
||||
const round = this.options.round
|
||||
if (round) {
|
||||
const precision =
|
||||
this.options.precision ?? Math.max(0, -Math.floor(Math.log10(round)))
|
||||
const rounded = Math.round(v / round) * round
|
||||
this.value = _.clamp(
|
||||
Number(rounded.toFixed(precision)),
|
||||
this.options.min ?? -Infinity,
|
||||
this.options.max ?? Infinity
|
||||
)
|
||||
} else {
|
||||
this.value = v
|
||||
}
|
||||
}
|
||||
|
||||
export const _for_testing = {
|
||||
@@ -62,7 +67,7 @@ export const useFloatWidget = () => {
|
||||
max: inputSpec.max ?? 2048,
|
||||
round:
|
||||
enableRounding && precision && !inputSpec.round
|
||||
? (1_000_000 * Math.pow(0.1, precision)) / 1_000_000
|
||||
? Math.pow(10, -precision)
|
||||
: (inputSpec.round as number),
|
||||
/** @deprecated Use step2 instead. The 10x value is a legacy implementation. */
|
||||
step: step * 10.0,
|
||||
|
||||
3
src/config/clientFeatureFlags.json
Normal file
@@ -0,0 +1,3 @@
|
||||
{
|
||||
"supports_preview_metadata": false
|
||||
}
|
||||
@@ -290,6 +290,7 @@ export const CORE_SETTINGS: SettingParams[] = [
|
||||
options: [
|
||||
{ value: 'en', text: 'English' },
|
||||
{ value: 'zh', text: '中文' },
|
||||
{ value: 'zh-TW', text: '繁體中文' },
|
||||
{ value: 'ru', text: 'Русский' },
|
||||
{ value: 'ja', text: '日本語' },
|
||||
{ value: 'ko', text: '한국어' },
|
||||
@@ -330,6 +331,14 @@ export const CORE_SETTINGS: SettingParams[] = [
|
||||
defaultValue: true,
|
||||
versionAdded: '1.20.3'
|
||||
},
|
||||
{
|
||||
id: 'Comfy.Notification.ShowVersionUpdates',
|
||||
category: ['Comfy', 'Notification Preferences'],
|
||||
name: 'Show version updates',
|
||||
tooltip: 'Show updates for new models, and major new features.',
|
||||
type: 'boolean',
|
||||
defaultValue: true
|
||||
},
|
||||
{
|
||||
id: 'Comfy.ConfirmClear',
|
||||
category: ['Comfy', 'Workflow', 'ConfirmClear'],
|
||||
@@ -431,6 +440,8 @@ export const CORE_SETTINGS: SettingParams[] = [
|
||||
name: 'Use new menu',
|
||||
type: 'combo',
|
||||
options: ['Disabled', 'Top', 'Bottom'],
|
||||
tooltip:
|
||||
'Menu bar position. On mobile devices, the menu is always shown at the top.',
|
||||
migrateDeprecatedValue: (value: string) => {
|
||||
// Floating is now supported by dragging the docked actionbar off.
|
||||
if (value === 'Floating') {
|
||||
|
||||
@@ -1,5 +1,11 @@
|
||||
import { LiteGraph } from '@comfyorg/litegraph'
|
||||
import { LGraphNode, type NodeId } from '@comfyorg/litegraph/dist/LGraphNode'
|
||||
import {
|
||||
type ExecutableLGraphNode,
|
||||
type ExecutionId,
|
||||
LGraphNode,
|
||||
LiteGraph,
|
||||
SubgraphNode
|
||||
} from '@comfyorg/litegraph'
|
||||
import { type NodeId } from '@comfyorg/litegraph/dist/LGraphNode'
|
||||
|
||||
import { t } from '@/i18n'
|
||||
import {
|
||||
@@ -13,6 +19,8 @@ import { useNodeDefStore } from '@/stores/nodeDefStore'
|
||||
import { useToastStore } from '@/stores/toastStore'
|
||||
import { useWidgetStore } from '@/stores/widgetStore'
|
||||
import { ComfyExtension } from '@/types/comfy'
|
||||
import { ExecutableGroupNodeChildDTO } from '@/utils/executableGroupNodeChildDTO'
|
||||
import { GROUP } from '@/utils/executableGroupNodeDto'
|
||||
import { deserialiseAndCreate, serialise } from '@/utils/vintageClipboard'
|
||||
|
||||
import { api } from '../../scripts/api'
|
||||
@@ -26,8 +34,6 @@ type GroupNodeWorkflowData = {
|
||||
nodes: ComfyNode[]
|
||||
}
|
||||
|
||||
const GROUP = Symbol()
|
||||
|
||||
// v1 Prefix + Separator: workflow/
|
||||
// v2 Prefix + Separator: workflow> (ComfyUI_frontend v1.2.63)
|
||||
const PREFIX = 'workflow'
|
||||
@@ -813,6 +819,7 @@ export class GroupNodeHandler {
|
||||
innerNodeIndex++
|
||||
) {
|
||||
const innerNode = this.innerNodes[innerNodeIndex]
|
||||
innerNode.graph ??= this.node.graph
|
||||
|
||||
for (const w of innerNode.widgets ?? []) {
|
||||
if (w.type === 'converted-widget') {
|
||||
@@ -899,7 +906,20 @@ export class GroupNodeHandler {
|
||||
return link
|
||||
}
|
||||
|
||||
this.node.getInnerNodes = () => {
|
||||
/** @internal Used to flatten the subgraph before execution. Recursive; call with no args. */
|
||||
this.node.getInnerNodes = (
|
||||
computedNodeDtos: Map<ExecutionId, ExecutableLGraphNode>,
|
||||
/** The path of subgraph node IDs. */
|
||||
subgraphNodePath: readonly NodeId[] = [],
|
||||
/** The list of nodes to add to. */
|
||||
nodes: ExecutableLGraphNode[] = [],
|
||||
/** The set of visited nodes. */
|
||||
visited = new Set<LGraphNode>()
|
||||
): ExecutableLGraphNode[] => {
|
||||
if (visited.has(this.node))
|
||||
throw new Error('RecursionError: while flattening subgraph')
|
||||
visited.add(this.node)
|
||||
|
||||
if (!this.innerNodes) {
|
||||
// @ts-expect-error fixme ts strict error
|
||||
this.node.setInnerNodes(
|
||||
@@ -910,6 +930,8 @@ export class GroupNodeHandler {
|
||||
innerNode.configure(n)
|
||||
// @ts-expect-error fixme ts strict error
|
||||
innerNode.id = `${this.node.id}:${i}`
|
||||
// @ts-expect-error fixme ts strict error
|
||||
innerNode.graph = this.node.graph
|
||||
return innerNode
|
||||
})
|
||||
)
|
||||
@@ -917,7 +939,31 @@ export class GroupNodeHandler {
|
||||
|
||||
this.updateInnerWidgets()
|
||||
|
||||
return this.innerNodes
|
||||
const subgraphInstanceIdPath = [...subgraphNodePath, this.node.id]
|
||||
|
||||
// Assertion: Deprecated, does not matter.
|
||||
const subgraphNode = (this.node.graph?.getNodeById(
|
||||
subgraphNodePath.at(-1)
|
||||
) ?? undefined) as SubgraphNode | undefined
|
||||
|
||||
for (const node of this.innerNodes) {
|
||||
node.graph ??= this.node.graph
|
||||
|
||||
// Create minimal DTOs rather than cloning the node
|
||||
const currentId = String(node.id)
|
||||
node.id = currentId.split(':').at(-1)
|
||||
const aVeryRealNode = new ExecutableGroupNodeChildDTO(
|
||||
node,
|
||||
subgraphInstanceIdPath,
|
||||
computedNodeDtos,
|
||||
subgraphNode
|
||||
)
|
||||
node.id = currentId
|
||||
aVeryRealNode.groupNodeHandler = this
|
||||
|
||||
nodes.push(aVeryRealNode)
|
||||
}
|
||||
return nodes
|
||||
}
|
||||
|
||||
// @ts-expect-error fixme ts strict error
|
||||
@@ -1503,6 +1549,9 @@ export class GroupNodeHandler {
|
||||
|
||||
this.linkOutputs(node, i)
|
||||
app.graph.remove(node)
|
||||
|
||||
// Set internal ID to what is expected after workflow is reloaded
|
||||
node.id = `${this.node.id}:${i}`
|
||||
}
|
||||
|
||||
this.linkInputs()
|
||||
@@ -1608,8 +1657,14 @@ async function convertSelectedNodesToGroupNode() {
|
||||
if (nodes.length === 1) {
|
||||
throw new Error('Please select multiple nodes to convert to group node')
|
||||
}
|
||||
if (nodes.some((n) => GroupNodeHandler.isGroupNode(n))) {
|
||||
throw new Error('Selected nodes contain a group node')
|
||||
|
||||
for (const node of nodes) {
|
||||
if (node instanceof SubgraphNode) {
|
||||
throw new Error('Selected nodes contain a subgraph node')
|
||||
}
|
||||
if (GroupNodeHandler.isGroupNode(node)) {
|
||||
throw new Error('Selected nodes contain a group node')
|
||||
}
|
||||
}
|
||||
return await GroupNodeHandler.fromNodes(nodes)
|
||||
}
|
||||
|
||||
@@ -4854,7 +4854,7 @@ class KeyboardManager {
|
||||
private maskEditor: MaskEditorDialog
|
||||
private messageBroker: MessageBroker
|
||||
|
||||
// Binded functions, for use in addListeners and removeListeners
|
||||
// Bound functions, for use in addListeners and removeListeners
|
||||
private handleKeyDownBound = this.handleKeyDown.bind(this)
|
||||
private handleKeyUpBound = this.handleKeyUp.bind(this)
|
||||
private clearKeysBound = this.clearKeys.bind(this)
|
||||
|
||||
@@ -6,7 +6,7 @@ import type {
|
||||
LLink,
|
||||
Vector2
|
||||
} from '@comfyorg/litegraph'
|
||||
import type { CanvasMouseEvent } from '@comfyorg/litegraph/dist/types/events'
|
||||
import type { CanvasPointerEvent } from '@comfyorg/litegraph/dist/types/events'
|
||||
import type { IBaseWidget } from '@comfyorg/litegraph/dist/types/widgets'
|
||||
|
||||
import {
|
||||
@@ -78,7 +78,7 @@ export class PrimitiveNode extends LGraphNode {
|
||||
app.canvas,
|
||||
node,
|
||||
app.canvas.graph_mouse,
|
||||
{} as CanvasMouseEvent
|
||||
{} as CanvasPointerEvent
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -24,6 +24,10 @@ import ruCommands from './locales/ru/commands.json'
|
||||
import ru from './locales/ru/main.json'
|
||||
import ruNodes from './locales/ru/nodeDefs.json'
|
||||
import ruSettings from './locales/ru/settings.json'
|
||||
import zhTWCommands from './locales/zh-TW/commands.json'
|
||||
import zhTW from './locales/zh-TW/main.json'
|
||||
import zhTWNodes from './locales/zh-TW/nodeDefs.json'
|
||||
import zhTWSettings from './locales/zh-TW/settings.json'
|
||||
import zhCommands from './locales/zh/commands.json'
|
||||
import zh from './locales/zh/main.json'
|
||||
import zhNodes from './locales/zh/nodeDefs.json'
|
||||
@@ -41,6 +45,7 @@ function buildLocale<M, N, C, S>(main: M, nodes: N, commands: C, settings: S) {
|
||||
const messages = {
|
||||
en: buildLocale(en, enNodes, enCommands, enSettings),
|
||||
zh: buildLocale(zh, zhNodes, zhCommands, zhSettings),
|
||||
'zh-TW': buildLocale(zhTW, zhTWNodes, zhTWCommands, zhTWSettings),
|
||||
ru: buildLocale(ru, ruNodes, ruCommands, ruSettings),
|
||||
ja: buildLocale(ja, jaNodes, jaCommands, jaSettings),
|
||||
ko: buildLocale(ko, koNodes, koCommands, koSettings),
|
||||
|
||||
172
src/locales/CONTRIBUTING.md
Normal file
@@ -0,0 +1,172 @@
|
||||
# Contributing Translations to ComfyUI
|
||||
|
||||
## Quick Start for New Languages
|
||||
|
||||
1. **Let us know** - Open an issue or reach out on Discord to request a new language
|
||||
2. **Get technical setup help** - We'll help configure the initial files or you can follow the technical process below
|
||||
3. **Automatic translation** - Our CI system will generate translations using OpenAI when you create a PR
|
||||
4. **Review and refine** - You can improve the auto-generated translations and become a maintainer for that language
|
||||
|
||||
## Technical Process (Confirmed Working)
|
||||
|
||||
### Prerequisites
|
||||
- Node.js installed
|
||||
- Git/GitHub knowledge
|
||||
- OpenAI API key (optional - CI will handle translations)
|
||||
|
||||
### Step 1: Update Configuration Files
|
||||
|
||||
**Time required: ~10 minutes**
|
||||
|
||||
#### 1.1 Update `.i18nrc.cjs`
|
||||
Add your language code to the `outputLocales` array:
|
||||
|
||||
```javascript
|
||||
module.exports = defineConfig({
|
||||
// ... existing config
|
||||
outputLocales: ['zh', 'zh-TW', 'ru', 'ja', 'ko', 'fr', 'es'], // Add your language here
|
||||
reference: `Special names to keep untranslated: flux, photomaker, clip, vae, cfg, stable audio, stable cascade, stable zero, controlnet, lora, HiDream.
|
||||
'latent' is the short form of 'latent space'.
|
||||
'mask' is in the context of image processing.
|
||||
Note: For Traditional Chinese (Taiwan), use Taiwan-specific terminology and traditional characters.
|
||||
`
|
||||
});
|
||||
```
|
||||
|
||||
#### 1.2 Update `src/constants/coreSettings.ts`
|
||||
Add your language to the dropdown options:
|
||||
|
||||
```typescript
|
||||
{
|
||||
id: 'Comfy.Locale',
|
||||
name: 'Language',
|
||||
type: 'combo',
|
||||
options: [
|
||||
{ value: 'en', text: 'English' },
|
||||
{ value: 'zh', text: '中文' },
|
||||
{ value: 'zh-TW', text: '繁體中文 (台灣)' }, // Add your language here
|
||||
{ value: 'ru', text: 'Русский' },
|
||||
{ value: 'ja', text: '日本語' },
|
||||
{ value: 'ko', text: '한국어' },
|
||||
{ value: 'fr', text: 'Français' },
|
||||
{ value: 'es', text: 'Español' }
|
||||
],
|
||||
defaultValue: () => navigator.language.split('-')[0] || 'en'
|
||||
},
|
||||
```
|
||||
|
||||
#### 1.3 Update `src/i18n.ts`
|
||||
Add imports for your new language files:
|
||||
|
||||
```typescript
|
||||
// Add these imports (replace zh-TW with your language code)
|
||||
import zhTWCommands from './locales/zh-TW/commands.json'
|
||||
import zhTW from './locales/zh-TW/main.json'
|
||||
import zhTWNodes from './locales/zh-TW/nodeDefs.json'
|
||||
import zhTWSettings from './locales/zh-TW/settings.json'
|
||||
|
||||
// Add to the messages object
|
||||
const messages = {
|
||||
en: buildLocale(en, enNodes, enCommands, enSettings),
|
||||
zh: buildLocale(zh, zhNodes, zhCommands, zhSettings),
|
||||
'zh-TW': buildLocale(zhTW, zhTWNodes, zhTWCommands, zhTWSettings), // Add this line
|
||||
// ... other languages
|
||||
}
|
||||
```
|
||||
|
||||
### Step 2: Generate Translation Files
|
||||
|
||||
#### Option A: Local Generation (Optional)
|
||||
```bash
|
||||
# Only if you have OpenAI API key configured
|
||||
npm run locale
|
||||
```
|
||||
|
||||
#### Option B: Let CI Handle It (Recommended)
|
||||
- Create your PR with the configuration changes above
|
||||
- Our GitHub CI will automatically generate translation files
|
||||
- Empty JSON files are fine - they'll be populated by the workflow
|
||||
|
||||
### Step 3: Test Your Changes
|
||||
|
||||
```bash
|
||||
npm run typecheck # Check for TypeScript errors
|
||||
npm run dev # Start development server
|
||||
```
|
||||
|
||||
**Testing checklist:**
|
||||
- [ ] Language appears in ComfyUI Settings > Locale dropdown
|
||||
- [ ] Can select the new language without errors
|
||||
- [ ] Partial translations display correctly
|
||||
- [ ] UI falls back to English for untranslated strings
|
||||
- [ ] No console errors when switching languages
|
||||
|
||||
### Step 4: Submit PR
|
||||
|
||||
1. **Create PR** with your configuration changes
|
||||
2. **CI will run** and automatically populate translation files
|
||||
3. **Request review** from language maintainers: @Yorha4D @KarryCharon @DorotaLuna @shinshin86
|
||||
4. **Get added to CODEOWNERS** as a reviewer for your language
|
||||
|
||||
## What Happens in CI
|
||||
|
||||
Our automated translation workflow:
|
||||
1. **Collects strings**: Scans the UI for translatable text
|
||||
2. **Updates English files**: Ensures all strings are captured
|
||||
3. **Generates translations**: Uses OpenAI API to translate to all configured languages
|
||||
4. **Commits back**: Automatically updates your PR with complete translations
|
||||
|
||||
## File Structure
|
||||
|
||||
Each language has 4 translation files:
|
||||
- `main.json` - Main UI text (~2000+ entries)
|
||||
- `commands.json` - Command descriptions (~200+ entries)
|
||||
- `settings.json` - Settings panel (~400+ entries)
|
||||
- `nodeDefs.json` - Node definitions (~varies based on installed nodes)
|
||||
|
||||
## Translation Quality
|
||||
|
||||
- **Auto-translations are high quality** but may need refinement
|
||||
- **Technical terms** are preserved (flux, photomaker, clip, vae, etc.)
|
||||
- **Context-aware** translations based on UI usage
|
||||
- **Native speaker review** is encouraged for quality improvements
|
||||
|
||||
## Common Issues & Solutions
|
||||
|
||||
### Issue: TypeScript errors on imports
|
||||
**Solution**: Ensure your language code matches exactly in all three files
|
||||
|
||||
### Issue: Empty translation files
|
||||
**Solution**: This is normal - CI will populate them when you create a PR
|
||||
|
||||
### Issue: Language not appearing in dropdown
|
||||
**Solution**: Check that the language code in `coreSettings.ts` matches your other files exactly
|
||||
|
||||
### Issue: Rate limits during local translation
|
||||
**Solution**: This is expected - let CI handle the translation generation
|
||||
|
||||
## Regional Variants
|
||||
|
||||
For regional variants (like zh-TW for Taiwan), use:
|
||||
- **Language-region codes**: `zh-TW`, `pt-BR`, `en-US`
|
||||
- **Specific terminology**: Add region-specific context to the reference string
|
||||
- **Native display names**: Use the local language name in the dropdown
|
||||
|
||||
## Getting Help
|
||||
|
||||
- **Tag translation maintainers**: @Yorha4D @KarryCharon @DorotaLuna @shinshin86
|
||||
- **Check existing language PRs** for examples
|
||||
- **Open an issue** describing your language addition request
|
||||
- **Reference this tested process** - we've confirmed it works!
|
||||
|
||||
## Becoming a Language Maintainer
|
||||
|
||||
After your language is added:
|
||||
1. **Get added to CODEOWNERS** for your language files
|
||||
2. **Review future PRs** affecting your language
|
||||
3. **Coordinate with other native speakers** for quality improvements
|
||||
4. **Help maintain translations** as the UI evolves
|
||||
|
||||
---
|
||||
|
||||
*This process was tested and confirmed working with Traditional Chinese (Taiwan) addition.*
|
||||
@@ -14,68 +14,17 @@ Our project supports multiple languages using `vue-i18n`. This allows users arou
|
||||
|
||||
## How to Add a New Language
|
||||
|
||||
We welcome the addition of new languages. You can add a new language by following these steps:
|
||||
Want to add a new language to ComfyUI? See our detailed [Contributing Guide](./CONTRIBUTING.md) with step-by-step instructions and confirmed working process.
|
||||
|
||||
### 1\. Generate language files
|
||||
### Quick Start
|
||||
1. Open an issue or reach out on Discord to request a new language
|
||||
2. Follow the [technical process](./CONTRIBUTING.md#technical-process-confirmed-working) or ask for help
|
||||
3. Our CI will automatically generate translations using OpenAI
|
||||
4. Become a maintainer for your language
|
||||
|
||||
We use [lobe-i18n](https://github.com/lobehub/lobe-cli-toolbox/blob/master/packages/lobe-i18n/README.md) as our translation tool, which integrates with LLM for efficient localization.
|
||||
|
||||
Update the configuration file to include the new language(s) you wish to add:
|
||||
|
||||
```javascript
|
||||
const { defineConfig } = require('@lobehub/i18n-cli');
|
||||
|
||||
module.exports = defineConfig({
|
||||
entry: 'src/locales/en.json', // Base language file
|
||||
entryLocale: 'en',
|
||||
output: 'src/locales',
|
||||
outputLocales: ['zh', 'ru', 'ja', 'ko', 'fr', 'es'], // Add the new language(s) here
|
||||
});
|
||||
```
|
||||
|
||||
Set your OpenAI API Key by running the following command:
|
||||
|
||||
```sh
|
||||
npx lobe-i18n --option
|
||||
```
|
||||
|
||||
Once configured, generate the translation files with:
|
||||
|
||||
```sh
|
||||
npx lobe-i18n locale
|
||||
```
|
||||
|
||||
This will create the language files for the specified languages in the configuration.
|
||||
|
||||
### 2\. Update i18n Configuration
|
||||
|
||||
Import the newly generated locale file(s) in the `src/i18n.ts` file to include them in the application's i18n setup.
|
||||
|
||||
### 3\. Enable Selection of the New Language
|
||||
|
||||
Add the newly added language to the following item in `src/constants/coreSettings.ts`:
|
||||
|
||||
```typescript
|
||||
{
|
||||
id: 'Comfy.Locale',
|
||||
name: 'Locale',
|
||||
type: 'combo',
|
||||
// Add the new language(s) here
|
||||
options: [
|
||||
{ value: 'en', text: 'English' },
|
||||
{ value: 'zh', text: '中文' },
|
||||
{ value: 'ru', text: 'Русский' },
|
||||
{ value: 'ja', text: '日本語' },
|
||||
{ value: 'ko', text: '한국어' },
|
||||
{ value: 'fr', text: 'Français' },
|
||||
{ value: 'es', text: 'Español' }
|
||||
],
|
||||
defaultValue: navigator.language.split('-')[0] || 'en'
|
||||
},
|
||||
```
|
||||
|
||||
This will make the new language selectable in the application's settings.
|
||||
|
||||
### 4\. Test the Translations
|
||||
|
||||
Start the development server, switch to the new language, and verify the translations. You can switch languages by opening the ComfyUI Settings and selecting from the `ComfyUI > Locale` dropdown box.
|
||||
### File Structure
|
||||
Each language has 4 translation files in `src/locales/[language-code]/`:
|
||||
- `main.json` - Main UI text
|
||||
- `commands.json` - Command descriptions
|
||||
- `settings.json` - Settings panel
|
||||
- `nodeDefs.json` - Node definitions
|
||||
|
||||
@@ -547,182 +547,194 @@
|
||||
"Audio": "Audio",
|
||||
"Image API": "Image API",
|
||||
"Video API": "Video API",
|
||||
"LLM API": "LLM API",
|
||||
"All": "All Templates"
|
||||
},
|
||||
"templateDescription": {
|
||||
"Basics": {
|
||||
"default": "Generate images from text descriptions.",
|
||||
"default": "Generate images from text prompts.",
|
||||
"image2image": "Transform existing images using text prompts.",
|
||||
"lora": "Apply LoRA models for specialized styles or subjects.",
|
||||
"lora": "Generate images with LoRA models for specialized styles or subjects.",
|
||||
"lora_multiple": "Generate images by combining multiple LoRA models.",
|
||||
"inpaint_example": "Edit specific parts of images seamlessly.",
|
||||
"inpain_model_outpainting": "Extend images beyond their original boundaries.",
|
||||
"embedding_example": "Use textual inversion for consistent styles.",
|
||||
"gligen_textbox_example": "Specify the location and size of objects.",
|
||||
"lora_multiple": "Combine multiple LoRA models for unique results."
|
||||
"inpaint_model_outpainting": "Extend images beyond their original boundaries.",
|
||||
"embedding_example": "Generate images using textual inversion for consistent styles.",
|
||||
"gligen_textbox_example": "Generate images with precise object placement using text boxes."
|
||||
},
|
||||
"Flux": {
|
||||
"flux_dev_checkpoint_example": "Create images using Flux development models.",
|
||||
"flux_schnell": "Generate images quickly with Flux Schnell.",
|
||||
"flux_fill_inpaint_example": "Fill in missing parts of images.",
|
||||
"flux_fill_outpaint_example": "Extend images using Flux outpainting.",
|
||||
"flux_canny_model_example": "Generate images from edge detection.",
|
||||
"flux_depth_lora_example": "Create images with depth-aware LoRA.",
|
||||
"flux_redux_model_example": "Transfer style from a reference image to guide image generation with Flux."
|
||||
"flux_kontext_dev_basic": "Edit image using Flux Kontext with full node visibility, perfect for learning the workflow.",
|
||||
"flux_kontext_dev_grouped": "Streamlined version of Flux Kontext with grouped nodes for cleaner workspace.",
|
||||
"flux_dev_checkpoint_example": "Generate images using Flux Dev fp8 quantized version. Suitable for devices with limited VRAM, requires only one model file, but image quality is slightly lower than the full version.",
|
||||
"flux_schnell": "Quickly generate images with Flux Schnell fp8 quantized version. Ideal for low-end hardware, requires only 4 steps to generate images.",
|
||||
"flux_dev_full_text_to_image": "Generate high-quality images with Flux Dev full version. Requires larger VRAM and multiple model files, but provides the best prompt following capability and image quality.",
|
||||
"flux_schnell_full_text_to_image": "Generate images quickly with Flux Schnell full version. Uses Apache2.0 license, requires only 4 steps to generate images while maintaining good image quality.",
|
||||
"flux_fill_inpaint_example": "Fill missing parts of images using Flux inpainting.",
|
||||
"flux_fill_outpaint_example": "Extend images beyond boundaries using Flux outpainting.",
|
||||
"flux_canny_model_example": "Generate images guided by edge detection using Flux Canny.",
|
||||
"flux_depth_lora_example": "Generate images guided by depth information using Flux LoRA.",
|
||||
"flux_redux_model_example": "Generate images by transferring style from reference images using Flux Redux."
|
||||
},
|
||||
"Image": {
|
||||
"hidream_i1_dev": "Generate images with HiDream I1 Dev.",
|
||||
"hidream_i1_fast": "Generate images quickly with HiDream I1.",
|
||||
"hidream_i1_full": "Generate images with HiDream I1.",
|
||||
"hidream_e1_full": "Edit images with HiDream E1.",
|
||||
"sd3_5_simple_example": "Generate images with SD 3.5.",
|
||||
"sd3_5_large_canny_controlnet_example": "Use edge detection to guide image generation with SD 3.5.",
|
||||
"sd3_5_large_depth": "Create depth-aware images with SD 3.5.",
|
||||
"sd3_5_large_blur": "Generate images from blurred reference images with SD 3.5.",
|
||||
"sdxl_simple_example": "Create high-quality images with SDXL.",
|
||||
"sdxl_refiner_prompt_example": "Enhance SDXL outputs with refiners.",
|
||||
"sdxl_revision_text_prompts": "Transfer concepts from reference images to guide image generation with SDXL.",
|
||||
"sdxl_revision_zero_positive": "Add text prompts alongside reference images to guide image generation with SDXL.",
|
||||
"sdxlturbo_example": "Generate images in a single step with SDXL Turbo."
|
||||
"image_omnigen2_t2i": "Generate high-quality images from text prompts using OmniGen2's unified 7B multimodal model with dual-path architecture.",
|
||||
"image_omnigen2_image_edit": "Edit images with natural language instructions using OmniGen2's advanced image editing capabilities and text rendering support.",
|
||||
"image_cosmos_predict2_2B_t2i": "Generate images with Cosmos-Predict2 2B T2I, delivering physically accurate, high-fidelity, and detail-rich image generation.",
|
||||
"image_chroma_text_to_image": "Chroma is modified from flux and has some changes in the architecture.",
|
||||
"hidream_i1_dev": "Generate images with HiDream I1 Dev - Balanced version with 28 inference steps, suitable for medium-range hardware.",
|
||||
"hidream_i1_fast": "Generate images quickly with HiDream I1 Fast - Lightweight version with 16 inference steps, ideal for rapid previews on lower-end hardware.",
|
||||
"hidream_i1_full": "Generate images with HiDream I1 Full - Complete version with 50 inference steps for highest quality output.",
|
||||
"hidream_e1_full": "Edit images with HiDream E1 - Professional natural language image editing model.",
|
||||
"sd3_5_simple_example": "Generate images using SD 3.5.",
|
||||
"sd3_5_large_canny_controlnet_example": "Generate images guided by edge detection using SD 3.5 Canny ControlNet.",
|
||||
"sd3_5_large_depth": "Generate images guided by depth information using SD 3.5.",
|
||||
"sd3_5_large_blur": "Generate images guided by blurred reference images using SD 3.5.",
|
||||
"sdxl_simple_example": "Generate high-quality images using SDXL.",
|
||||
"sdxl_refiner_prompt_example": "Enhance SDXL images using refiner models.",
|
||||
"sdxl_revision_text_prompts": "Generate images by transferring concepts from reference images using SDXL Revision.",
|
||||
"sdxl_revision_zero_positive": "Generate images using both text prompts and reference images with SDXL Revision.",
|
||||
"sdxlturbo_example": "Generate images in a single step using SDXL Turbo.",
|
||||
"image_lotus_depth_v1_1": "Run Lotus Depth in ComfyUI for zero-shot, efficient monocular depth estimation with high detail retention."
|
||||
},
|
||||
"Video": {
|
||||
"text_to_video_wan": "Quickly Generate videos from text descriptions.",
|
||||
"image_to_video_wan": "Quickly Generate videos from images.",
|
||||
"wan2_1_fun_inp": "Create videos from start and end frames.",
|
||||
"wan2_1_fun_control": "Guide video generation with pose, depth, edge controls and more.",
|
||||
"wan2_1_flf2v_720_f16": "Generate video through controlling the first and last frames.",
|
||||
"ltxv_text_to_video": "Generate videos from text descriptions.",
|
||||
"ltxv_image_to_video": "Convert still images into videos.",
|
||||
"mochi_text_to_video_example": "Create videos with Mochi model.",
|
||||
"hunyuan_video_text_to_video": "Generate videos using Hunyuan model.",
|
||||
"image_to_video": "Transform images into animated videos.",
|
||||
"txt_to_image_to_video": "Generate images from text and then convert them into videos."
|
||||
"video_cosmos_predict2_2B_video2world_480p_16fps": "Generate videos with Cosmos-Predict2 2B Video2World, generating physically accurate, high-fidelity, and consistent video simulations.",
|
||||
"video_wan_vace_14B_t2v": "Transform text descriptions into high-quality videos. Supports both 480p and 720p with VACE-14B model.",
|
||||
"video_wan_vace_14B_ref2v": "Create videos that match the style and content of a reference image. Perfect for style-consistent video generation.",
|
||||
"video_wan_vace_14B_v2v": "Generate videos by controlling input videos and reference images using Wan VACE.",
|
||||
"video_wan_vace_outpainting": "Generate extended videos by expanding video size using Wan VACE outpainting.",
|
||||
"video_wan_vace_flf2v": "Generate smooth video transitions by defining start and end frames. Supports custom keyframe sequences.",
|
||||
"video_wan_vace_inpainting": "Edit specific regions in videos while preserving surrounding content. Great for object removal or replacement.",
|
||||
"video_wan2_1_fun_camera_v1_1_1_3B": "Generate dynamic videos with cinematic camera movements using Wan 2.1 Fun Camera 1.3B model.",
|
||||
"video_wan2_1_fun_camera_v1_1_14B": "Generate high-quality videos with advanced camera control using the full 14B model",
|
||||
"text_to_video_wan": "Generate videos from text prompts using Wan 2.1.",
|
||||
"image_to_video_wan": "Generate videos from images using Wan 2.1.",
|
||||
"wan2_1_fun_inp": "Generate videos from start and end frames using Wan 2.1 inpainting.",
|
||||
"wan2_1_fun_control": "Generate videos guided by pose, depth, and edge controls using Wan 2.1 ControlNet.",
|
||||
"wan2_1_flf2v_720_f16": "Generate videos by controlling first and last frames using Wan 2.1 FLF2V.",
|
||||
"ltxv_text_to_video": "Generate videos from text prompts.",
|
||||
"ltxv_image_to_video": "Generate videos from still images.",
|
||||
"mochi_text_to_video_example": "Generate videos from text prompts using Mochi model.",
|
||||
"hunyuan_video_text_to_video": "Generate videos from text prompts using Hunyuan model.",
|
||||
"image_to_video": "Generate videos from still images.",
|
||||
"txt_to_image_to_video": "Generate videos by first creating images from text prompts."
|
||||
},
|
||||
"Image API": {
|
||||
"api_openai_image_1_t2i": "Use GPT Image 1 API to generate images from text descriptions.",
|
||||
"api_openai_image_1_i2i": "Use GPT Image 1 API to generate images from images.",
|
||||
"api_openai_image_1_inpaint": "Use GPT Image 1 API to inpaint images.",
|
||||
"api_openai_image_1_multi_inputs": "Use GPT Image 1 API with multiple inputs to generate images.",
|
||||
"api-openai-dall-e-2-t2i": "Use Dall-E 2 API to generate images from text descriptions.",
|
||||
"api-openai-dall-e-2-inpaint": "Use Dall-E 2 API to inpaint images.",
|
||||
"api-openai-dall-e-3-t2i": "Use Dall-E 3 API to generate images from text descriptions.",
|
||||
"api_bfl_flux_pro_t2i": "Create images with FLUX.1 [pro]'s excellent prompt following, visual quality, image detail and output diversity.",
|
||||
"api_stability_sd3_t2i": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.",
|
||||
"api_ideogram_v3_t2i": "Generate images with high-quality image-prompt alignment, photorealism, and text rendering. Create professional-quality logos, promotional posters, landing page concepts, product photography, and more. Effortlessly craft sophisticated spatial compositions with intricate backgrounds, precise and nuanced lighting and colors, and lifelike environmental detail.",
|
||||
"api_bfl_flux_1_kontext_multiple_images_input": "Input multiple images and edit them with Flux.1 Kontext.",
|
||||
"api_bfl_flux_1_kontext_pro_image": "Edit images with Flux.1 Kontext pro image.",
|
||||
"api_bfl_flux_1_kontext_max_image": "Edit images with Flux.1 Kontext max image.",
|
||||
"api_bfl_flux_pro_t2i": "Generate images with excellent prompt following and visual quality using FLUX.1 Pro.",
|
||||
"api_luma_photon_i2i": "Guide image generation using a combination of images and prompt.",
|
||||
"api_luma_photon_style_ref": "Apply and blend style references with exact control. Luma Photon captures the essence of each reference image, letting you combine distinct visual elements while maintaining professional quality.",
|
||||
"api_recraft_image_gen_with_color_control": "Create a custom palette to reuse for multiple images or hand-pick colors for each photo. Match your brand's color palette and craft visuals that are distinctly yours.",
|
||||
"api_luma_photon_style_ref": "Generate images by blending style references with precise control using Luma Photon.",
|
||||
"api_recraft_image_gen_with_color_control": "Generate images with custom color palettes and brand-specific visuals using Recraft.",
|
||||
"api_recraft_image_gen_with_style_control": "Control style with visual examples, align positioning, and fine-tune objects. Store and share styles for perfect brand consistency.",
|
||||
"api_recraft_vector_gen": "Go from a text prompt to vector image with Recraft's AI vector generator. Produce the best-quality vector art for logos, posters, icon sets, ads, banners and mockups. Perfect your designs with sharp, high-quality SVG files. Create branded vector illustrations for your app or website in seconds."
|
||||
"api_recraft_vector_gen": "Generate high-quality vector images from text prompts using Recraft's AI vector generator.",
|
||||
"api_runway_text_to_image": "Generate high-quality images from text prompts using Runway's AI model.",
|
||||
"api_runway_reference_to_image": "Generate new images based on reference styles and compositions with Runway's AI.",
|
||||
"api_stability_ai_stable_image_ultra_t2i": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.",
|
||||
"api_stability_ai_i2i": "Transform images with high-quality generation using Stability AI, perfect for professional editing and style transfer.",
|
||||
"api_stability_ai_sd3_5_t2i": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.",
|
||||
"api_stability_ai_sd3_5_i2i": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.",
|
||||
"api_ideogram_v3_t2i": "Generate professional-quality images with excellent prompt alignment, photorealism, and text rendering using Ideogram V3.",
|
||||
"api_openai_image_1_t2i": "Generate images from text prompts using OpenAI GPT Image 1 API.",
|
||||
"api_openai_image_1_i2i": "Generate images from input images using OpenAI GPT Image 1 API.",
|
||||
"api_openai_image_1_inpaint": "Edit images using inpainting with OpenAI GPT Image 1 API.",
|
||||
"api_openai_image_1_multi_inputs": "Generate images from multiple inputs using OpenAI GPT Image 1 API.",
|
||||
"api_openai_dall_e_2_t2i": "Generate images from text prompts using OpenAI Dall-E 2 API.",
|
||||
"api_openai_dall_e_2_inpaint": "Edit images using inpainting with OpenAI Dall-E 2 API.",
|
||||
"api_openai_dall_e_3_t2i": "Generate images from text prompts using OpenAI Dall-E 3 API."
|
||||
},
|
||||
"Video API": {
|
||||
"api_moonvalley_text_to_video": "Generate cinematic, 1080p videos from text prompts through a model trained exclusively on licensed data.",
|
||||
"api_moonvalley_image_to_video": "Generate cinematic, 1080p videos with an image through a model trained exclusively on licensed data.",
|
||||
"api_kling_i2v": "Generate videos with excellent prompt adherence for actions, expressions, and camera movements using Kling.",
|
||||
"api_kling_effects": "Generate dynamic videos by applying visual effects to images using Kling.",
|
||||
"api_kling_flf": "Generate videos through controlling the first and last frames.",
|
||||
"api_luma_i2v": "Take static images and instantly create magical high quality animations.",
|
||||
"api_kling_i2v": "Create videos with great prompt adherence for actions, expressions, and camera movements. Now supporting complex prompts with sequential actions for you to be the director of your scene.",
|
||||
"api_veo2_i2v": "Use Google Veo2 API to generate videos from images.",
|
||||
"api_hailuo_minimax_i2v": "Create refined videos from images and text, including CGI integration and trendy photo effects like viral AI hugging. Choose from a variety of video styles and themes to match your creative vision.",
|
||||
"api_pika_scene": "Use multiple images as ingredients and generate videos that incorporate all of them.",
|
||||
"api_pixverse_template_i2v": "Transforms static images into dynamic videos with motion and effects.",
|
||||
"api_pixverse_t2v": "Generate videos with accurate prompt interpretation and stunning video dynamics."
|
||||
"api_luma_t2v": "High-quality videos can be generated using simple prompts.",
|
||||
"api_hailuo_minimax_t2v": "Generate high-quality videos directly from text prompts. Explore MiniMax's advanced AI capabilities to create diverse visual narratives with professional CGI effects and stylistic elements to bring your descriptions to life.",
|
||||
"api_hailuo_minimax_i2v": "Generate refined videos from images and text with CGI integration using MiniMax.",
|
||||
"api_pixverse_i2v": "Generate dynamic videos from static images with motion and effects using PixVerse.",
|
||||
"api_pixverse_template_i2v": "Generate dynamic videos from static images with motion and effects using PixVerse.",
|
||||
"api_pixverse_t2v": "Generate videos with accurate prompt interpretation and stunning video dynamics.",
|
||||
"api_runway_gen3a_turbo_image_to_video": "Generate cinematic videos from static images using Runway Gen3a Turbo.",
|
||||
"api_runway_gen4_turo_image_to_video": "Generate dynamic videos from images using Runway Gen4 Turbo.",
|
||||
"api_runway_first_last_frame": "Generate smooth video transitions between two keyframes with Runway's precision.",
|
||||
"api_pika_i2v": "Generate smooth animated videos from single static images using Pika AI.",
|
||||
"api_pika_scene": "Generate videos that incorporate multiple input images using Pika Scenes.",
|
||||
"api_veo2_i2v": "Generate videos from images using Google Veo2 API."
|
||||
},
|
||||
"3D API": {
|
||||
"api_rodin_image_to_model": "Generate detailed 3D models from single photos using Rodin AI.",
|
||||
"api_rodin_multiview_to_model": "Sculpt comprehensive 3D models using Rodin's multi-angle reconstruction.",
|
||||
"api_tripo_text_to_model": "Craft 3D objects from descriptions with Tripo's text-driven modeling.",
|
||||
"api_tripo_image_to_model": "Generate professional 3D assets from 2D images using Tripo engine.",
|
||||
"api_tripo_multiview_to_model": "Build 3D models from multiple angles with Tripo's advanced scanner."
|
||||
},
|
||||
"LLM API": {
|
||||
"api_openai_chat": "Engage with OpenAI's advanced language models for intelligent conversations.",
|
||||
"api_google_gemini": "Experience Google's multimodal AI with Gemini's reasoning capabilities."
|
||||
},
|
||||
"Upscaling": {
|
||||
"hiresfix_latent_workflow": "Enhance image quality in latent space.",
|
||||
"esrgan_example": "Use upscale models to enhance image quality.",
|
||||
"hiresfix_esrgan_workflow": "Use upscale models during intermediate steps.",
|
||||
"latent_upscale_different_prompt_model": "Upscale and change prompt across passes."
|
||||
"hiresfix_latent_workflow": "Upscale images by enhancing quality in latent space.",
|
||||
"esrgan_example": "Upscale images using ESRGAN models to enhance quality.",
|
||||
"hiresfix_esrgan_workflow": "Upscale images using ESRGAN models during intermediate generation steps.",
|
||||
"latent_upscale_different_prompt_model": "Upscale images while changing prompts across generation passes."
|
||||
},
|
||||
"ControlNet": {
|
||||
"controlnet_example": "Control image generation with reference images.",
|
||||
"2_pass_pose_worship": "Generate images from pose references.",
|
||||
"depth_controlnet": "Create images with depth-aware generation.",
|
||||
"depth_t2i_adapter": "Quickly generate depth-aware images with a T2I adapter.",
|
||||
"mixing_controlnets": "Combine multiple ControlNet models together."
|
||||
"controlnet_example": "Generate images guided by scribble reference images using ControlNet.",
|
||||
"2_pass_pose_worship": "Generate images guided by pose references using ControlNet.",
|
||||
"depth_controlnet": "Generate images guided by depth information using ControlNet.",
|
||||
"depth_t2i_adapter": "Generate images guided by depth information using T2I adapter.",
|
||||
"mixing_controlnets": "Generate images by combining multiple ControlNet models."
|
||||
},
|
||||
"Area Composition": {
|
||||
"area_composition": "Control image composition with areas.",
|
||||
"area_composition_reversed": "Reverse area composition workflow.",
|
||||
"area_composition_square_area_for_subject": "Create consistent subject placement."
|
||||
"area_composition": "Generate images by controlling composition with defined areas.",
|
||||
"area_composition_square_area_for_subject": "Generate images with consistent subject placement using area composition."
|
||||
},
|
||||
"3D": {
|
||||
"hunyuan3d-non-multiview-train": "Use Hunyuan3D 2.0 to generate models from a single view.",
|
||||
"hunyuan-3d-multiview-elf": " Use Hunyuan3D 2mv to generate models from multiple views.",
|
||||
"hunyuan-3d-turbo": "Use Hunyuan3D 2mv turbo to generate models from multiple views.",
|
||||
"stable_zero123_example": "Generate 3D views from single images."
|
||||
"3d_hunyuan3d_image_to_model": "Generate 3D models from single images using Hunyuan3D 2.0.",
|
||||
"3d_hunyuan3d_multiview_to_model": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV.",
|
||||
"3d_hunyuan3d_multiview_to_model_turbo": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV Turbo.",
|
||||
"stable_zero123_example": "Generate 3D views from single images using Stable Zero123."
|
||||
},
|
||||
"Audio": {
|
||||
"stable_audio_example": "Generate audio from text descriptions."
|
||||
"audio_stable_audio_example": "Generate audio from text prompts using Stable Audio.",
|
||||
"audio_ace_step_1_t2a_instrumentals": "Generate instrumental music from text prompts using ACE-Step v1.",
|
||||
"audio_ace_step_1_t2a_song": "Generate songs with vocals from text prompts using ACE-Step v1, supporting multilingual and style customization.",
|
||||
"audio_ace_step_1_m2m_editing": "Edit existing songs to change style and lyrics using ACE-Step v1 M2M."
|
||||
}
|
||||
},
|
||||
"template": {
|
||||
"Flux": {
|
||||
"flux_dev_checkpoint_example": "Flux Dev",
|
||||
"flux_schnell": "Flux Schnell",
|
||||
"flux_fill_inpaint_example": "Flux Inpaint",
|
||||
"flux_fill_outpaint_example": "Flux Outpaint",
|
||||
"flux_canny_model_example": "Flux Canny Model",
|
||||
"flux_redux_model_example": "Flux Redux Model",
|
||||
"flux_depth_lora_example": "Flux Depth Lora"
|
||||
},
|
||||
"Basics": {
|
||||
"default": "Image Generation",
|
||||
"image2image": "Image to Image",
|
||||
"embedding_example": "Embedding",
|
||||
"gligen_textbox_example": "Gligen Textbox",
|
||||
"lora": "Lora",
|
||||
"lora_multiple": "Lora Multiple",
|
||||
"lora": "LoRA",
|
||||
"lora_multiple": "LoRA Multiple",
|
||||
"inpaint_example": "Inpaint",
|
||||
"inpain_model_outpainting": "Outpaint"
|
||||
"inpaint_model_outpainting": "Outpaint",
|
||||
"embedding_example": "Embedding",
|
||||
"gligen_textbox_example": "Gligen Textbox"
|
||||
},
|
||||
"ControlNet": {
|
||||
"controlnet_example": "Scribble ControlNet",
|
||||
"2_pass_pose_worship": "Pose ControlNet 2 Pass",
|
||||
"depth_controlnet": "Depth ControlNet",
|
||||
"depth_t2i_adapter": "Depth T2I Adapter",
|
||||
"mixing_controlnets": "Mixing ControlNets"
|
||||
},
|
||||
"Upscaling": {
|
||||
"esrgan_example": "ESRGAN",
|
||||
"hiresfix_latent_workflow": "Upscale",
|
||||
"hiresfix_esrgan_workflow": "HiresFix ESRGAN Workflow",
|
||||
"latent_upscale_different_prompt_model": "Latent Upscale Different Prompt Model"
|
||||
},
|
||||
"Video": {
|
||||
"text_to_video_wan": "Wan 2.1 Text to Video",
|
||||
"image_to_video_wan": "Wan 2.1 Image to Video",
|
||||
"image_to_video": "SVD Image to Video",
|
||||
"txt_to_image_to_video": "SVD Text to Image to Video",
|
||||
"ltxv_image_to_video": "LTXV Image to Video",
|
||||
"ltxv_text_to_video": "LTXV Text to Video",
|
||||
"mochi_text_to_video_example": "Mochi Text to Video",
|
||||
"hunyuan_video_text_to_video": "Hunyuan Video Text to Video",
|
||||
"wan2_1_fun_inp": "Wan 2.1 Inpainting",
|
||||
"wan2_1_fun_control": "Wan 2.1 ControlNet"
|
||||
},
|
||||
"Image API": {
|
||||
"api_openai_image_1_t2i": "OpenAI Image-1 Text to Image",
|
||||
"api_openai_image_1_i2i": "OpenAI Image-1 Image to Image",
|
||||
"api_openai_image_1_inpaint": "OpenAI Image-1 Inpaint",
|
||||
"api_openai_image_1_multi_inputs": "OpenAI Image-1 Multi Inputs",
|
||||
"api-openai-dall-e-2-t2i": "Dall-E 2 Text to Image",
|
||||
"api-openai-dall-e-2-inpaint": "Dall-E 2 Inpaint",
|
||||
"api-openai-dall-e-3-t2i": "Dall-E 3 Text to Image",
|
||||
"api_bfl_flux_pro_t2i": "BFL Flux 1.1[pro] Ultra Text to Image",
|
||||
"api_stability_sd3_t2i": "Stability AI Stable Image Ultra Text to Image",
|
||||
"api_ideogram_v3_t2i": "Ideogram V3 Text to Image",
|
||||
"api_luma_photon_i2i": "Luma Photon Image to Image",
|
||||
"api_luma_photon_style_ref": "Luma Photon Style Reference",
|
||||
"api_recraft_image_gen_with_color_control": "Recraft Color Control Image Generation",
|
||||
"api_recraft_image_gen_with_style_control": "Recraft Style Control Image Generation",
|
||||
"api_recraft_vector_gen": "Recraft Vector Generation"
|
||||
},
|
||||
"Video API": {
|
||||
"api_luma_i2v": "Luma Image to Video",
|
||||
"api_kling_i2v": "Kling Image to Video",
|
||||
"api_veo2_i2v": "Veo2 Image to Video",
|
||||
"api_hailuo_minimax_i2v": "MiniMax Image to Video",
|
||||
"api_pika_scene": "Pika Scenes: Images to Video",
|
||||
"api_pixverse_template_i2v": "PixVerse Template Effects: Image to Video",
|
||||
"api_pixverse_t2v": "PixVerse Text to Video"
|
||||
"Flux": {
|
||||
"flux_kontext_dev_basic": "Flux Kontext Dev(Basic)",
|
||||
"flux_kontext_dev_grouped": "Flux Kontext Dev(Grouped)",
|
||||
"flux_dev_checkpoint_example": "Flux Dev fp8",
|
||||
"flux_schnell": "Flux Schnell fp8",
|
||||
"flux_dev_full_text_to_image": "Flux Dev full text to image",
|
||||
"flux_schnell_full_text_to_image": "Flux Schnell full text to image",
|
||||
"flux_fill_inpaint_example": "Flux Inpaint",
|
||||
"flux_fill_outpaint_example": "Flux Outpaint",
|
||||
"flux_canny_model_example": "Flux Canny Model",
|
||||
"flux_depth_lora_example": "Flux Depth LoRA",
|
||||
"flux_redux_model_example": "Flux Redux Model"
|
||||
},
|
||||
"Image": {
|
||||
"image_omnigen2_t2i": "OmniGen2 Text to Image",
|
||||
"image_omnigen2_image_edit": "OmniGen2 Image Edit",
|
||||
"image_cosmos_predict2_2B_t2i": "Cosmos Predict2 2B T2I",
|
||||
"image_chroma_text_to_image": "Chroma text to image",
|
||||
"hidream_i1_dev": "HiDream I1 Dev",
|
||||
"hidream_i1_fast": "HiDream I1 Fast",
|
||||
"hidream_i1_full": "HiDream I1 Full",
|
||||
"hidream_e1_full": "HiDream E1 Full",
|
||||
"sd3_5_simple_example": "SD3.5 Simple",
|
||||
"sd3_5_large_canny_controlnet_example": "SD3.5 Large Canny ControlNet",
|
||||
"sd3_5_large_depth": "SD3.5 Large Depth",
|
||||
@@ -732,23 +744,114 @@
|
||||
"sdxl_revision_text_prompts": "SDXL Revision Text Prompts",
|
||||
"sdxl_revision_zero_positive": "SDXL Revision Zero Positive",
|
||||
"sdxlturbo_example": "SDXL Turbo",
|
||||
"hidream_i1_dev": "HiDream I1 Dev",
|
||||
"hidream_i1_fast": "HiDream I1 Fast",
|
||||
"hidream_i1_full": "HiDream I1 Full"
|
||||
"image_lotus_depth_v1_1": "Lotus Depth"
|
||||
},
|
||||
"Video": {
|
||||
"video_cosmos_predict2_2B_video2world_480p_16fps": "Cosmos Predict2 2B Video2World 480p 16fps",
|
||||
"video_wan_vace_14B_t2v": "Wan VACE Text to Video",
|
||||
"video_wan_vace_14B_ref2v": "Wan VACE Reference to Video",
|
||||
"video_wan_vace_14B_v2v": "Wan VACE Control Video",
|
||||
"video_wan_vace_outpainting": "Wan VACE Outpainting",
|
||||
"video_wan_vace_flf2v": "Wan VACE First-Last Frame",
|
||||
"video_wan_vace_inpainting": "Wan VACE Inpainting",
|
||||
"video_wan2_1_fun_camera_v1_1_1_3B": "Wan 2.1 Fun Camera 1.3B",
|
||||
"video_wan2_1_fun_camera_v1_1_14B": "Wan 2.1 Fun Camera 14B",
|
||||
"text_to_video_wan": "Wan 2.1 Text to Video",
|
||||
"image_to_video_wan": "Wan 2.1 Image to Video",
|
||||
"wan2_1_fun_inp": "Wan 2.1 Inpainting",
|
||||
"wan2_1_fun_control": "Wan 2.1 ControlNet",
|
||||
"wan2_1_flf2v_720_f16": "Wan 2.1 FLF2V 720p F16",
|
||||
"ltxv_text_to_video": "LTXV Text to Video",
|
||||
"ltxv_image_to_video": "LTXV Image to Video",
|
||||
"mochi_text_to_video_example": "Mochi Text to Video",
|
||||
"hunyuan_video_text_to_video": "Hunyuan Video Text to Video",
|
||||
"image_to_video": "SVD Image to Video",
|
||||
"txt_to_image_to_video": "SVD Text to Image to Video"
|
||||
},
|
||||
"Image API": {
|
||||
"api_bfl_flux_1_kontext_multiple_images_input": "BFL Flux.1 Kontext Multiple Image Input",
|
||||
"api_bfl_flux_1_kontext_pro_image": "BFL Flux.1 Kontext Pro",
|
||||
"api_bfl_flux_1_kontext_max_image": "BFL Flux.1 Kontext Max",
|
||||
"api_bfl_flux_pro_t2i": "BFL Flux[Pro]: Text to Image",
|
||||
"api_luma_photon_i2i": "Luma Photon: Image to Image",
|
||||
"api_luma_photon_style_ref": "Luma Photon: Style Reference",
|
||||
"api_recraft_image_gen_with_color_control": "Recraft: Color Control Image Generation",
|
||||
"api_recraft_image_gen_with_style_control": "Recraft: Style Control Image Generation",
|
||||
"api_recraft_vector_gen": "Recraft: Vector Generation",
|
||||
"api_runway_text_to_image": "Runway: Text to Image",
|
||||
"api_runway_reference_to_image": "Runway: Reference to Image",
|
||||
"api_stability_ai_stable_image_ultra_t2i": "Stability AI: Stable Image Ultra Text to Image",
|
||||
"api_stability_ai_i2i": "Stability AI: Image to Image",
|
||||
"api_stability_ai_sd3_5_t2i": "Stability AI: SD3.5 Text to Image",
|
||||
"api_stability_ai_sd3_5_i2i": "Stability AI: SD3.5 Image to Image",
|
||||
"api_ideogram_v3_t2i": "Ideogram V3: Text to Image",
|
||||
"api_openai_image_1_t2i": "OpenAI: GPT-Image-1 Text to Image",
|
||||
"api_openai_image_1_i2i": "OpenAI: GPT-Image-1 Image to Image",
|
||||
"api_openai_image_1_inpaint": "OpenAI: GPT-Image-1 Inpaint",
|
||||
"api_openai_image_1_multi_inputs": "OpenAI: GPT-Image-1 Multi Inputs",
|
||||
"api_openai_dall_e_2_t2i": "OpenAI: Dall-E 2 Text to Image",
|
||||
"api_openai_dall_e_2_inpaint": "OpenAI: Dall-E 2 Inpaint",
|
||||
"api_openai_dall_e_3_t2i": "OpenAI: Dall-E 3 Text to Image"
|
||||
},
|
||||
"Video API": {
|
||||
"api_moonvalley_text_to_video": "Moonvalley: Text to Video",
|
||||
"api_moonvalley_image_to_video": "Moonvalley: Image to Video",
|
||||
"api_kling_i2v": "Kling: Image to Video",
|
||||
"api_kling_effects": "Kling: Video Effects",
|
||||
"api_kling_flf": "Kling: FLF2V",
|
||||
"api_luma_i2v": "Luma: Image to Video",
|
||||
"api_luma_t2v": "Luma: Text to Video",
|
||||
"api_hailuo_minimax_t2v": "MiniMax: Text to Video",
|
||||
"api_hailuo_minimax_i2v": "MiniMax: Image to Video",
|
||||
"api_pixverse_i2v": "PixVerse: Image to Video",
|
||||
"api_pixverse_template_i2v": "PixVerse Templates: Image to Video",
|
||||
"api_pixverse_t2v": "PixVerse: Text to Video",
|
||||
"api_runway_gen3a_turbo_image_to_video": "Runway: Gen3a Turbo Image to Video",
|
||||
"api_runway_gen4_turo_image_to_video": "Runway: Gen4 Turbo Image to Video",
|
||||
"api_runway_first_last_frame": "Runway: First Last Frame to Video",
|
||||
"api_pika_i2v": "Pika: Image to Video",
|
||||
"api_pika_scene": "Pika Scenes: Images to Video",
|
||||
"api_veo2_i2v": "Veo2: Image to Video"
|
||||
},
|
||||
"3D API": {
|
||||
"api_rodin_image_to_model": "Rodin: Image to Model",
|
||||
"api_rodin_multiview_to_model": "Rodin: Multiview to Model",
|
||||
"api_tripo_text_to_model": "Tripo: Text to Model",
|
||||
"api_tripo_image_to_model": "Tripo: Image to Model",
|
||||
"api_tripo_multiview_to_model": "Tripo: Multiview to Model"
|
||||
},
|
||||
"LLM API": {
|
||||
"api_openai_chat": "OpenAI: Chat",
|
||||
"api_google_gemini": "Google Gemini: Chat"
|
||||
},
|
||||
"Upscaling": {
|
||||
"hiresfix_latent_workflow": "Upscale",
|
||||
"esrgan_example": "ESRGAN",
|
||||
"hiresfix_esrgan_workflow": "HiresFix ESRGAN Workflow",
|
||||
"latent_upscale_different_prompt_model": "Latent Upscale Different Prompt Model"
|
||||
},
|
||||
"ControlNet": {
|
||||
"controlnet_example": "Scribble ControlNet",
|
||||
"2_pass_pose_worship": "Pose ControlNet 2 Pass",
|
||||
"depth_controlnet": "Depth ControlNet",
|
||||
"depth_t2i_adapter": "Depth T2I Adapter",
|
||||
"mixing_controlnets": "Mixing ControlNets"
|
||||
},
|
||||
"Area Composition": {
|
||||
"area_composition": "Area Composition",
|
||||
"area_composition_reversed": "Area Composition Reversed",
|
||||
"area_composition_square_area_for_subject": "Area Composition Square Area for Subject"
|
||||
},
|
||||
"3D": {
|
||||
"stable_zero123_example": "Stable Zero123",
|
||||
"hunyuan3d-non-multiview-train": "Hunyuan3D 2.0",
|
||||
"hunyuan-3d-multiview-elf": "Hunyuan3D 2.0 MV",
|
||||
"hunyuan-3d-turbo": "Hunyuan3D 2.0 MV Turbo"
|
||||
"3d_hunyuan3d_image_to_model": "Hunyuan3D 2.0",
|
||||
"3d_hunyuan3d_multiview_to_model": "Hunyuan3D 2.0 MV",
|
||||
"3d_hunyuan3d_multiview_to_model_turbo": "Hunyuan3D 2.0 MV Turbo",
|
||||
"stable_zero123_example": "Stable Zero123"
|
||||
},
|
||||
"Audio": {
|
||||
"stable_audio_example": "Stable Audio"
|
||||
"audio_stable_audio_example": "Stable Audio",
|
||||
"audio_ace_step_1_t2a_instrumentals": "ACE-Step v1 Text to Instrumentals Music",
|
||||
"audio_ace_step_1_t2a_song": "ACE Step v1 Text to Song",
|
||||
"audio_ace_step_1_m2m_editing": "ACE Step v1 M2M Editing"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -949,7 +1052,8 @@
|
||||
"Light": "Light",
|
||||
"User": "User",
|
||||
"Credits": "Credits",
|
||||
"API Nodes": "API Nodes"
|
||||
"API Nodes": "API Nodes",
|
||||
"Notification Preferences": "Notification Preferences"
|
||||
},
|
||||
"serverConfigItems": {
|
||||
"listen": {
|
||||
@@ -1495,6 +1599,7 @@
|
||||
"loadError": "Failed to load help: {error}"
|
||||
},
|
||||
"whatsNewPopup": {
|
||||
"learnMore": "Learn more"
|
||||
"learnMore": "Learn more",
|
||||
"noReleaseNotes": "No release notes available."
|
||||
}
|
||||
}
|
||||
@@ -259,6 +259,10 @@
|
||||
"name": "Number of nodes suggestions",
|
||||
"tooltip": "Only for litegraph searchbox/context menu"
|
||||
},
|
||||
"Comfy_Notification_ShowVersionUpdates": {
|
||||
"name": "Show version updates",
|
||||
"tooltip": "Show updates for new models, and major new features."
|
||||
},
|
||||
"Comfy_Pointer_ClickBufferTime": {
|
||||
"name": "Pointer click drift delay",
|
||||
"tooltip": "After pressing a pointer button down, this is the maximum time (in milliseconds) that pointer movement can be ignored for.\n\nHelps prevent objects from being unintentionally nudged if the pointer is moved whilst clicking."
|
||||
@@ -318,6 +322,7 @@
|
||||
},
|
||||
"Comfy_UseNewMenu": {
|
||||
"name": "Use new menu",
|
||||
"tooltip": "Menu bar position. On mobile devices, the menu is always shown at the top.",
|
||||
"options": {
|
||||
"Disabled": "Disabled",
|
||||
"Top": "Top",
|
||||
|
||||
@@ -785,13 +785,13 @@
|
||||
"Toggle Bottom Panel": "Alternar panel inferior",
|
||||
"Toggle Focus Mode": "Alternar modo de enfoque",
|
||||
"Toggle Logs Bottom Panel": "Alternar panel inferior de registros",
|
||||
"Toggle Model Library Sidebar": "Alternar barra lateral de biblioteca de modelos",
|
||||
"Toggle Node Library Sidebar": "Alternar barra lateral de biblioteca de nodos",
|
||||
"Toggle Queue Sidebar": "Alternar barra lateral de cola",
|
||||
"Toggle Model Library Sidebar": "Alternar barra lateral de la biblioteca de modelos",
|
||||
"Toggle Node Library Sidebar": "Alternar barra lateral de la biblioteca de nodos",
|
||||
"Toggle Queue Sidebar": "Alternar barra lateral de la cola",
|
||||
"Toggle Search Box": "Alternar caja de búsqueda",
|
||||
"Toggle Terminal Bottom Panel": "Alternar panel inferior de terminal",
|
||||
"Toggle Theme (Dark/Light)": "Alternar tema (Oscuro/Claro)",
|
||||
"Toggle Workflows Sidebar": "Alternar barra lateral de flujos de trabajo",
|
||||
"Toggle Workflows Sidebar": "Alternar barra lateral de los flujos de trabajo",
|
||||
"Toggle the Custom Nodes Manager": "Alternar el Administrador de Nodos Personalizados",
|
||||
"Toggle the Custom Nodes Manager Progress Bar": "Alternar la Barra de Progreso del Administrador de Nodos Personalizados",
|
||||
"Undo": "Deshacer",
|
||||
@@ -1099,6 +1099,7 @@
|
||||
"Node Search Box": "Caja de Búsqueda de Nodo",
|
||||
"Node Widget": "Widget de Nodo",
|
||||
"NodeLibrary": "Biblioteca de Nodos",
|
||||
"Notification Preferences": "Preferencias de notificación",
|
||||
"Pointer": "Puntero",
|
||||
"Queue": "Cola",
|
||||
"QueueButton": "Botón de Cola",
|
||||
@@ -1199,34 +1200,44 @@
|
||||
"Flux": "Flux",
|
||||
"Image": "Imagen",
|
||||
"Image API": "API de Imagen",
|
||||
"LLM API": "API LLM",
|
||||
"Upscaling": "Ampliación",
|
||||
"Video": "Video",
|
||||
"Video API": "API de Video"
|
||||
},
|
||||
"template": {
|
||||
"3D": {
|
||||
"hunyuan-3d-multiview-elf": "Hunyuan3D 2.0 MV",
|
||||
"hunyuan-3d-turbo": "Hunyuan3D 2.0 MV Turbo",
|
||||
"hunyuan3d-non-multiview-train": "Hunyuan3D 2.0",
|
||||
"3d_hunyuan3d_image_to_model": "Hunyuan3D 2.0",
|
||||
"3d_hunyuan3d_multiview_to_model": "Hunyuan3D 2.0 MV",
|
||||
"3d_hunyuan3d_multiview_to_model_turbo": "Hunyuan3D 2.0 MV Turbo",
|
||||
"stable_zero123_example": "Estable Zero123"
|
||||
},
|
||||
"3D API": {
|
||||
"api_rodin_image_to_model": "Rodin: Imagen a Modelo",
|
||||
"api_rodin_multiview_to_model": "Rodin: Multivista a Modelo",
|
||||
"api_tripo_image_to_model": "Tripo: Imagen a Modelo",
|
||||
"api_tripo_multiview_to_model": "Tripo: Multivista a Modelo",
|
||||
"api_tripo_text_to_model": "Tripo: Texto a Modelo"
|
||||
},
|
||||
"Area Composition": {
|
||||
"area_composition": "Composición de Área",
|
||||
"area_composition_reversed": "Composición de Área Invertida",
|
||||
"area_composition_square_area_for_subject": "Composición de Área Cuadrada para el Sujeto"
|
||||
},
|
||||
"Audio": {
|
||||
"stable_audio_example": "Audio Estable"
|
||||
"audio_ace_step_1_m2m_editing": "ACE Step v1 Edición M2M",
|
||||
"audio_ace_step_1_t2a_instrumentals": "ACE-Step v1 Texto a Música Instrumental",
|
||||
"audio_ace_step_1_t2a_song": "ACE Step v1 Texto a Canción",
|
||||
"audio_stable_audio_example": "Stable Audio"
|
||||
},
|
||||
"Basics": {
|
||||
"default": "Generación de Imagen",
|
||||
"embedding_example": "Incrustación",
|
||||
"gligen_textbox_example": "Caja de Texto Gligen",
|
||||
"image2image": "Imagen a Imagen",
|
||||
"inpain_model_outpainting": "Outpaint",
|
||||
"inpaint_example": "Inpaint",
|
||||
"lora": "Lora",
|
||||
"lora_multiple": "Lora Múltiple"
|
||||
"inpaint_model_outpainting": "Outpaint",
|
||||
"lora": "LoRA",
|
||||
"lora_multiple": "LoRA Múltiple"
|
||||
},
|
||||
"ControlNet": {
|
||||
"2_pass_pose_worship": "ControlNet de Pose 2 Pasadas",
|
||||
@@ -1237,17 +1248,27 @@
|
||||
},
|
||||
"Flux": {
|
||||
"flux_canny_model_example": "Flux Canny Model",
|
||||
"flux_depth_lora_example": "Flux Depth Lora",
|
||||
"flux_dev_checkpoint_example": "Flux Dev",
|
||||
"flux_depth_lora_example": "Flux Depth LoRA",
|
||||
"flux_dev_checkpoint_example": "Flux Dev fp8",
|
||||
"flux_dev_full_text_to_image": "Flux Dev texto a imagen completo",
|
||||
"flux_fill_inpaint_example": "Flux Inpaint",
|
||||
"flux_fill_outpaint_example": "Flux Outpaint",
|
||||
"flux_kontext_dev_basic": "Flux Kontext Dev (Básico)",
|
||||
"flux_kontext_dev_grouped": "Flux Kontext Dev (Agrupado)",
|
||||
"flux_redux_model_example": "Flux Redux Model",
|
||||
"flux_schnell": "Flux Schnell"
|
||||
"flux_schnell": "Flux Schnell fp8",
|
||||
"flux_schnell_full_text_to_image": "Flux Schnell texto a imagen completo"
|
||||
},
|
||||
"Image": {
|
||||
"hidream_e1_full": "HiDream E1 Completo",
|
||||
"hidream_i1_dev": "HiDream I1 Dev",
|
||||
"hidream_i1_fast": "HiDream I1 Rápido",
|
||||
"hidream_i1_full": "HiDream I1 Completo",
|
||||
"image_chroma_text_to_image": "Chroma texto a imagen",
|
||||
"image_cosmos_predict2_2B_t2i": "Cosmos Predict2 2B T2I",
|
||||
"image_lotus_depth_v1_1": "Lotus Depth",
|
||||
"image_omnigen2_image_edit": "OmniGen2 Edición de Imagen",
|
||||
"image_omnigen2_t2i": "OmniGen2 Texto a Imagen",
|
||||
"sd3_5_large_blur": "SD3.5 Grande Desenfoque",
|
||||
"sd3_5_large_canny_controlnet_example": "SD3.5 Grande Canny ControlNet",
|
||||
"sd3_5_large_depth": "SD3.5 Grande Profundidad",
|
||||
@@ -1259,21 +1280,33 @@
|
||||
"sdxlturbo_example": "SDXL Turbo"
|
||||
},
|
||||
"Image API": {
|
||||
"api-openai-dall-e-2-inpaint": "Dall-E 2 Rellenar",
|
||||
"api-openai-dall-e-2-t2i": "Dall-E 2 Texto a Imagen",
|
||||
"api-openai-dall-e-3-t2i": "Dall-E 3 Texto a Imagen",
|
||||
"api_bfl_flux_pro_t2i": "BFL Flux 1.1[pro] Ultra Texto a Imagen",
|
||||
"api_ideogram_v3_t2i": "Ideogram V3 Texto a Imagen",
|
||||
"api_luma_photon_i2i": "Luma Photon Imagen a Imagen",
|
||||
"api_luma_photon_style_ref": "Luma Photon Referencia de Estilo",
|
||||
"api_openai_image_1_i2i": "OpenAI Image-1 Imagen a Imagen",
|
||||
"api_openai_image_1_inpaint": "OpenAI Image-1 Rellenar",
|
||||
"api_openai_image_1_multi_inputs": "OpenAI Image-1 Múltiples Entradas",
|
||||
"api_openai_image_1_t2i": "OpenAI Image-1 Texto a Imagen",
|
||||
"api_recraft_image_gen_with_color_control": "Recraft Generación de Imagen con Control de Color",
|
||||
"api_recraft_image_gen_with_style_control": "Recraft Generación de Imagen con Control de Estilo",
|
||||
"api_recraft_vector_gen": "Recraft Generación de Vectores",
|
||||
"api_stability_sd3_t2i": "Stability AI Stable Image Ultra Texto a Imagen"
|
||||
"api_bfl_flux_1_kontext_max_image": "BFL Flux.1 Kontext Max",
|
||||
"api_bfl_flux_1_kontext_multiple_images_input": "BFL Flux.1 Kontext Entrada de Múltiples Imágenes",
|
||||
"api_bfl_flux_1_kontext_pro_image": "BFL Flux.1 Kontext Pro",
|
||||
"api_bfl_flux_pro_t2i": "BFL Flux[Pro]: Texto a Imagen",
|
||||
"api_ideogram_v3_t2i": "Ideogram V3: Texto a Imagen",
|
||||
"api_luma_photon_i2i": "Luma Photon: Imagen a Imagen",
|
||||
"api_luma_photon_style_ref": "Luma Photon: Referencia de Estilo",
|
||||
"api_openai_dall_e_2_inpaint": "OpenAI: Dall-E 2 Rellenar",
|
||||
"api_openai_dall_e_2_t2i": "OpenAI: Dall-E 2 Texto a Imagen",
|
||||
"api_openai_dall_e_3_t2i": "OpenAI: Dall-E 3 Texto a Imagen",
|
||||
"api_openai_image_1_i2i": "OpenAI: GPT-Image-1 Imagen a Imagen",
|
||||
"api_openai_image_1_inpaint": "OpenAI: GPT-Image-1 Rellenar",
|
||||
"api_openai_image_1_multi_inputs": "OpenAI: GPT-Image-1 Múltiples Entradas",
|
||||
"api_openai_image_1_t2i": "OpenAI: GPT-Image-1 Texto a Imagen",
|
||||
"api_recraft_image_gen_with_color_control": "Recraft: Generación de Imagen con Control de Color",
|
||||
"api_recraft_image_gen_with_style_control": "Recraft: Generación de Imagen con Control de Estilo",
|
||||
"api_recraft_vector_gen": "Recraft: Generación de Vectores",
|
||||
"api_runway_reference_to_image": "Runway: Referencia a Imagen",
|
||||
"api_runway_text_to_image": "Runway: Texto a Imagen",
|
||||
"api_stability_ai_i2i": "Stability AI: Imagen a Imagen",
|
||||
"api_stability_ai_sd3_5_i2i": "Stability AI: SD3.5 Imagen a Imagen",
|
||||
"api_stability_ai_sd3_5_t2i": "Stability AI: SD3.5 Texto a Imagen",
|
||||
"api_stability_ai_stable_image_ultra_t2i": "Stability AI: Stable Image Ultra Texto a Imagen"
|
||||
},
|
||||
"LLM API": {
|
||||
"api_google_gemini": "Google Gemini: Chat",
|
||||
"api_openai_chat": "OpenAI: Chat"
|
||||
},
|
||||
"Upscaling": {
|
||||
"esrgan_example": "ESRGAN",
|
||||
@@ -1290,119 +1323,190 @@
|
||||
"mochi_text_to_video_example": "Mochi Texto a Video",
|
||||
"text_to_video_wan": "Wan 2.1 Texto a Video",
|
||||
"txt_to_image_to_video": "SVD Texto a Imagen a Video",
|
||||
"video_cosmos_predict2_2B_video2world_480p_16fps": "Cosmos Predict2 2B Video2World 480p 16fps",
|
||||
"video_wan2_1_fun_camera_v1_1_14B": "Wan 2.1 Fun Camera 14B",
|
||||
"video_wan2_1_fun_camera_v1_1_1_3B": "Wan 2.1 Fun Camera 1.3B",
|
||||
"video_wan_vace_14B_ref2v": "Wan VACE Referencia a Video",
|
||||
"video_wan_vace_14B_t2v": "Wan VACE Texto a Video",
|
||||
"video_wan_vace_14B_v2v": "Wan VACE Control Video",
|
||||
"video_wan_vace_flf2v": "Wan VACE Primer-Ultimo Fotograma",
|
||||
"video_wan_vace_inpainting": "Wan VACE Inpainting",
|
||||
"video_wan_vace_outpainting": "Wan VACE Outpainting",
|
||||
"wan2_1_flf2v_720_f16": "Wan 2.1 FLF2V 720p F16",
|
||||
"wan2_1_fun_control": "Wan 2.1 ControlNet",
|
||||
"wan2_1_fun_inp": "Wan 2.1 Relleno"
|
||||
"wan2_1_fun_inp": "Wan 2.1 Inpainting"
|
||||
},
|
||||
"Video API": {
|
||||
"api_hailuo_minimax_i2v": "MiniMax Imagen a Video",
|
||||
"api_kling_i2v": "Kling Imagen a Video",
|
||||
"api_luma_i2v": "Luma Imagen a Video",
|
||||
"api_hailuo_minimax_i2v": "MiniMax: Imagen a Video",
|
||||
"api_hailuo_minimax_t2v": "MiniMax: Texto a Video",
|
||||
"api_kling_effects": "Kling: Efectos de Video",
|
||||
"api_kling_flf": "Kling: FLF2V",
|
||||
"api_kling_i2v": "Kling: Imagen a Video",
|
||||
"api_luma_i2v": "Luma: Imagen a Video",
|
||||
"api_luma_t2v": "Luma: Texto a Video",
|
||||
"api_moonvalley_image_to_video": "Moonvalley: Imagen a Video",
|
||||
"api_moonvalley_text_to_video": "Moonvalley: Texto a Video",
|
||||
"api_pika_i2v": "Pika: Imagen a Video",
|
||||
"api_pika_scene": "Pika Escenas: Imágenes a Video",
|
||||
"api_pixverse_t2v": "PixVerse Texto a Video",
|
||||
"api_pixverse_template_i2v": "PixVerse Template Effects: Imagen a Video",
|
||||
"api_veo2_i2v": "Veo2 Imagen a Video"
|
||||
"api_pixverse_i2v": "PixVerse: Imagen a Video",
|
||||
"api_pixverse_t2v": "PixVerse: Texto a Video",
|
||||
"api_pixverse_template_i2v": "PixVerse Plantillas: Imagen a Video",
|
||||
"api_runway_first_last_frame": "Runway: Primer Último Fotograma a Video",
|
||||
"api_runway_gen3a_turbo_image_to_video": "Runway: Gen3a Turbo Imagen a Video",
|
||||
"api_runway_gen4_turo_image_to_video": "Runway: Gen4 Turbo Imagen a Video",
|
||||
"api_veo2_i2v": "Veo2: Imagen a Video"
|
||||
}
|
||||
},
|
||||
"templateDescription": {
|
||||
"3D": {
|
||||
"hunyuan-3d-multiview-elf": "Usa Hunyuan3D 2mv para generar modelos desde múltiples vistas.",
|
||||
"hunyuan-3d-turbo": "Usa Hunyuan3D 2mv turbo para generar modelos desde múltiples vistas.",
|
||||
"hunyuan3d-non-multiview-train": "Usa Hunyuan3D 2.0 para generar modelos desde una sola vista.",
|
||||
"stable_zero123_example": "Genera vistas 3D a partir de imágenes individuales."
|
||||
"3d_hunyuan3d_image_to_model": "Genera modelos 3D a partir de imágenes individuales usando Hunyuan3D 2.0.",
|
||||
"3d_hunyuan3d_multiview_to_model": "Genera modelos 3D a partir de múltiples vistas usando Hunyuan3D 2.0 MV.",
|
||||
"3d_hunyuan3d_multiview_to_model_turbo": "Genera modelos 3D a partir de múltiples vistas usando Hunyuan3D 2.0 MV Turbo.",
|
||||
"stable_zero123_example": "Genera vistas 3D a partir de imágenes individuales usando Stable Zero123."
|
||||
},
|
||||
"3D API": {
|
||||
"api_rodin_image_to_model": "Genera modelos 3D detallados a partir de una sola foto usando Rodin AI.",
|
||||
"api_rodin_multiview_to_model": "Esculpe modelos 3D completos usando reconstrucción multivista de Rodin.",
|
||||
"api_tripo_image_to_model": "Genera activos 3D profesionales a partir de imágenes 2D usando el motor Tripo.",
|
||||
"api_tripo_multiview_to_model": "Construye modelos 3D a partir de múltiples ángulos con el escáner avanzado de Tripo.",
|
||||
"api_tripo_text_to_model": "Crea objetos 3D a partir de descripciones con modelado basado en texto de Tripo."
|
||||
},
|
||||
"Area Composition": {
|
||||
"area_composition": "Controla la composición de la imagen por áreas.",
|
||||
"area_composition_reversed": "Invierte el flujo de composición por áreas.",
|
||||
"area_composition_square_area_for_subject": "Crea una colocación consistente del sujeto."
|
||||
"area_composition": "Genera imágenes controlando la composición con áreas definidas.",
|
||||
"area_composition_square_area_for_subject": "Genera imágenes con colocación consistente del sujeto usando composición de áreas."
|
||||
},
|
||||
"Audio": {
|
||||
"stable_audio_example": "Genera audio a partir de descripciones de texto."
|
||||
"audio_ace_step_1_m2m_editing": "Edita canciones existentes para cambiar el estilo y la letra usando ACE-Step v1 M2M.",
|
||||
"audio_ace_step_1_t2a_instrumentals": "Genera música instrumental a partir de texto usando ACE-Step v1.",
|
||||
"audio_ace_step_1_t2a_song": "Genera canciones con voz a partir de texto usando ACE-Step v1, soportando múltiples idiomas y personalización de estilo.",
|
||||
"audio_stable_audio_example": "Genera audio a partir de descripciones de texto usando Stable Audio."
|
||||
},
|
||||
"Basics": {
|
||||
"default": "Genera imágenes a partir de descripciones de texto.",
|
||||
"embedding_example": "Utiliza inversión textual para estilos consistentes.",
|
||||
"gligen_textbox_example": "Especifica la ubicación y el tamaño de los objetos.",
|
||||
"embedding_example": "Genera imágenes usando inversión textual para estilos consistentes.",
|
||||
"gligen_textbox_example": "Genera imágenes con colocación precisa de objetos usando cajas de texto.",
|
||||
"image2image": "Transforma imágenes existentes usando indicaciones de texto.",
|
||||
"inpain_model_outpainting": "Extiende imágenes más allá de sus límites originales.",
|
||||
"inpaint_example": "Edita partes específicas de imágenes de manera fluida.",
|
||||
"lora": "Aplica modelos LoRA para estilos o temas especializados.",
|
||||
"lora_multiple": "Combina múltiples modelos LoRA para resultados únicos."
|
||||
"inpaint_model_outpainting": "Extiende imágenes más allá de sus límites originales.",
|
||||
"lora": "Genera imágenes con modelos LoRA para estilos o temas especializados.",
|
||||
"lora_multiple": "Genera imágenes combinando múltiples modelos LoRA."
|
||||
},
|
||||
"ControlNet": {
|
||||
"2_pass_pose_worship": "Genera imágenes a partir de referencias de pose.",
|
||||
"controlnet_example": "Controla la generación de imágenes con imágenes de referencia.",
|
||||
"depth_controlnet": "Crea imágenes con generación consciente de profundidad.",
|
||||
"depth_t2i_adapter": "Genera rápidamente imágenes conscientes de profundidad con un adaptador T2I.",
|
||||
"mixing_controlnets": "Combina múltiples modelos ControlNet juntos."
|
||||
"2_pass_pose_worship": "Genera imágenes guiadas por referencias de pose usando ControlNet.",
|
||||
"controlnet_example": "Genera imágenes guiadas por imágenes de garabato usando ControlNet.",
|
||||
"depth_controlnet": "Genera imágenes guiadas por información de profundidad usando ControlNet.",
|
||||
"depth_t2i_adapter": "Genera imágenes guiadas por información de profundidad usando el adaptador T2I.",
|
||||
"mixing_controlnets": "Genera imágenes combinando múltiples modelos ControlNet."
|
||||
},
|
||||
"Flux": {
|
||||
"flux_canny_model_example": "Genera imágenes a partir de detección de bordes.",
|
||||
"flux_depth_lora_example": "Crea imágenes con LoRA consciente de profundidad.",
|
||||
"flux_dev_checkpoint_example": "Crea imágenes usando modelos de desarrollo de Flux.",
|
||||
"flux_fill_inpaint_example": "Rellena partes faltantes de imágenes.",
|
||||
"flux_fill_outpaint_example": "Extiende imágenes usando outpainting de Flux.",
|
||||
"flux_redux_model_example": "Transfiere el estilo de una imagen de referencia para guiar la generación de imágenes con Flux.",
|
||||
"flux_schnell": "Genera imágenes rápidamente con Flux Schnell."
|
||||
"flux_canny_model_example": "Genera imágenes guiadas por detección de bordes usando Flux Canny.",
|
||||
"flux_depth_lora_example": "Genera imágenes guiadas por información de profundidad usando Flux LoRA.",
|
||||
"flux_dev_checkpoint_example": "Genera imágenes usando la versión cuantizada fp8 de Flux Dev. Ideal para dispositivos con poca VRAM, solo requiere un archivo de modelo, pero la calidad es ligeramente inferior a la versión completa.",
|
||||
"flux_dev_full_text_to_image": "Genera imágenes de alta calidad con la versión completa de Flux Dev. Requiere más VRAM y múltiples archivos de modelo, pero ofrece la mejor adherencia a la indicación y calidad de imagen.",
|
||||
"flux_fill_inpaint_example": "Rellena partes faltantes de imágenes usando inpainting de Flux.",
|
||||
"flux_fill_outpaint_example": "Extiende imágenes más allá de los límites usando outpainting de Flux.",
|
||||
"flux_kontext_dev_basic": "Edita imágenes usando Flux Kontext con visibilidad total de nodos, ideal para aprender el flujo de trabajo.",
|
||||
"flux_kontext_dev_grouped": "Versión simplificada de Flux Kontext con nodos agrupados para un espacio de trabajo más limpio.",
|
||||
"flux_redux_model_example": "Genera imágenes transfiriendo el estilo de imágenes de referencia usando Flux Redux.",
|
||||
"flux_schnell": "Genera imágenes rápidamente con la versión cuantizada fp8 de Flux Schnell. Perfecto para hardware de gama baja, solo requiere 4 pasos.",
|
||||
"flux_schnell_full_text_to_image": "Genera imágenes rápidamente con la versión completa de Flux Schnell. Licencia Apache2.0, solo requiere 4 pasos manteniendo buena calidad."
|
||||
},
|
||||
"Image": {
|
||||
"hidream_e1_full": "Edita imágenes con HiDream E1.",
|
||||
"hidream_i1_dev": "Genera imágenes con HiDream I1 Dev.",
|
||||
"hidream_i1_fast": "Genera imágenes rápidamente con HiDream I1.",
|
||||
"hidream_i1_full": "Genera imágenes con HiDream I1.",
|
||||
"sd3_5_large_blur": "Genera imágenes a partir de imágenes de referencia borrosas con SD 3.5.",
|
||||
"sd3_5_large_canny_controlnet_example": "Usa detección de bordes para guiar la generación de imágenes con SD 3.5.",
|
||||
"sd3_5_large_depth": "Crea imágenes conscientes de profundidad con SD 3.5.",
|
||||
"sd3_5_simple_example": "Genera imágenes con SD 3.5.",
|
||||
"sdxl_refiner_prompt_example": "Mejora los resultados de SDXL con refinadores.",
|
||||
"sdxl_revision_text_prompts": "Transfiere conceptos de imágenes de referencia para guiar la generación de imágenes con SDXL.",
|
||||
"sdxl_revision_zero_positive": "Agrega indicaciones de texto junto a imágenes de referencia para guiar la generación de imágenes con SDXL.",
|
||||
"sdxl_simple_example": "Crea imágenes de alta calidad con SDXL.",
|
||||
"sdxlturbo_example": "Genera imágenes en un solo paso con SDXL Turbo."
|
||||
"hidream_e1_full": "Edita imágenes con HiDream E1 - Modelo profesional de edición de imágenes por lenguaje natural.",
|
||||
"hidream_i1_dev": "Genera imágenes con HiDream I1 Dev - Versión equilibrada con 28 pasos de inferencia, adecuada para hardware medio.",
|
||||
"hidream_i1_fast": "Genera imágenes rápidamente con HiDream I1 Fast - Versión ligera con 16 pasos, ideal para previsualizaciones rápidas.",
|
||||
"hidream_i1_full": "Genera imágenes con HiDream I1 Full - Versión completa con 50 pasos para la máxima calidad.",
|
||||
"image_chroma_text_to_image": "Chroma está modificado de Flux y tiene algunos cambios en la arquitectura.",
|
||||
"image_cosmos_predict2_2B_t2i": "Genera imágenes con Cosmos-Predict2 2B T2I, logrando generación física precisa, alta fidelidad y gran detalle.",
|
||||
"image_lotus_depth_v1_1": "Ejecuta Lotus Depth en ComfyUI para estimación de profundidad monocular eficiente y detallada.",
|
||||
"image_omnigen2_image_edit": "Edita imágenes con instrucciones en lenguaje natural usando las avanzadas capacidades de edición de imagen y soporte de texto de OmniGen2.",
|
||||
"image_omnigen2_t2i": "Genera imágenes de alta calidad a partir de texto usando el modelo multimodal unificado 7B de OmniGen2 con arquitectura de doble vía.",
|
||||
"sd3_5_large_blur": "Genera imágenes guiadas por imágenes de referencia borrosas usando SD 3.5.",
|
||||
"sd3_5_large_canny_controlnet_example": "Genera imágenes guiadas por detección de bordes usando SD 3.5 Canny ControlNet.",
|
||||
"sd3_5_large_depth": "Genera imágenes guiadas por información de profundidad usando SD 3.5.",
|
||||
"sd3_5_simple_example": "Genera imágenes usando SD 3.5.",
|
||||
"sdxl_refiner_prompt_example": "Mejora imágenes SDXL usando modelos refinadores.",
|
||||
"sdxl_revision_text_prompts": "Genera imágenes transfiriendo conceptos de imágenes de referencia usando SDXL Revision.",
|
||||
"sdxl_revision_zero_positive": "Genera imágenes usando tanto indicaciones de texto como imágenes de referencia con SDXL Revision.",
|
||||
"sdxl_simple_example": "Genera imágenes de alta calidad usando SDXL.",
|
||||
"sdxlturbo_example": "Genera imágenes en un solo paso usando SDXL Turbo."
|
||||
},
|
||||
"Image API": {
|
||||
"api-openai-dall-e-2-inpaint": "Usa la API Dall-E 2 para hacer inpainting en imágenes.",
|
||||
"api-openai-dall-e-2-t2i": "Usa la API Dall-E 2 para generar imágenes a partir de descripciones de texto.",
|
||||
"api-openai-dall-e-3-t2i": "Usa la API Dall-E 3 para generar imágenes a partir de descripciones de texto.",
|
||||
"api_bfl_flux_pro_t2i": "Crea imágenes con FLUX.1 [pro] y su excelente seguimiento de indicaciones, calidad visual, detalle de imagen y diversidad de resultados.",
|
||||
"api_ideogram_v3_t2i": "Genera imágenes con alineación de indicaciones de alta calidad, fotorrealismo y renderizado de texto. Crea logotipos de calidad profesional, carteles promocionales, conceptos de páginas de destino, fotografía de productos y más. Crea composiciones espaciales sofisticadas con fondos intrincados, iluminación y colores precisos y matizados, y detalles ambientales realistas.",
|
||||
"api_bfl_flux_1_kontext_max_image": "Edita imágenes con Flux.1 Kontext max image.",
|
||||
"api_bfl_flux_1_kontext_multiple_images_input": "Introduce múltiples imágenes y edítalas con Flux.1 Kontext.",
|
||||
"api_bfl_flux_1_kontext_pro_image": "Edita imágenes con Flux.1 Kontext pro image.",
|
||||
"api_bfl_flux_pro_t2i": "Genera imágenes con excelente seguimiento de indicaciones y calidad visual usando FLUX.1 Pro.",
|
||||
"api_ideogram_v3_t2i": "Genera imágenes de calidad profesional con excelente alineación de indicaciones, fotorrealismo y renderizado de texto usando Ideogram V3.",
|
||||
"api_luma_photon_i2i": "Guía la generación de imágenes usando una combinación de imágenes e indicaciones.",
|
||||
"api_luma_photon_style_ref": "Aplica y combina referencias de estilo con control exacto. Luma Photon captura la esencia de cada imagen de referencia, permitiéndote combinar elementos visuales distintos manteniendo calidad profesional.",
|
||||
"api_openai_image_1_i2i": "Usa la API GPT Image 1 para generar imágenes a partir de imágenes.",
|
||||
"api_openai_image_1_inpaint": "Usa la API GPT Image 1 para hacer inpainting en imágenes.",
|
||||
"api_openai_image_1_multi_inputs": "Usa la API GPT Image 1 con múltiples entradas para generar imágenes.",
|
||||
"api_openai_image_1_t2i": "Usa la API GPT Image 1 para generar imágenes a partir de descripciones de texto.",
|
||||
"api_recraft_image_gen_with_color_control": "Crea una paleta personalizada para reutilizar en múltiples imágenes o selecciona colores para cada foto. Haz coincidir la paleta de tu marca y crea imágenes visuales que sean distintivamente tuyas.",
|
||||
"api_recraft_image_gen_with_style_control": "Controla el estilo con ejemplos visuales, alinea la posición y ajusta objetos. Guarda y comparte estilos para una consistencia perfecta de marca.",
|
||||
"api_recraft_vector_gen": "Pasa de una indicación de texto a una imagen vectorial con el generador de vectores IA de Recraft. Produce arte vectorial de la mejor calidad para logotipos, carteles, conjuntos de iconos, anuncios, banners y maquetas. Perfecciona tus diseños con archivos SVG nítidos y de alta calidad. Crea ilustraciones vectoriales de marca para tu app o sitio web en segundos.",
|
||||
"api_stability_sd3_t2i": "Genera imágenes de alta calidad con excelente adherencia a las indicaciones. Perfecto para casos de uso profesional a resolución de 1 megapíxel."
|
||||
"api_luma_photon_style_ref": "Genera imágenes combinando referencias de estilo con control preciso usando Luma Photon.",
|
||||
"api_openai_dall_e_2_inpaint": "Edita imágenes usando inpainting con la API OpenAI Dall-E 2.",
|
||||
"api_openai_dall_e_2_t2i": "Genera imágenes a partir de texto usando la API OpenAI Dall-E 2.",
|
||||
"api_openai_dall_e_3_t2i": "Genera imágenes a partir de texto usando la API OpenAI Dall-E 3.",
|
||||
"api_openai_image_1_i2i": "Genera imágenes a partir de imágenes usando la API OpenAI GPT Image 1.",
|
||||
"api_openai_image_1_inpaint": "Edita imágenes usando inpainting con la API OpenAI GPT Image 1.",
|
||||
"api_openai_image_1_multi_inputs": "Genera imágenes a partir de múltiples entradas usando la API OpenAI GPT Image 1.",
|
||||
"api_openai_image_1_t2i": "Genera imágenes a partir de texto usando la API OpenAI GPT Image 1.",
|
||||
"api_recraft_image_gen_with_color_control": "Genera imágenes con paletas de color personalizadas y visuales de marca usando Recraft.",
|
||||
"api_recraft_image_gen_with_style_control": "Controla el estilo con ejemplos visuales, alinea la posición y ajusta objetos. Guarda y comparte estilos para consistencia de marca.",
|
||||
"api_recraft_vector_gen": "Genera imágenes vectoriales de alta calidad a partir de texto usando el generador de vectores IA de Recraft.",
|
||||
"api_runway_reference_to_image": "Genera nuevas imágenes basadas en estilos y composiciones de referencia con Runway.",
|
||||
"api_runway_text_to_image": "Genera imágenes de alta calidad a partir de texto usando el modelo IA de Runway.",
|
||||
"api_stability_ai_i2i": "Transforma imágenes con generación de alta calidad usando Stability AI, ideal para edición profesional y transferencia de estilo.",
|
||||
"api_stability_ai_sd3_5_i2i": "Genera imágenes de alta calidad con excelente adherencia a la indicación. Perfecto para uso profesional a 1 megapíxel.",
|
||||
"api_stability_ai_sd3_5_t2i": "Genera imágenes de alta calidad con excelente adherencia a la indicación. Perfecto para uso profesional a 1 megapíxel.",
|
||||
"api_stability_ai_stable_image_ultra_t2i": "Genera imágenes de alta calidad con excelente adherencia a la indicación. Perfecto para uso profesional a 1 megapíxel."
|
||||
},
|
||||
"LLM API": {
|
||||
"api_google_gemini": "Experimenta la IA multimodal de Google con las capacidades de razonamiento de Gemini.",
|
||||
"api_openai_chat": "Interactúa con los avanzados modelos de lenguaje de OpenAI para conversaciones inteligentes."
|
||||
},
|
||||
"Upscaling": {
|
||||
"esrgan_example": "Usa modelos de escalado para mejorar la calidad de imagen.",
|
||||
"hiresfix_esrgan_workflow": "Usa modelos de escalado durante pasos intermedios.",
|
||||
"hiresfix_latent_workflow": "Mejora la calidad de imagen en el espacio latente.",
|
||||
"latent_upscale_different_prompt_model": "Escala y cambia la indicación entre pasadas."
|
||||
"esrgan_example": "Escala imágenes usando modelos ESRGAN para mejorar la calidad.",
|
||||
"hiresfix_esrgan_workflow": "Escala imágenes usando modelos ESRGAN durante pasos intermedios.",
|
||||
"hiresfix_latent_workflow": "Escala imágenes mejorando la calidad en el espacio latente.",
|
||||
"latent_upscale_different_prompt_model": "Escala imágenes cambiando las indicaciones entre pasadas."
|
||||
},
|
||||
"Video": {
|
||||
"hunyuan_video_text_to_video": "Genera videos usando el modelo Hunyuan.",
|
||||
"image_to_video": "Transforma imágenes en videos animados.",
|
||||
"image_to_video_wan": "Genera videos rápidamente a partir de imágenes.",
|
||||
"ltxv_image_to_video": "Convierte imágenes fijas en videos.",
|
||||
"ltxv_text_to_video": "Genera videos a partir de descripciones de texto.",
|
||||
"mochi_text_to_video_example": "Crea videos con el modelo Mochi.",
|
||||
"text_to_video_wan": "Genera videos rápidamente a partir de descripciones de texto.",
|
||||
"txt_to_image_to_video": "Genera imágenes a partir de texto y luego conviértelas en videos.",
|
||||
"wan2_1_flf2v_720_f16": "Genera video controlando el primer y último fotograma.",
|
||||
"wan2_1_fun_control": "Guía la generación de video con pose, profundidad, controles de bordes y más.",
|
||||
"wan2_1_fun_inp": "Crea videos a partir de fotogramas iniciales y finales."
|
||||
"hunyuan_video_text_to_video": "Genera videos a partir de texto usando el modelo Hunyuan.",
|
||||
"image_to_video": "Genera videos a partir de imágenes fijas.",
|
||||
"image_to_video_wan": "Genera videos a partir de imágenes usando Wan 2.1.",
|
||||
"ltxv_image_to_video": "Genera videos a partir de imágenes fijas.",
|
||||
"ltxv_text_to_video": "Genera videos a partir de texto.",
|
||||
"mochi_text_to_video_example": "Genera videos a partir de texto usando el modelo Mochi.",
|
||||
"text_to_video_wan": "Genera videos a partir de texto usando Wan 2.1.",
|
||||
"txt_to_image_to_video": "Genera videos creando primero imágenes a partir de texto.",
|
||||
"video_cosmos_predict2_2B_video2world_480p_16fps": "Genera videos con Cosmos-Predict2 2B Video2World, logrando simulaciones físicas precisas, alta fidelidad y consistencia.",
|
||||
"video_wan2_1_fun_camera_v1_1_14B": "Genera videos de alta calidad con control avanzado de cámara usando el modelo completo de 14B.",
|
||||
"video_wan2_1_fun_camera_v1_1_1_3B": "Genera videos dinámicos con movimientos de cámara cinematográficos usando Wan 2.1 Fun Camera 1.3B.",
|
||||
"video_wan_vace_14B_ref2v": "Crea videos que coinciden con el estilo y contenido de una imagen de referencia.",
|
||||
"video_wan_vace_14B_t2v": "Transforma descripciones de texto en videos de alta calidad. Soporta 480p y 720p con el modelo VACE-14B.",
|
||||
"video_wan_vace_14B_v2v": "Genera videos controlando videos de entrada e imágenes de referencia usando Wan VACE.",
|
||||
"video_wan_vace_flf2v": "Genera transiciones suaves definiendo fotogramas iniciales y finales. Soporta secuencias de fotogramas personalizadas.",
|
||||
"video_wan_vace_inpainting": "Edita regiones específicas en videos preservando el contenido circundante.",
|
||||
"video_wan_vace_outpainting": "Genera videos extendidos expandiendo el tamaño usando Wan VACE outpainting.",
|
||||
"wan2_1_flf2v_720_f16": "Genera videos controlando primer y último fotograma usando Wan 2.1 FLF2V.",
|
||||
"wan2_1_fun_control": "Genera videos guiados por pose, profundidad y bordes usando Wan 2.1 ControlNet.",
|
||||
"wan2_1_fun_inp": "Genera videos a partir de fotogramas iniciales y finales usando Wan 2.1 inpainting."
|
||||
},
|
||||
"Video API": {
|
||||
"api_hailuo_minimax_i2v": "Crea videos refinados a partir de imágenes y texto, incluyendo integración CGI y efectos fotográficos de tendencia como abrazos virales de IA. Elige entre una variedad de estilos y temas de video para que coincidan con tu visión creativa.",
|
||||
"api_kling_i2v": "Crea videos con gran adherencia a las indicaciones para acciones, expresiones y movimientos de cámara. Ahora soporta indicaciones complejas con acciones secuenciales para que seas el director de tu escena.",
|
||||
"api_hailuo_minimax_i2v": "Genera videos refinados a partir de imágenes y texto con integración CGI usando MiniMax.",
|
||||
"api_hailuo_minimax_t2v": "Genera videos de alta calidad directamente desde texto. Explora las capacidades avanzadas de IA de MiniMax para crear narrativas visuales diversas con efectos CGI profesionales.",
|
||||
"api_kling_effects": "Genera videos dinámicos aplicando efectos visuales a imágenes usando Kling.",
|
||||
"api_kling_flf": "Genera videos controlando los primeros y últimos fotogramas.",
|
||||
"api_kling_i2v": "Genera videos con excelente adherencia a la indicación para acciones, expresiones y movimientos de cámara usando Kling.",
|
||||
"api_luma_i2v": "Convierte imágenes estáticas en animaciones mágicas de alta calidad al instante.",
|
||||
"api_pika_scene": "Usa múltiples imágenes como ingredientes y genera videos que las incorporen todas.",
|
||||
"api_pixverse_t2v": "Genera videos con interpretación precisa de indicaciones y una dinámica visual impresionante.",
|
||||
"api_pixverse_template_i2v": "Transforma imágenes estáticas en videos dinámicos con movimiento y efectos.",
|
||||
"api_veo2_i2v": "Usa la API Google Veo2 para generar videos a partir de imágenes."
|
||||
"api_luma_t2v": "Genera videos de alta calidad usando indicaciones simples.",
|
||||
"api_moonvalley_image_to_video": "Genera videos cinematográficos 1080p a partir de una imagen usando un modelo entrenado solo con datos licenciados.",
|
||||
"api_moonvalley_text_to_video": "Genera videos cinematográficos 1080p a partir de texto usando un modelo entrenado solo con datos licenciados.",
|
||||
"api_pika_i2v": "Genera videos animados suaves a partir de imágenes estáticas usando Pika AI.",
|
||||
"api_pika_scene": "Genera videos que incorporan múltiples imágenes de entrada usando Pika Scenes.",
|
||||
"api_pixverse_i2v": "Genera videos dinámicos a partir de imágenes estáticas con movimiento y efectos usando PixVerse.",
|
||||
"api_pixverse_t2v": "Genera videos con interpretación precisa de indicaciones y dinámica visual impresionante.",
|
||||
"api_pixverse_template_i2v": "Genera videos dinámicos a partir de imágenes estáticas con movimiento y efectos usando PixVerse.",
|
||||
"api_runway_first_last_frame": "Genera transiciones de video suaves entre dos fotogramas clave con precisión de Runway.",
|
||||
"api_runway_gen3a_turbo_image_to_video": "Genera videos cinematográficos a partir de imágenes estáticas usando Runway Gen3a Turbo.",
|
||||
"api_runway_gen4_turo_image_to_video": "Genera videos dinámicos a partir de imágenes usando Runway Gen4 Turbo.",
|
||||
"api_veo2_i2v": "Genera videos a partir de imágenes usando la API Google Veo2."
|
||||
}
|
||||
},
|
||||
"title": "Comienza con una Plantilla"
|
||||
@@ -1483,7 +1587,8 @@
|
||||
"title": "Bienvenido a ComfyUI"
|
||||
},
|
||||
"whatsNewPopup": {
|
||||
"learnMore": "Aprende más"
|
||||
"learnMore": "Aprende más",
|
||||
"noReleaseNotes": "No hay notas de la versión disponibles."
|
||||
},
|
||||
"workflowService": {
|
||||
"enterFilename": "Introduzca el nombre del archivo",
|
||||
|
||||
@@ -259,6 +259,10 @@
|
||||
"name": "Destacar nodo de ajuste",
|
||||
"tooltip": "Al arrastrar un enlace sobre un nodo con ranura de entrada viable, resalta el nodo"
|
||||
},
|
||||
"Comfy_Notification_ShowVersionUpdates": {
|
||||
"name": "Mostrar actualizaciones de versión",
|
||||
"tooltip": "Mostrar actualizaciones para nuevos modelos y funciones principales nuevas."
|
||||
},
|
||||
"Comfy_Pointer_ClickBufferTime": {
|
||||
"name": "Retraso de deriva del clic del puntero",
|
||||
"tooltip": "Después de presionar un botón del puntero, este es el tiempo máximo (en milisegundos) que se puede ignorar el movimiento del puntero.\n\nAyuda a prevenir que los objetos sean movidos involuntariamente si el puntero se mueve al hacer clic."
|
||||
@@ -322,7 +326,8 @@
|
||||
"Bottom": "Abajo",
|
||||
"Disabled": "Deshabilitado",
|
||||
"Top": "Arriba"
|
||||
}
|
||||
},
|
||||
"tooltip": "Posición de la barra de menú. En dispositivos móviles, el menú siempre se muestra en la parte superior."
|
||||
},
|
||||
"Comfy_Validation_NodeDefs": {
|
||||
"name": "Validar definiciones de nodos (lento)",
|
||||
|
||||
@@ -785,13 +785,13 @@
|
||||
"Toggle Bottom Panel": "Basculer le panneau inférieur",
|
||||
"Toggle Focus Mode": "Basculer le mode focus",
|
||||
"Toggle Logs Bottom Panel": "Basculer le panneau inférieur des journaux",
|
||||
"Toggle Model Library Sidebar": "Basculer la barre latérale de la bibliothèque de modèles",
|
||||
"Toggle Node Library Sidebar": "Basculer la barre latérale de la bibliothèque de nœuds",
|
||||
"Toggle Queue Sidebar": "Basculer la barre latérale de la file d'attente",
|
||||
"Toggle Model Library Sidebar": "Afficher/Masquer la barre latérale de la bibliothèque de modèles",
|
||||
"Toggle Node Library Sidebar": "Afficher/Masquer la barre latérale de la bibliothèque de nœuds",
|
||||
"Toggle Queue Sidebar": "Afficher/Masquer la barre latérale de la file d’attente",
|
||||
"Toggle Search Box": "Basculer la boîte de recherche",
|
||||
"Toggle Terminal Bottom Panel": "Basculer le panneau inférieur du terminal",
|
||||
"Toggle Theme (Dark/Light)": "Basculer le thème (Sombre/Clair)",
|
||||
"Toggle Workflows Sidebar": "Basculer la barre latérale des flux de travail",
|
||||
"Toggle Workflows Sidebar": "Afficher/Masquer la barre latérale des workflows",
|
||||
"Toggle the Custom Nodes Manager": "Basculer le gestionnaire de nœuds personnalisés",
|
||||
"Toggle the Custom Nodes Manager Progress Bar": "Basculer la barre de progression du gestionnaire de nœuds personnalisés",
|
||||
"Undo": "Annuler",
|
||||
@@ -1099,6 +1099,7 @@
|
||||
"Node Search Box": "Boîte de Recherche de Nœud",
|
||||
"Node Widget": "Widget de Nœud",
|
||||
"NodeLibrary": "Bibliothèque de Nœuds",
|
||||
"Notification Preferences": "Préférences de notification",
|
||||
"Pointer": "Pointeur",
|
||||
"Queue": "File d'Attente",
|
||||
"QueueButton": "Bouton de File d'Attente",
|
||||
@@ -1199,34 +1200,44 @@
|
||||
"Flux": "Flux",
|
||||
"Image": "Image",
|
||||
"Image API": "API d'image",
|
||||
"LLM API": "API LLM",
|
||||
"Upscaling": "Mise à l'échelle",
|
||||
"Video": "Vidéo",
|
||||
"Video API": "API vidéo"
|
||||
},
|
||||
"template": {
|
||||
"3D": {
|
||||
"hunyuan-3d-multiview-elf": "Hunyuan3D Multivue",
|
||||
"hunyuan-3d-turbo": "Hunyuan3D Turbo",
|
||||
"hunyuan3d-non-multiview-train": "Hunyuan3D",
|
||||
"3d_hunyuan3d_image_to_model": "Hunyuan3D",
|
||||
"3d_hunyuan3d_multiview_to_model": "Hunyuan3D Multivue",
|
||||
"3d_hunyuan3d_multiview_to_model_turbo": "Hunyuan3D Turbo",
|
||||
"stable_zero123_example": "Stable Zero123"
|
||||
},
|
||||
"3D API": {
|
||||
"api_rodin_image_to_model": "Rodin : Image vers Modèle",
|
||||
"api_rodin_multiview_to_model": "Rodin : Multivue vers Modèle",
|
||||
"api_tripo_image_to_model": "Tripo : Image vers Modèle",
|
||||
"api_tripo_multiview_to_model": "Tripo : Multivue vers Modèle",
|
||||
"api_tripo_text_to_model": "Tripo : Texte vers Modèle"
|
||||
},
|
||||
"Area Composition": {
|
||||
"area_composition": "Composition de Zone",
|
||||
"area_composition_reversed": "Composition de Zone Inversée",
|
||||
"area_composition_square_area_for_subject": "Composition de Zone Carrée pour le Sujet"
|
||||
},
|
||||
"Audio": {
|
||||
"stable_audio_example": "Stable Audio"
|
||||
"audio_ace_step_1_m2m_editing": "ACE Step v1 Édition M2M",
|
||||
"audio_ace_step_1_t2a_instrumentals": "ACE-Step v1 Texte vers Musique Instrumentale",
|
||||
"audio_ace_step_1_t2a_song": "ACE Step v1 Texte vers Chanson",
|
||||
"audio_stable_audio_example": "Stable Audio"
|
||||
},
|
||||
"Basics": {
|
||||
"default": "Génération d'Image",
|
||||
"embedding_example": "Intégration",
|
||||
"gligen_textbox_example": "Boîte de Texte Gligen",
|
||||
"image2image": "Image à Image",
|
||||
"inpain_model_outpainting": "Modèle Inpaint Outpainting",
|
||||
"inpaint_example": "Inpaint",
|
||||
"lora": "Lora",
|
||||
"lora_multiple": "Lora Multiple"
|
||||
"inpaint_model_outpainting": "Outpainting",
|
||||
"lora": "LoRA",
|
||||
"lora_multiple": "LoRA Multiple"
|
||||
},
|
||||
"ControlNet": {
|
||||
"2_pass_pose_worship": "2 Passes Pose Worship",
|
||||
@@ -1237,17 +1248,27 @@
|
||||
},
|
||||
"Flux": {
|
||||
"flux_canny_model_example": "Flux Canny Model",
|
||||
"flux_depth_lora_example": "Flux Depth Lora",
|
||||
"flux_dev_checkpoint_example": "Flux Dev",
|
||||
"flux_depth_lora_example": "Flux Depth LoRA",
|
||||
"flux_dev_checkpoint_example": "Flux Dev fp8",
|
||||
"flux_dev_full_text_to_image": "Flux Dev texte vers image complet",
|
||||
"flux_fill_inpaint_example": "Flux Inpaint",
|
||||
"flux_fill_outpaint_example": "Flux Outpaint",
|
||||
"flux_kontext_dev_basic": "Flux Kontext Dev (Basique)",
|
||||
"flux_kontext_dev_grouped": "Flux Kontext Dev (Groupé)",
|
||||
"flux_redux_model_example": "Flux Redux Model",
|
||||
"flux_schnell": "Flux Schnell"
|
||||
"flux_schnell": "Flux Schnell fp8",
|
||||
"flux_schnell_full_text_to_image": "Flux Schnell texte vers image complet"
|
||||
},
|
||||
"Image": {
|
||||
"hidream_e1_full": "HiDream E1 Complet",
|
||||
"hidream_i1_dev": "HiDream I1 Dev",
|
||||
"hidream_i1_fast": "HiDream I1 Rapide",
|
||||
"hidream_i1_full": "HiDream I1 Complet",
|
||||
"image_chroma_text_to_image": "Chroma texte vers image",
|
||||
"image_cosmos_predict2_2B_t2i": "Cosmos Predict2 2B T2I",
|
||||
"image_lotus_depth_v1_1": "Lotus Depth",
|
||||
"image_omnigen2_image_edit": "OmniGen2 Édition d'Image",
|
||||
"image_omnigen2_t2i": "OmniGen2 Texte vers Image",
|
||||
"sd3_5_large_blur": "SD3.5 Grand Flou",
|
||||
"sd3_5_large_canny_controlnet_example": "SD3.5 Grand Canny ControlNet",
|
||||
"sd3_5_large_depth": "SD3.5 Grande Profondeur",
|
||||
@@ -1259,21 +1280,33 @@
|
||||
"sdxlturbo_example": "SDXL Turbo"
|
||||
},
|
||||
"Image API": {
|
||||
"api-openai-dall-e-2-inpaint": "Dall-E 2 Inpainting",
|
||||
"api-openai-dall-e-2-t2i": "Dall-E 2 Texte vers Image",
|
||||
"api-openai-dall-e-3-t2i": "Dall-E 3 Texte vers Image",
|
||||
"api_bfl_flux_pro_t2i": "BFL Flux 1.1[pro] Ultra Texte vers Image",
|
||||
"api_ideogram_v3_t2i": "Ideogram V3 Texte vers Image",
|
||||
"api_luma_photon_i2i": "Luma Photon Image vers Image",
|
||||
"api_luma_photon_style_ref": "Luma Photon Référence de Style",
|
||||
"api_openai_image_1_i2i": "OpenAI Image-1 Image vers Image",
|
||||
"api_openai_image_1_inpaint": "OpenAI Image-1 Inpainting",
|
||||
"api_openai_image_1_multi_inputs": "OpenAI Image-1 Entrées Multiples",
|
||||
"api_openai_image_1_t2i": "OpenAI Image-1 Texte vers Image",
|
||||
"api_recraft_image_gen_with_color_control": "Recraft Génération d’Image avec Contrôle des Couleurs",
|
||||
"api_recraft_image_gen_with_style_control": "Recraft Génération d’Image avec Contrôle du Style",
|
||||
"api_recraft_vector_gen": "Recraft Génération de Vecteur",
|
||||
"api_stability_sd3_t2i": "Stability AI Stable Image Ultra Texte vers Image"
|
||||
"api_bfl_flux_1_kontext_max_image": "BFL Flux.1 Kontext Max",
|
||||
"api_bfl_flux_1_kontext_multiple_images_input": "BFL Flux.1 Kontext Entrée Multi-Images",
|
||||
"api_bfl_flux_1_kontext_pro_image": "BFL Flux.1 Kontext Pro",
|
||||
"api_bfl_flux_pro_t2i": "BFL Flux[Pro] : Texte vers Image",
|
||||
"api_ideogram_v3_t2i": "Ideogram V3 : Texte vers Image",
|
||||
"api_luma_photon_i2i": "Luma Photon : Image vers Image",
|
||||
"api_luma_photon_style_ref": "Luma Photon : Référence de Style",
|
||||
"api_openai_dall_e_2_inpaint": "OpenAI : Dall-E 2 Inpainting",
|
||||
"api_openai_dall_e_2_t2i": "OpenAI : Dall-E 2 Texte vers Image",
|
||||
"api_openai_dall_e_3_t2i": "OpenAI : Dall-E 3 Texte vers Image",
|
||||
"api_openai_image_1_i2i": "OpenAI : GPT-Image-1 Image vers Image",
|
||||
"api_openai_image_1_inpaint": "OpenAI : GPT-Image-1 Inpainting",
|
||||
"api_openai_image_1_multi_inputs": "OpenAI : GPT-Image-1 Entrées Multiples",
|
||||
"api_openai_image_1_t2i": "OpenAI : GPT-Image-1 Texte vers Image",
|
||||
"api_recraft_image_gen_with_color_control": "Recraft : Génération d’Image avec Contrôle des Couleurs",
|
||||
"api_recraft_image_gen_with_style_control": "Recraft : Génération d’Image avec Contrôle du Style",
|
||||
"api_recraft_vector_gen": "Recraft : Génération de Vecteur",
|
||||
"api_runway_reference_to_image": "Runway : Référence vers Image",
|
||||
"api_runway_text_to_image": "Runway : Texte vers Image",
|
||||
"api_stability_ai_i2i": "Stability AI : Image vers Image",
|
||||
"api_stability_ai_sd3_5_i2i": "Stability AI : SD3.5 Image vers Image",
|
||||
"api_stability_ai_sd3_5_t2i": "Stability AI : SD3.5 Texte vers Image",
|
||||
"api_stability_ai_stable_image_ultra_t2i": "Stability AI : Stable Image Ultra Texte vers Image"
|
||||
},
|
||||
"LLM API": {
|
||||
"api_google_gemini": "Google Gemini : Chat",
|
||||
"api_openai_chat": "OpenAI : Chat"
|
||||
},
|
||||
"Upscaling": {
|
||||
"esrgan_example": "ESRGAN",
|
||||
@@ -1282,127 +1315,198 @@
|
||||
"latent_upscale_different_prompt_model": "Modèle d'Agrandissement Latent Différent Prompt"
|
||||
},
|
||||
"Video": {
|
||||
"hunyuan_video_text_to_video": "Texte à Vidéo Hunyuan",
|
||||
"image_to_video": "Image à Vidéo",
|
||||
"image_to_video_wan": "Wan 2.1 Image à Vidéo",
|
||||
"ltxv_image_to_video": "LTXV Image à Vidéo",
|
||||
"ltxv_text_to_video": "LTXV Texte à Vidéo",
|
||||
"mochi_text_to_video_example": "Exemple de Texte à Vidéo Mochi",
|
||||
"text_to_video_wan": "Wan 2.1 Texte à Vidéo",
|
||||
"txt_to_image_to_video": "Texte à Image à Vidéo",
|
||||
"hunyuan_video_text_to_video": "Hunyuan Texte vers Vidéo",
|
||||
"image_to_video": "SVD Image vers Vidéo",
|
||||
"image_to_video_wan": "Wan 2.1 Image vers Vidéo",
|
||||
"ltxv_image_to_video": "LTXV Image vers Vidéo",
|
||||
"ltxv_text_to_video": "LTXV Texte vers Vidéo",
|
||||
"mochi_text_to_video_example": "Mochi Texte vers Vidéo",
|
||||
"text_to_video_wan": "Wan 2.1 Texte vers Vidéo",
|
||||
"txt_to_image_to_video": "SVD Texte vers Image vers Vidéo",
|
||||
"video_cosmos_predict2_2B_video2world_480p_16fps": "Cosmos Predict2 2B Video2World 480p 16fps",
|
||||
"video_wan2_1_fun_camera_v1_1_14B": "Wan 2.1 Fun Camera 14B",
|
||||
"video_wan2_1_fun_camera_v1_1_1_3B": "Wan 2.1 Fun Camera 1.3B",
|
||||
"video_wan_vace_14B_ref2v": "Wan VACE Référence vers Vidéo",
|
||||
"video_wan_vace_14B_t2v": "Wan VACE Texte vers Vidéo",
|
||||
"video_wan_vace_14B_v2v": "Wan VACE Contrôle Vidéo",
|
||||
"video_wan_vace_flf2v": "Wan VACE Premier-Dernier Frame",
|
||||
"video_wan_vace_inpainting": "Wan VACE Inpainting",
|
||||
"video_wan_vace_outpainting": "Wan VACE Outpainting",
|
||||
"wan2_1_flf2v_720_f16": "Wan 2.1 FLF2V 720p F16",
|
||||
"wan2_1_fun_control": "Wan 2.1 ControlNet",
|
||||
"wan2_1_fun_inp": "Wan 2.1 Inpainting"
|
||||
},
|
||||
"Video API": {
|
||||
"api_hailuo_minimax_i2v": "MiniMax Image vers Vidéo",
|
||||
"api_kling_i2v": "Kling Image vers Vidéo",
|
||||
"api_luma_i2v": "Luma Image vers Vidéo",
|
||||
"api_hailuo_minimax_i2v": "MiniMax : Image vers Vidéo",
|
||||
"api_hailuo_minimax_t2v": "MiniMax : Texte vers Vidéo",
|
||||
"api_kling_effects": "Kling : Effets Vidéo",
|
||||
"api_kling_flf": "Kling : FLF2V",
|
||||
"api_kling_i2v": "Kling : Image vers Vidéo",
|
||||
"api_luma_i2v": "Luma : Image vers Vidéo",
|
||||
"api_luma_t2v": "Luma : Texte vers Vidéo",
|
||||
"api_moonvalley_image_to_video": "Moonvalley : Image vers Vidéo",
|
||||
"api_moonvalley_text_to_video": "Moonvalley : Texte vers Vidéo",
|
||||
"api_pika_i2v": "Pika : Image vers Vidéo",
|
||||
"api_pika_scene": "Pika Scènes : Images vers Vidéo",
|
||||
"api_pixverse_t2v": "PixVerse Texte vers Vidéo",
|
||||
"api_pixverse_template_i2v": "PixVerse Template Effects: Image vers Vidéo",
|
||||
"api_veo2_i2v": "Veo2 Image vers Vidéo"
|
||||
"api_pixverse_i2v": "PixVerse : Image vers Vidéo",
|
||||
"api_pixverse_t2v": "PixVerse : Texte vers Vidéo",
|
||||
"api_pixverse_template_i2v": "PixVerse Templates : Image vers Vidéo",
|
||||
"api_runway_first_last_frame": "Runway : Premier Dernier Frame vers Vidéo",
|
||||
"api_runway_gen3a_turbo_image_to_video": "Runway : Gen3a Turbo Image vers Vidéo",
|
||||
"api_runway_gen4_turo_image_to_video": "Runway : Gen4 Turbo Image vers Vidéo",
|
||||
"api_veo2_i2v": "Veo2 : Image vers Vidéo"
|
||||
}
|
||||
},
|
||||
"templateDescription": {
|
||||
"3D": {
|
||||
"hunyuan-3d-multiview-elf": "Utilisez Hunyuan3D 2mv pour générer des modèles à partir de plusieurs vues.",
|
||||
"hunyuan-3d-turbo": "Utilisez Hunyuan3D 2mv turbo pour générer des modèles à partir de plusieurs vues.",
|
||||
"hunyuan3d-non-multiview-train": "Utilisez Hunyuan3D 2.0 pour générer des modèles à partir d'une seule vue.",
|
||||
"stable_zero123_example": "Générez des vues 3D à partir d'images uniques."
|
||||
"3d_hunyuan3d_image_to_model": "Générez des modèles 3D à partir d'images uniques avec Hunyuan3D 2.0.",
|
||||
"3d_hunyuan3d_multiview_to_model": "Générez des modèles 3D à partir de plusieurs vues avec Hunyuan3D 2.0 MV.",
|
||||
"3d_hunyuan3d_multiview_to_model_turbo": "Générez des modèles 3D à partir de plusieurs vues avec Hunyuan3D 2.0 MV Turbo.",
|
||||
"stable_zero123_example": "Générez des vues 3D à partir d'images uniques avec Stable Zero123."
|
||||
},
|
||||
"3D API": {
|
||||
"api_rodin_image_to_model": "Générez des modèles 3D détaillés à partir d'une seule photo avec Rodin AI.",
|
||||
"api_rodin_multiview_to_model": "Sculptez des modèles 3D complets à partir de plusieurs vues avec Rodin.",
|
||||
"api_tripo_image_to_model": "Générez des assets 3D professionnels à partir d'images 2D avec Tripo.",
|
||||
"api_tripo_multiview_to_model": "Construisez des modèles 3D à partir de multiples angles avec le scanner avancé de Tripo.",
|
||||
"api_tripo_text_to_model": "Créez des objets 3D à partir de descriptions textuelles avec Tripo."
|
||||
},
|
||||
"Area Composition": {
|
||||
"area_composition": "Contrôlez la composition d'image avec des zones.",
|
||||
"area_composition_reversed": "Inversez le workflow de composition de zones.",
|
||||
"area_composition_square_area_for_subject": "Créez un placement cohérent du sujet."
|
||||
"area_composition": "Générez des images en contrôlant la composition avec des zones définies.",
|
||||
"area_composition_square_area_for_subject": "Générez des images avec un placement cohérent du sujet grâce à la composition de zones."
|
||||
},
|
||||
"Audio": {
|
||||
"stable_audio_example": "Générez de l'audio à partir de descriptions textuelles."
|
||||
"audio_ace_step_1_m2m_editing": "Éditez des chansons existantes pour changer le style et les paroles avec ACE-Step v1 M2M.",
|
||||
"audio_ace_step_1_t2a_instrumentals": "Générez de la musique instrumentale à partir de texte avec ACE-Step v1.",
|
||||
"audio_ace_step_1_t2a_song": "Générez des chansons avec voix à partir de texte avec ACE-Step v1, supportant plusieurs langues et la personnalisation du style.",
|
||||
"audio_stable_audio_example": "Générez de l'audio à partir de descriptions textuelles avec Stable Audio."
|
||||
},
|
||||
"Basics": {
|
||||
"default": "Générez des images à partir de descriptions textuelles.",
|
||||
"embedding_example": "Utilisez l'inversion textuelle pour des styles cohérents.",
|
||||
"gligen_textbox_example": "Spécifiez l'emplacement et la taille des objets.",
|
||||
"embedding_example": "Générez des images avec inversion textuelle pour des styles cohérents.",
|
||||
"gligen_textbox_example": "Générez des images avec un placement précis des objets grâce à des zones de texte.",
|
||||
"image2image": "Transformez des images existantes à l'aide de prompts textuels.",
|
||||
"inpain_model_outpainting": "Étendez les images au-delà de leurs limites d'origine.",
|
||||
"inpaint_example": "Modifiez de façon transparente des parties spécifiques d'une image.",
|
||||
"lora": "Appliquez des modèles LoRA pour des styles ou sujets spécialisés.",
|
||||
"lora_multiple": "Combinez plusieurs modèles LoRA pour des résultats uniques."
|
||||
"inpaint_model_outpainting": "Étendez les images au-delà de leurs limites d'origine.",
|
||||
"lora": "Générez des images avec des modèles LoRA pour des styles ou sujets spécialisés.",
|
||||
"lora_multiple": "Générez des images en combinant plusieurs modèles LoRA."
|
||||
},
|
||||
"ControlNet": {
|
||||
"2_pass_pose_worship": "Générez des images à partir de références de pose.",
|
||||
"controlnet_example": "Contrôlez la génération d'image avec des images de référence.",
|
||||
"depth_controlnet": "Créez des images avec une génération sensible à la profondeur.",
|
||||
"depth_t2i_adapter": "Générez rapidement des images sensibles à la profondeur avec un adaptateur T2I.",
|
||||
"mixing_controlnets": "Combinez plusieurs modèles ControlNet ensemble."
|
||||
"2_pass_pose_worship": "Générez des images guidées par des références de pose avec ControlNet.",
|
||||
"controlnet_example": "Générez des images guidées par des images de gribouillage avec ControlNet.",
|
||||
"depth_controlnet": "Générez des images guidées par la profondeur avec ControlNet.",
|
||||
"depth_t2i_adapter": "Générez des images guidées par la profondeur avec l'adaptateur T2I.",
|
||||
"mixing_controlnets": "Générez des images en combinant plusieurs modèles ControlNet."
|
||||
},
|
||||
"Flux": {
|
||||
"flux_canny_model_example": "Générez des images à partir de la détection de contours.",
|
||||
"flux_depth_lora_example": "Créez des images avec LoRA sensible à la profondeur.",
|
||||
"flux_dev_checkpoint_example": "Créez des images avec les modèles de développement Flux.",
|
||||
"flux_fill_inpaint_example": "Complétez les parties manquantes d'une image.",
|
||||
"flux_fill_outpaint_example": "Étendez les images avec l'outpainting Flux.",
|
||||
"flux_redux_model_example": "Transférez le style d'une image de référence pour guider la génération d'image avec Flux.",
|
||||
"flux_schnell": "Générez des images rapidement avec Flux Schnell."
|
||||
"flux_canny_model_example": "Générez des images guidées par détection de contours avec Flux Canny.",
|
||||
"flux_depth_lora_example": "Générez des images guidées par la profondeur avec Flux LoRA.",
|
||||
"flux_dev_checkpoint_example": "Générez des images avec la version quantifiée fp8 de Flux Dev. Idéal pour les appareils à faible VRAM, nécessite un seul fichier modèle, mais la qualité d'image est légèrement inférieure à la version complète.",
|
||||
"flux_dev_full_text_to_image": "Générez des images de haute qualité avec la version complète de Flux Dev. Nécessite plus de VRAM et plusieurs fichiers modèles, mais offre la meilleure fidélité au prompt et la meilleure qualité d'image.",
|
||||
"flux_fill_inpaint_example": "Complétez les parties manquantes d'une image avec Flux inpainting.",
|
||||
"flux_fill_outpaint_example": "Étendez les images au-delà des limites avec Flux outpainting.",
|
||||
"flux_kontext_dev_basic": "Éditez des images avec Flux Kontext (tous les nœuds visibles), parfait pour apprendre le workflow.",
|
||||
"flux_kontext_dev_grouped": "Version simplifiée de Flux Kontext avec des nœuds groupés pour un espace de travail plus propre.",
|
||||
"flux_redux_model_example": "Générez des images en transférant le style d'images de référence avec Flux Redux.",
|
||||
"flux_schnell": "Générez rapidement des images avec la version quantifiée fp8 de Flux Schnell. Parfait pour le matériel bas de gamme, nécessite seulement 4 étapes.",
|
||||
"flux_schnell_full_text_to_image": "Générez rapidement des images avec la version complète de Flux Schnell. Licence Apache2.0, seulement 4 étapes tout en maintenant une bonne qualité."
|
||||
},
|
||||
"Image": {
|
||||
"hidream_e1_full": "Modifiez des images avec HiDream E1.",
|
||||
"hidream_i1_dev": "Générez des images avec HiDream I1 Dev.",
|
||||
"hidream_i1_fast": "Générez rapidement des images avec HiDream I1.",
|
||||
"hidream_i1_full": "Générez des images avec HiDream I1.",
|
||||
"sd3_5_large_blur": "Générez des images à partir d'images de référence floues avec SD 3.5.",
|
||||
"sd3_5_large_canny_controlnet_example": "Utilisez la détection de contours pour guider la génération d'images avec SD 3.5.",
|
||||
"sd3_5_large_depth": "Créez des images sensibles à la profondeur avec SD 3.5.",
|
||||
"hidream_e1_full": "HiDream E1 - Modèle professionnel d'édition d'image par langage naturel.",
|
||||
"hidream_i1_dev": "HiDream I1 Dev - Version équilibrée avec 28 étapes d'inférence, adaptée au matériel intermédiaire.",
|
||||
"hidream_i1_fast": "HiDream I1 Fast - Version légère avec 16 étapes, idéale pour des aperçus rapides sur du matériel bas de gamme.",
|
||||
"hidream_i1_full": "HiDream I1 Full - Version complète avec 50 étapes pour une qualité maximale.",
|
||||
"image_chroma_text_to_image": "Chroma est une version modifiée de Flux avec quelques changements d'architecture.",
|
||||
"image_cosmos_predict2_2B_t2i": "Générez des images avec Cosmos-Predict2 2B T2I, pour une génération physique précise, haute fidélité et riche en détails.",
|
||||
"image_lotus_depth_v1_1": "Exécutez Lotus Depth dans ComfyUI pour une estimation de profondeur monoculaire efficace et détaillée.",
|
||||
"image_omnigen2_image_edit": "Éditez des images avec des instructions en langage naturel grâce aux capacités avancées d'édition d'image et de texte d'OmniGen2.",
|
||||
"image_omnigen2_t2i": "Générez des images de haute qualité à partir de texte avec le modèle multimodal unifié 7B d'OmniGen2 et une architecture à double voie.",
|
||||
"sd3_5_large_blur": "Générez des images guidées par des images de référence floues avec SD 3.5.",
|
||||
"sd3_5_large_canny_controlnet_example": "Générez des images guidées par détection de contours avec SD 3.5 Canny ControlNet.",
|
||||
"sd3_5_large_depth": "Générez des images guidées par la profondeur avec SD 3.5.",
|
||||
"sd3_5_simple_example": "Générez des images avec SD 3.5.",
|
||||
"sdxl_refiner_prompt_example": "Améliorez les résultats SDXL avec des refineurs.",
|
||||
"sdxl_revision_text_prompts": "Transférez des concepts à partir d'images de référence pour guider la génération d'images avec SDXL.",
|
||||
"sdxl_revision_zero_positive": "Ajoutez des prompts textuels en plus des images de référence pour guider la génération d'images avec SDXL.",
|
||||
"sdxl_simple_example": "Créez des images de haute qualité avec SDXL.",
|
||||
"sdxl_refiner_prompt_example": "Améliorez les images SDXL avec des modèles de raffinement.",
|
||||
"sdxl_revision_text_prompts": "Générez des images en transférant des concepts d'images de référence avec SDXL Revision.",
|
||||
"sdxl_revision_zero_positive": "Générez des images en utilisant à la fois des prompts textuels et des images de référence avec SDXL Revision.",
|
||||
"sdxl_simple_example": "Générez des images de haute qualité avec SDXL.",
|
||||
"sdxlturbo_example": "Générez des images en une seule étape avec SDXL Turbo."
|
||||
},
|
||||
"Image API": {
|
||||
"api-openai-dall-e-2-inpaint": "Utilisez l'API Dall-E 2 pour faire de l'inpainting sur des images.",
|
||||
"api-openai-dall-e-2-t2i": "Utilisez l'API Dall-E 2 pour générer des images à partir de descriptions textuelles.",
|
||||
"api-openai-dall-e-3-t2i": "Utilisez l'API Dall-E 3 pour générer des images à partir de descriptions textuelles.",
|
||||
"api_bfl_flux_pro_t2i": "Créez des images avec FLUX.1 [pro] pour un excellent suivi des prompts, une qualité visuelle, des détails d'image et une grande diversité de sorties.",
|
||||
"api_ideogram_v3_t2i": "Générez des images avec un alignement prompt-image de haute qualité, du photoréalisme et du rendu de texte. Créez des logos professionnels, affiches promotionnelles, concepts de pages d'accueil, photographies de produits et plus. Composez facilement des compositions spatiales sophistiquées avec des arrière-plans complexes, un éclairage et des couleurs précis et nuancés, et des détails environnementaux réalistes.",
|
||||
"api_bfl_flux_1_kontext_max_image": "Éditez des images avec Flux.1 Kontext max image.",
|
||||
"api_bfl_flux_1_kontext_multiple_images_input": "Entrez plusieurs images et éditez-les avec Flux.1 Kontext.",
|
||||
"api_bfl_flux_1_kontext_pro_image": "Éditez des images avec Flux.1 Kontext pro image.",
|
||||
"api_bfl_flux_pro_t2i": "Générez des images avec un excellent suivi de prompt et une qualité visuelle avec FLUX.1 Pro.",
|
||||
"api_ideogram_v3_t2i": "Générez des images de qualité professionnelle avec un excellent alignement prompt-image, du photoréalisme et du rendu de texte avec Ideogram V3.",
|
||||
"api_luma_photon_i2i": "Guidez la génération d'image en combinant images et prompt.",
|
||||
"api_luma_photon_style_ref": "Appliquez et mélangez des références de style avec un contrôle précis. Luma Photon capture l'essence de chaque image de référence, vous permettant de combiner des éléments visuels distincts tout en maintenant une qualité professionnelle.",
|
||||
"api_openai_image_1_i2i": "Utilisez l'API GPT Image 1 pour générer des images à partir d'images.",
|
||||
"api_openai_image_1_inpaint": "Utilisez l'API GPT Image 1 pour faire de l'inpainting sur des images.",
|
||||
"api_openai_image_1_multi_inputs": "Utilisez l'API GPT Image 1 avec plusieurs entrées pour générer des images.",
|
||||
"api_openai_image_1_t2i": "Utilisez l'API GPT Image 1 pour générer des images à partir de descriptions textuelles.",
|
||||
"api_recraft_image_gen_with_color_control": "Créez une palette personnalisée à réutiliser pour plusieurs images ou choisissez les couleurs pour chaque photo. Adaptez la palette de couleurs de votre marque et créez des visuels qui vous ressemblent.",
|
||||
"api_recraft_image_gen_with_style_control": "Contrôlez le style avec des exemples visuels, alignez le positionnement et affinez les objets. Stockez et partagez des styles pour une cohérence parfaite de la marque.",
|
||||
"api_recraft_vector_gen": "Passez d'un prompt textuel à une image vectorielle avec le générateur vectoriel IA de Recraft. Produisez des illustrations vectorielles de la meilleure qualité pour des logos, affiches, icônes, publicités, bannières et maquettes. Perfectionnez vos designs avec des fichiers SVG nets et de haute qualité. Créez des illustrations vectorielles de marque pour votre application ou site web en quelques secondes.",
|
||||
"api_stability_sd3_t2i": "Générez des images de haute qualité avec une excellente fidélité au prompt. Parfait pour les cas d'usage professionnels en résolution 1 mégapixel."
|
||||
"api_luma_photon_style_ref": "Générez des images en combinant des références de style avec un contrôle précis avec Luma Photon.",
|
||||
"api_openai_dall_e_2_inpaint": "Éditez des images avec inpainting avec l'API OpenAI Dall-E 2.",
|
||||
"api_openai_dall_e_2_t2i": "Générez des images à partir de texte avec l'API OpenAI Dall-E 2.",
|
||||
"api_openai_dall_e_3_t2i": "Générez des images à partir de texte avec l'API OpenAI Dall-E 3.",
|
||||
"api_openai_image_1_i2i": "Générez des images à partir d'images avec l'API OpenAI GPT Image 1.",
|
||||
"api_openai_image_1_inpaint": "Éditez des images avec inpainting avec l'API OpenAI GPT Image 1.",
|
||||
"api_openai_image_1_multi_inputs": "Générez des images à partir de plusieurs entrées avec l'API OpenAI GPT Image 1.",
|
||||
"api_openai_image_1_t2i": "Générez des images à partir de texte avec l'API OpenAI GPT Image 1.",
|
||||
"api_recraft_image_gen_with_color_control": "Générez des images avec des palettes de couleurs personnalisées et des visuels de marque avec Recraft.",
|
||||
"api_recraft_image_gen_with_style_control": "Contrôlez le style avec des exemples visuels, alignez le positionnement et affinez les objets. Stockez et partagez des styles pour une cohérence de marque.",
|
||||
"api_recraft_vector_gen": "Générez des images vectorielles de haute qualité à partir de texte avec le générateur vectoriel IA de Recraft.",
|
||||
"api_runway_reference_to_image": "Générez de nouvelles images basées sur des styles et des compositions de référence avec Runway.",
|
||||
"api_runway_text_to_image": "Générez des images de haute qualité à partir de texte avec le modèle IA de Runway.",
|
||||
"api_stability_ai_i2i": "Transformez des images avec une génération de haute qualité avec Stability AI, idéal pour l'édition professionnelle et le transfert de style.",
|
||||
"api_stability_ai_sd3_5_i2i": "Générez des images de haute qualité avec une excellente fidélité au prompt. Parfait pour un usage professionnel à 1 mégapixel.",
|
||||
"api_stability_ai_sd3_5_t2i": "Générez des images de haute qualité avec une excellente fidélité au prompt. Parfait pour un usage professionnel à 1 mégapixel.",
|
||||
"api_stability_ai_stable_image_ultra_t2i": "Générez des images de haute qualité avec une excellente fidélité au prompt. Parfait pour un usage professionnel à 1 mégapixel."
|
||||
},
|
||||
"LLM API": {
|
||||
"api_google_gemini": "Découvrez l'IA multimodale de Google avec les capacités de raisonnement de Gemini.",
|
||||
"api_openai_chat": "Discutez avec les modèles de langage avancés d'OpenAI pour des conversations intelligentes."
|
||||
},
|
||||
"Upscaling": {
|
||||
"esrgan_example": "Utilisez des modèles d'upscaling pour améliorer la qualité d'image.",
|
||||
"hiresfix_esrgan_workflow": "Utilisez des modèles d'upscaling lors des étapes intermédiaires.",
|
||||
"hiresfix_latent_workflow": "Améliorez la qualité d'image dans l'espace latent.",
|
||||
"latent_upscale_different_prompt_model": "Upscalez et changez le prompt à chaque passage."
|
||||
"esrgan_example": "Agrandissez les images avec des modèles ESRGAN pour améliorer la qualité.",
|
||||
"hiresfix_esrgan_workflow": "Agrandissez les images avec des modèles ESRGAN lors des étapes intermédiaires.",
|
||||
"hiresfix_latent_workflow": "Agrandissez les images en améliorant la qualité dans l'espace latent.",
|
||||
"latent_upscale_different_prompt_model": "Agrandissez les images en changeant les prompts entre les passes."
|
||||
},
|
||||
"Video": {
|
||||
"hunyuan_video_text_to_video": "Générez des vidéos avec le modèle Hunyuan.",
|
||||
"image_to_video": "Transformez des images en vidéos animées.",
|
||||
"image_to_video_wan": "Générez rapidement des vidéos à partir d'images.",
|
||||
"ltxv_image_to_video": "Convertissez des images fixes en vidéos.",
|
||||
"ltxv_text_to_video": "Générez des vidéos à partir de descriptions textuelles.",
|
||||
"mochi_text_to_video_example": "Créez des vidéos avec le modèle Mochi.",
|
||||
"text_to_video_wan": "Générez rapidement des vidéos à partir de descriptions textuelles.",
|
||||
"txt_to_image_to_video": "Générez des images à partir de texte puis convertissez-les en vidéos.",
|
||||
"wan2_1_flf2v_720_f16": "Générez une vidéo en contrôlant la première et la dernière image.",
|
||||
"wan2_1_fun_control": "Guidez la génération vidéo avec le contrôle de pose, profondeur, contours et plus.",
|
||||
"wan2_1_fun_inp": "Créez des vidéos à partir d'images de début et de fin."
|
||||
"hunyuan_video_text_to_video": "Générez des vidéos à partir de texte avec le modèle Hunyuan.",
|
||||
"image_to_video": "Générez des vidéos à partir d'images fixes.",
|
||||
"image_to_video_wan": "Générez des vidéos à partir d'images avec Wan 2.1.",
|
||||
"ltxv_image_to_video": "Générez des vidéos à partir d'images fixes.",
|
||||
"ltxv_text_to_video": "Générez des vidéos à partir de texte.",
|
||||
"mochi_text_to_video_example": "Générez des vidéos à partir de texte avec le modèle Mochi.",
|
||||
"text_to_video_wan": "Générez des vidéos à partir de texte avec Wan 2.1.",
|
||||
"txt_to_image_to_video": "Générez des vidéos en créant d'abord des images à partir de texte.",
|
||||
"video_cosmos_predict2_2B_video2world_480p_16fps": "Générez des vidéos avec Cosmos-Predict2 2B Video2World, pour des simulations physiques précises, haute fidélité et cohérentes.",
|
||||
"video_wan2_1_fun_camera_v1_1_14B": "Générez des vidéos de haute qualité avec un contrôle avancé de la caméra avec le modèle complet 14B.",
|
||||
"video_wan2_1_fun_camera_v1_1_1_3B": "Générez des vidéos dynamiques avec des mouvements de caméra cinématographiques avec Wan 2.1 Fun Camera 1.3B.",
|
||||
"video_wan_vace_14B_ref2v": "Créez des vidéos qui correspondent au style et au contenu d'une image de référence.",
|
||||
"video_wan_vace_14B_t2v": "Transformez des descriptions textuelles en vidéos de haute qualité. Prend en charge 480p et 720p avec le modèle VACE-14B.",
|
||||
"video_wan_vace_14B_v2v": "Générez des vidéos en contrôlant des vidéos d'entrée et des images de référence avec Wan VACE.",
|
||||
"video_wan_vace_flf2v": "Générez des transitions vidéo fluides en définissant les images de début et de fin. Prend en charge les séquences d'images personnalisées.",
|
||||
"video_wan_vace_inpainting": "Éditez des régions spécifiques dans des vidéos tout en préservant le contenu environnant.",
|
||||
"video_wan_vace_outpainting": "Générez des vidéos étendues en agrandissant la taille avec Wan VACE outpainting.",
|
||||
"wan2_1_flf2v_720_f16": "Générez des vidéos en contrôlant la première et la dernière image avec Wan 2.1 FLF2V.",
|
||||
"wan2_1_fun_control": "Générez des vidéos guidées par la pose, la profondeur et les contours avec Wan 2.1 ControlNet.",
|
||||
"wan2_1_fun_inp": "Générez des vidéos à partir d'images de début et de fin avec Wan 2.1 inpainting."
|
||||
},
|
||||
"Video API": {
|
||||
"api_hailuo_minimax_i2v": "Créez des vidéos raffinées à partir d'images et de texte, incluant l'intégration CGI et des effets photo tendance comme le hugging IA viral. Choisissez parmi une variété de styles et thèmes vidéo pour correspondre à votre vision créative.",
|
||||
"api_kling_i2v": "Créez des vidéos avec une excellente fidélité au prompt pour les actions, expressions et mouvements de caméra. Prend désormais en charge des prompts complexes avec des actions séquentielles pour vous permettre de diriger votre scène.",
|
||||
"api_hailuo_minimax_i2v": "Générez des vidéos raffinées à partir d'images et de texte avec intégration CGI avec MiniMax.",
|
||||
"api_hailuo_minimax_t2v": "Générez des vidéos de haute qualité directement à partir de texte. Découvrez les capacités avancées de MiniMax pour créer des récits visuels variés avec des effets CGI professionnels.",
|
||||
"api_kling_effects": "Générez des vidéos dynamiques en appliquant des effets visuels à des images avec Kling.",
|
||||
"api_kling_flf": "Générez des vidéos en contrôlant les premières et dernières images.",
|
||||
"api_kling_i2v": "Générez des vidéos avec une excellente fidélité au prompt pour les actions, expressions et mouvements de caméra avec Kling.",
|
||||
"api_luma_i2v": "Transformez des images statiques en animations magiques de haute qualité instantanément.",
|
||||
"api_pika_scene": "Utilisez plusieurs images comme ingrédients et générez des vidéos qui les intègrent toutes.",
|
||||
"api_pixverse_t2v": "Générez des vidéos avec une interprétation précise du prompt et des dynamiques vidéo impressionnantes.",
|
||||
"api_pixverse_template_i2v": "Transformez des images statiques en vidéos dynamiques avec mouvement et effets.",
|
||||
"api_veo2_i2v": "Utilisez l'API Google Veo2 pour générer des vidéos à partir d'images."
|
||||
"api_luma_t2v": "Générez des vidéos de haute qualité à partir de prompts simples.",
|
||||
"api_moonvalley_image_to_video": "Générez des vidéos cinématographiques 1080p à partir d'une image avec un modèle entraîné uniquement sur des données sous licence.",
|
||||
"api_moonvalley_text_to_video": "Générez des vidéos cinématographiques 1080p à partir de texte avec un modèle entraîné uniquement sur des données sous licence.",
|
||||
"api_pika_i2v": "Générez des vidéos animées fluides à partir d'images fixes avec Pika AI.",
|
||||
"api_pika_scene": "Générez des vidéos qui incorporent plusieurs images d'entrée avec Pika Scenes.",
|
||||
"api_pixverse_i2v": "Générez des vidéos dynamiques à partir d'images fixes avec mouvement et effets avec PixVerse.",
|
||||
"api_pixverse_t2v": "Générez des vidéos avec une interprétation précise du prompt et une dynamique visuelle impressionnante.",
|
||||
"api_pixverse_template_i2v": "Générez des vidéos dynamiques à partir d'images fixes avec mouvement et effets avec PixVerse.",
|
||||
"api_runway_first_last_frame": "Générez des transitions vidéo fluides entre deux images clés avec la précision de Runway.",
|
||||
"api_runway_gen3a_turbo_image_to_video": "Générez des vidéos cinématographiques à partir d'images fixes avec Runway Gen3a Turbo.",
|
||||
"api_runway_gen4_turo_image_to_video": "Générez des vidéos dynamiques à partir d'images avec Runway Gen4 Turbo.",
|
||||
"api_veo2_i2v": "Générez des vidéos à partir d'images avec l'API Google Veo2."
|
||||
}
|
||||
},
|
||||
"title": "Commencez avec un modèle"
|
||||
@@ -1483,7 +1587,8 @@
|
||||
"title": "Bienvenue sur ComfyUI"
|
||||
},
|
||||
"whatsNewPopup": {
|
||||
"learnMore": "En savoir plus"
|
||||
"learnMore": "En savoir plus",
|
||||
"noReleaseNotes": "Aucune note de version disponible."
|
||||
},
|
||||
"workflowService": {
|
||||
"enterFilename": "Entrez le nom du fichier",
|
||||
|
||||
@@ -259,6 +259,10 @@
|
||||
"name": "Le snap met en évidence le nœud",
|
||||
"tooltip": "Lorsque vous faites glisser un lien sur un nœud avec une fente d'entrée viable, mettez en évidence le nœud"
|
||||
},
|
||||
"Comfy_Notification_ShowVersionUpdates": {
|
||||
"name": "Afficher les mises à jour de version",
|
||||
"tooltip": "Afficher les mises à jour pour les nouveaux modèles et les nouvelles fonctionnalités majeures."
|
||||
},
|
||||
"Comfy_Pointer_ClickBufferTime": {
|
||||
"name": "Délai de dérive du clic du pointeur",
|
||||
"tooltip": "Après avoir appuyé sur un bouton de pointeur, c'est le temps maximum (en millisecondes) que le mouvement du pointeur peut être ignoré.\n\nAide à prévenir que les objets soient déplacés involontairement si le pointeur est déplacé lors du clic."
|
||||
@@ -322,7 +326,8 @@
|
||||
"Bottom": "Bas",
|
||||
"Disabled": "Désactivé",
|
||||
"Top": "Haut"
|
||||
}
|
||||
},
|
||||
"tooltip": "Position de la barre de menu. Sur les appareils mobiles, le menu est toujours affiché en haut."
|
||||
},
|
||||
"Comfy_Validation_NodeDefs": {
|
||||
"name": "Valider les définitions de nœuds (lent)",
|
||||
|
||||
@@ -1099,6 +1099,7 @@
|
||||
"Node Search Box": "ノード検索ボックス",
|
||||
"Node Widget": "ノードウィジェット",
|
||||
"NodeLibrary": "ノードライブラリ",
|
||||
"Notification Preferences": "通知設定",
|
||||
"Pointer": "ポインタ",
|
||||
"Queue": "キュー",
|
||||
"QueueButton": "キューボタン",
|
||||
@@ -1199,81 +1200,113 @@
|
||||
"Flux": "Flux",
|
||||
"Image": "画像",
|
||||
"Image API": "画像API",
|
||||
"LLM API": "LLM API",
|
||||
"Upscaling": "アップスケーリング",
|
||||
"Video": "ビデオ",
|
||||
"Video API": "動画API"
|
||||
},
|
||||
"template": {
|
||||
"3D": {
|
||||
"hunyuan-3d-multiview-elf": "Hunyuan3D マルチビュー",
|
||||
"hunyuan-3d-turbo": "Hunyuan3D ターボ",
|
||||
"hunyuan3d-non-multiview-train": "Hunyuan3D",
|
||||
"3d_hunyuan3d_image_to_model": "Hunyuan3D",
|
||||
"3d_hunyuan3d_multiview_to_model": "Hunyuan3Dマルチビュー",
|
||||
"3d_hunyuan3d_multiview_to_model_turbo": "Hunyuan3Dターボ",
|
||||
"stable_zero123_example": "Stable Zero123"
|
||||
},
|
||||
"3D API": {
|
||||
"api_rodin_image_to_model": "Rodin:画像からモデルへ",
|
||||
"api_rodin_multiview_to_model": "Rodin:マルチビューからモデルへ",
|
||||
"api_tripo_image_to_model": "Tripo:画像からモデルへ",
|
||||
"api_tripo_multiview_to_model": "Tripo:マルチビューからモデルへ",
|
||||
"api_tripo_text_to_model": "Tripo:テキストからモデルへ"
|
||||
},
|
||||
"Area Composition": {
|
||||
"area_composition": "エリア構成",
|
||||
"area_composition_reversed": "エリア構成反転",
|
||||
"area_composition_square_area_for_subject": "主題のためのエリア構成スクエア"
|
||||
},
|
||||
"Audio": {
|
||||
"stable_audio_example": "Stable Audio"
|
||||
"audio_ace_step_1_m2m_editing": "ACE Step v1 M2M編集",
|
||||
"audio_ace_step_1_t2a_instrumentals": "ACE-Step v1テキストからインスト音楽",
|
||||
"audio_ace_step_1_t2a_song": "ACE Step v1テキストから歌",
|
||||
"audio_stable_audio_example": "Stable Audio"
|
||||
},
|
||||
"Basics": {
|
||||
"default": "画像生成",
|
||||
"embedding_example": "埋め込み",
|
||||
"gligen_textbox_example": "Gligenテキストボックス",
|
||||
"image2image": "画像から画像へ",
|
||||
"inpain_model_outpainting": "InpaintモデルのOutpainting",
|
||||
"inpaint_example": "Inpaint",
|
||||
"lora": "Lora",
|
||||
"lora_multiple": "Lora複数"
|
||||
"inpaint_model_outpainting": "Outpainting",
|
||||
"lora": "LoRA",
|
||||
"lora_multiple": "LoRA複数"
|
||||
},
|
||||
"ControlNet": {
|
||||
"2_pass_pose_worship": "2 Pass Pose Worship",
|
||||
"controlnet_example": "ControlNet",
|
||||
"controlnet_example": "Scribble ControlNet",
|
||||
"depth_controlnet": "Depth ControlNet",
|
||||
"depth_t2i_adapter": "Depth T2Iアダプタ",
|
||||
"mixing_controlnets": "ControlNetsの混合"
|
||||
},
|
||||
"Flux": {
|
||||
"flux_canny_model_example": "Flux Cannyモデル",
|
||||
"flux_depth_lora_example": "Flux Depth Lora",
|
||||
"flux_dev_checkpoint_example": "Flux Dev",
|
||||
"flux_depth_lora_example": "Flux Depth LoRA",
|
||||
"flux_dev_checkpoint_example": "Flux Dev fp8",
|
||||
"flux_dev_full_text_to_image": "Flux Devフルテキストから画像へ",
|
||||
"flux_fill_inpaint_example": "Flux Inpaint",
|
||||
"flux_fill_outpaint_example": "Flux Outpaint",
|
||||
"flux_kontext_dev_basic": "Flux Kontext Dev(ベーシック)",
|
||||
"flux_kontext_dev_grouped": "Flux Kontext Dev(グループ化)",
|
||||
"flux_redux_model_example": "Flux Reduxモデル",
|
||||
"flux_schnell": "Flux Schnell"
|
||||
"flux_schnell": "Flux Schnell fp8",
|
||||
"flux_schnell_full_text_to_image": "Flux Schnellフルテキストから画像へ"
|
||||
},
|
||||
"Image": {
|
||||
"hidream_e1_full": "HiDream E1 Full",
|
||||
"hidream_i1_dev": "HiDream I1 Dev",
|
||||
"hidream_i1_fast": "HiDream I1 Fast",
|
||||
"hidream_i1_full": "HiDream I1 Full",
|
||||
"sd3_5_large_blur": "SD3.5 ラージブラー",
|
||||
"sd3_5_large_canny_controlnet_example": "SD3.5 ラージキャニーコントロールネット",
|
||||
"sd3_5_large_depth": "SD3.5 ラージデプス",
|
||||
"sd3_5_simple_example": "SD3.5 シンプル",
|
||||
"sdxl_refiner_prompt_example": "SDXL Refinerプロンプト",
|
||||
"sdxl_revision_text_prompts": "SDXL Revisionテキストプロンプト",
|
||||
"sdxl_revision_zero_positive": "SDXL Revisionゼロポジティブ",
|
||||
"image_chroma_text_to_image": "Chromaテキストから画像へ",
|
||||
"image_cosmos_predict2_2B_t2i": "Cosmos Predict2 2B T2I",
|
||||
"image_lotus_depth_v1_1": "Lotus Depth",
|
||||
"image_omnigen2_image_edit": "OmniGen2画像編集",
|
||||
"image_omnigen2_t2i": "OmniGen2テキストから画像へ",
|
||||
"sd3_5_large_blur": "SD3.5ラージブラー",
|
||||
"sd3_5_large_canny_controlnet_example": "SD3.5ラージキャニーコントロールネット",
|
||||
"sd3_5_large_depth": "SD3.5ラージデプス",
|
||||
"sd3_5_simple_example": "SD3.5シンプル",
|
||||
"sdxl_refiner_prompt_example": "SDXLリファイナープロンプト",
|
||||
"sdxl_revision_text_prompts": "SDXLリビジョンテキストプロンプト",
|
||||
"sdxl_revision_zero_positive": "SDXLリビジョンゼロポジティブ",
|
||||
"sdxl_simple_example": "SDXLシンプル",
|
||||
"sdxlturbo_example": "SDXLターボ"
|
||||
},
|
||||
"Image API": {
|
||||
"api-openai-dall-e-2-inpaint": "Dall-E 2 インペイント",
|
||||
"api-openai-dall-e-2-t2i": "Dall-E 2 テキストから画像へ",
|
||||
"api-openai-dall-e-3-t2i": "Dall-E 3 テキストから画像へ",
|
||||
"api_bfl_flux_pro_t2i": "BFL Flux 1.1[pro] Ultra テキストから画像へ",
|
||||
"api_ideogram_v3_t2i": "Ideogram V3 テキストから画像へ",
|
||||
"api_luma_photon_i2i": "Luma Photon 画像から画像へ",
|
||||
"api_luma_photon_style_ref": "Luma Photon スタイル参照",
|
||||
"api_openai_image_1_i2i": "OpenAI Image-1 画像から画像へ",
|
||||
"api_openai_image_1_inpaint": "OpenAI Image-1 インペイント",
|
||||
"api_openai_image_1_multi_inputs": "OpenAI Image-1 複数入力",
|
||||
"api_openai_image_1_t2i": "OpenAI Image-1 テキストから画像へ",
|
||||
"api_recraft_image_gen_with_color_control": "Recraft カラーコントロール画像生成",
|
||||
"api_recraft_image_gen_with_style_control": "Recraft スタイルコントロール画像生成",
|
||||
"api_recraft_vector_gen": "Recraft ベクター生成",
|
||||
"api_stability_sd3_t2i": "Stability AI Stable Image Ultra テキストから画像へ"
|
||||
"api_bfl_flux_1_kontext_max_image": "BFL Flux.1 Kontext Max",
|
||||
"api_bfl_flux_1_kontext_multiple_images_input": "BFL Flux.1 Kontext複数画像入力",
|
||||
"api_bfl_flux_1_kontext_pro_image": "BFL Flux.1 Kontext Pro",
|
||||
"api_bfl_flux_pro_t2i": "BFL Flux[Pro]:テキストから画像へ",
|
||||
"api_ideogram_v3_t2i": "Ideogram V3:テキストから画像へ",
|
||||
"api_luma_photon_i2i": "Luma Photon:画像から画像へ",
|
||||
"api_luma_photon_style_ref": "Luma Photon:スタイル参照",
|
||||
"api_openai_dall_e_2_inpaint": "OpenAI: Dall-E 2インペイント",
|
||||
"api_openai_dall_e_2_t2i": "OpenAI: Dall-E 2テキストから画像へ",
|
||||
"api_openai_dall_e_3_t2i": "OpenAI: Dall-E 3テキストから画像へ",
|
||||
"api_openai_image_1_i2i": "OpenAI: GPT-Image-1画像から画像へ",
|
||||
"api_openai_image_1_inpaint": "OpenAI: GPT-Image-1インペイント",
|
||||
"api_openai_image_1_multi_inputs": "OpenAI: GPT-Image-1複数入力",
|
||||
"api_openai_image_1_t2i": "OpenAI: GPT-Image-1テキストから画像へ",
|
||||
"api_recraft_image_gen_with_color_control": "Recraft:カラーコントロール画像生成",
|
||||
"api_recraft_image_gen_with_style_control": "Recraft:スタイルコントロール画像生成",
|
||||
"api_recraft_vector_gen": "Recraft:ベクター生成",
|
||||
"api_runway_reference_to_image": "Runway:リファレンスから画像へ",
|
||||
"api_runway_text_to_image": "Runway:テキストから画像へ",
|
||||
"api_stability_ai_i2i": "Stability AI:画像から画像へ",
|
||||
"api_stability_ai_sd3_5_i2i": "Stability AI:SD3.5画像から画像へ",
|
||||
"api_stability_ai_sd3_5_t2i": "Stability AI:SD3.5テキストから画像へ",
|
||||
"api_stability_ai_stable_image_ultra_t2i": "Stability AI:Stable Image Ultraテキストから画像へ"
|
||||
},
|
||||
"LLM API": {
|
||||
"api_google_gemini": "Google Gemini:チャット",
|
||||
"api_openai_chat": "OpenAI:チャット"
|
||||
},
|
||||
"Upscaling": {
|
||||
"esrgan_example": "ESRGAN",
|
||||
@@ -1283,125 +1316,196 @@
|
||||
},
|
||||
"Video": {
|
||||
"hunyuan_video_text_to_video": "Hunyuanビデオテキストからビデオへ",
|
||||
"image_to_video": "画像からビデオへ",
|
||||
"image_to_video_wan": "Wan 2.1 画像からビデオへ",
|
||||
"image_to_video": "SVD画像からビデオへ",
|
||||
"image_to_video_wan": "Wan 2.1画像からビデオへ",
|
||||
"ltxv_image_to_video": "LTXV画像からビデオへ",
|
||||
"ltxv_text_to_video": "LTXVテキストからビデオへ",
|
||||
"mochi_text_to_video_example": "Mochiテキストからビデオへ",
|
||||
"text_to_video_wan": "Wan 2.1 テキストからビデオへ",
|
||||
"txt_to_image_to_video": "テキストから画像へ、画像からビデオへ",
|
||||
"text_to_video_wan": "Wan 2.1テキストからビデオへ",
|
||||
"txt_to_image_to_video": "SVDテキストから画像へ、画像からビデオへ",
|
||||
"video_cosmos_predict2_2B_video2world_480p_16fps": "Cosmos Predict2 2B Video2World 480p 16fps",
|
||||
"video_wan2_1_fun_camera_v1_1_14B": "Wan 2.1 Fun Camera 14B",
|
||||
"video_wan2_1_fun_camera_v1_1_1_3B": "Wan 2.1 Fun Camera 1.3B",
|
||||
"video_wan_vace_14B_ref2v": "Wan VACEリファレンスからビデオへ",
|
||||
"video_wan_vace_14B_t2v": "Wan VACEテキストからビデオへ",
|
||||
"video_wan_vace_14B_v2v": "Wan VACEコントロールビデオ",
|
||||
"video_wan_vace_flf2v": "Wan VACEファースト-ラストフレーム",
|
||||
"video_wan_vace_inpainting": "Wan VACEインペインティング",
|
||||
"video_wan_vace_outpainting": "Wan VACEアウトペインティング",
|
||||
"wan2_1_flf2v_720_f16": "Wan 2.1 FLF2V 720p F16",
|
||||
"wan2_1_fun_control": "Wan 2.1 ControlNet",
|
||||
"wan2_1_fun_inp": "Wan 2.1 インペインティング"
|
||||
"wan2_1_fun_inp": "Wan 2.1インペインティング"
|
||||
},
|
||||
"Video API": {
|
||||
"api_hailuo_minimax_i2v": "MiniMax 画像から動画へ",
|
||||
"api_kling_i2v": "Kling 画像から動画へ",
|
||||
"api_luma_i2v": "Luma 画像から動画へ",
|
||||
"api_pika_scene": "Pika シーン: 画像から動画へ",
|
||||
"api_pixverse_t2v": "PixVerse テキストから動画へ",
|
||||
"api_pixverse_template_i2v": "PixVerse Template Effects: 画像から動画へ",
|
||||
"api_veo2_i2v": "Veo2 画像から動画へ"
|
||||
"api_hailuo_minimax_i2v": "MiniMax:画像から動画へ",
|
||||
"api_hailuo_minimax_t2v": "MiniMax:テキストから動画へ",
|
||||
"api_kling_effects": "Kling:ビデオエフェクト",
|
||||
"api_kling_flf": "Kling:FLF2V",
|
||||
"api_kling_i2v": "Kling:画像から動画へ",
|
||||
"api_luma_i2v": "Luma:画像から動画へ",
|
||||
"api_luma_t2v": "Luma:テキストから動画へ",
|
||||
"api_moonvalley_image_to_video": "Moonvalley:画像からビデオへ",
|
||||
"api_moonvalley_text_to_video": "Moonvalley:テキストからビデオへ",
|
||||
"api_pika_i2v": "Pika:画像から動画へ",
|
||||
"api_pika_scene": "Pikaシーン:画像から動画へ",
|
||||
"api_pixverse_i2v": "PixVerse:画像から動画へ",
|
||||
"api_pixverse_t2v": "PixVerse:テキストから動画へ",
|
||||
"api_pixverse_template_i2v": "PixVerse Templates:画像から動画へ",
|
||||
"api_runway_first_last_frame": "Runway:ファーストラストフレームから動画へ",
|
||||
"api_runway_gen3a_turbo_image_to_video": "Runway:Gen3a Turbo画像から動画へ",
|
||||
"api_runway_gen4_turo_image_to_video": "Runway:Gen4 Turbo画像から動画へ",
|
||||
"api_veo2_i2v": "Veo2:画像から動画へ"
|
||||
}
|
||||
},
|
||||
"templateDescription": {
|
||||
"3D": {
|
||||
"hunyuan-3d-multiview-elf": "Hunyuan3D 2mvで複数ビューからモデルを生成します。",
|
||||
"hunyuan-3d-turbo": "Hunyuan3D 2mv turboで複数ビューからモデルを生成します。",
|
||||
"hunyuan3d-non-multiview-train": "Hunyuan3D 2.0で単一ビューからモデルを生成します。",
|
||||
"stable_zero123_example": "単一画像から3Dビューを生成します。"
|
||||
"3d_hunyuan3d_image_to_model": "Hunyuan3D 2.0で単一画像から3Dモデルを生成します。",
|
||||
"3d_hunyuan3d_multiview_to_model": "Hunyuan3D 2.0 MVで複数ビューから3Dモデルを生成します。",
|
||||
"3d_hunyuan3d_multiview_to_model_turbo": "Hunyuan3D 2.0 MV Turboで複数ビューから3Dモデルを生成します。",
|
||||
"stable_zero123_example": "Stable Zero123で単一画像から3Dビューを生成します。"
|
||||
},
|
||||
"3D API": {
|
||||
"api_rodin_image_to_model": "Rodin AIで1枚の写真から高精細3Dモデルを生成します。",
|
||||
"api_rodin_multiview_to_model": "Rodinで複数視点から3Dモデルを再構築します。",
|
||||
"api_tripo_image_to_model": "Tripoエンジンで2D画像からプロ品質3Dアセットを生成します。",
|
||||
"api_tripo_multiview_to_model": "Tripoの高度なスキャナーで複数角度から3Dモデルを構築します。",
|
||||
"api_tripo_text_to_model": "Tripoでテキスト説明から3Dオブジェクトを作成します。"
|
||||
},
|
||||
"Area Composition": {
|
||||
"area_composition": "エリアで画像構成をコントロールします。",
|
||||
"area_composition_reversed": "エリア構成ワークフローを逆転します。",
|
||||
"area_composition_square_area_for_subject": "被写体の配置を一貫させます。"
|
||||
"area_composition_square_area_for_subject": "エリア構成で被写体の配置を一貫させます。"
|
||||
},
|
||||
"Audio": {
|
||||
"stable_audio_example": "テキストの説明から音声を生成します。"
|
||||
"audio_ace_step_1_m2m_editing": "ACE-Step v1 M2Mで既存楽曲のスタイルや歌詞を編集します。",
|
||||
"audio_ace_step_1_t2a_instrumentals": "ACE-Step v1でテキストからインスト音楽を生成します。",
|
||||
"audio_ace_step_1_t2a_song": "ACE-Step v1でテキストからボーカル付き楽曲を生成。多言語・スタイルカスタマイズ対応。",
|
||||
"audio_stable_audio_example": "テキスト説明から音声を生成します。"
|
||||
},
|
||||
"Basics": {
|
||||
"default": "テキストの説明から画像を生成します。",
|
||||
"embedding_example": "テキスト反転を使って一貫したスタイルを実現します。",
|
||||
"gligen_textbox_example": "オブジェクトの位置とサイズを指定します。",
|
||||
"default": "テキスト説明から画像を生成します。",
|
||||
"embedding_example": "テキスト反転で一貫したスタイルの画像を生成します。",
|
||||
"gligen_textbox_example": "テキストボックスでオブジェクトの位置を正確に指定して画像を生成します。",
|
||||
"image2image": "テキストプロンプトを使って既存の画像を変換します。",
|
||||
"inpain_model_outpainting": "画像を元の境界を超えて拡張します。",
|
||||
"inpaint_example": "画像の特定部分をシームレスに編集します。",
|
||||
"lora": "LoRAモデルを適用して特定のスタイルや対象を表現します。",
|
||||
"lora_multiple": "複数のLoRAモデルを組み合わせて独自の結果を得ます。"
|
||||
"inpaint_model_outpainting": "画像を元の境界を超えて拡張します。",
|
||||
"lora": "LoRAモデルで特定のスタイルやテーマの画像を生成します。",
|
||||
"lora_multiple": "複数のLoRAモデルを組み合わせて画像を生成します。"
|
||||
},
|
||||
"ControlNet": {
|
||||
"2_pass_pose_worship": "ポーズ参照から画像を生成します。",
|
||||
"controlnet_example": "参照画像で画像生成をコントロールします。",
|
||||
"depth_controlnet": "深度認識生成で画像を作成します。",
|
||||
"2_pass_pose_worship": "ポーズ参照で画像を生成します。",
|
||||
"controlnet_example": "落書き参照画像で画像生成をガイドします。",
|
||||
"depth_controlnet": "深度情報で画像生成をガイドします。",
|
||||
"depth_t2i_adapter": "T2Iアダプターで素早く深度認識画像を生成します。",
|
||||
"mixing_controlnets": "複数のControlNetモデルを組み合わせます。"
|
||||
"mixing_controlnets": "複数のControlNetモデルを組み合わせて画像を生成します。"
|
||||
},
|
||||
"Flux": {
|
||||
"flux_canny_model_example": "エッジ検出から画像を生成します。",
|
||||
"flux_depth_lora_example": "深度認識LoRAで画像を生成します。",
|
||||
"flux_dev_checkpoint_example": "Flux開発モデルを使って画像を生成します。",
|
||||
"flux_fill_inpaint_example": "画像の欠損部分を補完します。",
|
||||
"flux_fill_outpaint_example": "Fluxのアウトペイントで画像を拡張します。",
|
||||
"flux_redux_model_example": "参照画像のスタイルを転送し、Fluxで画像生成をガイドします。",
|
||||
"flux_schnell": "Flux Schnellで素早く画像を生成します。"
|
||||
"flux_canny_model_example": "エッジ検出でFlux画像生成をガイドします。",
|
||||
"flux_depth_lora_example": "深度情報でFlux LoRA画像生成をガイドします。",
|
||||
"flux_dev_checkpoint_example": "Flux Dev fp8量子化版で画像生成。低VRAMデバイス向け、モデルファイル1つでOK、画質はフル版よりやや低め。",
|
||||
"flux_dev_full_text_to_image": "Flux Devフル版で高品質画像生成。大きなVRAMと複数モデルファイルが必要ですが、プロンプト追従性・画質ともに最高です。",
|
||||
"flux_fill_inpaint_example": "Fluxで画像の欠損部分を補完します。",
|
||||
"flux_fill_outpaint_example": "Fluxで画像を元の枠を超えて拡張します。",
|
||||
"flux_kontext_dev_basic": "Flux Kontext(全ノード表示)で画像を編集。ワークフロー学習に最適です。",
|
||||
"flux_kontext_dev_grouped": "ノードをグループ化したFlux Kontext。作業スペースがすっきりします。",
|
||||
"flux_redux_model_example": "参照画像のスタイルを転送し、Flux Reduxで新しい画像を生成します。",
|
||||
"flux_schnell": "Flux Schnell fp8量子化版で高速画像生成。低スペック向け、4ステップで完了。",
|
||||
"flux_schnell_full_text_to_image": "Flux Schnellフル版で高速かつ高品質な画像生成。Apache2.0ライセンス、4ステップで完了。"
|
||||
},
|
||||
"Image": {
|
||||
"hidream_e1_full": "HiDream E1で画像を編集します。",
|
||||
"hidream_i1_dev": "HiDream I1 Devで画像を生成します。",
|
||||
"hidream_i1_fast": "HiDream I1で素早く画像を生成します。",
|
||||
"hidream_i1_full": "HiDream I1で画像を生成します。",
|
||||
"sd3_5_large_blur": "SD 3.5でぼかし参照画像から画像を生成します。",
|
||||
"sd3_5_large_canny_controlnet_example": "SD 3.5でエッジ検出を使って画像生成をガイドします。",
|
||||
"sd3_5_large_depth": "SD 3.5で深度認識画像を生成します。",
|
||||
"hidream_e1_full": "HiDream E1 - プロ向け自然言語画像編集モデル。",
|
||||
"hidream_i1_dev": "HiDream I1 Dev - 28ステップ推論のバランス型。中程度のハードウェア向け。",
|
||||
"hidream_i1_fast": "HiDream I1 Fast - 16ステップの軽量版。低スペックでも高速プレビュー可能。",
|
||||
"hidream_i1_full": "HiDream I1 Full - 50ステップのフル版。最高画質。",
|
||||
"image_chroma_text_to_image": "ChromaはFluxをベースに構造を一部変更したモデルです。",
|
||||
"image_cosmos_predict2_2B_t2i": "Cosmos-Predict2 2B T2Iで物理的に正確で高精細・高忠実度な画像を生成します。",
|
||||
"image_lotus_depth_v1_1": "ComfyUIでLotus Depthを実行し、高精細な単眼深度推定を実現します。",
|
||||
"image_omnigen2_image_edit": "OmniGen2の高度な画像編集・テキスト描画機能で自然言語指示による画像編集が可能です。",
|
||||
"image_omnigen2_t2i": "OmniGen2の統合7Bマルチモーダルモデルとデュアルパス構造で高品質な画像をテキストから生成します。",
|
||||
"sd3_5_large_blur": "SD 3.5でぼかし参照画像を使って画像生成をガイドします。",
|
||||
"sd3_5_large_canny_controlnet_example": "SD 3.5 Canny ControlNetでエッジ検出を使って画像生成をガイドします。",
|
||||
"sd3_5_large_depth": "SD 3.5で深度情報を使って画像生成をガイドします。",
|
||||
"sd3_5_simple_example": "SD 3.5で画像を生成します。",
|
||||
"sdxl_refiner_prompt_example": "SDXLの出力をリファイナーで強化します。",
|
||||
"sdxl_revision_text_prompts": "参照画像からコンセプトを転送し、SDXLで画像生成をガイドします。",
|
||||
"sdxl_revision_zero_positive": "参照画像とテキストプロンプトを組み合わせてSDXLで画像生成をガイドします。",
|
||||
"sdxl_refiner_prompt_example": "RefinerモデルでSDXL画像を強化します。",
|
||||
"sdxl_revision_text_prompts": "参照画像のコンセプトを転送し、SDXL Revisionで画像生成をガイドします。",
|
||||
"sdxl_revision_zero_positive": "テキストプロンプトと参照画像を組み合わせてSDXL Revisionで画像生成します。",
|
||||
"sdxl_simple_example": "SDXLで高品質な画像を生成します。",
|
||||
"sdxlturbo_example": "SDXL Turboでワンステップで画像を生成します。"
|
||||
"sdxlturbo_example": "SDXL Turboでワンステップ画像生成。"
|
||||
},
|
||||
"Image API": {
|
||||
"api-openai-dall-e-2-inpaint": "Dall-E 2 APIで画像のインペイントを行います。",
|
||||
"api-openai-dall-e-2-t2i": "Dall-E 2 APIでテキストの説明から画像を生成します。",
|
||||
"api-openai-dall-e-3-t2i": "Dall-E 3 APIでテキストの説明から画像を生成します。",
|
||||
"api_bfl_flux_pro_t2i": "FLUX.1 [pro]で優れたプロンプト追従性、画質、ディテール、多様な出力の画像を生成します。",
|
||||
"api_ideogram_v3_t2i": "高品質な画像・プロンプト整合性、フォトリアリズム、テキスト描画で画像を生成します。プロ品質のロゴ、ポスター、ランディングページ、商品写真などを作成。複雑な背景や精密なライティング、リアルな環境ディテールで洗練された空間構成を簡単に作成できます。",
|
||||
"api_bfl_flux_1_kontext_max_image": "Flux.1 Kontext max imageで画像を編集します。",
|
||||
"api_bfl_flux_1_kontext_multiple_images_input": "複数画像を入力し、Flux.1 Kontextで編集します。",
|
||||
"api_bfl_flux_1_kontext_pro_image": "Flux.1 Kontext pro imageで画像を編集します。",
|
||||
"api_bfl_flux_pro_t2i": "FLUX.1 Proで優れたプロンプト追従性と画質の画像を生成します。",
|
||||
"api_ideogram_v3_t2i": "Ideogram V3でプロンプト整合性・フォトリアリズム・テキスト描画に優れたプロ品質画像を生成します。",
|
||||
"api_luma_photon_i2i": "画像とプロンプトを組み合わせて画像生成をガイドします。",
|
||||
"api_luma_photon_style_ref": "スタイル参照を正確に適用・ブレンドします。Luma Photonは各参照画像の本質を捉え、異なるビジュアル要素を組み合わせつつプロ品質を維持します。",
|
||||
"api_openai_image_1_i2i": "GPT Image 1 APIで画像から画像を生成します。",
|
||||
"api_openai_image_1_inpaint": "GPT Image 1 APIで画像のインペイントを行います。",
|
||||
"api_openai_image_1_multi_inputs": "GPT Image 1 APIで複数入力を使って画像を生成します。",
|
||||
"api_openai_image_1_t2i": "GPT Image 1 APIでテキストの説明から画像を生成します。",
|
||||
"api_recraft_image_gen_with_color_control": "カスタムパレットを作成して複数画像で再利用したり、各写真の色を手動で選択できます。ブランドのカラーパレットに合わせて独自のビジュアルを作成します。",
|
||||
"api_recraft_image_gen_with_style_control": "ビジュアル例でスタイルを制御し、位置合わせやオブジェクトの微調整が可能です。スタイルを保存・共有してブランドの一貫性を保ちます。",
|
||||
"api_recraft_vector_gen": "テキストプロンプトからRecraftのAIベクター生成でベクター画像を作成します。ロゴ、ポスター、アイコンセット、広告、バナー、モックアップに最適な高品質SVGファイルを生成。アプリやウェブサイト用のブランドベクターイラストを数秒で作成します。",
|
||||
"api_stability_sd3_t2i": "高品質でプロンプト追従性の高い画像を生成します。1メガピクセル解像度でプロ用途に最適です。"
|
||||
"api_luma_photon_style_ref": "Luma Photonでスタイル参照をブレンドし、正確にコントロールします。",
|
||||
"api_openai_dall_e_2_inpaint": "OpenAI Dall-E 2 APIでインペイントを行います。",
|
||||
"api_openai_dall_e_2_t2i": "OpenAI Dall-E 2 APIでテキストから画像を生成します。",
|
||||
"api_openai_dall_e_3_t2i": "OpenAI Dall-E 3 APIでテキストから画像を生成します。",
|
||||
"api_openai_image_1_i2i": "OpenAI GPT Image 1 APIで画像から画像を生成します。",
|
||||
"api_openai_image_1_inpaint": "OpenAI GPT Image 1 APIでインペイントを行います。",
|
||||
"api_openai_image_1_multi_inputs": "OpenAI GPT Image 1 APIで複数入力から画像を生成します。",
|
||||
"api_openai_image_1_t2i": "OpenAI GPT Image 1 APIでテキストから画像を生成します。",
|
||||
"api_recraft_image_gen_with_color_control": "Recraftでカスタムカラーパレットやブランドビジュアルの画像を生成します。",
|
||||
"api_recraft_image_gen_with_style_control": "ビジュアル例でスタイル・位置合わせ・オブジェクト微調整を制御。スタイル保存・共有でブランド一貫性を実現。",
|
||||
"api_recraft_vector_gen": "RecraftのAIベクター生成でテキストから高品質ベクター画像を生成します。",
|
||||
"api_runway_reference_to_image": "RunwayのAIで参照スタイル・構図から新しい画像を生成します。",
|
||||
"api_runway_text_to_image": "RunwayのAIモデルでテキストから高品質画像を生成します。",
|
||||
"api_stability_ai_i2i": "Stability AIで高品質な画像変換。プロ編集やスタイル転送に最適。",
|
||||
"api_stability_ai_sd3_5_i2i": "Stability AI SD3.5で高品質・高プロンプト追従性の画像変換。1メガピクセル対応。",
|
||||
"api_stability_ai_sd3_5_t2i": "Stability AI SD3.5で高品質・高プロンプト追従性の画像を生成。1メガピクセル対応。",
|
||||
"api_stability_ai_stable_image_ultra_t2i": "高品質・高プロンプト追従性の画像を生成。1メガピクセルでプロ用途に最適。"
|
||||
},
|
||||
"LLM API": {
|
||||
"api_google_gemini": "Google GeminiのマルチモーダルAIと推論能力を体験できます。",
|
||||
"api_openai_chat": "OpenAIの高度な言語モデルとインテリジェントな会話を楽しめます。"
|
||||
},
|
||||
"Upscaling": {
|
||||
"esrgan_example": "アップスケールモデルで画像品質を向上させます。",
|
||||
"hiresfix_esrgan_workflow": "中間ステップでアップスケールモデルを使用します。",
|
||||
"esrgan_example": "ESRGANモデルで画像をアップスケールします。",
|
||||
"hiresfix_esrgan_workflow": "中間ステップでESRGANモデルを使って画像をアップスケールします。",
|
||||
"hiresfix_latent_workflow": "latent空間で画像品質を向上させます。",
|
||||
"latent_upscale_different_prompt_model": "アップスケールしつつパスごとにプロンプトを変更します。"
|
||||
"latent_upscale_different_prompt_model": "アップスケールしつつ各パスでプロンプトを変更します。"
|
||||
},
|
||||
"Video": {
|
||||
"hunyuan_video_text_to_video": "Hunyuanモデルで動画を生成します。",
|
||||
"image_to_video": "画像をアニメーション動画に変換します。",
|
||||
"image_to_video_wan": "画像から素早く動画を生成します。",
|
||||
"ltxv_image_to_video": "静止画像を動画に変換します。",
|
||||
"ltxv_text_to_video": "テキストの説明から動画を生成します。",
|
||||
"mochi_text_to_video_example": "Mochiモデルで動画を作成します。",
|
||||
"text_to_video_wan": "テキストの説明から素早く動画を生成します。",
|
||||
"txt_to_image_to_video": "テキストから画像を生成し、それを動画に変換します。",
|
||||
"wan2_1_flf2v_720_f16": "最初と最後のフレームを制御して動画を生成します。",
|
||||
"wan2_1_fun_control": "ポーズ、深度、エッジ制御などで動画生成をガイドします。",
|
||||
"wan2_1_fun_inp": "開始フレームと終了フレームから動画を作成します。"
|
||||
"hunyuan_video_text_to_video": "Hunyuanモデルでテキストから動画を生成します。",
|
||||
"image_to_video": "静止画像から動画を生成します。",
|
||||
"image_to_video_wan": "Wan 2.1で画像から動画を生成します。",
|
||||
"ltxv_image_to_video": "静止画像から動画を生成します。",
|
||||
"ltxv_text_to_video": "テキストから動画を生成します。",
|
||||
"mochi_text_to_video_example": "Mochiモデルでテキストから動画を生成します。",
|
||||
"text_to_video_wan": "Wan 2.1でテキストから動画を生成します。",
|
||||
"txt_to_image_to_video": "まず画像を生成し、それを動画に変換します。",
|
||||
"video_cosmos_predict2_2B_video2world_480p_16fps": "Cosmos-Predict2 2B Video2Worldで物理的に正確・高忠実度・一貫性のある動画を生成します。",
|
||||
"video_wan2_1_fun_camera_v1_1_14B": "Wan 2.1 Fun Camera 14Bで高度なカメラ制御の高品質動画を生成します。",
|
||||
"video_wan2_1_fun_camera_v1_1_1_3B": "Wan 2.1 Fun Camera 1.3Bで映画的なカメラワークの動画を生成します。",
|
||||
"video_wan_vace_14B_ref2v": "Wan VACEで参照画像のスタイル・内容に合った動画を生成します。",
|
||||
"video_wan_vace_14B_t2v": "Wan VACE-14Bでテキストから高品質な動画を生成。480p/720p対応。",
|
||||
"video_wan_vace_14B_v2v": "Wan VACEで入力動画や参照画像をコントロールして新しい動画を生成します。",
|
||||
"video_wan_vace_flf2v": "Wan VACEで開始・終了フレームを指定し、滑らかな動画遷移を生成します。",
|
||||
"video_wan_vace_inpainting": "Wan VACEで動画の特定領域を編集し、周囲の内容を保持します。",
|
||||
"video_wan_vace_outpainting": "Wan VACEのアウトペイントで動画サイズを拡張します。",
|
||||
"wan2_1_flf2v_720_f16": "Wan 2.1 FLF2Vで最初と最後のフレームを制御して720p動画を生成します。",
|
||||
"wan2_1_fun_control": "Wan 2.1 ControlNetでポーズ・深度・エッジ制御による動画生成。",
|
||||
"wan2_1_fun_inp": "Wan 2.1で開始・終了フレームから動画を生成します。"
|
||||
},
|
||||
"Video API": {
|
||||
"api_hailuo_minimax_i2v": "画像とテキストから洗練された動画を作成。CGI統合や流行のAIハグなどのエフェクトも。多彩なスタイルやテーマから選択可能です。",
|
||||
"api_kling_i2v": "アクション、表情、カメラワークのプロンプト追従性に優れた動画を作成します。複雑なシーケンシャルアクションもサポートし、あなたがシーンの監督になれます。",
|
||||
"api_luma_i2v": "静止画像から瞬時に高品質なアニメーションを作成します。",
|
||||
"api_pika_scene": "複数の画像を素材として使い、それらを組み込んだ動画を生成します。",
|
||||
"api_pixverse_t2v": "プロンプト解釈が正確で、ダイナミックな動画を生成します。",
|
||||
"api_pixverse_template_i2v": "静止画像を動きやエフェクトのあるダイナミックな動画に変換します。",
|
||||
"api_hailuo_minimax_i2v": "MiniMaxで画像+テキストからCGI統合の洗練動画を生成します。",
|
||||
"api_hailuo_minimax_t2v": "MiniMaxでテキストから高品質動画を直接生成。CGI効果や多彩なスタイルに対応。",
|
||||
"api_kling_effects": "Klingで画像にビジュアルエフェクトを適用し、ダイナミックな動画を生成します。",
|
||||
"api_kling_flf": "Klingで最初と最後のフレームを制御して動画を生成します。",
|
||||
"api_kling_i2v": "Klingでアクション・表情・カメラワークのプロンプト追従性に優れた動画を生成します。",
|
||||
"api_luma_i2v": "Lumaで静止画像から高品質アニメーションを瞬時に生成します。",
|
||||
"api_luma_t2v": "Lumaでシンプルなプロンプトから高品質動画を生成します。",
|
||||
"api_moonvalley_image_to_video": "Moonvalleyで画像から1080p映画品質動画を生成。学習データは全てライセンス済み。",
|
||||
"api_moonvalley_text_to_video": "Moonvalleyでテキストから1080p映画品質動画を生成。学習データは全てライセンス済み。",
|
||||
"api_pika_i2v": "Pika AIで静止画像から滑らかなアニメ動画を生成します。",
|
||||
"api_pika_scene": "Pika Scenesで複数画像を組み込んだ動画を生成します。",
|
||||
"api_pixverse_i2v": "PixVerseで静止画像から動きやエフェクトのあるダイナミック動画を生成します。",
|
||||
"api_pixverse_t2v": "PixVerseでプロンプト解釈が正確でダイナミックな動画を生成します。",
|
||||
"api_pixverse_template_i2v": "PixVerseで静止画像から動きやエフェクトのあるダイナミック動画を生成します。",
|
||||
"api_runway_first_last_frame": "Runwayで2つのキーフレーム間を滑らかに遷移する動画を生成します。",
|
||||
"api_runway_gen3a_turbo_image_to_video": "Runway Gen3a Turboで静止画像から映画品質動画を生成します。",
|
||||
"api_runway_gen4_turo_image_to_video": "Runway Gen4 Turboで画像からダイナミックな動画を生成します。",
|
||||
"api_veo2_i2v": "Google Veo2 APIで画像から動画を生成します。"
|
||||
}
|
||||
},
|
||||
@@ -1483,7 +1587,8 @@
|
||||
"title": "ComfyUIへようこそ"
|
||||
},
|
||||
"whatsNewPopup": {
|
||||
"learnMore": "詳細はこちら"
|
||||
"learnMore": "詳細はこちら",
|
||||
"noReleaseNotes": "リリースノートはありません。"
|
||||
},
|
||||
"workflowService": {
|
||||
"enterFilename": "ファイル名を入力",
|
||||
|
||||
@@ -259,6 +259,10 @@
|
||||
"name": "スナップハイライトノード",
|
||||
"tooltip": "有効な入力スロットを持つノードの上にリンクをドラッグすると、ノードがハイライトされます"
|
||||
},
|
||||
"Comfy_Notification_ShowVersionUpdates": {
|
||||
"name": "バージョン更新を表示",
|
||||
"tooltip": "新しいモデルや主要な新機能のアップデートを表示します。"
|
||||
},
|
||||
"Comfy_Pointer_ClickBufferTime": {
|
||||
"name": "ポインタークリックドリフト遅延",
|
||||
"tooltip": "ポインターボタンを押した後、ポインタの動きが無視される最大時間(ミリ秒単位)です。\n\nクリック中にポインタが移動した場合、オブジェクトが意図せず動かされるのを防ぎます。"
|
||||
@@ -322,7 +326,8 @@
|
||||
"Bottom": "下",
|
||||
"Disabled": "無効",
|
||||
"Top": "上"
|
||||
}
|
||||
},
|
||||
"tooltip": "メニューバーの位置。モバイルデバイスでは、メニューは常に上部に表示されます。"
|
||||
},
|
||||
"Comfy_Validation_NodeDefs": {
|
||||
"name": "ノード定義を検証(遅い)",
|
||||
|
||||
@@ -787,11 +787,11 @@
|
||||
"Toggle Logs Bottom Panel": "로그 하단 패널 전환",
|
||||
"Toggle Model Library Sidebar": "모델 라이브러리 사이드바 전환",
|
||||
"Toggle Node Library Sidebar": "노드 라이브러리 사이드바 전환",
|
||||
"Toggle Queue Sidebar": "실행 대기열 사이드바 전환",
|
||||
"Toggle Queue Sidebar": "대기열 사이드바 전환",
|
||||
"Toggle Search Box": "검색 상자 전환",
|
||||
"Toggle Terminal Bottom Panel": "터미널 하단 패널 전환",
|
||||
"Toggle Theme (Dark/Light)": "테마 전환 (어두운/밝은)",
|
||||
"Toggle Workflows Sidebar": "워크플로 사이드바 전환",
|
||||
"Toggle Workflows Sidebar": "워크플로우 사이드바 전환",
|
||||
"Toggle the Custom Nodes Manager": "커스텀 노드 매니저 전환",
|
||||
"Toggle the Custom Nodes Manager Progress Bar": "커스텀 노드 매니저 진행률 표시줄 전환",
|
||||
"Undo": "실행 취소",
|
||||
@@ -1099,6 +1099,7 @@
|
||||
"Node Search Box": "노드 검색 상자",
|
||||
"Node Widget": "노드 위젯",
|
||||
"NodeLibrary": "노드 라이브러리",
|
||||
"Notification Preferences": "알림 환경설정",
|
||||
"Pointer": "포인터",
|
||||
"Queue": "실행 대기열",
|
||||
"QueueButton": "실행 대기열 버튼",
|
||||
@@ -1199,209 +1200,312 @@
|
||||
"Flux": "FLUX",
|
||||
"Image": "이미지",
|
||||
"Image API": "이미지 API",
|
||||
"LLM API": "LLM API",
|
||||
"Upscaling": "업스케일링",
|
||||
"Video": "비디오",
|
||||
"Video API": "비디오 API"
|
||||
},
|
||||
"template": {
|
||||
"3D": {
|
||||
"hunyuan-3d-multiview-elf": "Hunyuan3D 다중뷰",
|
||||
"hunyuan-3d-turbo": "Hunyuan3D 터보",
|
||||
"hunyuan3d-non-multiview-train": "Hunyuan3D",
|
||||
"stable_zero123_example": "스테이블 제로123"
|
||||
"3d_hunyuan3d_image_to_model": "Hunyuan3D 2.0",
|
||||
"3d_hunyuan3d_multiview_to_model": "Hunyuan3D 2.0 MV",
|
||||
"3d_hunyuan3d_multiview_to_model_turbo": "Hunyuan3D 2.0 MV 터보",
|
||||
"stable_zero123_example": "Stable Zero123"
|
||||
},
|
||||
"3D API": {
|
||||
"api_rodin_image_to_model": "Rodin: 이미지 투 모델",
|
||||
"api_rodin_multiview_to_model": "Rodin: 다중뷰 투 모델",
|
||||
"api_tripo_image_to_model": "Tripo: 이미지 투 모델",
|
||||
"api_tripo_multiview_to_model": "Tripo: 다중뷰 투 모델",
|
||||
"api_tripo_text_to_model": "Tripo: 텍스트 투 모델"
|
||||
},
|
||||
"Area Composition": {
|
||||
"area_composition": "영역 구성",
|
||||
"area_composition_reversed": "역 영역 구성",
|
||||
"area_composition_square_area_for_subject": "주제를 위한 사각형 영역 구성"
|
||||
"area_composition_square_area_for_subject": "주제용 사각형 영역 구성"
|
||||
},
|
||||
"Audio": {
|
||||
"stable_audio_example": "스테이블 오디오"
|
||||
"audio_ace_step_1_m2m_editing": "ACE Step v1 M2M 편집",
|
||||
"audio_ace_step_1_t2a_instrumentals": "ACE-Step v1 텍스트 투 연주곡",
|
||||
"audio_ace_step_1_t2a_song": "ACE Step v1 텍스트 투 노래",
|
||||
"audio_stable_audio_example": "Stable Audio"
|
||||
},
|
||||
"Basics": {
|
||||
"default": "이미지 생성",
|
||||
"embedding_example": "임베딩 예제",
|
||||
"gligen_textbox_example": "글리젠 텍스트박스 예제",
|
||||
"image2image": "이미지로 이미지 생성 예제",
|
||||
"inpain_model_outpainting": "인페인트 모델 아웃페인팅",
|
||||
"embedding_example": "임베딩",
|
||||
"gligen_textbox_example": "글리젠 텍스트박스",
|
||||
"image2image": "이미지 투 이미지",
|
||||
"inpaint_example": "인페인트",
|
||||
"lora": "로라",
|
||||
"lora_multiple": "다중 로라"
|
||||
"inpaint_model_outpainting": "아웃페인팅",
|
||||
"lora": "LoRA",
|
||||
"lora_multiple": "다중 LoRA"
|
||||
},
|
||||
"ControlNet": {
|
||||
"2_pass_pose_worship": "2 패스 경배 포즈",
|
||||
"controlnet_example": "컨트롤넷",
|
||||
"2_pass_pose_worship": "포즈 컨트롤넷 2패스",
|
||||
"controlnet_example": "스크리블 컨트롤넷",
|
||||
"depth_controlnet": "깊이 컨트롤넷",
|
||||
"depth_t2i_adapter": "깊이 T2I 어댑터",
|
||||
"mixing_controlnets": "컨트롤넷 섞기"
|
||||
"mixing_controlnets": "컨트롤넷 혼합"
|
||||
},
|
||||
"Flux": {
|
||||
"flux_canny_model_example": "FLUX 캐니 모델 예제",
|
||||
"flux_depth_lora_example": "FLUX 깊이 로라 예제",
|
||||
"flux_dev_checkpoint_example": "FLUX Dev 예제",
|
||||
"flux_fill_inpaint_example": "FLUX 인페인트 예제",
|
||||
"flux_fill_outpaint_example": "FLUX 아웃페인트 예제",
|
||||
"flux_redux_model_example": "FLUX Redux 모델 예제",
|
||||
"flux_schnell": "FLUX Schnell"
|
||||
"flux_canny_model_example": "Flux 캐니 모델",
|
||||
"flux_depth_lora_example": "Flux 깊이 로라",
|
||||
"flux_dev_checkpoint_example": "Flux Dev fp8",
|
||||
"flux_dev_full_text_to_image": "Flux Dev 전체 텍스트 투 이미지",
|
||||
"flux_fill_inpaint_example": "Flux 인페인트",
|
||||
"flux_fill_outpaint_example": "Flux 아웃페인트",
|
||||
"flux_kontext_dev_basic": "Flux Kontext Dev(기본)",
|
||||
"flux_kontext_dev_grouped": "Flux Kontext Dev(그룹화)",
|
||||
"flux_redux_model_example": "Flux Redux 모델",
|
||||
"flux_schnell": "Flux Schnell fp8",
|
||||
"flux_schnell_full_text_to_image": "Flux Schnell 전체 텍스트 투 이미지"
|
||||
},
|
||||
"Image": {
|
||||
"hidream_e1_full": "HiDream E1 Full",
|
||||
"hidream_i1_dev": "HiDream I1 Dev",
|
||||
"hidream_i1_fast": "HiDream I1 Fast",
|
||||
"hidream_i1_full": "HiDream I1 Full",
|
||||
"sd3_5_large_blur": "SD3.5 Large 블러 컨트롤넷",
|
||||
"sd3_5_large_canny_controlnet_example": "SD3.5 Large 캐니 컨트롤넷",
|
||||
"sd3_5_large_depth": "SD3.5 Large 깊이 컨트롤넷",
|
||||
"sd3_5_simple_example": "간단한 SD3.5 예제",
|
||||
"image_chroma_text_to_image": "Chroma 텍스트 투 이미지",
|
||||
"image_cosmos_predict2_2B_t2i": "Cosmos Predict2 2B T2I",
|
||||
"image_lotus_depth_v1_1": "Lotus Depth",
|
||||
"image_omnigen2_image_edit": "OmniGen2 이미지 편집",
|
||||
"image_omnigen2_t2i": "OmniGen2 텍스트 투 이미지",
|
||||
"sd3_5_large_blur": "SD3.5 대형 블러",
|
||||
"sd3_5_large_canny_controlnet_example": "SD3.5 대형 캐니 컨트롤넷",
|
||||
"sd3_5_large_depth": "SD3.5 대형 깊이",
|
||||
"sd3_5_simple_example": "SD3.5 간단 예제",
|
||||
"sdxl_refiner_prompt_example": "SDXL 리파이너 프롬프트",
|
||||
"sdxl_revision_text_prompts": "SDXL Revision 텍스트 프롬프트",
|
||||
"sdxl_revision_zero_positive": "SDXL Revision Zero Positive",
|
||||
"sdxl_simple_example": "간단한 SDXL 예제",
|
||||
"sdxl_simple_example": "SDXL 간단 예제",
|
||||
"sdxlturbo_example": "SDXL 터보"
|
||||
},
|
||||
"Image API": {
|
||||
"api-openai-dall-e-2-inpaint": "Dall-E 2 인페인트",
|
||||
"api-openai-dall-e-2-t2i": "Dall-E 2 텍스트 투 이미지",
|
||||
"api-openai-dall-e-3-t2i": "Dall-E 3 텍스트 투 이미지",
|
||||
"api_bfl_flux_pro_t2i": "BFL Flux 1.1[pro] Ultra 텍스트 투 이미지",
|
||||
"api_ideogram_v3_t2i": "Ideogram V3 텍스트 투 이미지",
|
||||
"api_luma_photon_i2i": "Luma Photon 이미지 투 이미지",
|
||||
"api_luma_photon_style_ref": "Luma Photon 스타일 참조",
|
||||
"api_openai_image_1_i2i": "OpenAI Image-1 이미지 투 이미지",
|
||||
"api_openai_image_1_inpaint": "OpenAI Image-1 인페인트",
|
||||
"api_openai_image_1_multi_inputs": "OpenAI Image-1 멀티 입력",
|
||||
"api_openai_image_1_t2i": "OpenAI Image-1 텍스트 투 이미지",
|
||||
"api_recraft_image_gen_with_color_control": "Recraft 색상 제어 이미지 생성",
|
||||
"api_recraft_image_gen_with_style_control": "Recraft 스타일 제어 이미지 생성",
|
||||
"api_recraft_vector_gen": "Recraft 벡터 생성",
|
||||
"api_stability_sd3_t2i": "Stability AI Stable Image Ultra 텍스트 투 이미지"
|
||||
"api_bfl_flux_1_kontext_max_image": "BFL Flux.1 Kontext 맥스",
|
||||
"api_bfl_flux_1_kontext_multiple_images_input": "BFL Flux.1 Kontext 다중 이미지 입력",
|
||||
"api_bfl_flux_1_kontext_pro_image": "BFL Flux.1 Kontext 프로",
|
||||
"api_bfl_flux_pro_t2i": "BFL Flux[Pro]: 텍스트 투 이미지",
|
||||
"api_ideogram_v3_t2i": "Ideogram V3: 텍스트 투 이미지",
|
||||
"api_luma_photon_i2i": "Luma Photon: 이미지 투 이미지",
|
||||
"api_luma_photon_style_ref": "Luma Photon: 스타일 참조",
|
||||
"api_openai_dall_e_2_inpaint": "OpenAI: Dall-E 2 인페인트",
|
||||
"api_openai_dall_e_2_t2i": "OpenAI: Dall-E 2 텍스트 투 이미지",
|
||||
"api_openai_dall_e_3_t2i": "OpenAI: Dall-E 3 텍스트 투 이미지",
|
||||
"api_openai_image_1_i2i": "OpenAI: GPT-Image-1 이미지 투 이미지",
|
||||
"api_openai_image_1_inpaint": "OpenAI: GPT-Image-1 인페인트",
|
||||
"api_openai_image_1_multi_inputs": "OpenAI: GPT-Image-1 멀티 입력",
|
||||
"api_openai_image_1_t2i": "OpenAI: GPT-Image-1 텍스트 투 이미지",
|
||||
"api_recraft_image_gen_with_color_control": "Recraft: 색상 제어 이미지 생성",
|
||||
"api_recraft_image_gen_with_style_control": "Recraft: 스타일 제어 이미지 생성",
|
||||
"api_recraft_vector_gen": "Recraft: 벡터 생성",
|
||||
"api_runway_reference_to_image": "Runway: 참조 투 이미지",
|
||||
"api_runway_text_to_image": "Runway: 텍스트 투 이미지",
|
||||
"api_stability_ai_i2i": "Stability AI: 이미지 투 이미지",
|
||||
"api_stability_ai_sd3_5_i2i": "Stability AI: SD3.5 이미지 투 이미지",
|
||||
"api_stability_ai_sd3_5_t2i": "Stability AI: SD3.5 텍스트 투 이미지",
|
||||
"api_stability_ai_stable_image_ultra_t2i": "Stability AI: Stable Image Ultra 텍스트 투 이미지"
|
||||
},
|
||||
"LLM API": {
|
||||
"api_google_gemini": "Google Gemini: 채팅",
|
||||
"api_openai_chat": "OpenAI: 채팅"
|
||||
},
|
||||
"Upscaling": {
|
||||
"esrgan_example": "ESRGAN",
|
||||
"hiresfix_esrgan_workflow": "HiresFix ESRGAN 워크플로우",
|
||||
"hiresfix_latent_workflow": "HiresFix Latent 워크플로우",
|
||||
"latent_upscale_different_prompt_model": "Latent Upscale 다른 프롬프트 모델"
|
||||
"hiresfix_latent_workflow": "업스케일",
|
||||
"latent_upscale_different_prompt_model": "Latent 업스케일 다른 프롬프트 모델"
|
||||
},
|
||||
"Video": {
|
||||
"hunyuan_video_text_to_video": "텍스트 -> 비디오 (Hunyuan Video)",
|
||||
"image_to_video": "이미지 -> 동영상",
|
||||
"image_to_video_wan": "Wan 2.1 이미지에서 비디오로",
|
||||
"ltxv_image_to_video": "이미지 -> 동영상 (LTXV)",
|
||||
"ltxv_text_to_video": "텍스트 -> 동영상 (LTXV)",
|
||||
"mochi_text_to_video_example": "텍스트 -> 동영상 (Mochi)",
|
||||
"text_to_video_wan": "Wan 2.1 텍스트를 비디오로",
|
||||
"txt_to_image_to_video": "텍스트 -> 이미지 -> 동영상",
|
||||
"hunyuan_video_text_to_video": "Hunyuan 비디오 텍스트 투 비디오",
|
||||
"image_to_video": "SVD 이미지 투 비디오",
|
||||
"image_to_video_wan": "Wan 2.1 이미지 투 비디오",
|
||||
"ltxv_image_to_video": "LTXV 이미지 투 비디오",
|
||||
"ltxv_text_to_video": "LTXV 텍스트 투 비디오",
|
||||
"mochi_text_to_video_example": "Mochi 텍스트 투 비디오",
|
||||
"text_to_video_wan": "Wan 2.1 텍스트 투 비디오",
|
||||
"txt_to_image_to_video": "SVD 텍스트 투 이미지 투 비디오",
|
||||
"video_cosmos_predict2_2B_video2world_480p_16fps": "Cosmos Predict2 2B Video2World 480p 16fps",
|
||||
"video_wan2_1_fun_camera_v1_1_14B": "Wan 2.1 Fun Camera 14B",
|
||||
"video_wan2_1_fun_camera_v1_1_1_3B": "Wan 2.1 Fun Camera 1.3B",
|
||||
"video_wan_vace_14B_ref2v": "Wan VACE 참조 투 비디오",
|
||||
"video_wan_vace_14B_t2v": "Wan VACE 텍스트 투 비디오",
|
||||
"video_wan_vace_14B_v2v": "Wan VACE 컨트롤 비디오",
|
||||
"video_wan_vace_flf2v": "Wan VACE 첫-마지막 프레임",
|
||||
"video_wan_vace_inpainting": "Wan VACE 인페인팅",
|
||||
"video_wan_vace_outpainting": "Wan VACE 아웃페인팅",
|
||||
"wan2_1_flf2v_720_f16": "Wan 2.1 FLF2V 720p F16",
|
||||
"wan2_1_fun_control": "Wan 2.1 컨트롤넷",
|
||||
"wan2_1_fun_inp": "Wan 2.1 인페인트"
|
||||
"wan2_1_fun_inp": "Wan 2.1 인페인팅"
|
||||
},
|
||||
"Video API": {
|
||||
"api_hailuo_minimax_i2v": "MiniMax 이미지 투 비디오",
|
||||
"api_kling_i2v": "Kling 이미지 투 비디오",
|
||||
"api_luma_i2v": "Luma 이미지 투 비디오",
|
||||
"api_hailuo_minimax_i2v": "MiniMax: 이미지 투 비디오",
|
||||
"api_hailuo_minimax_t2v": "MiniMax: 텍스트 투 비디오",
|
||||
"api_kling_effects": "Kling: 비디오 효과",
|
||||
"api_kling_flf": "Kling: FLF2V",
|
||||
"api_kling_i2v": "Kling: 이미지 투 비디오",
|
||||
"api_luma_i2v": "Luma: 이미지 투 비디오",
|
||||
"api_luma_t2v": "Luma: 텍스트 투 비디오",
|
||||
"api_moonvalley_image_to_video": "Moonvalley: 이미지 투 비디오",
|
||||
"api_moonvalley_text_to_video": "Moonvalley: 텍스트 투 비디오",
|
||||
"api_pika_i2v": "Pika: 이미지 투 비디오",
|
||||
"api_pika_scene": "Pika 장면: 이미지 투 비디오",
|
||||
"api_pixverse_t2v": "PixVerse 텍스트 투 비디오",
|
||||
"api_pixverse_template_i2v": "PixVerse Template Effects: 이미지 투 비디오",
|
||||
"api_veo2_i2v": "Veo2 이미지 투 비디오"
|
||||
"api_pixverse_i2v": "PixVerse: 이미지 투 비디오",
|
||||
"api_pixverse_t2v": "PixVerse: 텍스트 투 비디오",
|
||||
"api_pixverse_template_i2v": "PixVerse 템플릿: 이미지 투 비디오",
|
||||
"api_runway_first_last_frame": "Runway: 첫-마지막 프레임 투 비디오",
|
||||
"api_runway_gen3a_turbo_image_to_video": "Runway: Gen3a Turbo 이미지 투 비디오",
|
||||
"api_runway_gen4_turo_image_to_video": "Runway: Gen4 Turbo 이미지 투 비디오",
|
||||
"api_veo2_i2v": "Veo2: 이미지 투 비디오"
|
||||
}
|
||||
},
|
||||
"templateDescription": {
|
||||
"3D": {
|
||||
"hunyuan-3d-multiview-elf": "Hunyuan3D 2mv로 여러 뷰에서 모델을 생성합니다.",
|
||||
"hunyuan-3d-turbo": "Hunyuan3D 2mv turbo로 여러 뷰에서 모델을 생성합니다.",
|
||||
"hunyuan3d-non-multiview-train": "Hunyuan3D 2.0으로 단일 뷰에서 모델을 생성합니다.",
|
||||
"stable_zero123_example": "단일 이미지에서 3D 뷰를 생성합니다."
|
||||
"3d_hunyuan3d_image_to_model": "Hunyuan3D 2.0으로 단일 이미지에서 3D 모델을 생성합니다.",
|
||||
"3d_hunyuan3d_multiview_to_model": "Hunyuan3D 2.0 MV로 여러 뷰에서 3D 모델을 생성합니다.",
|
||||
"3d_hunyuan3d_multiview_to_model_turbo": "Hunyuan3D 2.0 MV Turbo로 여러 뷰에서 3D 모델을 생성합니다.",
|
||||
"stable_zero123_example": "Stable Zero123으로 단일 이미지에서 3D 뷰를 생성합니다."
|
||||
},
|
||||
"3D API": {
|
||||
"api_rodin_image_to_model": "Rodin AI로 단일 사진에서 정밀한 3D 모델을 생성합니다.",
|
||||
"api_rodin_multiview_to_model": "Rodin의 다각도 재구성으로 종합적인 3D 모델을 만듭니다.",
|
||||
"api_tripo_image_to_model": "Tripo 엔진으로 2D 이미지에서 전문가용 3D 에셋을 생성합니다.",
|
||||
"api_tripo_multiview_to_model": "Tripo의 고급 스캐너로 여러 각도에서 3D 모델을 만듭니다.",
|
||||
"api_tripo_text_to_model": "Tripo의 텍스트 기반 모델링으로 설명에서 3D 오브젝트를 만듭니다."
|
||||
},
|
||||
"Area Composition": {
|
||||
"area_composition": "영역을 통해 이미지 구성을 제어합니다.",
|
||||
"area_composition_reversed": "영역 구성 워크플로우를 반대로 적용합니다.",
|
||||
"area_composition_square_area_for_subject": "일관된 피사체 배치를 만듭니다."
|
||||
"area_composition": "정의된 영역으로 구성을 제어하여 이미지를 생성합니다.",
|
||||
"area_composition_square_area_for_subject": "일관된 피사체 배치를 위해 영역 구성을 활용해 이미지를 생성합니다."
|
||||
},
|
||||
"Audio": {
|
||||
"stable_audio_example": "텍스트 설명으로 오디오를 생성합니다."
|
||||
"audio_ace_step_1_m2m_editing": "ACE-Step v1 M2M으로 기존 곡의 스타일과 가사를 변경합니다.",
|
||||
"audio_ace_step_1_t2a_instrumentals": "ACE-Step v1로 텍스트 프롬프트에서 연주곡을 생성합니다.",
|
||||
"audio_ace_step_1_t2a_song": "ACE-Step v1로 텍스트 프롬프트에서 노래(보컬 포함)를 생성하며, 다국어 및 스타일 커스터마이징을 지원합니다.",
|
||||
"audio_stable_audio_example": "Stable Audio로 텍스트 프롬프트에서 오디오를 생성합니다."
|
||||
},
|
||||
"Basics": {
|
||||
"default": "텍스트 설명으로 이미지를 생성합니다.",
|
||||
"embedding_example": "일관된 스타일을 위해 텍스트 인버전을 사용합니다.",
|
||||
"gligen_textbox_example": "객체의 위치와 크기를 지정합니다.",
|
||||
"default": "텍스트 프롬프트로 이미지를 생성합니다.",
|
||||
"embedding_example": "일관된 스타일을 위해 텍스트 인버전을 사용하여 이미지를 생성합니다.",
|
||||
"gligen_textbox_example": "텍스트 박스를 사용해 객체의 위치를 정밀하게 지정하여 이미지를 생성합니다.",
|
||||
"image2image": "텍스트 프롬프트를 사용하여 기존 이미지를 변환합니다.",
|
||||
"inpain_model_outpainting": "이미지의 원래 경계를 넘어 확장합니다.",
|
||||
"inpaint_example": "이미지의 특정 부분을 자연스럽게 편집합니다.",
|
||||
"lora": "특정 스타일이나 주제를 위해 LoRA 모델을 적용합니다.",
|
||||
"lora_multiple": "여러 LoRA 모델을 결합하여 독특한 결과를 만듭니다."
|
||||
"inpaint_model_outpainting": "이미지를 원래 경계 너머로 확장합니다.",
|
||||
"lora": "특정 스타일이나 주제를 위한 LoRA 모델로 이미지를 생성합니다.",
|
||||
"lora_multiple": "여러 LoRA 모델을 결합하여 이미지를 생성합니다."
|
||||
},
|
||||
"ControlNet": {
|
||||
"2_pass_pose_worship": "포즈 참조로 이미지를 생성합니다.",
|
||||
"controlnet_example": "참조 이미지를 사용해 이미지 생성을 제어합니다.",
|
||||
"depth_controlnet": "깊이 인식 이미지 생성을 합니다.",
|
||||
"depth_t2i_adapter": "T2I 어댑터로 깊이 인식 이미지를 빠르게 생성합니다.",
|
||||
"mixing_controlnets": "여러 ControlNet 모델을 결합합니다."
|
||||
"2_pass_pose_worship": "ControlNet으로 포즈 참조를 활용해 이미지를 생성합니다.",
|
||||
"controlnet_example": "ControlNet으로 스크리블 참조 이미지를 활용해 이미지를 생성합니다.",
|
||||
"depth_controlnet": "ControlNet으로 깊이 정보를 활용해 이미지를 생성합니다.",
|
||||
"depth_t2i_adapter": "T2I 어댑터로 깊이 정보를 활용해 이미지를 생성합니다.",
|
||||
"mixing_controlnets": "여러 ControlNet 모델을 결합해 이미지를 생성합니다."
|
||||
},
|
||||
"Flux": {
|
||||
"flux_canny_model_example": "검출된 경계선으로 이미지를 생성합니다.",
|
||||
"flux_depth_lora_example": "깊이 인식 LoRA 를 이용해 이미지를 생성합니다.",
|
||||
"flux_dev_checkpoint_example": "FLUX Dev 모델로 이미지를 생성합니다.",
|
||||
"flux_fill_inpaint_example": "이미지의 누락된 부분을 채웁니다.",
|
||||
"flux_fill_outpaint_example": "FLUX 아웃페인팅으로 이미지를 확장합니다.",
|
||||
"flux_redux_model_example": "참조 이미지의 스타일을 가이드 이미지 생성에 적용합니다.",
|
||||
"flux_schnell": "FLUX Schnell 모델로 이미지를 빠르게 생성합니다."
|
||||
"flux_canny_model_example": "Flux Canny로 에지 감지에 따라 이미지를 생성합니다.",
|
||||
"flux_depth_lora_example": "Flux LoRA로 깊이 정보를 활용해 이미지를 생성합니다.",
|
||||
"flux_dev_checkpoint_example": "Flux Dev fp8 양자화 버전으로 이미지를 생성합니다. VRAM이 제한된 장치에 적합하며, 모델 파일 하나만 필요하지만 화질은 전체 버전보다 약간 낮습니다.",
|
||||
"flux_dev_full_text_to_image": "Flux Dev 전체 버전으로 고품질 이미지를 생성합니다. 더 많은 VRAM과 여러 모델 파일이 필요하지만, 최고의 프롬프트 반영력과 화질을 제공합니다.",
|
||||
"flux_fill_inpaint_example": "Flux 인페인팅으로 이미지의 누락된 부분을 채웁니다.",
|
||||
"flux_fill_outpaint_example": "Flux 아웃페인팅으로 이미지를 경계 너머로 확장합니다.",
|
||||
"flux_kontext_dev_basic": "Flux Kontext의 전체 노드 표시로 이미지를 편집합니다. 워크플로우 학습에 적합합니다.",
|
||||
"flux_kontext_dev_grouped": "노드가 그룹화된 Flux Kontext의 간소화 버전으로 작업 공간이 더 깔끔합니다.",
|
||||
"flux_redux_model_example": "Flux Redux로 참조 이미지의 스타일을 전송하여 이미지를 생성합니다.",
|
||||
"flux_schnell": "Flux Schnell fp8 양자화 버전으로 이미지를 빠르게 생성합니다. 저사양 하드웨어에 이상적이며, 4단계만으로 이미지를 생성할 수 있습니다.",
|
||||
"flux_schnell_full_text_to_image": "Flux Schnell 전체 버전으로 이미지를 빠르게 생성합니다. Apache2.0 라이선스를 사용하며, 4단계만으로 좋은 화질을 유지합니다."
|
||||
},
|
||||
"Image": {
|
||||
"hidream_e1_full": "HiDream E1 모델로 이미지를 편집합니다.",
|
||||
"hidream_i1_dev": "HiDream I1 Dev 모델로 이미지를 생성합니다.",
|
||||
"hidream_i1_fast": "HiDream I1 Fast 모델로 이미지를 빠르게 생성합니다.",
|
||||
"hidream_i1_full": "HiDream I1 Full 모델로 이미지를 생성합니다.",
|
||||
"sd3_5_large_blur": "SD 3.5 모델로 흐릿한 참조 이미지에서 이미지를 생성합니다.",
|
||||
"sd3_5_large_canny_controlnet_example": "Canny 에지 이미지를 통해 SD 3.5 모델 이미지 생성을 가이드합니다.",
|
||||
"sd3_5_large_depth": "깊이 인식 이미지를 통해 SD 3.5 모델 이미지 생성을 가이드합니다.",
|
||||
"sd3_5_simple_example": "SD 3.5 모델로 이미지를 생성합니다.",
|
||||
"sdxl_refiner_prompt_example": "SDXL 결과물을 리파이너로 향상시킵니다.",
|
||||
"sdxl_revision_text_prompts": "참조 이미지의 개념을 SDXL 이미지 생성에 적용합니다.",
|
||||
"sdxl_revision_zero_positive": "참조 이미지와 함께 텍스트 프롬프트를 추가하여 SDXL 이미지 생성을 가이드합니다.",
|
||||
"sdxl_simple_example": "SDXL 모델로 고품질 이미지를 생성합니다.",
|
||||
"sdxlturbo_example": "SDXL Turbo 모델로 1 스텝으로 이미지를 생성합니다."
|
||||
"hidream_e1_full": "HiDream E1 - 전문적인 자연어 이미지 편집 모델로 이미지를 편집합니다.",
|
||||
"hidream_i1_dev": "HiDream I1 Dev - 28 스텝의 균형 잡힌 버전으로, 중간급 하드웨어에 적합합니다.",
|
||||
"hidream_i1_fast": "HiDream I1 Fast - 16 스텝의 경량 버전으로, 저사양 하드웨어에서 빠른 미리보기에 적합합니다.",
|
||||
"hidream_i1_full": "HiDream I1 Full - 50 스텝의 완전 버전으로, 최고의 품질을 제공합니다.",
|
||||
"image_chroma_text_to_image": "Chroma는 flux에서 수정된 모델로, 아키텍처에 일부 변화가 있습니다.",
|
||||
"image_cosmos_predict2_2B_t2i": "Cosmos-Predict2 2B T2I로 물리적으로 정확하고 고해상도, 디테일이 풍부한 이미지를 생성합니다.",
|
||||
"image_lotus_depth_v1_1": "Lotus Depth로 고효율 단안 깊이 추정 및 디테일 보존이 뛰어난 zero-shot 이미지를 생성합니다.",
|
||||
"image_omnigen2_image_edit": "OmniGen2의 고급 이미지 편집 기능과 텍스트 렌더링 지원으로 자연어 지시로 이미지를 편집합니다.",
|
||||
"image_omnigen2_t2i": "OmniGen2의 통합 7B 멀티모달 모델과 듀얼 패스 아키텍처로 텍스트 프롬프트에서 고품질 이미지를 생성합니다.",
|
||||
"sd3_5_large_blur": "SD 3.5로 흐릿한 참조 이미지를 활용해 이미지를 생성합니다.",
|
||||
"sd3_5_large_canny_controlnet_example": "SD 3.5 Canny ControlNet으로 에지 감지에 따라 이미지를 생성합니다.",
|
||||
"sd3_5_large_depth": "SD 3.5로 깊이 정보를 활용해 이미지를 생성합니다.",
|
||||
"sd3_5_simple_example": "SD 3.5로 이미지를 생성합니다.",
|
||||
"sdxl_refiner_prompt_example": "SDXL 리파이너 모델로 이미지를 향상시킵니다.",
|
||||
"sdxl_revision_text_prompts": "SDXL Revision으로 참조 이미지의 개념을 전송하여 이미지를 생성합니다.",
|
||||
"sdxl_revision_zero_positive": "SDXL Revision으로 텍스트 프롬프트와 참조 이미지를 함께 사용해 이미지를 생성합니다.",
|
||||
"sdxl_simple_example": "SDXL로 고품질 이미지를 생성합니다.",
|
||||
"sdxlturbo_example": "SDXL Turbo로 한 번에 이미지를 생성합니다."
|
||||
},
|
||||
"Image API": {
|
||||
"api-openai-dall-e-2-inpaint": "Dall-E 2 API로 이미지를 인페인팅합니다.",
|
||||
"api-openai-dall-e-2-t2i": "Dall-E 2 API로 텍스트 설명에서 이미지를 생성합니다.",
|
||||
"api-openai-dall-e-3-t2i": "Dall-E 3 API로 텍스트 설명에서 이미지를 생성합니다.",
|
||||
"api_bfl_flux_pro_t2i": "FLUX.1 [pro]의 뛰어난 프롬프트 반영, 시각적 품질, 이미지 디테일, 다양성으로 이미지를 생성합니다.",
|
||||
"api_ideogram_v3_t2i": "고품질 이미지-프롬프트 일치, 포토리얼리즘, 텍스트 렌더링으로 이미지를 생성합니다. 전문가 수준의 로고, 홍보 포스터, 랜딩 페이지 컨셉, 제품 사진 등을 만드세요. 정교한 배경, 섬세한 조명과 색상, 사실적인 환경 디테일로 세련된 공간 구성을 손쉽게 제작할 수 있습니다.",
|
||||
"api_bfl_flux_1_kontext_max_image": "Flux.1 Kontext 맥스 이미지로 이미지를 편집합니다.",
|
||||
"api_bfl_flux_1_kontext_multiple_images_input": "여러 이미지를 입력하고 Flux.1 Kontext로 편집합니다.",
|
||||
"api_bfl_flux_1_kontext_pro_image": "Flux.1 Kontext 프로 이미지로 이미지를 편집합니다.",
|
||||
"api_bfl_flux_pro_t2i": "FLUX.1 Pro로 뛰어난 프롬프트 반영과 시각적 품질로 이미지를 생성합니다.",
|
||||
"api_ideogram_v3_t2i": "Ideogram V3로 뛰어난 프롬프트 일치, 포토리얼리즘, 텍스트 렌더링으로 전문가 수준의 이미지를 생성합니다.",
|
||||
"api_luma_photon_i2i": "이미지와 프롬프트를 조합하여 이미지 생성을 가이드합니다.",
|
||||
"api_luma_photon_style_ref": "정확한 제어로 스타일 참조를 적용하고 혼합합니다. Luma Photon은 각 참조 이미지의 본질을 포착하여, 전문적인 품질을 유지하면서 독특한 시각적 요소를 결합할 수 있습니다.",
|
||||
"api_openai_image_1_i2i": "GPT Image 1 API로 이미지에서 이미지를 생성합니다.",
|
||||
"api_openai_image_1_inpaint": "GPT Image 1 API로 이미지를 인페인팅합니다.",
|
||||
"api_openai_image_1_multi_inputs": "GPT Image 1 API로 여러 입력을 사용해 이미지를 생성합니다.",
|
||||
"api_openai_image_1_t2i": "GPT Image 1 API로 텍스트 설명에서 이미지를 생성합니다.",
|
||||
"api_recraft_image_gen_with_color_control": "여러 이미지에 재사용할 맞춤 팔레트를 만들거나 각 사진마다 색상을 직접 선택하세요. 브랜드의 색상 팔레트에 맞추고, 독창적인 비주얼을 제작하세요.",
|
||||
"api_recraft_image_gen_with_style_control": "시각적 예시로 스타일을 제어하고, 위치를 맞추며, 객체를 미세 조정하세요. 스타일을 저장 및 공유하여 브랜드 일관성을 유지할 수 있습니다.",
|
||||
"api_recraft_vector_gen": "텍스트 프롬프트에서 Recraft의 AI 벡터 생성기로 벡터 이미지를 만드세요. 로고, 포스터, 아이콘 세트, 광고, 배너, 목업 등 최고의 품질의 벡터 아트를 제작할 수 있습니다. 선명하고 고품질의 SVG 파일로 디자인을 완성하세요. 앱이나 웹사이트를 위한 브랜드 벡터 일러스트를 몇 초 만에 만드세요.",
|
||||
"api_stability_sd3_t2i": "1메가픽셀 해상도에서 전문가용 고품질 이미지를 생성합니다. 프롬프트 반영이 우수합니다."
|
||||
"api_luma_photon_style_ref": "Luma Photon으로 스타일 참조를 혼합하여 정밀하게 제어하며 이미지를 생성합니다.",
|
||||
"api_openai_dall_e_2_inpaint": "OpenAI Dall-E 2 API로 인페인팅을 통해 이미지를 편집합니다.",
|
||||
"api_openai_dall_e_2_t2i": "OpenAI Dall-E 2 API로 텍스트 프롬프트에서 이미지를 생성합니다.",
|
||||
"api_openai_dall_e_3_t2i": "OpenAI Dall-E 3 API로 텍스트 프롬프트에서 이미지를 생성합니다.",
|
||||
"api_openai_image_1_i2i": "OpenAI GPT Image 1 API로 입력 이미지에서 이미지를 생성합니다.",
|
||||
"api_openai_image_1_inpaint": "OpenAI GPT Image 1 API로 인페인팅을 통해 이미지를 편집합니다.",
|
||||
"api_openai_image_1_multi_inputs": "OpenAI GPT Image 1 API로 여러 입력에서 이미지를 생성합니다.",
|
||||
"api_openai_image_1_t2i": "OpenAI GPT Image 1 API로 텍스트 프롬프트에서 이미지를 생성합니다.",
|
||||
"api_recraft_image_gen_with_color_control": "Recraft로 맞춤 색상 팔레트와 브랜드 비주얼로 이미지를 생성합니다.",
|
||||
"api_recraft_image_gen_with_style_control": "시각적 예시로 스타일을 제어하고, 위치를 맞추며, 객체를 미세 조정합니다. 스타일을 저장 및 공유하여 브랜드 일관성을 유지할 수 있습니다.",
|
||||
"api_recraft_vector_gen": "Recraft의 AI 벡터 생성기로 텍스트 프롬프트에서 고품질 벡터 이미지를 생성합니다.",
|
||||
"api_runway_reference_to_image": "Runway의 AI로 참조 스타일과 구성을 기반으로 새 이미지를 생성합니다.",
|
||||
"api_runway_text_to_image": "Runway의 AI 모델로 텍스트 프롬프트에서 고품질 이미지를 생성합니다.",
|
||||
"api_stability_ai_i2i": "Stability AI로 고품질 이미지 변환 및 스타일 전환을 지원합니다.",
|
||||
"api_stability_ai_sd3_5_i2i": "1메가픽셀 해상도에서 전문가용 고품질 이미지를 생성합니다. 프롬프트 반영이 우수합니다.",
|
||||
"api_stability_ai_sd3_5_t2i": "1메가픽셀 해상도에서 전문가용 고품질 이미지를 생성합니다. 프롬프트 반영이 우수합니다.",
|
||||
"api_stability_ai_stable_image_ultra_t2i": "1메가픽셀 해상도에서 전문가용 고품질 이미지를 생성합니다. 프롬프트 반영이 우수합니다."
|
||||
},
|
||||
"LLM API": {
|
||||
"api_google_gemini": "Google Gemini의 멀티모달 AI와 추론 능력을 경험하세요.",
|
||||
"api_openai_chat": "OpenAI의 고급 언어 모델과 대화하세요."
|
||||
},
|
||||
"Upscaling": {
|
||||
"esrgan_example": "업스케일 모델로 이미지 품질을 향상합니다.",
|
||||
"hiresfix_esrgan_workflow": "중간 단계에서 업스케일 모델을 사용합니다.",
|
||||
"hiresfix_latent_workflow": "latent 공간에서 이미지 품질을 향상합니다.",
|
||||
"latent_upscale_different_prompt_model": "업스케일과 프롬프트 변경을 여러 번에 걸쳐 적용합니다."
|
||||
"esrgan_example": "ESRGAN 모델로 이미지 품질을 향상합니다.",
|
||||
"hiresfix_esrgan_workflow": "중간 생성 단계에서 ESRGAN 모델로 업스케일합니다.",
|
||||
"hiresfix_latent_workflow": "Latent 공간에서 이미지 품질을 향상합니다.",
|
||||
"latent_upscale_different_prompt_model": "여러 번의 생성 패스에서 프롬프트를 변경하며 업스케일합니다."
|
||||
},
|
||||
"Video": {
|
||||
"hunyuan_video_text_to_video": "Hunyuan 모델을 사용하여 비디오를 생성합니다.",
|
||||
"image_to_video": "이미지를 애니메이션 비디오로 변환합니다.",
|
||||
"image_to_video_wan": "이미지로부터 빠르게 비디오를 생성합니다.",
|
||||
"ltxv_image_to_video": "정지 이미지를 비디오로 변환합니다.",
|
||||
"ltxv_text_to_video": "텍스트 설명으로 비디오를 생성합니다.",
|
||||
"mochi_text_to_video_example": "Mochi 모델로 비디오를 생성합니다.",
|
||||
"text_to_video_wan": "텍스트 설명으로 빠르게 비디오를 생성합니다.",
|
||||
"txt_to_image_to_video": "텍스트로 이미지를 생성한 후 비디오로 변환합니다.",
|
||||
"wan2_1_flf2v_720_f16": "첫 프레임과 마지막 프레임을 제어하여 비디오를 생성합니다.",
|
||||
"wan2_1_fun_control": "포즈, 깊이, 에지 등으로 비디오 생성을 가이드합니다.",
|
||||
"wan2_1_fun_inp": "시작 및 종료 프레임으로 비디오를 생성합니다."
|
||||
"hunyuan_video_text_to_video": "Hunyuan 모델로 텍스트 프롬프트에서 비디오를 생성합니다.",
|
||||
"image_to_video": "정지 이미지로부터 비디오를 생성합니다.",
|
||||
"image_to_video_wan": "Wan 2.1로 이미지에서 비디오를 생성합니다.",
|
||||
"ltxv_image_to_video": "정지 이미지로부터 비디오를 생성합니다.",
|
||||
"ltxv_text_to_video": "텍스트 프롬프트로 비디오를 생성합니다.",
|
||||
"mochi_text_to_video_example": "Mochi 모델로 텍스트 프롬프트에서 비디오를 생성합니다.",
|
||||
"text_to_video_wan": "Wan 2.1로 텍스트 프롬프트에서 비디오를 생성합니다.",
|
||||
"txt_to_image_to_video": "텍스트 프롬프트로 이미지를 생성한 후 비디오로 변환합니다.",
|
||||
"video_cosmos_predict2_2B_video2world_480p_16fps": "Cosmos-Predict2 2B Video2World로 물리적으로 정확하고 고해상도, 일관성 있는 비디오 시뮬레이션을 생성합니다.",
|
||||
"video_wan2_1_fun_camera_v1_1_14B": "14B 전체 모델로 고급 카메라 제어가 가능한 고품질 비디오를 생성합니다.",
|
||||
"video_wan2_1_fun_camera_v1_1_1_3B": "Wan 2.1 Fun Camera 1.3B 모델로 시네마틱 카메라 움직임이 있는 동적 비디오를 생성합니다.",
|
||||
"video_wan_vace_14B_ref2v": "참조 이미지의 스타일과 내용을 일치시키는 비디오를 생성합니다.",
|
||||
"video_wan_vace_14B_t2v": "VACE-14B 모델로 480p 및 720p 고품질 비디오를 생성합니다.",
|
||||
"video_wan_vace_14B_v2v": "Wan VACE로 입력 비디오와 참조 이미지를 제어하여 비디오를 생성합니다.",
|
||||
"video_wan_vace_flf2v": "시작 및 종료 프레임을 정의하여 부드러운 비디오 전환을 생성합니다. 사용자 지정 키프레임 시퀀스를 지원합니다.",
|
||||
"video_wan_vace_inpainting": "특정 영역을 편집하면서 주변 내용을 보존하는 비디오를 생성합니다. 객체 제거 또는 교체에 적합합니다.",
|
||||
"video_wan_vace_outpainting": "Wan VACE 아웃페인팅으로 비디오 크기를 확장하여 비디오를 생성합니다.",
|
||||
"wan2_1_flf2v_720_f16": "Wan 2.1 FLF2V로 첫 프레임과 마지막 프레임을 제어하여 비디오를 생성합니다.",
|
||||
"wan2_1_fun_control": "Wan 2.1 ControlNet으로 포즈, 깊이, 에지 제어로 비디오를 생성합니다.",
|
||||
"wan2_1_fun_inp": "Wan 2.1 인페인팅으로 시작 및 종료 프레임에서 비디오를 생성합니다."
|
||||
},
|
||||
"Video API": {
|
||||
"api_hailuo_minimax_i2v": "이미지와 텍스트로 정교한 비디오를 생성합니다. CGI 통합, 바이럴 AI 허깅 등 트렌디한 사진 효과도 포함됩니다. 다양한 비디오 스타일과 테마로 창의적인 비전을 실현하세요.",
|
||||
"api_kling_i2v": "동작, 표정, 카메라 움직임에 대한 프롬프트 반영이 뛰어난 비디오를 생성합니다. 이제 복잡한 프롬프트와 연속 동작도 지원되어, 장면의 연출자가 될 수 있습니다.",
|
||||
"api_luma_i2v": "정지 이미지를 즉시 고품질 애니메이션으로 만드세요.",
|
||||
"api_pika_scene": "여러 이미지를 재료로 사용하여 모두를 포함하는 비디오를 생성합니다.",
|
||||
"api_hailuo_minimax_i2v": "MiniMax로 이미지와 텍스트에서 CGI 통합된 정교한 비디오를 생성합니다.",
|
||||
"api_hailuo_minimax_t2v": "MiniMax의 고급 AI로 텍스트 프롬프트에서 고품질 비디오를 직접 생성합니다. 다양한 스타일과 전문 CGI 효과로 창의적인 비주얼을 만듭니다.",
|
||||
"api_kling_effects": "Kling으로 이미지에 시각 효과를 적용해 동적 비디오를 생성합니다.",
|
||||
"api_kling_flf": "첫 프레임과 마지막 프레임을 제어하여 비디오를 생성합니다.",
|
||||
"api_kling_i2v": "Kling으로 동작, 표정, 카메라 움직임에 대한 프롬프트 반영이 뛰어난 비디오를 생성합니다.",
|
||||
"api_luma_i2v": "정지 이미지를 즉시 고품질 애니메이션으로 만듭니다.",
|
||||
"api_luma_t2v": "간단한 프롬프트로 고품질 비디오를 생성합니다.",
|
||||
"api_moonvalley_image_to_video": "라이선스 데이터로만 학습된 모델로 이미지를 사용해 시네마틱 1080p 비디오를 생성합니다.",
|
||||
"api_moonvalley_text_to_video": "라이선스 데이터로만 학습된 모델로 텍스트 프롬프트에서 시네마틱 1080p 비디오를 생성합니다.",
|
||||
"api_pika_i2v": "Pika AI로 단일 정지 이미지에서 부드러운 애니메이션 비디오를 생성합니다.",
|
||||
"api_pika_scene": "Pika Scenes로 여러 입력 이미지를 포함하는 비디오를 생성합니다.",
|
||||
"api_pixverse_i2v": "PixVerse로 정지 이미지에서 모션과 효과가 있는 동적 비디오를 생성합니다.",
|
||||
"api_pixverse_t2v": "정확한 프롬프트 해석과 놀라운 비디오 다이내믹스로 비디오를 생성합니다.",
|
||||
"api_pixverse_template_i2v": "정지 이미지를 동적 비디오로 변환하고 모션과 효과를 추가합니다.",
|
||||
"api_pixverse_template_i2v": "PixVerse로 정지 이미지에서 모션과 효과가 있는 동적 비디오를 생성합니다.",
|
||||
"api_runway_first_last_frame": "Runway로 두 키프레임 사이의 부드러운 비디오 전환을 생성합니다.",
|
||||
"api_runway_gen3a_turbo_image_to_video": "Runway Gen3a Turbo로 정지 이미지에서 시네마틱 비디오를 생성합니다.",
|
||||
"api_runway_gen4_turo_image_to_video": "Runway Gen4 Turbo로 이미지에서 동적 비디오를 생성합니다.",
|
||||
"api_veo2_i2v": "Google Veo2 API로 이미지에서 비디오를 생성합니다."
|
||||
}
|
||||
},
|
||||
@@ -1483,7 +1587,8 @@
|
||||
"title": "ComfyUI에 오신 것을 환영합니다"
|
||||
},
|
||||
"whatsNewPopup": {
|
||||
"learnMore": "자세히 알아보기"
|
||||
"learnMore": "자세히 알아보기",
|
||||
"noReleaseNotes": "릴리스 노트가 없습니다."
|
||||
},
|
||||
"workflowService": {
|
||||
"enterFilename": "파일 이름 입력",
|
||||
|
||||
@@ -259,6 +259,10 @@
|
||||
"name": "스냅 하이라이트 노드",
|
||||
"tooltip": "링크를 유효한 입력 슬롯이 있는 노드 위로 드래그할 때 노드를 강조 표시합니다."
|
||||
},
|
||||
"Comfy_Notification_ShowVersionUpdates": {
|
||||
"name": "버전 업데이트 표시",
|
||||
"tooltip": "새 모델과 주요 신규 기능에 대한 업데이트를 표시합니다."
|
||||
},
|
||||
"Comfy_Pointer_ClickBufferTime": {
|
||||
"name": "포인터 클릭 드리프트 지연",
|
||||
"tooltip": "포인터 버튼을 누른 후, 포인터 움직임을 무시할 수 있는 최대 시간(밀리초)입니다.\n\n클릭하는 동안 포인터가 움직여 의도치 않게 객체가 밀리는 것을 방지합니다."
|
||||
@@ -322,7 +326,8 @@
|
||||
"Bottom": "하단",
|
||||
"Disabled": "비활성화",
|
||||
"Top": "상단"
|
||||
}
|
||||
},
|
||||
"tooltip": "메뉴 바 위치입니다. 모바일 기기에서는 메뉴가 항상 상단에 표시됩니다."
|
||||
},
|
||||
"Comfy_Validation_NodeDefs": {
|
||||
"name": "노드 정의 유효성 검사 (느림)",
|
||||
|
||||
@@ -785,13 +785,13 @@
|
||||
"Toggle Bottom Panel": "Переключить нижнюю панель",
|
||||
"Toggle Focus Mode": "Переключить режим фокуса",
|
||||
"Toggle Logs Bottom Panel": "Переключение нижней панели журналов",
|
||||
"Toggle Model Library Sidebar": "Переключение боковой панели библиотеки моделей",
|
||||
"Toggle Node Library Sidebar": "Переключение боковой панели библиотеки нод",
|
||||
"Toggle Queue Sidebar": "Переключение боковой панели очереди",
|
||||
"Toggle Model Library Sidebar": "Показать/скрыть боковую панель библиотеки моделей",
|
||||
"Toggle Node Library Sidebar": "Показать/скрыть боковую панель библиотеки узлов",
|
||||
"Toggle Queue Sidebar": "Показать/скрыть боковую панель очереди",
|
||||
"Toggle Search Box": "Переключить поисковую панель",
|
||||
"Toggle Terminal Bottom Panel": "Переключение нижней панели терминала",
|
||||
"Toggle Theme (Dark/Light)": "Переключение темы (Тёмная/Светлая)",
|
||||
"Toggle Workflows Sidebar": "Переключение боковой панели рабочих процессов",
|
||||
"Toggle Workflows Sidebar": "Показать/скрыть боковую панель рабочих процессов",
|
||||
"Toggle the Custom Nodes Manager": "Переключить менеджер пользовательских узлов",
|
||||
"Toggle the Custom Nodes Manager Progress Bar": "Переключить индикатор выполнения менеджера пользовательских узлов",
|
||||
"Undo": "Отменить",
|
||||
@@ -1099,6 +1099,7 @@
|
||||
"Node Search Box": "Поисковая строка нод",
|
||||
"Node Widget": "Виджет ноды",
|
||||
"NodeLibrary": "Библиотека нод",
|
||||
"Notification Preferences": "Настройки уведомлений",
|
||||
"Pointer": "Указатель",
|
||||
"Queue": "Очередь",
|
||||
"QueueButton": "Кнопка очереди",
|
||||
@@ -1199,55 +1200,75 @@
|
||||
"Flux": "Flux",
|
||||
"Image": "Изображение",
|
||||
"Image API": "Image API",
|
||||
"LLM API": "LLM API",
|
||||
"Upscaling": "Увеличение разрешения",
|
||||
"Video": "Видео",
|
||||
"Video API": "Video API"
|
||||
},
|
||||
"template": {
|
||||
"3D": {
|
||||
"hunyuan-3d-multiview-elf": "Hunyuan3D Многовидовой",
|
||||
"hunyuan-3d-turbo": "Hunyuan3D Турбо",
|
||||
"hunyuan3d-non-multiview-train": "Hunyuan3D",
|
||||
"3d_hunyuan3d_image_to_model": "Hunyuan3D",
|
||||
"3d_hunyuan3d_multiview_to_model": "Hunyuan3D Многовидовой",
|
||||
"3d_hunyuan3d_multiview_to_model_turbo": "Hunyuan3D Турбо",
|
||||
"stable_zero123_example": "Stable Zero123"
|
||||
},
|
||||
"3D API": {
|
||||
"api_rodin_image_to_model": "Rodin: изображение в модель",
|
||||
"api_rodin_multiview_to_model": "Rodin: многовидовой в модель",
|
||||
"api_tripo_image_to_model": "Tripo: изображение в модель",
|
||||
"api_tripo_multiview_to_model": "Tripo: многовидовой в модель",
|
||||
"api_tripo_text_to_model": "Tripo: текст в модель"
|
||||
},
|
||||
"Area Composition": {
|
||||
"area_composition": "Композиция области",
|
||||
"area_composition_reversed": "Обратная композиция области",
|
||||
"area_composition_square_area_for_subject": "Композиция области квадратной области для субъекта"
|
||||
},
|
||||
"Audio": {
|
||||
"stable_audio_example": "Stable Audio"
|
||||
"audio_ace_step_1_m2m_editing": "ACE Step v1 M2M редактирование",
|
||||
"audio_ace_step_1_t2a_instrumentals": "ACE-Step v1 текст в инструментальную музыку",
|
||||
"audio_ace_step_1_t2a_song": "ACE Step v1 текст в песню",
|
||||
"audio_stable_audio_example": "Stable Audio"
|
||||
},
|
||||
"Basics": {
|
||||
"default": "Генерация изображений",
|
||||
"embedding_example": "Встраивание",
|
||||
"gligen_textbox_example": "Gligen Textbox",
|
||||
"image2image": "Изображение в изображение",
|
||||
"inpain_model_outpainting": "Inpaint Model Outpainting",
|
||||
"inpaint_example": "Inpaint",
|
||||
"lora": "Lora",
|
||||
"lora_multiple": "Lora Multiple"
|
||||
"inpaint_model_outpainting": "Outpainting",
|
||||
"lora": "LoRA",
|
||||
"lora_multiple": "LoRA Multiple"
|
||||
},
|
||||
"ControlNet": {
|
||||
"2_pass_pose_worship": "2 Pass Pose Worship",
|
||||
"controlnet_example": "ControlNet",
|
||||
"controlnet_example": "Scribble ControlNet",
|
||||
"depth_controlnet": "Depth ControlNet",
|
||||
"depth_t2i_adapter": "Depth T2I Adapter",
|
||||
"mixing_controlnets": "Mixing ControlNets"
|
||||
},
|
||||
"Flux": {
|
||||
"flux_canny_model_example": "Flux Canny Model",
|
||||
"flux_depth_lora_example": "Flux Depth Lora",
|
||||
"flux_dev_checkpoint_example": "Flux Dev",
|
||||
"flux_depth_lora_example": "Flux Depth LoRA",
|
||||
"flux_dev_checkpoint_example": "Flux Dev fp8",
|
||||
"flux_dev_full_text_to_image": "Flux Dev полный текст в изображение",
|
||||
"flux_fill_inpaint_example": "Flux Inpaint",
|
||||
"flux_fill_outpaint_example": "Flux Outpaint",
|
||||
"flux_kontext_dev_basic": "Flux Kontext Dev (Базовый)",
|
||||
"flux_kontext_dev_grouped": "Flux Kontext Dev (Групповой)",
|
||||
"flux_redux_model_example": "Flux Redux Model",
|
||||
"flux_schnell": "Flux Schnell"
|
||||
"flux_schnell": "Flux Schnell fp8",
|
||||
"flux_schnell_full_text_to_image": "Flux Schnell полный текст в изображение"
|
||||
},
|
||||
"Image": {
|
||||
"hidream_e1_full": "HiDream E1 Full",
|
||||
"hidream_i1_dev": "HiDream I1 Dev",
|
||||
"hidream_i1_fast": "HiDream I1 Fast",
|
||||
"hidream_i1_full": "HiDream I1 Full",
|
||||
"image_chroma_text_to_image": "Chroma текст в изображение",
|
||||
"image_cosmos_predict2_2B_t2i": "Cosmos Predict2 2B T2I",
|
||||
"image_lotus_depth_v1_1": "Lotus Depth",
|
||||
"image_omnigen2_image_edit": "OmniGen2 редактирование изображения",
|
||||
"image_omnigen2_t2i": "OmniGen2 текст в изображение",
|
||||
"sd3_5_large_blur": "SD3.5 Большое размытие",
|
||||
"sd3_5_large_canny_controlnet_example": "SD3.5 Большой Canny ControlNet",
|
||||
"sd3_5_large_depth": "SD3.5 Большая глубина",
|
||||
@@ -1259,21 +1280,33 @@
|
||||
"sdxlturbo_example": "SDXL Turbo"
|
||||
},
|
||||
"Image API": {
|
||||
"api-openai-dall-e-2-inpaint": "Dall-E 2: дорисовка",
|
||||
"api-openai-dall-e-2-t2i": "Dall-E 2: текст в изображение",
|
||||
"api-openai-dall-e-3-t2i": "Dall-E 3: текст в изображение",
|
||||
"api_bfl_flux_pro_t2i": "BFL Flux 1.1[pro] Ultra текст в изображение",
|
||||
"api_bfl_flux_1_kontext_max_image": "BFL Flux.1 Kontext Max",
|
||||
"api_bfl_flux_1_kontext_multiple_images_input": "BFL Flux.1 Kontext несколько изображений",
|
||||
"api_bfl_flux_1_kontext_pro_image": "BFL Flux.1 Kontext Pro",
|
||||
"api_bfl_flux_pro_t2i": "BFL Flux[Pro]: текст в изображение",
|
||||
"api_ideogram_v3_t2i": "Ideogram V3: текст в изображение",
|
||||
"api_luma_photon_i2i": "Luma Photon: изображение в изображение",
|
||||
"api_luma_photon_style_ref": "Luma Photon: стиль по образцу",
|
||||
"api_openai_image_1_i2i": "OpenAI Image-1: изображение в изображение",
|
||||
"api_openai_image_1_inpaint": "OpenAI Image-1: дорисовка",
|
||||
"api_openai_image_1_multi_inputs": "OpenAI Image-1: несколько входов",
|
||||
"api_openai_image_1_t2i": "OpenAI Image-1: текст в изображение",
|
||||
"api_openai_dall_e_2_inpaint": "OpenAI: Dall-E 2 дорисовка",
|
||||
"api_openai_dall_e_2_t2i": "OpenAI: Dall-E 2 текст в изображение",
|
||||
"api_openai_dall_e_3_t2i": "OpenAI: Dall-E 3 текст в изображение",
|
||||
"api_openai_image_1_i2i": "OpenAI: GPT-Image-1 изображение в изображение",
|
||||
"api_openai_image_1_inpaint": "OpenAI: GPT-Image-1 дорисовка",
|
||||
"api_openai_image_1_multi_inputs": "OpenAI: GPT-Image-1 несколько входов",
|
||||
"api_openai_image_1_t2i": "OpenAI: GPT-Image-1 текст в изображение",
|
||||
"api_recraft_image_gen_with_color_control": "Recraft: генерация изображения с управлением цветом",
|
||||
"api_recraft_image_gen_with_style_control": "Recraft: генерация изображения с управлением стилем",
|
||||
"api_recraft_vector_gen": "Recraft: генерация векторного изображения",
|
||||
"api_stability_sd3_t2i": "Stability AI Stable Image Ultra текст в изображение"
|
||||
"api_runway_reference_to_image": "Runway: референс в изображение",
|
||||
"api_runway_text_to_image": "Runway: текст в изображение",
|
||||
"api_stability_ai_i2i": "Stability AI: изображение в изображение",
|
||||
"api_stability_ai_sd3_5_i2i": "Stability AI: SD3.5 изображение в изображение",
|
||||
"api_stability_ai_sd3_5_t2i": "Stability AI: SD3.5 текст в изображение",
|
||||
"api_stability_ai_stable_image_ultra_t2i": "Stability AI: Stable Image Ultra текст в изображение"
|
||||
},
|
||||
"LLM API": {
|
||||
"api_google_gemini": "Google Gemini: чат",
|
||||
"api_openai_chat": "OpenAI: чат"
|
||||
},
|
||||
"Upscaling": {
|
||||
"esrgan_example": "ESRGAN",
|
||||
@@ -1282,127 +1315,198 @@
|
||||
"latent_upscale_different_prompt_model": "Latent Upscale Different Prompt Model"
|
||||
},
|
||||
"Video": {
|
||||
"hunyuan_video_text_to_video": "Hunyuan Video Text to Video",
|
||||
"image_to_video": "Изображение в видео",
|
||||
"image_to_video_wan": "Wan 2.1 Изображение в Видео",
|
||||
"ltxv_image_to_video": "LTXV Image to Video",
|
||||
"ltxv_text_to_video": "LTXV Text to Video",
|
||||
"mochi_text_to_video_example": "Mochi Text to Video",
|
||||
"text_to_video_wan": "Wan 2.1 Текст в Видео",
|
||||
"txt_to_image_to_video": "Текст в изображение в видео",
|
||||
"hunyuan_video_text_to_video": "Hunyuan видео текст в видео",
|
||||
"image_to_video": "SVD изображение в видео",
|
||||
"image_to_video_wan": "Wan 2.1 изображение в видео",
|
||||
"ltxv_image_to_video": "LTXV изображение в видео",
|
||||
"ltxv_text_to_video": "LTXV текст в видео",
|
||||
"mochi_text_to_video_example": "Mochi текст в видео",
|
||||
"text_to_video_wan": "Wan 2.1 текст в видео",
|
||||
"txt_to_image_to_video": "SVD текст в изображение в видео",
|
||||
"video_cosmos_predict2_2B_video2world_480p_16fps": "Cosmos Predict2 2B Video2World 480p 16fps",
|
||||
"video_wan2_1_fun_camera_v1_1_14B": "Wan 2.1 Fun Camera 14B",
|
||||
"video_wan2_1_fun_camera_v1_1_1_3B": "Wan 2.1 Fun Camera 1.3B",
|
||||
"video_wan_vace_14B_ref2v": "Wan VACE референс в видео",
|
||||
"video_wan_vace_14B_t2v": "Wan VACE текст в видео",
|
||||
"video_wan_vace_14B_v2v": "Wan VACE контроль видео",
|
||||
"video_wan_vace_flf2v": "Wan VACE первый-последний кадр",
|
||||
"video_wan_vace_inpainting": "Wan VACE Inpainting",
|
||||
"video_wan_vace_outpainting": "Wan VACE Outpainting",
|
||||
"wan2_1_flf2v_720_f16": "Wan 2.1 FLF2V 720p F16",
|
||||
"wan2_1_fun_control": "Wan 2.1 ControlNet",
|
||||
"wan2_1_fun_inp": "Wan 2.1 Inpainting"
|
||||
},
|
||||
"Video API": {
|
||||
"api_hailuo_minimax_i2v": "MiniMax: изображение в видео",
|
||||
"api_hailuo_minimax_t2v": "MiniMax: текст в видео",
|
||||
"api_kling_effects": "Kling: видеоэффекты",
|
||||
"api_kling_flf": "Kling: FLF2V",
|
||||
"api_kling_i2v": "Kling: изображение в видео",
|
||||
"api_luma_i2v": "Luma: изображение в видео",
|
||||
"api_luma_t2v": "Luma: текст в видео",
|
||||
"api_moonvalley_image_to_video": "Moonvalley: изображение в видео",
|
||||
"api_moonvalley_text_to_video": "Moonvalley: текст в видео",
|
||||
"api_pika_i2v": "Pika: изображение в видео",
|
||||
"api_pika_scene": "Pika Scenes: изображения в видео",
|
||||
"api_pixverse_i2v": "PixVerse: изображение в видео",
|
||||
"api_pixverse_t2v": "PixVerse: текст в видео",
|
||||
"api_pixverse_template_i2v": "PixVerse Template Effects: изображение в видео",
|
||||
"api_pixverse_template_i2v": "PixVerse Templates: изображение в видео",
|
||||
"api_runway_first_last_frame": "Runway: первый последний кадр в видео",
|
||||
"api_runway_gen3a_turbo_image_to_video": "Runway: Gen3a Turbo изображение в видео",
|
||||
"api_runway_gen4_turo_image_to_video": "Runway: Gen4 Turbo изображение в видео",
|
||||
"api_veo2_i2v": "Veo2: изображение в видео"
|
||||
}
|
||||
},
|
||||
"templateDescription": {
|
||||
"3D": {
|
||||
"hunyuan-3d-multiview-elf": "Используйте Hunyuan3D 2mv для генерации моделей по нескольким видам.",
|
||||
"hunyuan-3d-turbo": "Используйте Hunyuan3D 2mv turbo для генерации моделей по нескольким видам.",
|
||||
"hunyuan3d-non-multiview-train": "Используйте Hunyuan3D 2.0 для генерации моделей по одному виду.",
|
||||
"stable_zero123_example": "Генерируйте 3D-виды по одному изображению."
|
||||
"3d_hunyuan3d_image_to_model": "Генерируйте 3D-модели по одному изображению с помощью Hunyuan3D 2.0.",
|
||||
"3d_hunyuan3d_multiview_to_model": "Генерируйте 3D-модели по нескольким видам с помощью Hunyuan3D 2.0 MV.",
|
||||
"3d_hunyuan3d_multiview_to_model_turbo": "Генерируйте 3D-модели по нескольким видам с помощью Hunyuan3D 2.0 MV Turbo.",
|
||||
"stable_zero123_example": "Генерируйте 3D-виды по одному изображению с помощью Stable Zero123."
|
||||
},
|
||||
"3D API": {
|
||||
"api_rodin_image_to_model": "Генерируйте детализированные 3D-модели по одной фотографии с помощью Rodin AI.",
|
||||
"api_rodin_multiview_to_model": "Создавайте полные 3D-модели по нескольким видам с помощью Rodin.",
|
||||
"api_tripo_image_to_model": "Генерируйте профессиональные 3D-ассеты по 2D-изображениям с помощью Tripo.",
|
||||
"api_tripo_multiview_to_model": "Создавайте 3D-модели по нескольким ракурсам с помощью продвинутого сканера Tripo.",
|
||||
"api_tripo_text_to_model": "Создавайте 3D-объекты по текстовым описаниям с помощью Tripo."
|
||||
},
|
||||
"Area Composition": {
|
||||
"area_composition": "Управляйте композицией изображения с помощью областей.",
|
||||
"area_composition_reversed": "Обратный рабочий процесс композиции областей.",
|
||||
"area_composition_square_area_for_subject": "Создавайте стабильное размещение объекта."
|
||||
"area_composition": "Генерируйте изображения, управляя композицией с помощью определённых областей.",
|
||||
"area_composition_square_area_for_subject": "Генерируйте изображения с постоянным размещением объекта с помощью композиции областей."
|
||||
},
|
||||
"Audio": {
|
||||
"stable_audio_example": "Генерируйте аудио по текстовым описаниям."
|
||||
"audio_ace_step_1_m2m_editing": "Редактируйте существующие песни, меняя стиль и текст с помощью ACE-Step v1 M2M.",
|
||||
"audio_ace_step_1_t2a_instrumentals": "Генерируйте инструментальную музыку по тексту с помощью ACE-Step v1.",
|
||||
"audio_ace_step_1_t2a_song": "Генерируйте песни с вокалом по тексту с помощью ACE-Step v1, поддержка разных языков и стилей.",
|
||||
"audio_stable_audio_example": "Генерируйте аудио по текстовым описаниям с помощью Stable Audio."
|
||||
},
|
||||
"Basics": {
|
||||
"default": "Генерируйте изображения по текстовым описаниям.",
|
||||
"embedding_example": "Используйте текстовую инверсию для единых стилей.",
|
||||
"gligen_textbox_example": "Указывайте расположение и размер объектов.",
|
||||
"embedding_example": "Генерируйте изображения с помощью текстовой инверсии для единых стилей.",
|
||||
"gligen_textbox_example": "Генерируйте изображения с точным размещением объектов с помощью текстовых блоков.",
|
||||
"image2image": "Преобразуйте существующие изображения с помощью текстовых подсказок.",
|
||||
"inpain_model_outpainting": "Расширяйте изображения за пределы их исходных границ.",
|
||||
"inpaint_example": "Редактируйте отдельные части изображений без швов.",
|
||||
"lora": "Применяйте LoRA-модели для специализированных стилей или объектов.",
|
||||
"lora_multiple": "Комбинируйте несколько LoRA-моделей для уникальных результатов."
|
||||
"inpaint_model_outpainting": "Расширяйте изображения за пределы их исходных границ.",
|
||||
"lora": "Генерируйте изображения с помощью моделей LoRA для специализированных стилей или объектов.",
|
||||
"lora_multiple": "Генерируйте изображения, комбинируя несколько моделей LoRA."
|
||||
},
|
||||
"ControlNet": {
|
||||
"2_pass_pose_worship": "Генерируйте изображения по референсам поз.",
|
||||
"controlnet_example": "Управляйте генерацией изображений с помощью референсных изображений.",
|
||||
"depth_controlnet": "Создавайте изображения с учетом глубины.",
|
||||
"depth_t2i_adapter": "Быстро генерируйте изображения с глубиной с помощью T2I-адаптера.",
|
||||
"mixing_controlnets": "Комбинируйте несколько моделей ControlNet вместе."
|
||||
"2_pass_pose_worship": "Генерируйте изображения по референсам поз с помощью ControlNet.",
|
||||
"controlnet_example": "Генерируйте изображения, используя референсы-каракулы с помощью ControlNet.",
|
||||
"depth_controlnet": "Генерируйте изображения, используя информацию о глубине с помощью ControlNet.",
|
||||
"depth_t2i_adapter": "Генерируйте изображения, используя информацию о глубине с помощью адаптера T2I.",
|
||||
"mixing_controlnets": "Генерируйте изображения, комбинируя несколько моделей ControlNet."
|
||||
},
|
||||
"Flux": {
|
||||
"flux_canny_model_example": "Генерируйте изображения по детекции границ.",
|
||||
"flux_depth_lora_example": "Создавайте изображения с глубиной с помощью LoRA.",
|
||||
"flux_dev_checkpoint_example": "Создавайте изображения с помощью Flux development models.",
|
||||
"flux_fill_inpaint_example": "Заполняйте отсутствующие части изображений.",
|
||||
"flux_fill_outpaint_example": "Расширяйте изображения с помощью Flux outpainting.",
|
||||
"flux_redux_model_example": "Передавайте стиль с референсного изображения для управления генерацией с помощью Flux.",
|
||||
"flux_schnell": "Быстро генерируйте изображения с Flux Schnell."
|
||||
"flux_canny_model_example": "Генерируйте изображения, используя детекцию границ с помощью Flux Canny.",
|
||||
"flux_depth_lora_example": "Генерируйте изображения, используя информацию о глубине с помощью Flux LoRA.",
|
||||
"flux_dev_checkpoint_example": "Генерируйте изображения с помощью Flux Dev fp8 (квантованная версия). Подходит для устройств с ограниченной VRAM, требуется только один файл модели, но качество немного ниже, чем у полной версии.",
|
||||
"flux_dev_full_text_to_image": "Генерируйте высококачественные изображения с помощью полной версии Flux Dev. Требуется больше VRAM и несколько файлов моделей, но обеспечивается лучшее следование подсказкам и качество.",
|
||||
"flux_fill_inpaint_example": "Заполняйте отсутствующие части изображений с помощью Flux inpainting.",
|
||||
"flux_fill_outpaint_example": "Расширяйте изображения за пределы исходных границ с помощью Flux outpainting.",
|
||||
"flux_kontext_dev_basic": "Редактируйте изображения с помощью Flux Kontext (все узлы видимы), идеально для изучения рабочего процесса.",
|
||||
"flux_kontext_dev_grouped": "Упрощённая версия Flux Kontext с группированными узлами для более чистого рабочего пространства.",
|
||||
"flux_redux_model_example": "Генерируйте изображения, перенося стиль с референсных изображений с помощью Flux Redux.",
|
||||
"flux_schnell": "Быстро генерируйте изображения с помощью Flux Schnell fp8 (квантованная версия). Идеально для слабого железа, требуется всего 4 шага.",
|
||||
"flux_schnell_full_text_to_image": "Быстро генерируйте изображения с помощью полной версии Flux Schnell. Лицензия Apache2.0, всего 4 шага при хорошем качестве."
|
||||
},
|
||||
"Image": {
|
||||
"hidream_e1_full": "Редактируйте изображения с HiDream E1.",
|
||||
"hidream_i1_dev": "Генерируйте изображения с HiDream I1 Dev.",
|
||||
"hidream_i1_fast": "Быстро генерируйте изображения с HiDream I1.",
|
||||
"hidream_i1_full": "Генерируйте изображения с HiDream I1.",
|
||||
"sd3_5_large_blur": "Генерируйте изображения по размытым референсам с SD 3.5.",
|
||||
"sd3_5_large_canny_controlnet_example": "Используйте детекцию границ для управления генерацией с SD 3.5.",
|
||||
"sd3_5_large_depth": "Создавайте изображения с глубиной с SD 3.5.",
|
||||
"sd3_5_simple_example": "Генерируйте изображения с SD 3.5.",
|
||||
"sdxl_refiner_prompt_example": "Улучшайте результаты SDXL с помощью refiners.",
|
||||
"sdxl_revision_text_prompts": "Передавайте концепции с референсных изображений для управления генерацией с SDXL.",
|
||||
"sdxl_revision_zero_positive": "Добавляйте текстовые подсказки вместе с референсными изображениями для управления генерацией с SDXL.",
|
||||
"sdxl_simple_example": "Создавайте высококачественные изображения с SDXL.",
|
||||
"sdxlturbo_example": "Генерируйте изображения за один шаг с SDXL Turbo."
|
||||
"hidream_e1_full": "HiDream E1 — профессиональная модель для редактирования изображений на естественном языке.",
|
||||
"hidream_i1_dev": "HiDream I1 Dev — сбалансированная версия (28 шагов), подходит для среднего железа.",
|
||||
"hidream_i1_fast": "HiDream I1 Fast — облегчённая версия (16 шагов), быстрая генерация на слабых ПК.",
|
||||
"hidream_i1_full": "HiDream I1 Full — полная версия (50 шагов), максимальное качество.",
|
||||
"image_chroma_text_to_image": "Chroma — модифицированная версия Flux с изменённой архитектурой.",
|
||||
"image_cosmos_predict2_2B_t2i": "Генерируйте изображения с помощью Cosmos-Predict2 2B T2I — физически точная, высокодетализированная генерация.",
|
||||
"image_lotus_depth_v1_1": "Используйте Lotus Depth в ComfyUI для эффективной и детализированной оценки глубины по одному изображению.",
|
||||
"image_omnigen2_image_edit": "Редактируйте изображения с помощью естественно-языковых инструкций и расширенных возможностей OmniGen2.",
|
||||
"image_omnigen2_t2i": "Генерируйте высококачественные изображения по тексту с помощью унифицированной 7B мультимодальной модели OmniGen2 с двухпутевой архитектурой.",
|
||||
"sd3_5_large_blur": "Генерируйте изображения, используя размытые референсы с помощью SD 3.5.",
|
||||
"sd3_5_large_canny_controlnet_example": "Генерируйте изображения, используя детекцию границ с помощью SD 3.5 Canny ControlNet.",
|
||||
"sd3_5_large_depth": "Генерируйте изображения, используя информацию о глубине с помощью SD 3.5.",
|
||||
"sd3_5_simple_example": "Генерируйте изображения с помощью SD 3.5.",
|
||||
"sdxl_refiner_prompt_example": "Улучшайте изображения SDXL с помощью моделей-улучшателей (refiner).",
|
||||
"sdxl_revision_text_prompts": "Передавайте концепции с референсных изображений для генерации с помощью SDXL Revision.",
|
||||
"sdxl_revision_zero_positive": "Генерируйте изображения, используя текстовые подсказки и референсы с помощью SDXL Revision.",
|
||||
"sdxl_simple_example": "Генерируйте высококачественные изображения с помощью SDXL.",
|
||||
"sdxlturbo_example": "Генерируйте изображения за один шаг с помощью SDXL Turbo."
|
||||
},
|
||||
"Image API": {
|
||||
"api-openai-dall-e-2-inpaint": "Используйте Dall-E 2 API для инпейнта изображений.",
|
||||
"api-openai-dall-e-2-t2i": "Используйте Dall-E 2 API для генерации изображений по текстовым описаниям.",
|
||||
"api-openai-dall-e-3-t2i": "Используйте Dall-E 3 API для генерации изображений по текстовым описаниям.",
|
||||
"api_bfl_flux_pro_t2i": "Создавайте изображения с помощью FLUX.1 [pro] с отличным следованием подсказкам, высоким качеством, детализацией и разнообразием.",
|
||||
"api_ideogram_v3_t2i": "Генерируйте изображения с высоким соответствием подсказкам, фотореализмом и рендерингом текста. Создавайте профессиональные логотипы, промо-постеры, концепты лендингов, продуктовые фото и многое другое. Легко создавайте сложные пространственные композиции с детализированным фоном, точным освещением и реалистичной средой.",
|
||||
"api_bfl_flux_1_kontext_max_image": "Редактируйте изображения с помощью Flux.1 Kontext max image.",
|
||||
"api_bfl_flux_1_kontext_multiple_images_input": "Вводите несколько изображений и редактируйте их с помощью Flux.1 Kontext.",
|
||||
"api_bfl_flux_1_kontext_pro_image": "Редактируйте изображения с помощью Flux.1 Kontext pro image.",
|
||||
"api_bfl_flux_pro_t2i": "Генерируйте изображения с отличным следованием подсказкам и качеством с помощью FLUX.1 Pro.",
|
||||
"api_ideogram_v3_t2i": "Генерируйте профессиональные изображения с отличным соответствием подсказкам, фотореализмом и рендерингом текста с помощью Ideogram V3.",
|
||||
"api_luma_photon_i2i": "Управляйте генерацией изображений с помощью комбинации изображений и подсказки.",
|
||||
"api_luma_photon_style_ref": "Применяйте и смешивайте стили с точным контролем. Luma Photon захватывает суть каждого референса, позволяя комбинировать уникальные визуальные элементы с профессиональным качеством.",
|
||||
"api_openai_image_1_i2i": "Используйте GPT Image 1 API для генерации изображений по изображениям.",
|
||||
"api_openai_image_1_inpaint": "Используйте GPT Image 1 API для инпейнта изображений.",
|
||||
"api_openai_image_1_multi_inputs": "Используйте GPT Image 1 API с несколькими входами для генерации изображений.",
|
||||
"api_openai_image_1_t2i": "Используйте GPT Image 1 API для генерации изображений по текстовым описаниям.",
|
||||
"api_recraft_image_gen_with_color_control": "Создайте собственную палитру для повторного использования или подберите цвета для каждого фото. Совместите фирменную палитру и создайте уникальные визуалы.",
|
||||
"api_recraft_image_gen_with_style_control": "Контролируйте стиль с помощью визуальных примеров, выравнивайте объекты и настраивайте детали. Сохраняйте и делитесь стилями для идеального брендирования.",
|
||||
"api_recraft_vector_gen": "Преобразуйте текстовую подсказку в векторное изображение с помощью AI-генератора Recraft. Создавайте лучшие векторные арты для логотипов, постеров, иконок, баннеров и мокапов. Дорабатывайте дизайн с помощью качественных SVG-файлов. Создавайте фирменные векторные иллюстрации для приложений и сайтов за секунды.",
|
||||
"api_stability_sd3_t2i": "Генерируйте высококачественные изображения с отличным следованием подсказкам. Идеально для профессионального использования при разрешении 1 мегапиксель."
|
||||
"api_luma_photon_style_ref": "Генерируйте изображения, смешивая стили с точным контролем с помощью Luma Photon.",
|
||||
"api_openai_dall_e_2_inpaint": "Редактируйте изображения с помощью инпейнта в OpenAI Dall-E 2 API.",
|
||||
"api_openai_dall_e_2_t2i": "Генерируйте изображения по тексту с помощью OpenAI Dall-E 2 API.",
|
||||
"api_openai_dall_e_3_t2i": "Генерируйте изображения по тексту с помощью OpenAI Dall-E 3 API.",
|
||||
"api_openai_image_1_i2i": "Генерируйте изображения по изображениям с помощью OpenAI GPT Image 1 API.",
|
||||
"api_openai_image_1_inpaint": "Редактируйте изображения с помощью инпейнта в OpenAI GPT Image 1 API.",
|
||||
"api_openai_image_1_multi_inputs": "Генерируйте изображения по нескольким входам с помощью OpenAI GPT Image 1 API.",
|
||||
"api_openai_image_1_t2i": "Генерируйте изображения по тексту с помощью OpenAI GPT Image 1 API.",
|
||||
"api_recraft_image_gen_with_color_control": "Генерируйте изображения с пользовательскими палитрами и фирменными визуалами с помощью Recraft.",
|
||||
"api_recraft_image_gen_with_style_control": "Контролируйте стиль с помощью визуальных примеров, выравнивайте объекты и настраивайте детали. Сохраняйте и делитесь стилями для брендовой целостности.",
|
||||
"api_recraft_vector_gen": "Генерируйте векторные изображения высокого качества по тексту с помощью AI-генератора Recraft.",
|
||||
"api_runway_reference_to_image": "Генерируйте новые изображения на основе стиля и композиции референса с помощью Runway.",
|
||||
"api_runway_text_to_image": "Генерируйте изображения высокого качества по тексту с помощью модели Runway AI.",
|
||||
"api_stability_ai_i2i": "Преобразуйте изображения с высоким качеством с помощью Stability AI, идеально для профессионального редактирования и передачи стиля.",
|
||||
"api_stability_ai_sd3_5_i2i": "Генерируйте изображения высокого качества с отличным следованием подсказкам. Идеально для профессионального использования при разрешении 1 мегапиксель.",
|
||||
"api_stability_ai_sd3_5_t2i": "Генерируйте изображения высокого качества с отличным следованием подсказкам. Идеально для профессионального использования при разрешении 1 мегапиксель.",
|
||||
"api_stability_ai_stable_image_ultra_t2i": "Генерируйте изображения высокого качества с отличным следованием подсказкам. Идеально для профессионального использования при разрешении 1 мегапиксель."
|
||||
},
|
||||
"LLM API": {
|
||||
"api_google_gemini": "Испытайте мультимодальный ИИ Google с возможностями рассуждения Gemini.",
|
||||
"api_openai_chat": "Общайтесь с продвинутыми языковыми моделями OpenAI для интеллектуальных диалогов."
|
||||
},
|
||||
"Upscaling": {
|
||||
"esrgan_example": "Используйте модели апскейлинга для повышения качества изображений.",
|
||||
"hiresfix_esrgan_workflow": "Используйте модели апскейлинга на промежуточных этапах.",
|
||||
"hiresfix_latent_workflow": "Улучшайте качество изображений в latent space.",
|
||||
"latent_upscale_different_prompt_model": "Увеличивайте и меняйте подсказку на разных проходах."
|
||||
"esrgan_example": "Увеличивайте изображения с помощью моделей ESRGAN для повышения качества.",
|
||||
"hiresfix_esrgan_workflow": "Увеличивайте изображения с помощью моделей ESRGAN на промежуточных этапах.",
|
||||
"hiresfix_latent_workflow": "Увеличивайте изображения, улучшая качество в латентном пространстве.",
|
||||
"latent_upscale_different_prompt_model": "Увеличивайте изображения, меняя подсказки между проходами."
|
||||
},
|
||||
"Video": {
|
||||
"hunyuan_video_text_to_video": "Генерируйте видео с помощью модели Hunyuan.",
|
||||
"image_to_video": "Преобразуйте изображения в анимированные видео.",
|
||||
"image_to_video_wan": "Быстро генерируйте видео из изображений.",
|
||||
"ltxv_image_to_video": "Преобразуйте статичные изображения в видео.",
|
||||
"ltxv_text_to_video": "Генерируйте видео по текстовым описаниям.",
|
||||
"mochi_text_to_video_example": "Создавайте видео с помощью модели Mochi.",
|
||||
"text_to_video_wan": "Быстро генерируйте видео по текстовым описаниям.",
|
||||
"txt_to_image_to_video": "Генерируйте изображения по тексту, а затем преобразуйте их в видео.",
|
||||
"wan2_1_flf2v_720_f16": "Генерируйте видео, контролируя первый и последний кадры.",
|
||||
"wan2_1_fun_control": "Управляйте генерацией видео с помощью позы, глубины, границ и других параметров.",
|
||||
"wan2_1_fun_inp": "Создавайте видео по начальному и конечному кадрам."
|
||||
"hunyuan_video_text_to_video": "Генерируйте видео по тексту с помощью модели Hunyuan.",
|
||||
"image_to_video": "Генерируйте видео по статичным изображениям.",
|
||||
"image_to_video_wan": "Генерируйте видео по изображениям с помощью Wan 2.1.",
|
||||
"ltxv_image_to_video": "Генерируйте видео по статичным изображениям.",
|
||||
"ltxv_text_to_video": "Генерируйте видео по тексту.",
|
||||
"mochi_text_to_video_example": "Генерируйте видео по тексту с помощью модели Mochi.",
|
||||
"text_to_video_wan": "Генерируйте видео по тексту с помощью Wan 2.1.",
|
||||
"txt_to_image_to_video": "Сначала создайте изображения по тексту, затем преобразуйте их в видео.",
|
||||
"video_cosmos_predict2_2B_video2world_480p_16fps": "Генерируйте видео с помощью Cosmos-Predict2 2B Video2World — физически точные, высокодетализированные и последовательные симуляции.",
|
||||
"video_wan2_1_fun_camera_v1_1_14B": "Генерируйте видео высокого качества с расширенным управлением камерой с помощью полной модели 14B.",
|
||||
"video_wan2_1_fun_camera_v1_1_1_3B": "Генерируйте динамичные видео с кинематографическим движением камеры с помощью Wan 2.1 Fun Camera 1.3B.",
|
||||
"video_wan_vace_14B_ref2v": "Создавайте видео, соответствующие стилю и содержанию референсного изображения.",
|
||||
"video_wan_vace_14B_t2v": "Преобразуйте текстовые описания в видео высокого качества. Поддержка 480p и 720p с моделью VACE-14B.",
|
||||
"video_wan_vace_14B_v2v": "Генерируйте видео, управляя входными видео и референсами с помощью Wan VACE.",
|
||||
"video_wan_vace_flf2v": "Создавайте плавные переходы, задавая начальный и конечный кадры. Поддержка пользовательских последовательностей ключевых кадров.",
|
||||
"video_wan_vace_inpainting": "Редактируйте отдельные области видео, сохраняя окружающее содержимое.",
|
||||
"video_wan_vace_outpainting": "Генерируйте расширенные видео, увеличивая размер с помощью Wan VACE outpainting.",
|
||||
"wan2_1_flf2v_720_f16": "Генерируйте видео, контролируя первый и последний кадры с помощью Wan 2.1 FLF2V.",
|
||||
"wan2_1_fun_control": "Генерируйте видео, управляя позой, глубиной и границами с помощью Wan 2.1 ControlNet.",
|
||||
"wan2_1_fun_inp": "Генерируйте видео по начальному и конечному кадрам с помощью Wan 2.1 inpainting."
|
||||
},
|
||||
"Video API": {
|
||||
"api_hailuo_minimax_i2v": "Создавайте изысканные видео из изображений и текста, включая CGI и трендовые эффекты, такие как viral AI hugging. Выбирайте стили и темы для вашего креатива.",
|
||||
"api_kling_i2v": "Создавайте видео с отличным следованием подсказкам для действий, эмоций и движений камеры. Теперь поддерживаются сложные подсказки с последовательными действиями — вы режиссёр своей сцены.",
|
||||
"api_hailuo_minimax_i2v": "Генерируйте изысканные видео по изображениям и тексту с CGI-интеграцией с помощью MiniMax.",
|
||||
"api_hailuo_minimax_t2v": "Генерируйте высококачественные видео напрямую по тексту. Используйте возможности MiniMax для создания разнообразных визуальных историй с профессиональными CGI-эффектами.",
|
||||
"api_kling_effects": "Генерируйте динамичные видео, применяя визуальные эффекты к изображениям с помощью Kling.",
|
||||
"api_kling_flf": "Генерируйте видео, контролируя первый и последний кадры.",
|
||||
"api_kling_i2v": "Генерируйте видео с отличным следованием подсказкам для действий, эмоций и движений камеры с помощью Kling.",
|
||||
"api_luma_i2v": "Преобразуйте статичные изображения в волшебные анимации высокого качества.",
|
||||
"api_pika_scene": "Используйте несколько изображений как ингредиенты и генерируйте видео, включающие их все.",
|
||||
"api_luma_t2v": "Генерируйте высококачественные видео по простым подсказкам.",
|
||||
"api_moonvalley_image_to_video": "Генерируйте кинематографические видео 1080p по изображению с помощью модели, обученной только на лицензированных данных.",
|
||||
"api_moonvalley_text_to_video": "Генерируйте кинематографические видео 1080p по тексту с помощью модели, обученной только на лицензированных данных.",
|
||||
"api_pika_i2v": "Генерируйте плавные анимированные видео по одному изображению с помощью Pika AI.",
|
||||
"api_pika_scene": "Генерируйте видео, включающие несколько входных изображений, с помощью Pika Scenes.",
|
||||
"api_pixverse_i2v": "Генерируйте динамичные видео по статичным изображениям с движением и эффектами с помощью PixVerse.",
|
||||
"api_pixverse_t2v": "Генерируйте видео с точной интерпретацией подсказок и впечатляющей динамикой.",
|
||||
"api_pixverse_template_i2v": "Преобразует статичные изображения в динамичные видео с движением и эффектами.",
|
||||
"api_veo2_i2v": "Используйте Google Veo2 API для генерации видео из изображений."
|
||||
"api_pixverse_template_i2v": "Генерируйте динамичные видео по статичным изображениям с движением и эффектами с помощью PixVerse.",
|
||||
"api_runway_first_last_frame": "Генерируйте плавные переходы между двумя ключевыми кадрами с помощью Runway.",
|
||||
"api_runway_gen3a_turbo_image_to_video": "Генерируйте кинематографические видео по статичным изображениям с помощью Runway Gen3a Turbo.",
|
||||
"api_runway_gen4_turo_image_to_video": "Генерируйте динамичные видео по изображениям с помощью Runway Gen4 Turbo.",
|
||||
"api_veo2_i2v": "Генерируйте видео по изображениям с помощью Google Veo2 API."
|
||||
}
|
||||
},
|
||||
"title": "Начните с шаблона"
|
||||
@@ -1483,7 +1587,8 @@
|
||||
"title": "Добро пожаловать в ComfyUI"
|
||||
},
|
||||
"whatsNewPopup": {
|
||||
"learnMore": "Узнать больше"
|
||||
"learnMore": "Узнать больше",
|
||||
"noReleaseNotes": "Нет доступных примечаний к выпуску."
|
||||
},
|
||||
"workflowService": {
|
||||
"enterFilename": "Введите название файла",
|
||||
|
||||
@@ -259,6 +259,10 @@
|
||||
"name": "Подсветка ноды при привязке",
|
||||
"tooltip": "При перетаскивании ссылки над нодой с подходящим входным слотом, нода подсвечивается"
|
||||
},
|
||||
"Comfy_Notification_ShowVersionUpdates": {
|
||||
"name": "Показывать обновления версий",
|
||||
"tooltip": "Показывать обновления новых моделей и основные новые функции."
|
||||
},
|
||||
"Comfy_Pointer_ClickBufferTime": {
|
||||
"name": "Задержка дрейфа щелчка указателя",
|
||||
"tooltip": "После нажатия кнопки указателя, это максимальное время (в миллисекундах), в течение которого движение указателя может быть проигнорировано.\n\nПомогает предотвратить непреднамеренное смещение объектов, если указатель перемещается во время щелчка."
|
||||
@@ -322,7 +326,8 @@
|
||||
"Bottom": "Внизу",
|
||||
"Disabled": "Отключено",
|
||||
"Top": "Вверху"
|
||||
}
|
||||
},
|
||||
"tooltip": "Расположение панели меню. На мобильных устройствах меню всегда отображается вверху."
|
||||
},
|
||||
"Comfy_Validation_NodeDefs": {
|
||||
"name": "Проверка определений нод (медленно)",
|
||||
|
||||
249
src/locales/zh-TW/commands.json
Normal file
@@ -0,0 +1,249 @@
|
||||
{
|
||||
"Comfy-Desktop_CheckForUpdates": {
|
||||
"label": "檢查更新"
|
||||
},
|
||||
"Comfy-Desktop_Folders_OpenCustomNodesFolder": {
|
||||
"label": "開啟自訂節點資料夾"
|
||||
},
|
||||
"Comfy-Desktop_Folders_OpenInputsFolder": {
|
||||
"label": "開啟輸入資料夾"
|
||||
},
|
||||
"Comfy-Desktop_Folders_OpenLogsFolder": {
|
||||
"label": "開啟日誌資料夾"
|
||||
},
|
||||
"Comfy-Desktop_Folders_OpenModelConfig": {
|
||||
"label": "開啟 extra_model_paths.yaml"
|
||||
},
|
||||
"Comfy-Desktop_Folders_OpenModelsFolder": {
|
||||
"label": "開啟模型資料夾"
|
||||
},
|
||||
"Comfy-Desktop_Folders_OpenOutputsFolder": {
|
||||
"label": "開啟輸出資料夾"
|
||||
},
|
||||
"Comfy-Desktop_OpenDevTools": {
|
||||
"label": "開啟開發者工具"
|
||||
},
|
||||
"Comfy-Desktop_OpenUserGuide": {
|
||||
"label": "桌面版使用指南"
|
||||
},
|
||||
"Comfy-Desktop_Quit": {
|
||||
"label": "退出"
|
||||
},
|
||||
"Comfy-Desktop_Reinstall": {
|
||||
"label": "重新安裝"
|
||||
},
|
||||
"Comfy-Desktop_Restart": {
|
||||
"label": "重新啟動"
|
||||
},
|
||||
"Comfy_BrowseTemplates": {
|
||||
"label": "瀏覽範本"
|
||||
},
|
||||
"Comfy_Canvas_AddEditModelStep": {
|
||||
"label": "新增編輯模型步驟"
|
||||
},
|
||||
"Comfy_Canvas_DeleteSelectedItems": {
|
||||
"label": "刪除選取項目"
|
||||
},
|
||||
"Comfy_Canvas_FitView": {
|
||||
"label": "將視圖適應至所選節點"
|
||||
},
|
||||
"Comfy_Canvas_MoveSelectedNodes_Down": {
|
||||
"label": "將選取的節點下移"
|
||||
},
|
||||
"Comfy_Canvas_MoveSelectedNodes_Left": {
|
||||
"label": "左移選取的節點"
|
||||
},
|
||||
"Comfy_Canvas_MoveSelectedNodes_Right": {
|
||||
"label": "右移選取的節點"
|
||||
},
|
||||
"Comfy_Canvas_MoveSelectedNodes_Up": {
|
||||
"label": "上移選取的節點"
|
||||
},
|
||||
"Comfy_Canvas_ResetView": {
|
||||
"label": "重設視圖"
|
||||
},
|
||||
"Comfy_Canvas_Resize": {
|
||||
"label": "調整所選節點大小"
|
||||
},
|
||||
"Comfy_Canvas_ToggleLinkVisibility": {
|
||||
"label": "畫布切換連結可見性"
|
||||
},
|
||||
"Comfy_Canvas_ToggleLock": {
|
||||
"label": "畫布切換鎖定"
|
||||
},
|
||||
"Comfy_Canvas_ToggleSelectedNodes_Bypass": {
|
||||
"label": "略過/取消略過選取的節點"
|
||||
},
|
||||
"Comfy_Canvas_ToggleSelectedNodes_Collapse": {
|
||||
"label": "收合/展開選取的節點"
|
||||
},
|
||||
"Comfy_Canvas_ToggleSelectedNodes_Mute": {
|
||||
"label": "停用/啟用選取的節點"
|
||||
},
|
||||
"Comfy_Canvas_ToggleSelectedNodes_Pin": {
|
||||
"label": "釘選/取消釘選已選取的節點"
|
||||
},
|
||||
"Comfy_Canvas_ToggleSelected_Pin": {
|
||||
"label": "釘選/取消釘選已選項目"
|
||||
},
|
||||
"Comfy_Canvas_ZoomIn": {
|
||||
"label": "放大"
|
||||
},
|
||||
"Comfy_Canvas_ZoomOut": {
|
||||
"label": "縮小"
|
||||
},
|
||||
"Comfy_ClearPendingTasks": {
|
||||
"label": "清除待處理任務"
|
||||
},
|
||||
"Comfy_ClearWorkflow": {
|
||||
"label": "清除工作流程"
|
||||
},
|
||||
"Comfy_ContactSupport": {
|
||||
"label": "聯絡支援"
|
||||
},
|
||||
"Comfy_DuplicateWorkflow": {
|
||||
"label": "複製目前工作流程"
|
||||
},
|
||||
"Comfy_ExportWorkflow": {
|
||||
"label": "匯出工作流程"
|
||||
},
|
||||
"Comfy_ExportWorkflowAPI": {
|
||||
"label": "匯出工作流程(API 格式)"
|
||||
},
|
||||
"Comfy_Feedback": {
|
||||
"label": "提供回饋"
|
||||
},
|
||||
"Comfy_Graph_ConvertToSubgraph": {
|
||||
"label": "將選取內容轉換為子圖"
|
||||
},
|
||||
"Comfy_Graph_FitGroupToContents": {
|
||||
"label": "調整群組以符合內容"
|
||||
},
|
||||
"Comfy_Graph_GroupSelectedNodes": {
|
||||
"label": "群組所選節點"
|
||||
},
|
||||
"Comfy_GroupNode_ConvertSelectedNodesToGroupNode": {
|
||||
"label": "將選取的節點轉換為群組節點"
|
||||
},
|
||||
"Comfy_GroupNode_ManageGroupNodes": {
|
||||
"label": "管理群組節點"
|
||||
},
|
||||
"Comfy_GroupNode_UngroupSelectedGroupNodes": {
|
||||
"label": "取消群組所選群組節點"
|
||||
},
|
||||
"Comfy_Help_AboutComfyUI": {
|
||||
"label": "開啟關於 ComfyUI"
|
||||
},
|
||||
"Comfy_Help_OpenComfyOrgDiscord": {
|
||||
"label": "開啟 Comfy-Org Discord"
|
||||
},
|
||||
"Comfy_Help_OpenComfyUIDocs": {
|
||||
"label": "開啟 ComfyUI 文件"
|
||||
},
|
||||
"Comfy_Help_OpenComfyUIForum": {
|
||||
"label": "開啟 ComfyUI 論壇"
|
||||
},
|
||||
"Comfy_Help_OpenComfyUIIssues": {
|
||||
"label": "開啟 ComfyUI 問題追蹤"
|
||||
},
|
||||
"Comfy_Interrupt": {
|
||||
"label": "中斷"
|
||||
},
|
||||
"Comfy_LoadDefaultWorkflow": {
|
||||
"label": "載入預設工作流程"
|
||||
},
|
||||
"Comfy_Manager_CustomNodesManager": {
|
||||
"label": "切換自訂節點管理器"
|
||||
},
|
||||
"Comfy_Manager_ToggleManagerProgressDialog": {
|
||||
"label": "切換自訂節點管理器進度條"
|
||||
},
|
||||
"Comfy_MaskEditor_OpenMaskEditor": {
|
||||
"label": "為選取的節點開啟 Mask 編輯器"
|
||||
},
|
||||
"Comfy_NewBlankWorkflow": {
|
||||
"label": "新增空白工作流程"
|
||||
},
|
||||
"Comfy_OpenClipspace": {
|
||||
"label": "Clipspace"
|
||||
},
|
||||
"Comfy_OpenWorkflow": {
|
||||
"label": "開啟工作流程"
|
||||
},
|
||||
"Comfy_QueuePrompt": {
|
||||
"label": "將提示詞加入佇列"
|
||||
},
|
||||
"Comfy_QueuePromptFront": {
|
||||
"label": "將提示詞加入佇列前方"
|
||||
},
|
||||
"Comfy_QueueSelectedOutputNodes": {
|
||||
"label": "佇列所選的輸出節點"
|
||||
},
|
||||
"Comfy_Redo": {
|
||||
"label": "重做"
|
||||
},
|
||||
"Comfy_RefreshNodeDefinitions": {
|
||||
"label": "重新整理節點定義"
|
||||
},
|
||||
"Comfy_SaveWorkflow": {
|
||||
"label": "儲存工作流程"
|
||||
},
|
||||
"Comfy_SaveWorkflowAs": {
|
||||
"label": "另存工作流程"
|
||||
},
|
||||
"Comfy_ShowSettingsDialog": {
|
||||
"label": "顯示設定對話框"
|
||||
},
|
||||
"Comfy_ToggleTheme": {
|
||||
"label": "切換主題(深色/淺色)"
|
||||
},
|
||||
"Comfy_Undo": {
|
||||
"label": "復原"
|
||||
},
|
||||
"Comfy_User_OpenSignInDialog": {
|
||||
"label": "開啟登入對話框"
|
||||
},
|
||||
"Comfy_User_SignOut": {
|
||||
"label": "登出"
|
||||
},
|
||||
"Workspace_CloseWorkflow": {
|
||||
"label": "關閉當前工作流程"
|
||||
},
|
||||
"Workspace_NextOpenedWorkflow": {
|
||||
"label": "下一個已開啟的工作流程"
|
||||
},
|
||||
"Workspace_PreviousOpenedWorkflow": {
|
||||
"label": "上次開啟的工作流程"
|
||||
},
|
||||
"Workspace_SearchBox_Toggle": {
|
||||
"label": "切換搜尋框"
|
||||
},
|
||||
"Workspace_ToggleBottomPanel": {
|
||||
"label": "切換下方面板"
|
||||
},
|
||||
"Workspace_ToggleBottomPanelTab_command-terminal": {
|
||||
"label": "切換終端機底部面板"
|
||||
},
|
||||
"Workspace_ToggleBottomPanelTab_logs-terminal": {
|
||||
"label": "切換日誌底部面板"
|
||||
},
|
||||
"Workspace_ToggleFocusMode": {
|
||||
"label": "切換專注模式"
|
||||
},
|
||||
"Workspace_ToggleSidebarTab_model-library": {
|
||||
"label": "切換模型庫側邊欄",
|
||||
"tooltip": "模型庫"
|
||||
},
|
||||
"Workspace_ToggleSidebarTab_node-library": {
|
||||
"label": "切換節點庫側邊欄",
|
||||
"tooltip": "節點庫"
|
||||
},
|
||||
"Workspace_ToggleSidebarTab_queue": {
|
||||
"label": "切換佇列側邊欄",
|
||||
"tooltip": "佇列"
|
||||
},
|
||||
"Workspace_ToggleSidebarTab_workflows": {
|
||||
"label": "切換工作流程側邊欄",
|
||||
"tooltip": "工作流程"
|
||||
}
|
||||
}
|
||||
1598
src/locales/zh-TW/main.json
Normal file
8660
src/locales/zh-TW/nodeDefs.json
Normal file
413
src/locales/zh-TW/settings.json
Normal file
@@ -0,0 +1,413 @@
|
||||
{
|
||||
"Comfy-Desktop_AutoUpdate": {
|
||||
"name": "自動檢查更新"
|
||||
},
|
||||
"Comfy-Desktop_SendStatistics": {
|
||||
"name": "傳送匿名使用統計資料"
|
||||
},
|
||||
"Comfy-Desktop_UV_PypiInstallMirror": {
|
||||
"name": "PyPI 安裝鏡像站",
|
||||
"tooltip": "預設 pip 安裝鏡像站"
|
||||
},
|
||||
"Comfy-Desktop_UV_PythonInstallMirror": {
|
||||
"name": "Python 安裝鏡像站",
|
||||
"tooltip": "受管理的 Python 安裝檔會從 Astral 的 python-build-standalone 專案下載。這個變數可以設定為鏡像站的 URL,以便從不同來源下載 Python 安裝檔。所提供的 URL 會取代 https://github.com/astral-sh/python-build-standalone/releases/download,例如:https://github.com/astral-sh/python-build-standalone/releases/download/20240713/cpython-3.12.4%2B20240713-aarch64-apple-darwin-install_only.tar.gz。若要從本機目錄讀取發行版本,請使用 file:// URL 格式。"
|
||||
},
|
||||
"Comfy-Desktop_UV_TorchInstallMirror": {
|
||||
"name": "Torch 安裝鏡像站",
|
||||
"tooltip": "PyTorch 的 pip 安裝鏡像站"
|
||||
},
|
||||
"Comfy-Desktop_WindowStyle": {
|
||||
"name": "視窗樣式",
|
||||
"options": {
|
||||
"custom": "自訂",
|
||||
"default": "預設"
|
||||
},
|
||||
"tooltip": "自訂:以 ComfyUI 的頂部選單取代系統標題列"
|
||||
},
|
||||
"Comfy_Canvas_BackgroundImage": {
|
||||
"name": "畫布背景圖片",
|
||||
"tooltip": "畫布背景的圖片網址。你可以在輸出面板中右鍵點擊圖片並選擇「設為背景」來使用,或是使用上傳按鈕上傳你自己的圖片。"
|
||||
},
|
||||
"Comfy_Canvas_SelectionToolbox": {
|
||||
"name": "顯示選取工具箱"
|
||||
},
|
||||
"Comfy_ConfirmClear": {
|
||||
"name": "清除工作流程時需要確認"
|
||||
},
|
||||
"Comfy_DOMClippingEnabled": {
|
||||
"name": "啟用 DOM 元素裁剪(啟用後可能會降低效能)"
|
||||
},
|
||||
"Comfy_DevMode": {
|
||||
"name": "啟用開發者模式選項(API 儲存等)"
|
||||
},
|
||||
"Comfy_DisableFloatRounding": {
|
||||
"name": "停用預設浮點數元件四捨五入。",
|
||||
"tooltip": "(需重新載入頁面)當後端節點已設定四捨五入時,無法停用四捨五入。"
|
||||
},
|
||||
"Comfy_DisableSliders": {
|
||||
"name": "停用節點元件滑桿"
|
||||
},
|
||||
"Comfy_EditAttention_Delta": {
|
||||
"name": "Ctrl+上/下 精確調整"
|
||||
},
|
||||
"Comfy_EnableTooltips": {
|
||||
"name": "啟用工具提示"
|
||||
},
|
||||
"Comfy_EnableWorkflowViewRestore": {
|
||||
"name": "在工作流程中儲存並還原畫布位置與縮放等級"
|
||||
},
|
||||
"Comfy_FloatRoundingPrecision": {
|
||||
"name": "浮點元件小數點位數 [0 = 自動]。",
|
||||
"tooltip": "(需重新載入頁面)"
|
||||
},
|
||||
"Comfy_Graph_CanvasInfo": {
|
||||
"name": "在左下角顯示畫布資訊(fps 等)"
|
||||
},
|
||||
"Comfy_Graph_CanvasMenu": {
|
||||
"name": "顯示圖形畫布選單"
|
||||
},
|
||||
"Comfy_Graph_CtrlShiftZoom": {
|
||||
"name": "啟用快速縮放快捷鍵(Ctrl + Shift + 拖曳)"
|
||||
},
|
||||
"Comfy_Graph_LinkMarkers": {
|
||||
"name": "連結中點標記",
|
||||
"options": {
|
||||
"Arrow": "箭頭",
|
||||
"Circle": "圓圈",
|
||||
"None": "無"
|
||||
}
|
||||
},
|
||||
"Comfy_Graph_ZoomSpeed": {
|
||||
"name": "畫布縮放速度"
|
||||
},
|
||||
"Comfy_GroupSelectedNodes_Padding": {
|
||||
"name": "群組所選節點間距"
|
||||
},
|
||||
"Comfy_Group_DoubleClickTitleToEdit": {
|
||||
"name": "雙擊群組標題以編輯"
|
||||
},
|
||||
"Comfy_LinkRelease_Action": {
|
||||
"name": "釋放連結時的動作(無修飾鍵)",
|
||||
"options": {
|
||||
"context menu": "右鍵選單",
|
||||
"no action": "無動作",
|
||||
"search box": "搜尋框"
|
||||
}
|
||||
},
|
||||
"Comfy_LinkRelease_ActionShift": {
|
||||
"name": "連結釋放時的動作(Shift)",
|
||||
"options": {
|
||||
"context menu": "右鍵選單",
|
||||
"no action": "無動作",
|
||||
"search box": "搜尋框"
|
||||
}
|
||||
},
|
||||
"Comfy_LinkRenderMode": {
|
||||
"name": "連結渲染模式",
|
||||
"options": {
|
||||
"Hidden": "隱藏",
|
||||
"Linear": "線性",
|
||||
"Spline": "曲線",
|
||||
"Straight": "直線"
|
||||
}
|
||||
},
|
||||
"Comfy_Load3D_BackgroundColor": {
|
||||
"name": "初始背景顏色",
|
||||
"tooltip": "控制 3D 場景的預設背景顏色。此設定決定新建立 3D 元件時的背景外觀,但每個元件在建立後都可單獨調整。"
|
||||
},
|
||||
"Comfy_Load3D_CameraType": {
|
||||
"name": "初始相機類型",
|
||||
"options": {
|
||||
"orthographic": "正交",
|
||||
"perspective": "透視"
|
||||
},
|
||||
"tooltip": "控制新建 3D 元件時,相機預設為透視或正交。此預設值在建立後仍可針對每個元件單獨切換。"
|
||||
},
|
||||
"Comfy_Load3D_LightAdjustmentIncrement": {
|
||||
"name": "燈光調整增量",
|
||||
"tooltip": "控制在 3D 場景中調整燈光強度時的增量大小。較小的步進值可讓您更細緻地調整燈光,較大的值則每次調整會有更明顯的變化。"
|
||||
},
|
||||
"Comfy_Load3D_LightIntensity": {
|
||||
"name": "初始光源強度",
|
||||
"tooltip": "設定 3D 場景中燈光的預設亮度等級。此數值決定新建立 3D 元件時燈光照亮物體的強度,但每個元件在建立後都可以個別調整。"
|
||||
},
|
||||
"Comfy_Load3D_LightIntensityMaximum": {
|
||||
"name": "最大光照強度",
|
||||
"tooltip": "設定 3D 場景中允許的最大光照強度值。這會定義在調整任何 3D 小工具照明時可設定的最高亮度上限。"
|
||||
},
|
||||
"Comfy_Load3D_LightIntensityMinimum": {
|
||||
"name": "光源強度下限",
|
||||
"tooltip": "設定 3D 場景中允許的最小光源強度值。這會定義在調整任何 3D 控制元件照明時可設定的最低亮度限制。"
|
||||
},
|
||||
"Comfy_Load3D_ShowGrid": {
|
||||
"name": "初始網格可見性",
|
||||
"tooltip": "控制在建立新的 3D 元件時,網格是否預設可見。此預設值在建立後仍可針對每個元件單獨切換。"
|
||||
},
|
||||
"Comfy_Load3D_ShowPreview": {
|
||||
"name": "初始預覽可見性",
|
||||
"tooltip": "控制當新建 3D 元件時,預覽畫面預設是否顯示。此預設值在元件建立後仍可針對每個元件單獨切換。"
|
||||
},
|
||||
"Comfy_Locale": {
|
||||
"name": "語言"
|
||||
},
|
||||
"Comfy_MaskEditor_BrushAdjustmentSpeed": {
|
||||
"name": "筆刷調整速度倍數",
|
||||
"tooltip": "控制調整筆刷大小與硬度時的變化速度。數值越高,變化越快。"
|
||||
},
|
||||
"Comfy_MaskEditor_UseDominantAxis": {
|
||||
"name": "鎖定筆刷調整至主軸",
|
||||
"tooltip": "啟用後,筆刷調整只會根據你移動較多的方向,分別影響大小或硬度"
|
||||
},
|
||||
"Comfy_MaskEditor_UseNewEditor": {
|
||||
"name": "使用新遮罩編輯器",
|
||||
"tooltip": "切換到新遮罩編輯器介面"
|
||||
},
|
||||
"Comfy_ModelLibrary_AutoLoadAll": {
|
||||
"name": "自動載入所有模型資料夾",
|
||||
"tooltip": "若為開啟,當你打開模型庫時,所有資料夾將自動載入(這可能會導致載入時延遲)。若為關閉,只有在你點擊根目錄下的模型資料夾時才會載入。"
|
||||
},
|
||||
"Comfy_ModelLibrary_NameFormat": {
|
||||
"name": "在模型庫樹狀檢視中顯示的名稱",
|
||||
"options": {
|
||||
"filename": "filename",
|
||||
"title": "title"
|
||||
},
|
||||
"tooltip": "選擇「filename」可在模型清單中顯示簡化的原始檔名(不含目錄或「.safetensors」副檔名)。選擇「title」則顯示可設定的模型中繼資料標題。"
|
||||
},
|
||||
"Comfy_NodeBadge_NodeIdBadgeMode": {
|
||||
"name": "節點 ID 標籤模式",
|
||||
"options": {
|
||||
"None": "無",
|
||||
"Show all": "全部顯示"
|
||||
}
|
||||
},
|
||||
"Comfy_NodeBadge_NodeLifeCycleBadgeMode": {
|
||||
"name": "節點生命週期徽章模式",
|
||||
"options": {
|
||||
"None": "無",
|
||||
"Show all": "顯示全部"
|
||||
}
|
||||
},
|
||||
"Comfy_NodeBadge_NodeSourceBadgeMode": {
|
||||
"name": "節點來源徽章模式",
|
||||
"options": {
|
||||
"Hide built-in": "隱藏內建",
|
||||
"None": "無",
|
||||
"Show all": "全部顯示"
|
||||
}
|
||||
},
|
||||
"Comfy_NodeBadge_ShowApiPricing": {
|
||||
"name": "顯示 API 節點價格標籤"
|
||||
},
|
||||
"Comfy_NodeSearchBoxImpl": {
|
||||
"name": "節點搜尋框實作",
|
||||
"options": {
|
||||
"default": "預設",
|
||||
"litegraph (legacy)": "litegraph(舊版)"
|
||||
}
|
||||
},
|
||||
"Comfy_NodeSearchBoxImpl_NodePreview": {
|
||||
"name": "節點預覽",
|
||||
"tooltip": "僅適用於預設實作"
|
||||
},
|
||||
"Comfy_NodeSearchBoxImpl_ShowCategory": {
|
||||
"name": "在搜尋結果中顯示節點分類",
|
||||
"tooltip": "僅適用於預設實作"
|
||||
},
|
||||
"Comfy_NodeSearchBoxImpl_ShowIdName": {
|
||||
"name": "在搜尋結果中顯示節點 ID 名稱",
|
||||
"tooltip": "僅適用於預設實作"
|
||||
},
|
||||
"Comfy_NodeSearchBoxImpl_ShowNodeFrequency": {
|
||||
"name": "在搜尋結果中顯示節點頻率",
|
||||
"tooltip": "僅適用於預設實作"
|
||||
},
|
||||
"Comfy_NodeSuggestions_number": {
|
||||
"name": "節點建議數量",
|
||||
"tooltip": "僅適用於 litegraph 搜尋框/右鍵選單"
|
||||
},
|
||||
"Comfy_Node_AllowImageSizeDraw": {
|
||||
"name": "在圖片預覽下方顯示寬度 × 高度"
|
||||
},
|
||||
"Comfy_Node_AutoSnapLinkToSlot": {
|
||||
"name": "自動吸附連結到節點插槽",
|
||||
"tooltip": "拖曳連結到節點時,連結會自動吸附到節點上可用的輸入插槽"
|
||||
},
|
||||
"Comfy_Node_BypassAllLinksOnDelete": {
|
||||
"name": "刪除節點時保留所有連結",
|
||||
"tooltip": "刪除節點時,嘗試自動重新連接其所有輸入與輸出連結(繞過被刪除的節點)"
|
||||
},
|
||||
"Comfy_Node_DoubleClickTitleToEdit": {
|
||||
"name": "雙擊節點標題以編輯"
|
||||
},
|
||||
"Comfy_Node_MiddleClickRerouteNode": {
|
||||
"name": "中鍵點擊建立新的重導節點"
|
||||
},
|
||||
"Comfy_Node_Opacity": {
|
||||
"name": "節點不透明度"
|
||||
},
|
||||
"Comfy_Node_ShowDeprecated": {
|
||||
"name": "在搜尋中顯示已棄用節點",
|
||||
"tooltip": "已棄用的節點在介面中預設隱藏,但在現有使用這些節點的工作流程中仍可運作。"
|
||||
},
|
||||
"Comfy_Node_ShowExperimental": {
|
||||
"name": "在搜尋中顯示實驗性節點",
|
||||
"tooltip": "實驗性節點會在介面中標註,未來版本可能會有重大變動或被移除。請在正式工作流程中謹慎使用"
|
||||
},
|
||||
"Comfy_Node_SnapHighlightsNode": {
|
||||
"name": "節點高亮顯示對齊",
|
||||
"tooltip": "當拖曳連結到具有可用輸入插槽的節點時,高亮顯示該節點"
|
||||
},
|
||||
"Comfy_Notification_ShowVersionUpdates": {
|
||||
"name": "顯示版本更新",
|
||||
"tooltip": "顯示新模型和主要新功能的更新。"
|
||||
},
|
||||
"Comfy_Pointer_ClickBufferTime": {
|
||||
"name": "指標點擊漂移延遲",
|
||||
"tooltip": "按下指標按鈕後,這是可忽略指標移動的最長時間(以毫秒為單位)。\n\n可防止在點擊時移動指標導致物件被意外推動。"
|
||||
},
|
||||
"Comfy_Pointer_ClickDrift": {
|
||||
"name": "指標點擊漂移(最大距離)",
|
||||
"tooltip": "如果在按住按鈕時指標移動超過此距離,則視為拖曳(而非點擊)。\n\n可防止在點擊時不小心移動指標導致物件被意外推動。"
|
||||
},
|
||||
"Comfy_Pointer_DoubleClickTime": {
|
||||
"name": "雙擊間隔(最大值)",
|
||||
"tooltip": "兩次點擊被視為雙擊的最長間隔時間(毫秒)。增加此數值可協助在雙擊有時未被辨識時改善操作體驗。"
|
||||
},
|
||||
"Comfy_PreviewFormat": {
|
||||
"name": "預覽圖片格式",
|
||||
"tooltip": "在圖片元件中顯示預覽時,將其轉換為輕量級圖片格式,例如 webp、jpeg、webp;50 等。"
|
||||
},
|
||||
"Comfy_PromptFilename": {
|
||||
"name": "儲存工作流程時提示輸入檔案名稱"
|
||||
},
|
||||
"Comfy_QueueButton_BatchCountLimit": {
|
||||
"name": "批次數量上限",
|
||||
"tooltip": "每次按鈕點擊可加入佇列的最大任務數"
|
||||
},
|
||||
"Comfy_Queue_MaxHistoryItems": {
|
||||
"name": "佇列歷史記錄大小",
|
||||
"tooltip": "佇列歷史中顯示的最大任務數量。"
|
||||
},
|
||||
"Comfy_Sidebar_Location": {
|
||||
"name": "側邊欄位置",
|
||||
"options": {
|
||||
"left": "左側",
|
||||
"right": "右側"
|
||||
}
|
||||
},
|
||||
"Comfy_Sidebar_Size": {
|
||||
"name": "側邊欄大小",
|
||||
"options": {
|
||||
"normal": "一般",
|
||||
"small": "小"
|
||||
}
|
||||
},
|
||||
"Comfy_Sidebar_UnifiedWidth": {
|
||||
"name": "統一側邊欄寬度"
|
||||
},
|
||||
"Comfy_SnapToGrid_GridSize": {
|
||||
"name": "對齊至格線大小",
|
||||
"tooltip": "當按住 Shift 拖曳或調整節點大小時,節點會對齊到格線,此設定可調整格線的大小。"
|
||||
},
|
||||
"Comfy_TextareaWidget_FontSize": {
|
||||
"name": "文字區塊元件字型大小"
|
||||
},
|
||||
"Comfy_TextareaWidget_Spellcheck": {
|
||||
"name": "文字方塊小工具拼字檢查"
|
||||
},
|
||||
"Comfy_TreeExplorer_ItemPadding": {
|
||||
"name": "樹狀瀏覽器項目間距"
|
||||
},
|
||||
"Comfy_UseNewMenu": {
|
||||
"name": "使用新選單",
|
||||
"options": {
|
||||
"Bottom": "下方",
|
||||
"Disabled": "停用",
|
||||
"Top": "上方"
|
||||
},
|
||||
"tooltip": "選單列位置。在行動裝置上,選單永遠顯示在頂部。"
|
||||
},
|
||||
"Comfy_Validation_NodeDefs": {
|
||||
"name": "驗證節點定義(較慢)",
|
||||
"tooltip": "建議節點開發者使用。這會在啟動時驗證所有節點定義。"
|
||||
},
|
||||
"Comfy_Validation_Workflows": {
|
||||
"name": "驗證工作流程"
|
||||
},
|
||||
"Comfy_WidgetControlMode": {
|
||||
"name": "元件控制模式",
|
||||
"options": {
|
||||
"after": "佇列後",
|
||||
"before": "佇列前"
|
||||
},
|
||||
"tooltip": "控制元件數值何時更新(隨機、遞增、遞減),可選在提示加入佇列前或後進行。"
|
||||
},
|
||||
"Comfy_Window_UnloadConfirmation": {
|
||||
"name": "關閉視窗時顯示確認提示"
|
||||
},
|
||||
"Comfy_Workflow_AutoSave": {
|
||||
"name": "自動儲存",
|
||||
"options": {
|
||||
"after delay": "延遲後",
|
||||
"off": "關閉"
|
||||
}
|
||||
},
|
||||
"Comfy_Workflow_AutoSaveDelay": {
|
||||
"name": "自動儲存延遲(毫秒)",
|
||||
"tooltip": "僅在自動儲存設為「延遲後」時適用。"
|
||||
},
|
||||
"Comfy_Workflow_ConfirmDelete": {
|
||||
"name": "刪除工作流程時顯示確認視窗"
|
||||
},
|
||||
"Comfy_Workflow_Persist": {
|
||||
"name": "保留工作流程狀態並於頁面重新載入時還原"
|
||||
},
|
||||
"Comfy_Workflow_ShowMissingModelsWarning": {
|
||||
"name": "顯示缺少模型警告"
|
||||
},
|
||||
"Comfy_Workflow_ShowMissingNodesWarning": {
|
||||
"name": "顯示缺少節點警告"
|
||||
},
|
||||
"Comfy_Workflow_SortNodeIdOnSave": {
|
||||
"name": "儲存工作流程時排序節點 ID"
|
||||
},
|
||||
"Comfy_Workflow_WorkflowTabsPosition": {
|
||||
"name": "已開啟工作流程的位置",
|
||||
"options": {
|
||||
"Sidebar": "側邊欄",
|
||||
"Topbar": "頂部欄",
|
||||
"Topbar (2nd-row)": "頂部欄(第二列)"
|
||||
}
|
||||
},
|
||||
"LiteGraph_Canvas_LowQualityRenderingZoomThreshold": {
|
||||
"name": "低品質渲染縮放臨界值",
|
||||
"tooltip": "當縮小檢視時以低品質渲染圖形"
|
||||
},
|
||||
"LiteGraph_Canvas_MaximumFps": {
|
||||
"name": "最大FPS",
|
||||
"tooltip": "畫布允許渲染的最大每秒幀數。限制GPU使用率,但可能影響流暢度。若設為0,則使用螢幕的更新率。預設值:0"
|
||||
},
|
||||
"LiteGraph_ContextMenu_Scaling": {
|
||||
"name": "放大時縮放節點組合小工具選單(清單)"
|
||||
},
|
||||
"LiteGraph_Node_DefaultPadding": {
|
||||
"name": "新增節點時自動縮小",
|
||||
"tooltip": "建立節點時自動調整為最小尺寸。若停用,新增的節點會略為加寬以顯示元件數值。"
|
||||
},
|
||||
"LiteGraph_Node_TooltipDelay": {
|
||||
"name": "提示延遲"
|
||||
},
|
||||
"LiteGraph_Pointer_TrackpadGestures": {
|
||||
"name": "啟用觸控板手勢",
|
||||
"tooltip": "此設定可為畫布啟用觸控板模式,允許使用兩指縮放與平移。"
|
||||
},
|
||||
"LiteGraph_Reroute_SplineOffset": {
|
||||
"name": "重導樣條偏移",
|
||||
"tooltip": "貝茲控制點相對於重導中心點的偏移量"
|
||||
},
|
||||
"pysssss_SnapToGrid": {
|
||||
"name": "總是對齊格線"
|
||||
}
|
||||
}
|
||||
@@ -1099,6 +1099,7 @@
|
||||
"Node Search Box": "节点搜索框",
|
||||
"Node Widget": "节点组件",
|
||||
"NodeLibrary": "节点库",
|
||||
"Notification Preferences": "通知偏好",
|
||||
"Pointer": "指针",
|
||||
"Queue": "队列",
|
||||
"QueueButton": "执行按钮",
|
||||
@@ -1190,219 +1191,322 @@
|
||||
"category": {
|
||||
"3D": "3D",
|
||||
"All": "所有模板",
|
||||
"Area Composition": "区域组成",
|
||||
"Audio": "音频",
|
||||
"Area Composition": "区域合成",
|
||||
"Audio": "音频生成",
|
||||
"Basics": "基础",
|
||||
"ComfyUI Examples": "ComfyUI示例",
|
||||
"ControlNet": "ControlNet",
|
||||
"Custom Nodes": "自定义节点",
|
||||
"Flux": "Flux",
|
||||
"Image": "图片",
|
||||
"Image": "图像生成",
|
||||
"Image API": "图像 API",
|
||||
"Upscaling": "放大",
|
||||
"Video": "视频",
|
||||
"LLM API": "LLM API",
|
||||
"Upscaling": "图像放大",
|
||||
"Video": "视频生成",
|
||||
"Video API": "视频 API"
|
||||
},
|
||||
"template": {
|
||||
"3D": {
|
||||
"hunyuan-3d-multiview-elf": "混元3D多视图",
|
||||
"hunyuan-3d-turbo": "混元3D Turbo",
|
||||
"hunyuan3d-non-multiview-train": "混元3D",
|
||||
"3d_hunyuan3d_image_to_model": "混元3D 2.0 图生模型",
|
||||
"3d_hunyuan3d_multiview_to_model": "混元3D 2.0 多视图模型生成",
|
||||
"3d_hunyuan3d_multiview_to_model_turbo": "混元3D 2.0 多视图模型生成 Turbo",
|
||||
"stable_zero123_example": "Stable Zero123"
|
||||
},
|
||||
"3D API": {
|
||||
"api_rodin_image_to_model": "Rodin:图生模型",
|
||||
"api_rodin_multiview_to_model": "Rodin:多视图模型生成",
|
||||
"api_tripo_image_to_model": "Tripo:图生模型",
|
||||
"api_tripo_multiview_to_model": "Tripo:多视图模型生成",
|
||||
"api_tripo_text_to_model": "Tripo:文生模型"
|
||||
},
|
||||
"Area Composition": {
|
||||
"area_composition": "区域构成",
|
||||
"area_composition_reversed": "反向区域构成",
|
||||
"area_composition_square_area_for_subject": "主题的方形区域构成"
|
||||
"area_composition": "区域条件控制",
|
||||
"area_composition_square_area_for_subject": "区域件控制生成"
|
||||
},
|
||||
"Audio": {
|
||||
"stable_audio_example": "Stable Audio"
|
||||
"audio_ace_step_1_m2m_editing": "ACE Step v1 歌曲风格/歌词编辑",
|
||||
"audio_ace_step_1_t2a_instrumentals": "ACE-Step v1 文生器乐",
|
||||
"audio_ace_step_1_t2a_song": "ACE Step v1 文生歌曲",
|
||||
"audio_stable_audio_example": "Stable Audio"
|
||||
},
|
||||
"Basics": {
|
||||
"default": "图像生成",
|
||||
"embedding_example": "嵌入",
|
||||
"default": "文生图(默认)",
|
||||
"embedding_example": "文本嵌入模型",
|
||||
"gligen_textbox_example": "Gligen文本框",
|
||||
"image2image": "图像到图像",
|
||||
"inpain_model_outpainting": "Inpaint模型Outpainting",
|
||||
"inpaint_example": "Inpaint",
|
||||
"lora": "Lora",
|
||||
"lora_multiple": "Lora多个"
|
||||
"image2image": "图生图",
|
||||
"inpaint_example": "局部重绘",
|
||||
"inpaint_model_outpainting": "图像外扩",
|
||||
"lora": "LoRA",
|
||||
"lora_multiple": "多重LoRA"
|
||||
},
|
||||
"ControlNet": {
|
||||
"2_pass_pose_worship": "双通道姿势处理",
|
||||
"controlnet_example": "ControlNet",
|
||||
"depth_controlnet": "深度ControlNet",
|
||||
"depth_t2i_adapter": "深度T2I适配器",
|
||||
"mixing_controlnets": "混合ControlNets"
|
||||
"2_pass_pose_worship": "姿态 ControlNet 二次采样",
|
||||
"controlnet_example": "涂鸦 ControlNet",
|
||||
"depth_controlnet": "深度 ControlNet",
|
||||
"depth_t2i_adapter": "深度 T2I 适配器",
|
||||
"mixing_controlnets": "混合 ControlNet"
|
||||
},
|
||||
"Flux": {
|
||||
"flux_canny_model_example": "Flux Canny Model",
|
||||
"flux_depth_lora_example": "Flux Depth Lora",
|
||||
"flux_dev_checkpoint_example": "Flux Dev的检查点示例",
|
||||
"flux_fill_inpaint_example": "Flux Inpaint",
|
||||
"flux_fill_outpaint_example": "Flux Outpaint",
|
||||
"flux_depth_lora_example": "Flux Depth LoRA",
|
||||
"flux_dev_checkpoint_example": "Flux Dev fp8 文生图",
|
||||
"flux_dev_full_text_to_image": "Flux Dev 全量文生图",
|
||||
"flux_fill_inpaint_example": "Flux 局部重绘",
|
||||
"flux_fill_outpaint_example": "Flux 扩图",
|
||||
"flux_kontext_dev_basic": "Flux Kontext Dev(基础)",
|
||||
"flux_kontext_dev_grouped": "Flux Kontext Dev(组)",
|
||||
"flux_redux_model_example": "Flux Redux Model",
|
||||
"flux_schnell": "Flux Schnell"
|
||||
"flux_schnell": "Flux Schnell fp8 文生图",
|
||||
"flux_schnell_full_text_to_image": "Flux Schnell 全量文生图"
|
||||
},
|
||||
"Image": {
|
||||
"hidream_e1_full": "HiDream E1 Full",
|
||||
"hidream_i1_dev": "HiDream I1 Dev",
|
||||
"hidream_i1_fast": "HiDream I1 Fast",
|
||||
"hidream_i1_full": "HiDream I1 Full",
|
||||
"image_chroma_text_to_image": "Chroma 文生图",
|
||||
"image_cosmos_predict2_2B_t2i": "Cosmos Predict2 2B 文生图",
|
||||
"image_lotus_depth_v1_1": "Lotus Depth",
|
||||
"image_omnigen2_image_edit": "OmniGen2 图像编辑",
|
||||
"image_omnigen2_t2i": "OmniGen2 文生图",
|
||||
"sd3_5_large_blur": "SD3.5 Large 模糊",
|
||||
"sd3_5_large_canny_controlnet_example": "SD3.5 Large Canny 控制网",
|
||||
"sd3_5_large_depth": "SD3.5 Large 深度",
|
||||
"sd3_5_simple_example": "SD3.5 简易示例",
|
||||
"sd3_5_simple_example": "SD3.5 简单版本",
|
||||
"sdxl_refiner_prompt_example": "SDXL Refiner提示",
|
||||
"sdxl_revision_text_prompts": "SDXL修订文本提示",
|
||||
"sdxl_revision_zero_positive": "SDXL修订零正",
|
||||
"sdxl_simple_example": "SDXL简单",
|
||||
"sdxl_simple_example": "SDXL简单版本",
|
||||
"sdxlturbo_example": "SDXL Turbo"
|
||||
},
|
||||
"Image API": {
|
||||
"api-openai-dall-e-2-inpaint": "Dall-E 2 局部修复",
|
||||
"api-openai-dall-e-2-t2i": "Dall-E 2 文生图",
|
||||
"api-openai-dall-e-3-t2i": "Dall-E 3 文生图",
|
||||
"api_bfl_flux_pro_t2i": "BFL Flux 1.1[pro] Ultra 文生图",
|
||||
"api_ideogram_v3_t2i": "Ideogram V3 文生图",
|
||||
"api_luma_photon_i2i": "Luma Photon 图生图",
|
||||
"api_luma_photon_style_ref": "Luma Photon 风格参考",
|
||||
"api_openai_image_1_i2i": "OpenAI Image-1 图生图",
|
||||
"api_openai_image_1_inpaint": "OpenAI Image-1 局部修复",
|
||||
"api_openai_image_1_multi_inputs": "OpenAI Image-1 多输入",
|
||||
"api_openai_image_1_t2i": "OpenAI Image-1 文生图",
|
||||
"api_recraft_image_gen_with_color_control": "Recraft 颜色控制图像生成",
|
||||
"api_recraft_image_gen_with_style_control": "Recraft 风格控制图像生成",
|
||||
"api_recraft_vector_gen": "Recraft 矢量生成",
|
||||
"api_stability_sd3_t2i": "Stability AI Stable Image Ultra 文生图"
|
||||
"api_bfl_flux_1_kontext_max_image": "BFL Flux.1 Kontext Max",
|
||||
"api_bfl_flux_1_kontext_multiple_images_input": "BFL Flux.1 Kontext 多图输入",
|
||||
"api_bfl_flux_1_kontext_pro_image": "BFL Flux.1 Kontext Pro",
|
||||
"api_bfl_flux_pro_t2i": "BFL Flux[Pro]:文生图",
|
||||
"api_ideogram_v3_t2i": "Ideogram V3:文生图",
|
||||
"api_luma_photon_i2i": "Luma Photon:图生图",
|
||||
"api_luma_photon_style_ref": "Luma Photon:风格参考",
|
||||
"api_openai_dall_e_2_inpaint": "OpenAI:Dall-E 2 局部修复",
|
||||
"api_openai_dall_e_2_t2i": "OpenAI:Dall-E 2 文生图",
|
||||
"api_openai_dall_e_3_t2i": "OpenAI:Dall-E 3 文生图",
|
||||
"api_openai_image_1_i2i": "OpenAI:GPT-Image-1 图生图",
|
||||
"api_openai_image_1_inpaint": "OpenAI:GPT-Image-1 局部重绘",
|
||||
"api_openai_image_1_multi_inputs": "OpenAI:GPT-Image-1 多输入",
|
||||
"api_openai_image_1_t2i": "OpenAI:GPT-Image-1 文生图",
|
||||
"api_recraft_image_gen_with_color_control": "Recraft:颜色控制图像生成",
|
||||
"api_recraft_image_gen_with_style_control": "Recraft:风格控制图像生成",
|
||||
"api_recraft_vector_gen": "Recraft:矢量生成",
|
||||
"api_runway_reference_to_image": "Runway:参考图到图像",
|
||||
"api_runway_text_to_image": "Runway:文生图",
|
||||
"api_stability_ai_i2i": "Stability AI:图生图",
|
||||
"api_stability_ai_sd3_5_i2i": "Stability AI:SD3.5 图生图",
|
||||
"api_stability_ai_sd3_5_t2i": "Stability AI:SD3.5 文生图",
|
||||
"api_stability_ai_stable_image_ultra_t2i": "Stability AI:Stable Image Ultra 文生图"
|
||||
},
|
||||
"LLM API": {
|
||||
"api_google_gemini": "Google Gemini:对话",
|
||||
"api_openai_chat": "OpenAI:对话"
|
||||
},
|
||||
"Upscaling": {
|
||||
"esrgan_example": "ESRGAN",
|
||||
"hiresfix_esrgan_workflow": "HiresFix ESRGAN工作流",
|
||||
"hiresfix_latent_workflow": "HiresFix潜在工作流",
|
||||
"latent_upscale_different_prompt_model": "潜在升级不同提示模型"
|
||||
"hiresfix_esrgan_workflow": "HiresFix ESRGAN放大",
|
||||
"hiresfix_latent_workflow": "HiresFix 潜空间放大",
|
||||
"latent_upscale_different_prompt_model": "潜空间放大二次采样"
|
||||
},
|
||||
"Video": {
|
||||
"hunyuan_video_text_to_video": "混元视频文本到视频",
|
||||
"image_to_video": "图像到视频",
|
||||
"image_to_video_wan": "Wan 2.1 图像到视频",
|
||||
"ltxv_image_to_video": "LTXV图像到视频",
|
||||
"ltxv_text_to_video": "LTXV文本到视频",
|
||||
"mochi_text_to_video_example": "Mochi文本到视频",
|
||||
"text_to_video_wan": "Wan 2.1 文字到视频",
|
||||
"txt_to_image_to_video": "文本到图像到视频",
|
||||
"hunyuan_video_text_to_video": "混元视频文生视频",
|
||||
"image_to_video": "图生视频",
|
||||
"image_to_video_wan": "Wan 2.1 图生视频",
|
||||
"ltxv_image_to_video": "LTXV图生视频",
|
||||
"ltxv_text_to_video": "LTXV文生视频",
|
||||
"mochi_text_to_video_example": "Mochi文生视频",
|
||||
"text_to_video_wan": "Wan 2.1 文生视频",
|
||||
"txt_to_image_to_video": "文生图转视频",
|
||||
"video_cosmos_predict2_2B_video2world_480p_16fps": "Cosmos Predict2 2B Video2World 480p 16fps",
|
||||
"video_wan2_1_fun_camera_v1_1_14B": "Wan 2.1 Fun Camera 14B",
|
||||
"video_wan2_1_fun_camera_v1_1_1_3B": "Wan 2.1 Fun Camera 1.3B",
|
||||
"video_wan_vace_14B_ref2v": "Wan VACE 参考视频生成",
|
||||
"video_wan_vace_14B_t2v": "Wan VACE 文生视频",
|
||||
"video_wan_vace_14B_v2v": "Wan VACE 控制视频",
|
||||
"video_wan_vace_flf2v": "Wan VACE 首尾帧",
|
||||
"video_wan_vace_inpainting": "Wan VACE Inpainting",
|
||||
"video_wan_vace_outpainting": "Wan VACE 视频外扩",
|
||||
"wan2_1_flf2v_720_f16": "Wan 2.1 FLF2V 720p F16",
|
||||
"wan2_1_fun_control": "Wan 2.1 ControlNet",
|
||||
"wan2_1_fun_inp": "Wan 2.1 图像修复"
|
||||
"wan2_1_fun_inp": "Wan 2.1 局部重绘"
|
||||
},
|
||||
"Video API": {
|
||||
"api_hailuo_minimax_i2v": "MiniMax 图生视频",
|
||||
"api_kling_i2v": "Kling 图生视频",
|
||||
"api_luma_i2v": "Luma 图生视频",
|
||||
"api_pika_scene": "Pika 场景:图生视频",
|
||||
"api_pixverse_t2v": "PixVerse 文生视频",
|
||||
"api_hailuo_minimax_i2v": "MiniMax:图生视频",
|
||||
"api_hailuo_minimax_t2v": "MiniMax:文生视频",
|
||||
"api_kling_effects": "Kling:视频特效",
|
||||
"api_kling_flf": "Kling:首尾帧",
|
||||
"api_kling_i2v": "Kling:图生视频",
|
||||
"api_luma_i2v": "Luma:图生视频",
|
||||
"api_luma_t2v": "Luma:文生视频",
|
||||
"api_moonvalley_image_to_video": "Moonvalley:图生视频",
|
||||
"api_moonvalley_text_to_video": "Moonvalley:文生视频",
|
||||
"api_pika_i2v": "Pika:图生视频",
|
||||
"api_pika_scene": "Pika 场景:多图视频生成",
|
||||
"api_pixverse_i2v": "PixVerse:图生视频",
|
||||
"api_pixverse_t2v": "PixVerse:文本到视频",
|
||||
"api_pixverse_template_i2v": "PixVerse特效:图生视频",
|
||||
"api_veo2_i2v": "Veo2 图生视频"
|
||||
"api_runway_first_last_frame": "Runway:首尾帧视频sheng c",
|
||||
"api_runway_gen3a_turbo_image_to_video": "Runway:Gen3a Turbo 图生视频",
|
||||
"api_runway_gen4_turo_image_to_video": "Runway:Gen4 Turbo 图生视频",
|
||||
"api_veo2_i2v": "Veo2:图生视频"
|
||||
}
|
||||
},
|
||||
"templateDescription": {
|
||||
"3D": {
|
||||
"hunyuan-3d-multiview-elf": "使用 Hunyuan3D 2mv 从多视角生成模型。",
|
||||
"hunyuan-3d-turbo": "使用 Hunyuan3D 2mv turbo 从多视角生成模型。",
|
||||
"hunyuan3d-non-multiview-train": "使用 Hunyuan3D 2.0 从单视角生成模型。",
|
||||
"3d_hunyuan3d_image_to_model": "使用 Hunyuan3D 2.0 单图生成 3D 模型。",
|
||||
"3d_hunyuan3d_multiview_to_model": "使用 Hunyuan3D 2.0 MV 多视角生成 3D 模型。",
|
||||
"3d_hunyuan3d_multiview_to_model_turbo": "使用 Hunyuan3D 2.0 MV Turbo 多视角生成 3D 模型。",
|
||||
"stable_zero123_example": "通过单张图像生成 3D 视图。"
|
||||
},
|
||||
"3D API": {
|
||||
"api_rodin_image_to_model": "Rodin AI 单张照片生成高细节 3D 资产。",
|
||||
"api_rodin_multiview_to_model": "Rodin 多视图重建完整 3D 资产。",
|
||||
"api_tripo_image_to_model": "Tripo 2D 图像生成专业的 3D 资产。",
|
||||
"api_tripo_multiview_to_model": "Tripo 多视角生成 3D 资产。",
|
||||
"api_tripo_text_to_model": "Tripo 文本描述生成 3D 物体。"
|
||||
},
|
||||
"Area Composition": {
|
||||
"area_composition": "通过区域控制图像构图。",
|
||||
"area_composition_reversed": "反向区域构图流程。",
|
||||
"area_composition_square_area_for_subject": "实现主体位置一致性。"
|
||||
"area_composition_square_area_for_subject": "通过区域构成实现主体位置一致性。"
|
||||
},
|
||||
"Audio": {
|
||||
"stable_audio_example": "根据文本描述生成音频。"
|
||||
"audio_ace_step_1_m2m_editing": "使用 ACE-Step v1 M2M 编辑现有歌曲风格和歌词。",
|
||||
"audio_ace_step_1_t2a_instrumentals": "使用 ACE-Step v1 根据文本生成器乐音乐。",
|
||||
"audio_ace_step_1_t2a_song": "使用 ACE-Step v1 根据文本生成带人声的歌曲,支持多语言和风格定制。",
|
||||
"audio_stable_audio_example": "根据文本描述生成音频。"
|
||||
},
|
||||
"Basics": {
|
||||
"default": "根据文本描述生成图像。",
|
||||
"embedding_example": "使用文本反演实现风格一致性。",
|
||||
"gligen_textbox_example": "指定物体的位置和大小。",
|
||||
"embedding_example": "使用文本反演模型实现风格一致性。",
|
||||
"gligen_textbox_example": "通过文本框精确控制物体位置生成图像。",
|
||||
"image2image": "使用文本提示转换现有图像。",
|
||||
"inpain_model_outpainting": "将图像扩展到原始边界之外。",
|
||||
"inpaint_example": "无缝编辑图像的特定部分。",
|
||||
"inpaint_model_outpainting": "将图像扩展到原始边界之外。",
|
||||
"lora": "应用 LoRA 模型以实现特定风格或主题。",
|
||||
"lora_multiple": "组合多个 LoRA 模型以获得独特效果。"
|
||||
},
|
||||
"ControlNet": {
|
||||
"2_pass_pose_worship": "通过姿态参考生成图像。",
|
||||
"controlnet_example": "通过参考图像控制图像生成。",
|
||||
"depth_controlnet": "生成深度感知图像。",
|
||||
"controlnet_example": "通过涂鸦参考图像引导生成。",
|
||||
"depth_controlnet": "通过深度信息引导生成图像。",
|
||||
"depth_t2i_adapter": "使用 T2I 适配器快速生成深度感知图像。",
|
||||
"mixing_controlnets": "组合多个 ControlNet 模型。"
|
||||
"mixing_controlnets": "组合多个 ControlNet 模型生成图像。"
|
||||
},
|
||||
"Flux": {
|
||||
"flux_canny_model_example": "通过边缘检测生成图像。",
|
||||
"flux_depth_lora_example": "使用深度感知 LoRA 生成图像。",
|
||||
"flux_dev_checkpoint_example": "使用 flux 开发模型生成图像。",
|
||||
"flux_fill_inpaint_example": "填充图像中缺失的部分。",
|
||||
"flux_fill_outpaint_example": "使用 flux 外扩生成图像。",
|
||||
"flux_redux_model_example": "将参考图像的风格迁移到引导图像生成(flux)。",
|
||||
"flux_schnell": "使用 flux schnell 快速生成图像。"
|
||||
"flux_canny_model_example": "通过边缘检测引导 Flux 生成图像。",
|
||||
"flux_depth_lora_example": "通过深度信息引导 Flux LoRA 生成图像。",
|
||||
"flux_dev_checkpoint_example": "使用 Flux Dev fp8 量化版生成图像,适合显存有限设备,仅需一个模型文件,画质略低于完整版。",
|
||||
"flux_dev_full_text_to_image": "使用 Flux Dev 完整版生成高质量图像,需要更大显存和多个模型文件,提示遵循性和画质最佳。",
|
||||
"flux_fill_inpaint_example": "使用 Flux 修复图像缺失区域。",
|
||||
"flux_fill_outpaint_example": "使用 Flux 将图像外扩生成更大尺寸的图片。",
|
||||
"flux_kontext_dev_basic": "使用 Flux Kontext(基础版)编辑图像,适合了解节点组合。",
|
||||
"flux_kontext_dev_grouped": "使用 Flux Kontext 组节点版本,简洁封装版适合快速复用。",
|
||||
"flux_redux_model_example": "通过参考图像风格迁移,使用 Flux Redux 生成新图像。",
|
||||
"flux_schnell": "使用 Flux Schnell fp8 量化版快速生成图像,适合低端硬件,仅需4步即可生成。",
|
||||
"flux_schnell_full_text_to_image": "使用 Flux Schnell 完整版快速生成图像,Apache2.0 许可,仅需4步,兼顾速度和画质。"
|
||||
},
|
||||
"Image": {
|
||||
"hidream_e1_full": "使用 HiDream E1 编辑图像。",
|
||||
"hidream_i1_dev": "使用 HiDream I1 Dev 生成图像。",
|
||||
"hidream_i1_fast": "使用 HiDream I1 快速生成图像。",
|
||||
"hidream_i1_full": "使用 HiDream I1 生成图像。",
|
||||
"sd3_5_large_blur": "使用 SD 3.5 通过模糊参考图像生成图像。",
|
||||
"sd3_5_large_canny_controlnet_example": "使用边缘检测引导 SD 3.5 图像生成。",
|
||||
"sd3_5_large_depth": "使用 SD 3.5 生成深度感知图像。",
|
||||
"hidream_e1_full": "HiDream E1 - 专业级自然语言图像编辑模型。",
|
||||
"hidream_i1_dev": "HiDream I1 Dev - 28 步推理,适合中端硬件,平衡速度与质量。",
|
||||
"hidream_i1_fast": "HiDream I1 Fast - 16 步推理,适合低端硬件快速预览。",
|
||||
"hidream_i1_full": "HiDream I1 Full - 50 步推理,画质最佳。",
|
||||
"image_chroma_text_to_image": "Chroma 基于 Flux 改进,架构有所变化。",
|
||||
"image_cosmos_predict2_2B_t2i": "使用 Cosmos-Predict2 2B T2I 生成物理真实、高保真、细节丰富的图像。",
|
||||
"image_lotus_depth_v1_1": "在 ComfyUI 中运行 Lotus Depth,实现高效、细节丰富的单目深度估计。",
|
||||
"image_omnigen2_image_edit": "使用 OmniGen2 高级图像编辑和文本渲染能力,通过自然语言指令编辑图片。",
|
||||
"image_omnigen2_t2i": "使用 OmniGen2 统一 7B 多模态模型和双路径架构,根据文本生成高质量图像。",
|
||||
"sd3_5_large_blur": "使用 SD 3.5 通过模糊参考图像引导生成图像。",
|
||||
"sd3_5_large_canny_controlnet_example": "使用 SD 3.5 Canny ControlNet 通过边缘检测引导生成图像。",
|
||||
"sd3_5_large_depth": "使用 SD 3.5 通过深度信息引导生成图像。",
|
||||
"sd3_5_simple_example": "使用 SD 3.5 生成图像。",
|
||||
"sdxl_refiner_prompt_example": "使用精炼器提升 SDXL 输出效果。",
|
||||
"sdxl_revision_text_prompts": "将参考图像的概念迁移到 SDXL 图像生成中。",
|
||||
"sdxl_revision_zero_positive": "在 SDXL 图像生成中结合文本提示和参考图像。",
|
||||
"sdxl_refiner_prompt_example": "使用 Refiner 模型提升 SDXL 图像质量。",
|
||||
"sdxl_revision_text_prompts": "通过参考图像概念迁移,使用 SDXL Revision 生成图像。",
|
||||
"sdxl_revision_zero_positive": "结合文本提示和参考图像,使用 SDXL Revision 生成图像。",
|
||||
"sdxl_simple_example": "使用 SDXL 生成高质量图像。",
|
||||
"sdxlturbo_example": "使用 SDXL Turbo 一步生成图像。"
|
||||
},
|
||||
"Image API": {
|
||||
"api-openai-dall-e-2-inpaint": "使用 Dall-E 2 API 对图像进行修复。",
|
||||
"api-openai-dall-e-2-t2i": "使用 Dall-E 2 API 根据文本描述生成图像。",
|
||||
"api-openai-dall-e-3-t2i": "使用 Dall-E 3 API 根据文本描述生成图像。",
|
||||
"api_bfl_flux_pro_t2i": "使用 FLUX.1 [pro] 生成高质量、细节丰富、提示遵循性强且多样化的图像。",
|
||||
"api_ideogram_v3_t2i": "生成高质量图像与提示对齐、照片级真实感和文本渲染。可用于专业级 logo、宣传海报、落地页概念、产品摄影等。轻松打造复杂空间构图、精细背景、精准光影与色彩、逼真环境细节。",
|
||||
"api_luma_photon_i2i": "结合图像和提示词引导图像生成。",
|
||||
"api_luma_photon_style_ref": "精确控制并融合风格参考。Luma Photon 捕捉每个参考图像的精髓,让你在保持专业品质的同时融合不同视觉元素。",
|
||||
"api_openai_image_1_i2i": "使用 GPT Image 1 API 通过图像生成图像。",
|
||||
"api_openai_image_1_inpaint": "使用 GPT Image 1 API 对图像进行修复。",
|
||||
"api_openai_image_1_multi_inputs": "使用 GPT Image 1 API 多输入生成图像。",
|
||||
"api_openai_image_1_t2i": "使用 GPT Image 1 API 根据文本描述生成图像。",
|
||||
"api_recraft_image_gen_with_color_control": "创建自定义调色板以复用或为每张照片手动选色。匹配品牌色彩,打造专属视觉风格。",
|
||||
"api_recraft_image_gen_with_style_control": "通过视觉示例控制风格、对齐位置、微调物体。存储并分享风格,实现品牌一致性。",
|
||||
"api_recraft_vector_gen": "通过文本提示生成矢量图像,使用 Recraft 的 AI 矢量生成器。可用于 logo、海报、图标集、广告、横幅和模型。生成高质量 SVG 文件,几秒内为你的应用或网站创建品牌矢量插画。",
|
||||
"api_stability_sd3_t2i": "生成高质量、提示遵循性极佳的图像。适用于专业场景,分辨率达 1 兆像素。"
|
||||
"api_bfl_flux_1_kontext_max_image": "使用 Flux.1 Kontext max image 编辑图像。",
|
||||
"api_bfl_flux_1_kontext_multiple_images_input": "多图输入,使用 Flux.1 Kontext 编辑。",
|
||||
"api_bfl_flux_1_kontext_pro_image": "使用 Flux.1 Kontext pro image 编辑图像。",
|
||||
"api_bfl_flux_pro_t2i": "使用 FLUX.1 Pro 生成高质量、提示遵循性强的图像。",
|
||||
"api_ideogram_v3_t2i": "Ideogram V3 高质量、提示对齐、照片级真实感和文本渲染。",
|
||||
"api_luma_photon_i2i": "结合图像和提示词引导生成。",
|
||||
"api_luma_photon_style_ref": "融合风格参考,精确控制,保持专业品质。",
|
||||
"api_openai_dall_e_2_inpaint": "OpenAI Dall-E 2 API 局部修复。",
|
||||
"api_openai_dall_e_2_t2i": "OpenAI Dall-E 2 API 文本生成图像。",
|
||||
"api_openai_dall_e_3_t2i": "OpenAI Dall-E 3 API 文本生成图像。",
|
||||
"api_openai_image_1_i2i": "OpenAI GPT Image 1 API 图生图。",
|
||||
"api_openai_image_1_inpaint": "OpenAI GPT Image 1 API 局部修复。",
|
||||
"api_openai_image_1_multi_inputs": "OpenAI GPT Image 1 API 多输入生成图像。",
|
||||
"api_openai_image_1_t2i": "OpenAI GPT Image 1 API 文本生成图像。",
|
||||
"api_recraft_image_gen_with_color_control": "自定义调色板,打造品牌专属视觉风格。",
|
||||
"api_recraft_image_gen_with_style_control": "通过视觉示例控制风格、对齐位置、微调物体,实现品牌一致性。",
|
||||
"api_recraft_vector_gen": "通过文本生成高质量矢量图像,适用于 logo、海报等。",
|
||||
"api_runway_reference_to_image": "Runway AI 参考风格和构图生成新图像。",
|
||||
"api_runway_text_to_image": "使用 Runway AI 文本生成高质量图像。",
|
||||
"api_stability_ai_i2i": "Stability AI 高质量图生图,适合专业编辑和风格迁移。",
|
||||
"api_stability_ai_sd3_5_i2i": "Stability AI SD3.5 图生图,分辨率达 1 兆像素。",
|
||||
"api_stability_ai_sd3_5_t2i": "Stability AI SD3.5 文本生成高质量图像,分辨率达 1 兆像素。",
|
||||
"api_stability_ai_stable_image_ultra_t2i": "生成高质量、提示遵循性极佳的图像,适合专业场景,分辨率达 1 兆像素。"
|
||||
},
|
||||
"LLM API": {
|
||||
"api_google_gemini": "体验 Google Gemini 多模态推理能力。",
|
||||
"api_openai_chat": "与 OpenAI 高级语言模型智能对话。"
|
||||
},
|
||||
"Upscaling": {
|
||||
"esrgan_example": "使用超分模型提升图像质量。",
|
||||
"hiresfix_esrgan_workflow": "在中间步骤使用超分模型提升图像质量。",
|
||||
"hiresfix_latent_workflow": "在 latent 空间中提升图像质量。",
|
||||
"esrgan_example": "使用 ESRGAN 超分模型提升图像质量。",
|
||||
"hiresfix_esrgan_workflow": "在中间步骤使用 ESRGAN 超分提升图像质量。",
|
||||
"hiresfix_latent_workflow": "在潜空间提升图像质量。",
|
||||
"latent_upscale_different_prompt_model": "放大图像并在不同阶段更换提示词。"
|
||||
},
|
||||
"Video": {
|
||||
"hunyuan_video_text_to_video": "使用 Hunyuan 模型生成视频。",
|
||||
"image_to_video": "将图像转换为动画视频。",
|
||||
"image_to_video_wan": "快速将图像生成视频。",
|
||||
"ltxv_image_to_video": "将静态图像转换为视频。",
|
||||
"ltxv_text_to_video": "根据文本描述生成视频。",
|
||||
"mochi_text_to_video_example": "使用 Mochi 模型生成视频。",
|
||||
"text_to_video_wan": "快速将文本描述生成视频。",
|
||||
"txt_to_image_to_video": "先由文本生成图像,再转换为视频。",
|
||||
"wan2_1_flf2v_720_f16": "通过控制首帧和尾帧生成视频。",
|
||||
"wan2_1_fun_control": "通过姿态、深度、边缘等控制引导视频生成。",
|
||||
"wan2_1_fun_inp": "通过起始帧和结束帧生成视频。"
|
||||
"hunyuan_video_text_to_video": "Hunyuan 文本转视频。",
|
||||
"image_to_video": "静态图像转视频。",
|
||||
"image_to_video_wan": "Wan 2.1 图像转视频。",
|
||||
"ltxv_image_to_video": "LTXV 静态图像转视频。",
|
||||
"ltxv_text_to_video": "LTXV 文本转视频。",
|
||||
"mochi_text_to_video_example": "Mochi 文本转视频。",
|
||||
"text_to_video_wan": "Wan 2.1 文本转视频。",
|
||||
"txt_to_image_to_video": "先生成图像再转为视频。",
|
||||
"video_cosmos_predict2_2B_video2world_480p_16fps": "使用 Cosmos-Predict2 2B Video2World 生成物理真实、高保真、一致性强的视频。",
|
||||
"video_wan2_1_fun_camera_v1_1_14B": "Wan 2.1 Fun Camera 14B,支持高级镜头控制。",
|
||||
"video_wan2_1_fun_camera_v1_1_1_3B": "Wan 2.1 Fun Camera 1.3B,生成动态镜头运动视频。",
|
||||
"video_wan_vace_14B_ref2v": "Wan VACE 参考图风格一致性视频生成。",
|
||||
"video_wan_vace_14B_t2v": "Wan VACE-14B 文本转视频,支持 480p 和 720p。",
|
||||
"video_wan_vace_14B_v2v": "Wan VACE 控制输入视频和参考图生成新视频。",
|
||||
"video_wan_vace_flf2v": "Wan VACE 首尾帧,支持自定义关键帧序列,实现平滑过渡。",
|
||||
"video_wan_vace_inpainting": "Wan VACE 局部编辑,保留周围内容,适合物体移除或替换。",
|
||||
"video_wan_vace_outpainting": "Wan VACE 外扩,扩展视频尺寸。",
|
||||
"wan2_1_flf2v_720_f16": "Wan 2.1 FLF2V,控制首尾帧生成 720p 视频。",
|
||||
"wan2_1_fun_control": "Wan 2.1 ControlNet,姿态、深度、边缘等控制视频生成。",
|
||||
"wan2_1_fun_inp": "Wan 2.1 起始帧和结束帧生成视频。"
|
||||
},
|
||||
"Video API": {
|
||||
"api_hailuo_minimax_i2v": "通过图像和文本生成精致视频,支持 CGI 效果和流行 AI 拥抱等特效。多种视频风格和主题,满足你的创意需求。",
|
||||
"api_kling_i2v": "生成动作、表情、镜头运动等提示遵循性强的视频。支持复杂提示和顺序动作,让你成为场景导演。",
|
||||
"api_luma_i2v": "将静态图像瞬间转化为高质量动画。",
|
||||
"api_pika_scene": "将多张图像作为素材,生成融合所有内容的视频。",
|
||||
"api_pixverse_t2v": "根据提示生成高还原度、动态效果出色的视频。",
|
||||
"api_pixverse_template_i2v": "将静态图像转化为带有动态和特效的视频。",
|
||||
"api_veo2_i2v": "使用 Google Veo2 API 通过图像生成视频。"
|
||||
"api_hailuo_minimax_i2v": "MiniMax 图像+文本生成精致视频,支持 CGI 效果。",
|
||||
"api_hailuo_minimax_t2v": "MiniMax 文本生成高质量视频,支持 CGI 效果和多样风格。",
|
||||
"api_kling_effects": "Kling 应用视觉特效生成动态视频。",
|
||||
"api_kling_flf": "Kling 控制首尾帧生成视频。",
|
||||
"api_kling_i2v": "Kling 高提示遵循性,支持动作、表情、镜头运动等。",
|
||||
"api_luma_i2v": "Luma 静态图像一键生成高质量动画。",
|
||||
"api_luma_t2v": "Luma 简单提示生成高质量视频。",
|
||||
"api_moonvalley_image_to_video": "Moonvalley 图像生成 1080p 电影级视频,训练数据全部为授权内容。",
|
||||
"api_moonvalley_text_to_video": "Moonvalley 文本生成 1080p 电影级视频,训练数据全部为授权内容。",
|
||||
"api_pika_i2v": "Pika AI 单张图像生成流畅动画视频。",
|
||||
"api_pika_scene": "Pika Scenes 多图融合生成视频。",
|
||||
"api_pixverse_i2v": "PixVerse 静态图像生成动态特效视频。",
|
||||
"api_pixverse_t2v": "PixVerse 高还原度、动态效果出色的视频生成。",
|
||||
"api_pixverse_template_i2v": "PixVerse 静态图像生成动态特效视频。",
|
||||
"api_runway_first_last_frame": "Runway 精准控制首尾帧生成平滑过渡视频。",
|
||||
"api_runway_gen3a_turbo_image_to_video": "Runway Gen3a Turbo 静态图像生成电影级视频。",
|
||||
"api_runway_gen4_turo_image_to_video": "Runway Gen4 Turbo 图像生成动态视频。",
|
||||
"api_veo2_i2v": "Google Veo2 API 图像生成视频。"
|
||||
}
|
||||
},
|
||||
"title": "从模板开始"
|
||||
@@ -1483,7 +1587,8 @@
|
||||
"title": "欢迎使用 ComfyUI"
|
||||
},
|
||||
"whatsNewPopup": {
|
||||
"learnMore": "了解更多"
|
||||
"learnMore": "了解更多",
|
||||
"noReleaseNotes": "暂无更新说明。"
|
||||
},
|
||||
"workflowService": {
|
||||
"enterFilename": "输入文件名",
|
||||
|
||||
@@ -259,6 +259,10 @@
|
||||
"name": "吸附高亮节点",
|
||||
"tooltip": "在拖动连线经过具有可用输入接口的节点时,高亮显示该节点。"
|
||||
},
|
||||
"Comfy_Notification_ShowVersionUpdates": {
|
||||
"name": "显示版本更新",
|
||||
"tooltip": "显示新模型和主要新功能的更新。"
|
||||
},
|
||||
"Comfy_Pointer_ClickBufferTime": {
|
||||
"name": "指针点击漂移延迟",
|
||||
"tooltip": "按下指针按钮后,忽略指针移动的最大时间(毫秒)。\n\n有助于防止在点击时意外移动鼠标。"
|
||||
@@ -322,7 +326,8 @@
|
||||
"Bottom": "底部",
|
||||
"Disabled": "禁用",
|
||||
"Top": "顶部"
|
||||
}
|
||||
},
|
||||
"tooltip": "菜单栏位置。在移动设备上,菜单始终显示在顶部。"
|
||||
},
|
||||
"Comfy_Validation_NodeDefs": {
|
||||
"name": "校验节点定义(慢)",
|
||||
|
||||
@@ -113,6 +113,8 @@ const zLogRawResponse = z.object({
|
||||
entries: z.array(zLogEntry)
|
||||
})
|
||||
|
||||
const zFeatureFlagsWsMessage = z.record(z.string(), z.any())
|
||||
|
||||
export type StatusWsMessageStatus = z.infer<typeof zStatusWsMessageStatus>
|
||||
export type StatusWsMessage = z.infer<typeof zStatusWsMessage>
|
||||
export type ProgressWsMessage = z.infer<typeof zProgressWsMessage>
|
||||
@@ -132,6 +134,7 @@ export type ProgressTextWsMessage = z.infer<typeof zProgressTextWsMessage>
|
||||
export type DisplayComponentWsMessage = z.infer<
|
||||
typeof zDisplayComponentWsMessage
|
||||
>
|
||||
export type FeatureFlagsWsMessage = z.infer<typeof zFeatureFlagsWsMessage>
|
||||
// End of ws messages
|
||||
|
||||
const zPromptInputItem = z.object({
|
||||
@@ -427,6 +430,7 @@ const zSettings = z.object({
|
||||
'Comfy.NodeBadge.NodeIdBadgeMode': zNodeBadgeMode,
|
||||
'Comfy.NodeBadge.NodeLifeCycleBadgeMode': zNodeBadgeMode,
|
||||
'Comfy.NodeBadge.ShowApiPricing': z.boolean(),
|
||||
'Comfy.Notification.ShowVersionUpdates': z.boolean(),
|
||||
'Comfy.QueueButton.BatchCountLimit': z.number(),
|
||||
'Comfy.Queue.MaxHistoryItems': z.number(),
|
||||
'Comfy.Keybinding.UnsetBindings': z.array(zKeybinding),
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import axios from 'axios'
|
||||
|
||||
import defaultClientFeatureFlags from '@/config/clientFeatureFlags.json'
|
||||
import type {
|
||||
DisplayComponentWsMessage,
|
||||
EmbeddingsResponse,
|
||||
@@ -11,6 +12,7 @@ import type {
|
||||
ExecutionStartWsMessage,
|
||||
ExecutionSuccessWsMessage,
|
||||
ExtensionsResponse,
|
||||
FeatureFlagsWsMessage,
|
||||
HistoryTaskItem,
|
||||
LogsRawResponse,
|
||||
LogsWsMessage,
|
||||
@@ -105,6 +107,7 @@ interface BackendApiCalls {
|
||||
b_preview: Blob
|
||||
progress_text: ProgressTextWsMessage
|
||||
display_component: DisplayComponentWsMessage
|
||||
feature_flags: FeatureFlagsWsMessage
|
||||
}
|
||||
|
||||
/** Dictionary of all api calls */
|
||||
@@ -234,6 +237,19 @@ export class ComfyApi extends EventTarget {
|
||||
|
||||
reportedUnknownMessageTypes = new Set<string>()
|
||||
|
||||
/**
|
||||
* Get feature flags supported by this frontend client.
|
||||
* Returns a copy to prevent external modification.
|
||||
*/
|
||||
getClientFeatureFlags(): Record<string, unknown> {
|
||||
return { ...defaultClientFeatureFlags }
|
||||
}
|
||||
|
||||
/**
|
||||
* Feature flags received from the backend server.
|
||||
*/
|
||||
serverFeatureFlags: Record<string, unknown> = {}
|
||||
|
||||
/**
|
||||
* The auth token for the comfy org account if the user is logged in.
|
||||
* This is only used for {@link queuePrompt} now. It is not directly
|
||||
@@ -375,6 +391,15 @@ export class ComfyApi extends EventTarget {
|
||||
|
||||
this.socket.addEventListener('open', () => {
|
||||
opened = true
|
||||
|
||||
// Send feature flags as the first message
|
||||
this.socket!.send(
|
||||
JSON.stringify({
|
||||
type: 'feature_flags',
|
||||
data: this.getClientFeatureFlags()
|
||||
})
|
||||
)
|
||||
|
||||
if (isReconnect) {
|
||||
this.dispatchCustomEvent('reconnected')
|
||||
}
|
||||
@@ -468,6 +493,14 @@ export class ComfyApi extends EventTarget {
|
||||
case 'b_preview':
|
||||
this.dispatchCustomEvent(msg.type, msg.data)
|
||||
break
|
||||
case 'feature_flags':
|
||||
// Store server feature flags
|
||||
this.serverFeatureFlags = msg.data
|
||||
console.log(
|
||||
'Server feature flags received:',
|
||||
this.serverFeatureFlags
|
||||
)
|
||||
break
|
||||
default:
|
||||
if (this.#registered.has(msg.type)) {
|
||||
// Fallback for custom types - calls super direct.
|
||||
@@ -689,7 +722,8 @@ export class ComfyApi extends EventTarget {
|
||||
Running: data.queue_running.map((prompt: Record<number, any>) => ({
|
||||
taskType: 'Running',
|
||||
prompt,
|
||||
remove: { name: 'Cancel', cb: () => api.interrupt() }
|
||||
// prompt[1] is the prompt id
|
||||
remove: { name: 'Cancel', cb: () => api.interrupt(prompt[1]) }
|
||||
})),
|
||||
Pending: data.queue_pending.map((prompt: Record<number, any>) => ({
|
||||
taskType: 'Pending',
|
||||
@@ -770,10 +804,15 @@ export class ComfyApi extends EventTarget {
|
||||
}
|
||||
|
||||
/**
|
||||
* Interrupts the execution of the running prompt
|
||||
* Interrupts the execution of the running prompt. If runningPromptId is provided,
|
||||
* it is included in the payload as a helpful hint to the backend.
|
||||
* @param {string | null} [runningPromptId] Optional Running Prompt ID to interrupt
|
||||
*/
|
||||
async interrupt() {
|
||||
await this.#postItem('interrupt', null)
|
||||
async interrupt(runningPromptId: string | null) {
|
||||
await this.#postItem(
|
||||
'interrupt',
|
||||
runningPromptId ? { prompt_id: runningPromptId } : undefined
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -956,6 +995,33 @@ export class ComfyApi extends EventTarget {
|
||||
async getCustomNodesI18n(): Promise<Record<string, any>> {
|
||||
return (await axios.get(this.apiURL('/i18n'))).data
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if the server supports a specific feature.
|
||||
* @param featureName The name of the feature to check
|
||||
* @returns true if the feature is supported, false otherwise
|
||||
*/
|
||||
serverSupportsFeature(featureName: string): boolean {
|
||||
return this.serverFeatureFlags[featureName] === true
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a server feature flag value.
|
||||
* @param featureName The name of the feature to get
|
||||
* @param defaultValue The default value if the feature is not found
|
||||
* @returns The feature value or default
|
||||
*/
|
||||
getServerFeature<T = unknown>(featureName: string, defaultValue?: T): T {
|
||||
return (this.serverFeatureFlags[featureName] ?? defaultValue) as T
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets all server feature flags.
|
||||
* @returns Copy of all server feature flags
|
||||
*/
|
||||
getServerFeatures(): Record<string, unknown> {
|
||||
return { ...this.serverFeatureFlags }
|
||||
}
|
||||
}
|
||||
|
||||
export const api = new ComfyApi()
|
||||
|
||||
@@ -65,7 +65,9 @@ export class ComfySettingsDialog extends ComfyDialog<HTMLDialogElement> {
|
||||
/**
|
||||
* @deprecated Use `settingStore.getDefaultValue` instead.
|
||||
*/
|
||||
getSettingDefaultValue<K extends keyof Settings>(id: K): Settings[K] {
|
||||
getSettingDefaultValue<K extends keyof Settings>(
|
||||
id: K
|
||||
): Settings[K] | undefined {
|
||||
return useSettingStore().getDefaultValue(id)
|
||||
}
|
||||
|
||||
|
||||
@@ -32,6 +32,9 @@ export const useReleaseStore = defineStore('release', () => {
|
||||
const releaseTimestamp = computed(() =>
|
||||
settingStore.get('Comfy.Release.Timestamp')
|
||||
)
|
||||
const showVersionUpdates = computed(() =>
|
||||
settingStore.get('Comfy.Notification.ShowVersionUpdates')
|
||||
)
|
||||
|
||||
// Most recent release
|
||||
const recentRelease = computed(() => {
|
||||
@@ -73,6 +76,11 @@ export const useReleaseStore = defineStore('release', () => {
|
||||
|
||||
// Show toast if needed
|
||||
const shouldShowToast = computed(() => {
|
||||
// Skip if notifications are disabled
|
||||
if (!showVersionUpdates.value) {
|
||||
return false
|
||||
}
|
||||
|
||||
if (!isNewVersionAvailable.value) {
|
||||
return false
|
||||
}
|
||||
@@ -85,7 +93,7 @@ export const useReleaseStore = defineStore('release', () => {
|
||||
// Skip if user already skipped or changelog seen
|
||||
if (
|
||||
releaseVersion.value === recentRelease.value?.version &&
|
||||
!['skipped', 'changelog seen'].includes(releaseStatus.value)
|
||||
['skipped', 'changelog seen'].includes(releaseStatus.value)
|
||||
) {
|
||||
return false
|
||||
}
|
||||
@@ -95,6 +103,11 @@ export const useReleaseStore = defineStore('release', () => {
|
||||
|
||||
// Show red-dot indicator
|
||||
const shouldShowRedDot = computed(() => {
|
||||
// Skip if notifications are disabled
|
||||
if (!showVersionUpdates.value) {
|
||||
return false
|
||||
}
|
||||
|
||||
// Already latest → no dot
|
||||
if (!isNewVersionAvailable.value) {
|
||||
return false
|
||||
@@ -132,6 +145,11 @@ export const useReleaseStore = defineStore('release', () => {
|
||||
|
||||
// Show "What's New" popup
|
||||
const shouldShowPopup = computed(() => {
|
||||
// Skip if notifications are disabled
|
||||
if (!showVersionUpdates.value) {
|
||||
return false
|
||||
}
|
||||
|
||||
if (!isLatestVersion.value) {
|
||||
return false
|
||||
}
|
||||
@@ -183,7 +201,14 @@ export const useReleaseStore = defineStore('release', () => {
|
||||
|
||||
// Fetch releases from API
|
||||
async function fetchReleases(): Promise<void> {
|
||||
if (isLoading.value) return
|
||||
if (isLoading.value) {
|
||||
return
|
||||
}
|
||||
|
||||
// Skip fetching if notifications are disabled
|
||||
if (!showVersionUpdates.value) {
|
||||
return
|
||||
}
|
||||
|
||||
isLoading.value = true
|
||||
error.value = null
|
||||
|
||||
@@ -7,6 +7,7 @@ import { api } from '@/scripts/api'
|
||||
import { app } from '@/scripts/app'
|
||||
import type { SettingParams } from '@/types/settingTypes'
|
||||
import type { TreeNode } from '@/types/treeExplorerTypes'
|
||||
import { compareVersions, isSemVer } from '@/utils/formatUtil'
|
||||
|
||||
export const getSettingInfo = (setting: SettingParams) => {
|
||||
const parts = setting.category || setting.id.split('.')
|
||||
@@ -20,16 +21,24 @@ export interface SettingTreeNode extends TreeNode {
|
||||
data?: SettingParams
|
||||
}
|
||||
|
||||
function tryMigrateDeprecatedValue(setting: SettingParams, value: any) {
|
||||
function tryMigrateDeprecatedValue(
|
||||
setting: SettingParams | undefined,
|
||||
value: unknown
|
||||
) {
|
||||
return setting?.migrateDeprecatedValue?.(value) ?? value
|
||||
}
|
||||
|
||||
function onChange(setting: SettingParams, newValue: any, oldValue: any) {
|
||||
function onChange(
|
||||
setting: SettingParams | undefined,
|
||||
newValue: unknown,
|
||||
oldValue: unknown
|
||||
) {
|
||||
if (setting?.onChange) {
|
||||
setting.onChange(newValue, oldValue)
|
||||
}
|
||||
// Backward compatibility with old settings dialog.
|
||||
// Some extensions still listens event emitted by the old settings dialog.
|
||||
// @ts-expect-error 'setting' is possibly 'undefined'.ts(18048)
|
||||
app.ui.settings.dispatchChange(setting.id, newValue, oldValue)
|
||||
}
|
||||
|
||||
@@ -76,16 +85,73 @@ export const useSettingStore = defineStore('setting', () => {
|
||||
return _.cloneDeep(settingValues.value[key] ?? getDefaultValue(key))
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the setting params, asserting the type that is intentionally left off
|
||||
* of {@link settingsById}.
|
||||
* @param key The key of the setting to get.
|
||||
* @returns The setting.
|
||||
*/
|
||||
function getSettingById<K extends keyof Settings>(
|
||||
key: K
|
||||
): SettingParams<Settings[K]> | undefined {
|
||||
return settingsById.value[key] as SettingParams<Settings[K]> | undefined
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the default value of a setting.
|
||||
* @param key - The key of the setting to get.
|
||||
* @returns The default value of the setting.
|
||||
*/
|
||||
function getDefaultValue<K extends keyof Settings>(key: K): Settings[K] {
|
||||
const param = settingsById.value[key]
|
||||
return typeof param?.defaultValue === 'function'
|
||||
function getDefaultValue<K extends keyof Settings>(
|
||||
key: K
|
||||
): Settings[K] | undefined {
|
||||
// Assertion: settingsById is not typed.
|
||||
const param = getSettingById(key)
|
||||
|
||||
if (param === undefined) return
|
||||
|
||||
const versionedDefault = getVersionedDefaultValue(key, param)
|
||||
|
||||
if (versionedDefault) {
|
||||
return versionedDefault
|
||||
}
|
||||
|
||||
return typeof param.defaultValue === 'function'
|
||||
? param.defaultValue()
|
||||
: param?.defaultValue
|
||||
: param.defaultValue
|
||||
}
|
||||
|
||||
function getVersionedDefaultValue<
|
||||
K extends keyof Settings,
|
||||
TValue = Settings[K]
|
||||
>(key: K, param: SettingParams<TValue> | undefined): TValue | null {
|
||||
// get default versioned value, skipping if the key is 'Comfy.InstalledVersion' to prevent infinite loop
|
||||
const defaultsByInstallVersion = param?.defaultsByInstallVersion
|
||||
if (defaultsByInstallVersion && key !== 'Comfy.InstalledVersion') {
|
||||
const installedVersion = get('Comfy.InstalledVersion')
|
||||
|
||||
if (installedVersion) {
|
||||
const sortedVersions = Object.keys(defaultsByInstallVersion).sort(
|
||||
(a, b) => compareVersions(b, a)
|
||||
)
|
||||
|
||||
for (const version of sortedVersions) {
|
||||
// Ensure the version is in a valid format before comparing
|
||||
if (!isSemVer(version)) {
|
||||
continue
|
||||
}
|
||||
|
||||
if (compareVersions(installedVersion, version) >= 0) {
|
||||
const versionedDefault = defaultsByInstallVersion[version]
|
||||
return typeof versionedDefault === 'function'
|
||||
? versionedDefault()
|
||||
: versionedDefault
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -5,6 +5,7 @@ import { useModelLibrarySidebarTab } from '@/composables/sidebarTabs/useModelLib
|
||||
import { useNodeLibrarySidebarTab } from '@/composables/sidebarTabs/useNodeLibrarySidebarTab'
|
||||
import { useQueueSidebarTab } from '@/composables/sidebarTabs/useQueueSidebarTab'
|
||||
import { useWorkflowsSidebarTab } from '@/composables/sidebarTabs/useWorkflowsSidebarTab'
|
||||
import { t, te } from '@/i18n'
|
||||
import { useCommandStore } from '@/stores/commandStore'
|
||||
import { SidebarTabExtension } from '@/types/extensionTypes'
|
||||
|
||||
@@ -25,11 +26,23 @@ export const useSidebarTabStore = defineStore('sidebarTab', () => {
|
||||
|
||||
const registerSidebarTab = (tab: SidebarTabExtension) => {
|
||||
sidebarTabs.value = [...sidebarTabs.value, tab]
|
||||
|
||||
// Generate label in format "Toggle X Sidebar"
|
||||
const labelFunction = () => {
|
||||
const tabTitle = te(tab.title) ? t(tab.title) : tab.title
|
||||
return `Toggle ${tabTitle} Sidebar`
|
||||
}
|
||||
const tooltipFunction = tab.tooltip
|
||||
? te(String(tab.tooltip))
|
||||
? () => t(String(tab.tooltip))
|
||||
: String(tab.tooltip)
|
||||
: undefined
|
||||
|
||||
useCommandStore().registerCommand({
|
||||
id: `Workspace.ToggleSidebarTab.${tab.id}`,
|
||||
icon: tab.icon,
|
||||
label: `Toggle ${tab.title} Sidebar`,
|
||||
tooltip: tab.tooltip,
|
||||
label: labelFunction,
|
||||
tooltip: tooltipFunction,
|
||||
versionAdded: '1.3.9',
|
||||
function: () => {
|
||||
toggleSidebarTab(tab.id)
|
||||
|
||||
@@ -699,6 +699,27 @@ export interface paths {
|
||||
patch?: never
|
||||
trace?: never
|
||||
}
|
||||
'/publishers/{publisherId}/nodes/{nodeId}/claim-my-node': {
|
||||
parameters: {
|
||||
query?: never
|
||||
header?: never
|
||||
path?: never
|
||||
cookie?: never
|
||||
}
|
||||
get?: never
|
||||
put?: never
|
||||
/**
|
||||
* Claim nodeId into publisherId for the authenticated publisher
|
||||
* @description This endpoint allows a publisher to claim an unclaimed node that they own the repo, which is identified by the nodeId. The unclaimed node's repository must be owned by the authenticated user.
|
||||
*
|
||||
*/
|
||||
post: operations['claimMyNode']
|
||||
delete?: never
|
||||
options?: never
|
||||
head?: never
|
||||
patch?: never
|
||||
trace?: never
|
||||
}
|
||||
'/publishers/{publisherId}/nodes/v2': {
|
||||
parameters: {
|
||||
query?: never
|
||||
@@ -1061,6 +1082,23 @@ export interface paths {
|
||||
patch?: never
|
||||
trace?: never
|
||||
}
|
||||
'/bulk/nodes/versions': {
|
||||
parameters: {
|
||||
query?: never
|
||||
header?: never
|
||||
path?: never
|
||||
cookie?: never
|
||||
}
|
||||
get?: never
|
||||
put?: never
|
||||
/** Retrieve multiple node versions in a single request */
|
||||
post: operations['getBulkNodeVersions']
|
||||
delete?: never
|
||||
options?: never
|
||||
head?: never
|
||||
patch?: never
|
||||
trace?: never
|
||||
}
|
||||
'/versions': {
|
||||
parameters: {
|
||||
query?: never
|
||||
@@ -1095,6 +1133,26 @@ export interface paths {
|
||||
patch?: never
|
||||
trace?: never
|
||||
}
|
||||
'/admin/nodes/{nodeId}': {
|
||||
parameters: {
|
||||
query?: never
|
||||
header?: never
|
||||
path?: never
|
||||
cookie?: never
|
||||
}
|
||||
get?: never
|
||||
/**
|
||||
* Admin Update Node
|
||||
* @description Only admins can update a node with admin privileges.
|
||||
*/
|
||||
put: operations['adminUpdateNode']
|
||||
post?: never
|
||||
delete?: never
|
||||
options?: never
|
||||
head?: never
|
||||
patch?: never
|
||||
trace?: never
|
||||
}
|
||||
'/admin/nodes/{nodeId}/versions/{versionNumber}': {
|
||||
parameters: {
|
||||
query?: never
|
||||
@@ -2951,7 +3009,7 @@ export interface paths {
|
||||
patch?: never
|
||||
trace?: never
|
||||
}
|
||||
'/proxy/moonvalley/text-to-video': {
|
||||
'/proxy/moonvalley/prompts/text-to-video': {
|
||||
parameters: {
|
||||
query?: never
|
||||
header?: never
|
||||
@@ -2968,7 +3026,7 @@ export interface paths {
|
||||
patch?: never
|
||||
trace?: never
|
||||
}
|
||||
'/proxy/moonvalley/text-to-image': {
|
||||
'/proxy/moonvalley/prompts/text-to-image': {
|
||||
parameters: {
|
||||
query?: never
|
||||
header?: never
|
||||
@@ -3057,6 +3115,37 @@ export interface paths {
|
||||
export type webhooks = Record<string, never>
|
||||
export interface components {
|
||||
schemas: {
|
||||
ClaimMyNodeRequest: {
|
||||
/** @description GitHub token to verify if the user owns the repo of the node */
|
||||
GH_TOKEN: string
|
||||
}
|
||||
BulkNodeVersionsRequest: {
|
||||
/** @description List of node ID and version pairs to retrieve */
|
||||
node_versions: components['schemas']['NodeVersionIdentifier'][]
|
||||
}
|
||||
NodeVersionIdentifier: {
|
||||
/** @description The unique identifier of the node */
|
||||
node_id: string
|
||||
/** @description The version of the node */
|
||||
version: string
|
||||
}
|
||||
BulkNodeVersionsResponse: {
|
||||
/** @description List of retrieved node versions with their status */
|
||||
node_versions: components['schemas']['BulkNodeVersionResult'][]
|
||||
}
|
||||
BulkNodeVersionResult: {
|
||||
/** @description The node and version identifier */
|
||||
identifier: components['schemas']['NodeVersionIdentifier']
|
||||
/**
|
||||
* @description Status of the retrieval operation
|
||||
* @enum {string}
|
||||
*/
|
||||
status: 'success' | 'not_found' | 'error'
|
||||
/** @description The retrieved node version data (only present if status is success) */
|
||||
node_version?: components['schemas']['NodeVersion']
|
||||
/** @description Error message if retrieval failed (only present if status is error) */
|
||||
error_message?: string
|
||||
}
|
||||
PersonalAccessToken: {
|
||||
/**
|
||||
* Format: uuid
|
||||
@@ -8713,71 +8802,212 @@ export interface components {
|
||||
| 'computer-use-preview'
|
||||
| 'computer-use-preview-2025-03-11'
|
||||
| 'chatgpt-4o-latest'
|
||||
MoonvalleyInferenceParams: {
|
||||
/** @default 1080 */
|
||||
MoonvalleyTextToVideoInferenceParams: {
|
||||
/**
|
||||
* @description Height of the generated video in pixels
|
||||
* @default 1080
|
||||
*/
|
||||
height: number
|
||||
/** @default 1920 */
|
||||
/**
|
||||
* @description Width of the generated video in pixels
|
||||
* @default 1920
|
||||
*/
|
||||
width: number
|
||||
/** @default 64 */
|
||||
/**
|
||||
* @description Number of frames to generate
|
||||
* @default 64
|
||||
*/
|
||||
num_frames: number
|
||||
/** @default 24 */
|
||||
/**
|
||||
* @description Frames per second of the generated video
|
||||
* @default 24
|
||||
*/
|
||||
fps: number
|
||||
/**
|
||||
* Format: float
|
||||
* @default 12.5
|
||||
* @description Guidance scale for generation control
|
||||
* @default 10
|
||||
*/
|
||||
guidance_scale: number
|
||||
/** @description Random seed for generation (default: random) */
|
||||
seed?: number
|
||||
/** @default 80 */
|
||||
/**
|
||||
* @description Number of denoising steps
|
||||
* @default 80
|
||||
*/
|
||||
steps: number
|
||||
/** @default true */
|
||||
/**
|
||||
* @description Whether to use timestep transformation
|
||||
* @default true
|
||||
*/
|
||||
use_timestep_transform: boolean
|
||||
/**
|
||||
* Format: float
|
||||
* @description Shift value for generation control
|
||||
* @default 3
|
||||
*/
|
||||
shift_value: number
|
||||
/** @default true */
|
||||
/**
|
||||
* @description Whether to use guidance scheduling
|
||||
* @default true
|
||||
*/
|
||||
use_guidance_schedule: boolean
|
||||
/** @default true */
|
||||
/**
|
||||
* @description Whether to add quality guidance
|
||||
* @default true
|
||||
*/
|
||||
add_quality_guidance: boolean
|
||||
/**
|
||||
* Format: float
|
||||
* @description CLIP value for generation control
|
||||
* @default 3
|
||||
*/
|
||||
clip_value: number
|
||||
/** @default false */
|
||||
/**
|
||||
* @description Whether to use negative prompts
|
||||
* @default false
|
||||
*/
|
||||
use_negative_prompts: boolean
|
||||
/** @description Negative prompt text */
|
||||
negative_prompt?: string
|
||||
warmup_steps?: number
|
||||
cooldown_steps?: number
|
||||
/**
|
||||
* @description Number of warmup steps (calculated based on num_frames)
|
||||
* @default 0
|
||||
*/
|
||||
warmup_steps: number
|
||||
/**
|
||||
* @description Number of cooldown steps (calculated based on num_frames)
|
||||
* @default 75
|
||||
*/
|
||||
cooldown_steps: number
|
||||
/**
|
||||
* Format: float
|
||||
* @description Caching coefficient for optimization
|
||||
* @default 0.3
|
||||
*/
|
||||
caching_coefficient: number
|
||||
/** @default 3 */
|
||||
/**
|
||||
* @description Number of caching warmup steps
|
||||
* @default 3
|
||||
*/
|
||||
caching_warmup: number
|
||||
/** @default 3 */
|
||||
/**
|
||||
* @description Number of caching cooldown steps
|
||||
* @default 3
|
||||
*/
|
||||
caching_cooldown: number
|
||||
/** @default 0 */
|
||||
/**
|
||||
* @description Index of the conditioning frame
|
||||
* @default 0
|
||||
*/
|
||||
conditioning_frame_index: number
|
||||
}
|
||||
MoonvalleyVideoToVideoInferenceParams: {
|
||||
/**
|
||||
* Format: float
|
||||
* @description Guidance scale for generation control
|
||||
* @default 15
|
||||
*/
|
||||
guidance_scale: number
|
||||
/** @description Random seed for generation (default: random) */
|
||||
seed?: number
|
||||
/**
|
||||
* @description Number of denoising steps
|
||||
* @default 80
|
||||
*/
|
||||
steps: number
|
||||
/**
|
||||
* @description Whether to use timestep transformation
|
||||
* @default true
|
||||
*/
|
||||
use_timestep_transform: boolean
|
||||
/**
|
||||
* Format: float
|
||||
* @description Shift value for generation control
|
||||
* @default 3
|
||||
*/
|
||||
shift_value: number
|
||||
/**
|
||||
* @description Whether to use guidance scheduling
|
||||
* @default true
|
||||
*/
|
||||
use_guidance_schedule: boolean
|
||||
/**
|
||||
* @description Whether to add quality guidance
|
||||
* @default true
|
||||
*/
|
||||
add_quality_guidance: boolean
|
||||
/**
|
||||
* Format: float
|
||||
* @description CLIP value for generation control
|
||||
* @default 3
|
||||
*/
|
||||
clip_value: number
|
||||
/**
|
||||
* @description Whether to use negative prompts
|
||||
* @default false
|
||||
*/
|
||||
use_negative_prompts: boolean
|
||||
/** @description Negative prompt text */
|
||||
negative_prompt?: string
|
||||
/**
|
||||
* @description Number of warmup steps (calculated based on num_frames)
|
||||
* @default 24
|
||||
*/
|
||||
warmup_steps: number
|
||||
/**
|
||||
* @description Number of cooldown steps (calculated based on num_frames)
|
||||
* @default 36
|
||||
*/
|
||||
cooldown_steps: number
|
||||
/**
|
||||
* Format: float
|
||||
* @description Caching coefficient for optimization
|
||||
* @default 0.3
|
||||
*/
|
||||
caching_coefficient: number
|
||||
/**
|
||||
* @description Number of caching warmup steps
|
||||
* @default 3
|
||||
*/
|
||||
caching_warmup: number
|
||||
/**
|
||||
* @description Number of caching cooldown steps
|
||||
* @default 3
|
||||
*/
|
||||
caching_cooldown: number
|
||||
/**
|
||||
* @description Index of the conditioning frame
|
||||
* @default 0
|
||||
*/
|
||||
conditioning_frame_index: number
|
||||
}
|
||||
MoonvalleyTextToImageRequest: {
|
||||
prompt_text?: string
|
||||
image_url?: string
|
||||
inference_params?: components['schemas']['MoonvalleyInferenceParams']
|
||||
inference_params?: components['schemas']['MoonvalleyTextToVideoInferenceParams']
|
||||
webhook_url?: string
|
||||
}
|
||||
MoonvalleyTextToVideoRequest: {
|
||||
prompt_text?: string
|
||||
image_url?: string
|
||||
inference_params?: components['schemas']['MoonvalleyInferenceParams']
|
||||
inference_params?: components['schemas']['MoonvalleyTextToVideoInferenceParams']
|
||||
webhook_url?: string
|
||||
}
|
||||
MoonvalleyVideoToVideoRequest: components['schemas']['MoonvalleyTextToVideoRequest'] & {
|
||||
MoonvalleyVideoToVideoRequest: {
|
||||
/** @description Describes the video to generate */
|
||||
prompt_text: string
|
||||
/** @description Url to control video */
|
||||
video_url: string
|
||||
control_type: string
|
||||
/**
|
||||
* @description Supported types for video control
|
||||
* @enum {string}
|
||||
*/
|
||||
control_type: 'motion_control' | 'pose_control'
|
||||
/** @description Parameters for video-to-video generation inference */
|
||||
inference_params?: components['schemas']['MoonvalleyVideoToVideoInferenceParams']
|
||||
/** @description Optional webhook URL for notifications */
|
||||
webhook_url?: string
|
||||
}
|
||||
MoonvalleyPromptResponse: {
|
||||
id?: string
|
||||
@@ -10421,6 +10651,89 @@ export interface operations {
|
||||
}
|
||||
}
|
||||
}
|
||||
claimMyNode: {
|
||||
parameters: {
|
||||
query?: never
|
||||
header?: never
|
||||
path: {
|
||||
publisherId: string
|
||||
nodeId: string
|
||||
}
|
||||
cookie?: never
|
||||
}
|
||||
requestBody: {
|
||||
content: {
|
||||
'application/json': components['schemas']['ClaimMyNodeRequest']
|
||||
}
|
||||
}
|
||||
responses: {
|
||||
/** @description Node claimed successfully */
|
||||
204: {
|
||||
headers: {
|
||||
[name: string]: unknown
|
||||
}
|
||||
content?: never
|
||||
}
|
||||
/** @description Bad request, invalid input data */
|
||||
400: {
|
||||
headers: {
|
||||
[name: string]: unknown
|
||||
}
|
||||
content: {
|
||||
'application/json': components['schemas']['ErrorResponse']
|
||||
}
|
||||
}
|
||||
/** @description Unauthorized */
|
||||
401: {
|
||||
headers: {
|
||||
[name: string]: unknown
|
||||
}
|
||||
content?: never
|
||||
}
|
||||
/** @description Forbidden - various authorization and permission issues
|
||||
* Includes:
|
||||
* - The authenticated user does not have permission to claim the node
|
||||
* - The node is already claimed by another publisher
|
||||
* - The GH_TOKEN is invalid
|
||||
* - The repository is not owned by the authenticated GitHub user
|
||||
* */
|
||||
403: {
|
||||
headers: {
|
||||
[name: string]: unknown
|
||||
}
|
||||
content: {
|
||||
'application/json': components['schemas']['ErrorResponse']
|
||||
}
|
||||
}
|
||||
/** @description Too many requests - GitHub API rate limit exceeded */
|
||||
429: {
|
||||
headers: {
|
||||
[name: string]: unknown
|
||||
}
|
||||
content: {
|
||||
'application/json': components['schemas']['ErrorResponse']
|
||||
}
|
||||
}
|
||||
/** @description Internal server error */
|
||||
500: {
|
||||
headers: {
|
||||
[name: string]: unknown
|
||||
}
|
||||
content: {
|
||||
'application/json': components['schemas']['ErrorResponse']
|
||||
}
|
||||
}
|
||||
/** @description Service unavailable - GitHub API is currently unavailable */
|
||||
503: {
|
||||
headers: {
|
||||
[name: string]: unknown
|
||||
}
|
||||
content: {
|
||||
'application/json': components['schemas']['ErrorResponse']
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
listNodesForPublisherV2: {
|
||||
parameters: {
|
||||
query?: {
|
||||
@@ -11709,6 +12022,48 @@ export interface operations {
|
||||
}
|
||||
}
|
||||
}
|
||||
getBulkNodeVersions: {
|
||||
parameters: {
|
||||
query?: never
|
||||
header?: never
|
||||
path?: never
|
||||
cookie?: never
|
||||
}
|
||||
requestBody: {
|
||||
content: {
|
||||
'application/json': components['schemas']['BulkNodeVersionsRequest']
|
||||
}
|
||||
}
|
||||
responses: {
|
||||
/** @description Successfully retrieved node versions */
|
||||
200: {
|
||||
headers: {
|
||||
[name: string]: unknown
|
||||
}
|
||||
content: {
|
||||
'application/json': components['schemas']['BulkNodeVersionsResponse']
|
||||
}
|
||||
}
|
||||
/** @description Bad request, invalid input */
|
||||
400: {
|
||||
headers: {
|
||||
[name: string]: unknown
|
||||
}
|
||||
content: {
|
||||
'application/json': components['schemas']['ErrorResponse']
|
||||
}
|
||||
}
|
||||
/** @description Internal server error */
|
||||
500: {
|
||||
headers: {
|
||||
[name: string]: unknown
|
||||
}
|
||||
content: {
|
||||
'application/json': components['schemas']['ErrorResponse']
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
listAllNodeVersions: {
|
||||
parameters: {
|
||||
query?: {
|
||||
@@ -11834,6 +12189,75 @@ export interface operations {
|
||||
}
|
||||
}
|
||||
}
|
||||
adminUpdateNode: {
|
||||
parameters: {
|
||||
query?: never
|
||||
header?: never
|
||||
path: {
|
||||
nodeId: string
|
||||
}
|
||||
cookie?: never
|
||||
}
|
||||
requestBody: {
|
||||
content: {
|
||||
'application/json': components['schemas']['Node']
|
||||
}
|
||||
}
|
||||
responses: {
|
||||
/** @description Node updated successfully */
|
||||
200: {
|
||||
headers: {
|
||||
[name: string]: unknown
|
||||
}
|
||||
content: {
|
||||
'application/json': components['schemas']['Node']
|
||||
}
|
||||
}
|
||||
/** @description Bad request, invalid input data. */
|
||||
400: {
|
||||
headers: {
|
||||
[name: string]: unknown
|
||||
}
|
||||
content: {
|
||||
'application/json': components['schemas']['ErrorResponse']
|
||||
}
|
||||
}
|
||||
/** @description Unauthorized */
|
||||
401: {
|
||||
headers: {
|
||||
[name: string]: unknown
|
||||
}
|
||||
content?: never
|
||||
}
|
||||
/** @description Forbidden */
|
||||
403: {
|
||||
headers: {
|
||||
[name: string]: unknown
|
||||
}
|
||||
content: {
|
||||
'application/json': components['schemas']['ErrorResponse']
|
||||
}
|
||||
}
|
||||
/** @description Node not found */
|
||||
404: {
|
||||
headers: {
|
||||
[name: string]: unknown
|
||||
}
|
||||
content: {
|
||||
'application/json': components['schemas']['ErrorResponse']
|
||||
}
|
||||
}
|
||||
/** @description Internal server error */
|
||||
500: {
|
||||
headers: {
|
||||
[name: string]: unknown
|
||||
}
|
||||
content: {
|
||||
'application/json': components['schemas']['ErrorResponse']
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
adminUpdateNodeVersion: {
|
||||
parameters: {
|
||||
query?: never
|
||||
|
||||
6
src/types/litegraph-augmentation.d.ts
vendored
@@ -69,7 +69,7 @@ declare module '@comfyorg/litegraph/dist/interfaces' {
|
||||
* ComfyUI extensions of litegraph
|
||||
*/
|
||||
declare module '@comfyorg/litegraph' {
|
||||
import type { ExecutableLGraphNode } from '@comfyorg/litegraph'
|
||||
import type { ExecutableLGraphNode, ExecutionId } from '@comfyorg/litegraph'
|
||||
import type { IBaseWidget } from '@comfyorg/litegraph/dist/types/widgets'
|
||||
|
||||
interface LGraphNodeConstructor<T extends LGraphNode = LGraphNode> {
|
||||
@@ -99,8 +99,10 @@ declare module '@comfyorg/litegraph' {
|
||||
setInnerNodes?(nodes: LGraphNode[]): void
|
||||
/** Originally a group node API. */
|
||||
getInnerNodes?(
|
||||
nodesByExecutionId: Map<ExecutionId, ExecutableLGraphNode>,
|
||||
subgraphNodePath?: readonly NodeId[],
|
||||
nodes?: ExecutableLGraphNode[],
|
||||
subgraphs?: WeakSet<LGraphNode>
|
||||
subgraphs?: Set<LGraphNode>
|
||||
): ExecutableLGraphNode[]
|
||||
/** @deprecated groupNode */
|
||||
convertToNodes?(): LGraphNode[]
|
||||
|
||||
@@ -32,9 +32,10 @@ export interface Setting {
|
||||
render: () => HTMLElement
|
||||
}
|
||||
|
||||
export interface SettingParams extends FormItem {
|
||||
export interface SettingParams<TValue = unknown> extends FormItem {
|
||||
id: keyof Settings
|
||||
defaultValue: any | (() => any)
|
||||
defaultsByInstallVersion?: Record<`${number}.${number}.${number}`, TValue>
|
||||
onChange?: (newValue: any, oldValue?: any) => void
|
||||
// By default category is id.split('.'). However, changing id to assign
|
||||
// new category has poor backward compatibility. Use this field to overwrite
|
||||
|
||||
53
src/utils/executableGroupNodeChildDTO.ts
Normal file
@@ -0,0 +1,53 @@
|
||||
import {
|
||||
type ExecutableLGraphNode,
|
||||
ExecutableNodeDTO,
|
||||
type ExecutionId,
|
||||
type LGraphNode,
|
||||
type NodeId,
|
||||
type SubgraphNode
|
||||
} from '@comfyorg/litegraph'
|
||||
|
||||
import type { GroupNodeHandler } from '@/extensions/core/groupNode'
|
||||
|
||||
export class ExecutableGroupNodeChildDTO extends ExecutableNodeDTO {
|
||||
groupNodeHandler?: GroupNodeHandler
|
||||
|
||||
constructor(
|
||||
/** The actual node that this DTO wraps. */
|
||||
node: LGraphNode | SubgraphNode,
|
||||
/** A list of subgraph instance node IDs from the root graph to the containing instance. @see {@link id} */
|
||||
subgraphNodePath: readonly NodeId[],
|
||||
/** A flattened map of all DTOs in this node network. Subgraph instances have been expanded into their inner nodes. */
|
||||
nodesByExecutionId: Map<ExecutionId, ExecutableLGraphNode>,
|
||||
/** The actual subgraph instance that contains this node, otherise undefined. */
|
||||
subgraphNode?: SubgraphNode | undefined,
|
||||
groupNodeHandler?: GroupNodeHandler
|
||||
) {
|
||||
super(node, subgraphNodePath, nodesByExecutionId, subgraphNode)
|
||||
this.groupNodeHandler = groupNodeHandler
|
||||
}
|
||||
|
||||
override resolveInput(slot: number) {
|
||||
const inputNode = this.node.getInputNode(slot)
|
||||
if (!inputNode) return
|
||||
|
||||
const link = this.node.getInputLink(slot)
|
||||
if (!link) throw new Error('Failed to get input link')
|
||||
|
||||
const id = String(inputNode.id).split(':').at(-1)
|
||||
if (id === undefined) throw new Error('Invalid input node id')
|
||||
|
||||
const inputNodeDto = this.nodesByExecutionId?.get(id)
|
||||
if (!inputNodeDto) {
|
||||
throw new Error(
|
||||
`Failed to get input node ${id} for group node child ${this.id} with slot ${slot}`
|
||||
)
|
||||
}
|
||||
|
||||
return {
|
||||
node: inputNodeDto,
|
||||
origin_id: String(inputNode.id),
|
||||
origin_slot: link.origin_slot
|
||||
}
|
||||
}
|
||||
}
|
||||
71
src/utils/executableGroupNodeDto.ts
Normal file
@@ -0,0 +1,71 @@
|
||||
import {
|
||||
type ExecutableLGraphNode,
|
||||
ExecutableNodeDTO,
|
||||
type ISlotType,
|
||||
LGraphEventMode,
|
||||
type LGraphNode
|
||||
} from '@comfyorg/litegraph'
|
||||
|
||||
export const GROUP = Symbol()
|
||||
|
||||
export function isGroupNode(node: LGraphNode): boolean {
|
||||
return node.constructor?.nodeData?.[GROUP] !== undefined
|
||||
}
|
||||
|
||||
export class ExecutableGroupNodeDTO extends ExecutableNodeDTO {
|
||||
override get isVirtualNode(): true {
|
||||
return true
|
||||
}
|
||||
|
||||
override getInnerNodes(): ExecutableLGraphNode[] {
|
||||
return this.node.getInnerNodes?.(this.nodesByExecutionId) ?? []
|
||||
}
|
||||
|
||||
override resolveOutput(slot: number, type: ISlotType, visited: Set<string>) {
|
||||
// Temporary duplication: Bypass nodes are bypassed using the first input with matching type
|
||||
if (this.mode === LGraphEventMode.BYPASS) {
|
||||
const { inputs } = this
|
||||
|
||||
// Bypass nodes by finding first input with matching type
|
||||
const parentInputIndexes = Object.keys(inputs).map(Number)
|
||||
// Prioritise exact slot index
|
||||
const indexes = [slot, ...parentInputIndexes]
|
||||
const matchingIndex = indexes.find((i) => inputs[i]?.type === type)
|
||||
|
||||
// No input types match
|
||||
if (matchingIndex === undefined) return
|
||||
|
||||
return this.resolveInput(matchingIndex, visited)
|
||||
}
|
||||
|
||||
const linkId = this.node.outputs[slot]?.links?.at(0)
|
||||
const link = this.node.graph?.getLink(linkId)
|
||||
if (!link) {
|
||||
throw new Error(
|
||||
`Failed to get link for group node ${this.node.id} with link ${linkId}`
|
||||
)
|
||||
}
|
||||
|
||||
const updated = this.node.updateLink?.(link)
|
||||
if (!updated) {
|
||||
throw new Error(
|
||||
`Failed to update link for group node ${this.node.id} with link ${linkId}`
|
||||
)
|
||||
}
|
||||
|
||||
const node = this.node
|
||||
.getInnerNodes?.(this.nodesByExecutionId)
|
||||
.find((node) => node.id === updated.origin_id)
|
||||
if (!node) {
|
||||
throw new Error(
|
||||
`Failed to get node for group node ${this.node.id} with link ${linkId}`
|
||||
)
|
||||
}
|
||||
|
||||
return {
|
||||
node,
|
||||
origin_id: `${this.id}:${(updated.origin_id as string).split(':').at(-1)}`,
|
||||
origin_slot: updated.origin_slot
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,9 @@
|
||||
import type { LGraph, NodeId } from '@comfyorg/litegraph'
|
||||
import type {
|
||||
ExecutableLGraphNode,
|
||||
ExecutionId,
|
||||
LGraph,
|
||||
NodeId
|
||||
} from '@comfyorg/litegraph'
|
||||
import {
|
||||
ExecutableNodeDTO,
|
||||
LGraphEventMode,
|
||||
@@ -10,6 +15,7 @@ import type {
|
||||
ComfyWorkflowJSON
|
||||
} from '@/schemas/comfyWorkflowSchema'
|
||||
|
||||
import { ExecutableGroupNodeDTO, isGroupNode } from './executableGroupNodeDto'
|
||||
import { compressWidgetInputSlots } from './litegraphUtil'
|
||||
|
||||
/**
|
||||
@@ -54,7 +60,9 @@ export const graphToPrompt = async (
|
||||
const { sortNodes = false, queueNodeIds } = options
|
||||
|
||||
for (const node of graph.computeExecutionOrder(false)) {
|
||||
const innerNodes = node.getInnerNodes ? node.getInnerNodes() : [node]
|
||||
const innerNodes = node.getInnerNodes
|
||||
? node.getInnerNodes(new Map())
|
||||
: [node]
|
||||
for (const innerNode of innerNodes) {
|
||||
if (innerNode.isVirtualNode) {
|
||||
innerNode.applyToGraph?.()
|
||||
@@ -78,82 +86,87 @@ export const graphToPrompt = async (
|
||||
workflow.extra ??= {}
|
||||
workflow.extra.frontendVersion = __COMFYUI_FRONTEND_VERSION__
|
||||
|
||||
const computedNodeDtos = graph
|
||||
.computeExecutionOrder(false)
|
||||
.map(
|
||||
(node) =>
|
||||
new ExecutableNodeDTO(
|
||||
const nodeDtoMap = new Map<ExecutionId, ExecutableLGraphNode>()
|
||||
for (const node of graph.computeExecutionOrder(false)) {
|
||||
const dto: ExecutableLGraphNode = isGroupNode(node)
|
||||
? new ExecutableGroupNodeDTO(node, [], nodeDtoMap)
|
||||
: new ExecutableNodeDTO(
|
||||
node,
|
||||
[],
|
||||
nodeDtoMap,
|
||||
node instanceof SubgraphNode ? node : undefined
|
||||
)
|
||||
)
|
||||
|
||||
for (const innerNode of dto.getInnerNodes()) {
|
||||
nodeDtoMap.set(innerNode.id, innerNode)
|
||||
}
|
||||
|
||||
nodeDtoMap.set(dto.id, dto)
|
||||
}
|
||||
|
||||
let output: ComfyApiWorkflow = {}
|
||||
// Process nodes in order of execution
|
||||
for (const outerNode of computedNodeDtos) {
|
||||
for (const node of nodeDtoMap.values()) {
|
||||
// Don't serialize muted nodes
|
||||
if (
|
||||
outerNode.mode === LGraphEventMode.NEVER ||
|
||||
outerNode.mode === LGraphEventMode.BYPASS
|
||||
node.isVirtualNode ||
|
||||
node.mode === LGraphEventMode.NEVER ||
|
||||
node.mode === LGraphEventMode.BYPASS
|
||||
) {
|
||||
continue
|
||||
}
|
||||
|
||||
for (const node of outerNode.getInnerNodes()) {
|
||||
if (
|
||||
node.isVirtualNode ||
|
||||
node.mode === LGraphEventMode.NEVER ||
|
||||
node.mode === LGraphEventMode.BYPASS
|
||||
) {
|
||||
const inputs: ComfyApiWorkflow[string]['inputs'] = {}
|
||||
const { widgets } = node
|
||||
|
||||
// Store all widget values
|
||||
if (widgets) {
|
||||
for (const [i, widget] of widgets.entries()) {
|
||||
if (!widget.name || widget.options?.serialize === false) continue
|
||||
|
||||
const widgetValue = widget.serializeValue
|
||||
? await widget.serializeValue(node, i)
|
||||
: widget.value
|
||||
// By default, Array values are reserved to represent node connections.
|
||||
// We need to wrap the array as an object to avoid the misinterpretation
|
||||
// of the array as a node connection.
|
||||
// The backend automatically unwraps the object to an array during
|
||||
// execution.
|
||||
inputs[widget.name] = Array.isArray(widgetValue)
|
||||
? {
|
||||
__value__: widgetValue
|
||||
}
|
||||
: widgetValue
|
||||
}
|
||||
}
|
||||
|
||||
// Store all node links
|
||||
for (const [i, input] of node.inputs.entries()) {
|
||||
const resolvedInput = node.resolveInput(i)
|
||||
if (!resolvedInput) continue
|
||||
|
||||
// Resolved to an actual widget value rather than a node connection
|
||||
if (resolvedInput.widgetInfo) {
|
||||
const { value } = resolvedInput.widgetInfo
|
||||
inputs[input.name] = Array.isArray(value) ? { __value__: value } : value
|
||||
continue
|
||||
}
|
||||
|
||||
const inputs: ComfyApiWorkflow[string]['inputs'] = {}
|
||||
const { widgets } = node
|
||||
inputs[input.name] = [
|
||||
String(resolvedInput.origin_id),
|
||||
// @ts-expect-error link.origin_slot is already number.
|
||||
parseInt(resolvedInput.origin_slot)
|
||||
]
|
||||
}
|
||||
|
||||
// Store all widget values
|
||||
if (widgets) {
|
||||
for (const [i, widget] of widgets.entries()) {
|
||||
if (!widget.name || widget.options?.serialize === false) continue
|
||||
|
||||
const widgetValue = widget.serializeValue
|
||||
? await widget.serializeValue(node, i)
|
||||
: widget.value
|
||||
// By default, Array values are reserved to represent node connections.
|
||||
// We need to wrap the array as an object to avoid the misinterpretation
|
||||
// of the array as a node connection.
|
||||
// The backend automatically unwraps the object to an array during
|
||||
// execution.
|
||||
inputs[widget.name] = Array.isArray(widgetValue)
|
||||
? {
|
||||
__value__: widgetValue
|
||||
}
|
||||
: widgetValue
|
||||
}
|
||||
}
|
||||
|
||||
// Store all node links
|
||||
for (const [i, input] of node.inputs.entries()) {
|
||||
const resolvedInput = node.resolveInput(i)
|
||||
if (!resolvedInput) continue
|
||||
|
||||
inputs[input.name] = [
|
||||
String(resolvedInput.origin_id),
|
||||
// @ts-expect-error link.origin_slot is already number.
|
||||
parseInt(resolvedInput.origin_slot)
|
||||
]
|
||||
}
|
||||
|
||||
output[String(node.id)] = {
|
||||
inputs,
|
||||
// TODO(huchenlei): Filter out all nodes that cannot be mapped to a
|
||||
// comfyClass.
|
||||
class_type: node.comfyClass!,
|
||||
// Ignored by the backend.
|
||||
_meta: {
|
||||
title: node.title
|
||||
}
|
||||
output[String(node.id)] = {
|
||||
inputs,
|
||||
// TODO(huchenlei): Filter out all nodes that cannot be mapped to a
|
||||
// comfyClass.
|
||||
class_type: node.comfyClass!,
|
||||
// Ignored by the backend.
|
||||
_meta: {
|
||||
title: node.title
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -386,8 +386,10 @@ export const downloadUrlToHfRepoUrl = (url: string): string => {
|
||||
}
|
||||
}
|
||||
|
||||
export const isSemVer = (version: string) => {
|
||||
const regex = /^(\d+)\.(\d+)\.(\d+)$/
|
||||
export const isSemVer = (
|
||||
version: string
|
||||
): version is `${number}.${number}.${number}` => {
|
||||
const regex = /^\d+\.\d+\.\d+$/
|
||||
return regex.test(version)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
<template>
|
||||
<div class="comfyui-body grid h-full w-full overflow-hidden">
|
||||
<div id="comfyui-body-top" class="comfyui-body-top">
|
||||
<TopMenubar v-if="useNewMenu === 'Top'" />
|
||||
<TopMenubar v-if="showTopMenu" />
|
||||
</div>
|
||||
<div id="comfyui-body-bottom" class="comfyui-body-bottom">
|
||||
<TopMenubar v-if="useNewMenu === 'Bottom'" />
|
||||
<TopMenubar v-if="showBottomMenu" />
|
||||
</div>
|
||||
<div id="comfyui-body-left" class="comfyui-body-left" />
|
||||
<div id="comfyui-body-right" class="comfyui-body-right" />
|
||||
@@ -21,7 +21,7 @@
|
||||
</template>
|
||||
|
||||
<script setup lang="ts">
|
||||
import { useEventListener } from '@vueuse/core'
|
||||
import { useBreakpoints, useEventListener } from '@vueuse/core'
|
||||
import type { ToastMessageOptions } from 'primevue/toast'
|
||||
import { useToast } from 'primevue/usetoast'
|
||||
import { computed, onBeforeUnmount, onMounted, watch, watchEffect } from 'vue'
|
||||
@@ -75,6 +75,14 @@ const colorPaletteStore = useColorPaletteStore()
|
||||
const queueStore = useQueueStore()
|
||||
const versionCompatibilityStore = useVersionCompatibilityStore()
|
||||
|
||||
const breakpoints = useBreakpoints({ md: 961 })
|
||||
const isMobile = breakpoints.smaller('md')
|
||||
const showTopMenu = computed(() => isMobile.value || useNewMenu.value === 'Top')
|
||||
const showBottomMenu = computed(
|
||||
() => !isMobile.value && useNewMenu.value === 'Bottom'
|
||||
)
|
||||
|
||||
|
||||
watch(
|
||||
() => colorPaletteStore.completedActivePalette,
|
||||
(newTheme) => {
|
||||
|
||||
208
tests-ui/tests/api.featureFlags.test.ts
Normal file
@@ -0,0 +1,208 @@
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
|
||||
import { api } from '@/scripts/api'
|
||||
|
||||
describe('API Feature Flags', () => {
|
||||
let mockWebSocket: any
|
||||
const wsEventHandlers: { [key: string]: (event: any) => void } = {}
|
||||
|
||||
beforeEach(() => {
|
||||
// Use fake timers
|
||||
vi.useFakeTimers()
|
||||
|
||||
// Mock WebSocket
|
||||
mockWebSocket = {
|
||||
readyState: 1, // WebSocket.OPEN
|
||||
send: vi.fn(),
|
||||
close: vi.fn(),
|
||||
addEventListener: vi.fn(
|
||||
(event: string, handler: (event: any) => void) => {
|
||||
wsEventHandlers[event] = handler
|
||||
}
|
||||
),
|
||||
removeEventListener: vi.fn()
|
||||
}
|
||||
|
||||
// Mock WebSocket constructor
|
||||
global.WebSocket = vi.fn().mockImplementation(() => mockWebSocket) as any
|
||||
|
||||
// Reset API state
|
||||
api.serverFeatureFlags = {}
|
||||
|
||||
// Mock getClientFeatureFlags to return test feature flags
|
||||
vi.spyOn(api, 'getClientFeatureFlags').mockReturnValue({
|
||||
supports_preview_metadata: true,
|
||||
api_version: '1.0.0',
|
||||
capabilities: ['bulk_operations', 'async_nodes']
|
||||
})
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
vi.useRealTimers()
|
||||
vi.restoreAllMocks()
|
||||
})
|
||||
|
||||
describe('Feature flags negotiation', () => {
|
||||
it('should send client feature flags as first message on connection', async () => {
|
||||
// Initialize API connection
|
||||
const initPromise = api.init()
|
||||
|
||||
// Simulate connection open
|
||||
wsEventHandlers['open'](new Event('open'))
|
||||
|
||||
// Check that feature flags were sent as first message
|
||||
expect(mockWebSocket.send).toHaveBeenCalledTimes(1)
|
||||
const sentMessage = JSON.parse(mockWebSocket.send.mock.calls[0][0])
|
||||
expect(sentMessage).toEqual({
|
||||
type: 'feature_flags',
|
||||
data: {
|
||||
supports_preview_metadata: true,
|
||||
api_version: '1.0.0',
|
||||
capabilities: ['bulk_operations', 'async_nodes']
|
||||
}
|
||||
})
|
||||
|
||||
// Simulate server response with status message
|
||||
wsEventHandlers['message']({
|
||||
data: JSON.stringify({
|
||||
type: 'status',
|
||||
data: {
|
||||
status: { exec_info: { queue_remaining: 0 } },
|
||||
sid: 'test-sid'
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
// Simulate server feature flags response
|
||||
wsEventHandlers['message']({
|
||||
data: JSON.stringify({
|
||||
type: 'feature_flags',
|
||||
data: {
|
||||
supports_preview_metadata: true,
|
||||
async_execution: true,
|
||||
supported_formats: ['webp', 'jpeg', 'png'],
|
||||
api_version: '1.0.0',
|
||||
max_upload_size: 104857600,
|
||||
capabilities: ['isolated_nodes', 'dynamic_models']
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
await initPromise
|
||||
|
||||
// Check that server features were stored
|
||||
expect(api.serverFeatureFlags).toEqual({
|
||||
supports_preview_metadata: true,
|
||||
async_execution: true,
|
||||
supported_formats: ['webp', 'jpeg', 'png'],
|
||||
api_version: '1.0.0',
|
||||
max_upload_size: 104857600,
|
||||
capabilities: ['isolated_nodes', 'dynamic_models']
|
||||
})
|
||||
})
|
||||
|
||||
it('should handle server without feature flags support', async () => {
|
||||
// Initialize API connection
|
||||
const initPromise = api.init()
|
||||
|
||||
// Simulate connection open
|
||||
wsEventHandlers['open'](new Event('open'))
|
||||
|
||||
// Clear the send mock to reset
|
||||
mockWebSocket.send.mockClear()
|
||||
|
||||
// Simulate server response with status but no feature flags
|
||||
wsEventHandlers['message']({
|
||||
data: JSON.stringify({
|
||||
type: 'status',
|
||||
data: {
|
||||
status: { exec_info: { queue_remaining: 0 } },
|
||||
sid: 'test-sid'
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
// Simulate some other message (not feature flags)
|
||||
wsEventHandlers['message']({
|
||||
data: JSON.stringify({
|
||||
type: 'execution_start',
|
||||
data: {}
|
||||
})
|
||||
})
|
||||
|
||||
await initPromise
|
||||
|
||||
// Server features should remain empty
|
||||
expect(api.serverFeatureFlags).toEqual({})
|
||||
})
|
||||
})
|
||||
|
||||
describe('Feature checking methods', () => {
|
||||
beforeEach(() => {
|
||||
// Set up some test features
|
||||
api.serverFeatureFlags = {
|
||||
supports_preview_metadata: true,
|
||||
async_execution: false,
|
||||
capabilities: ['isolated_nodes', 'dynamic_models']
|
||||
}
|
||||
})
|
||||
|
||||
it('should check if server supports a boolean feature', () => {
|
||||
expect(api.serverSupportsFeature('supports_preview_metadata')).toBe(true)
|
||||
expect(api.serverSupportsFeature('async_execution')).toBe(false)
|
||||
expect(api.serverSupportsFeature('non_existent_feature')).toBe(false)
|
||||
})
|
||||
|
||||
it('should get server feature value', () => {
|
||||
expect(api.getServerFeature('supports_preview_metadata')).toBe(true)
|
||||
expect(api.getServerFeature('capabilities')).toEqual([
|
||||
'isolated_nodes',
|
||||
'dynamic_models'
|
||||
])
|
||||
expect(api.getServerFeature('non_existent_feature')).toBeUndefined()
|
||||
})
|
||||
})
|
||||
|
||||
describe('Client feature flags configuration', () => {
|
||||
it('should use mocked client feature flags', () => {
|
||||
// Verify mocked flags are returned
|
||||
const clientFlags = api.getClientFeatureFlags()
|
||||
expect(clientFlags).toEqual({
|
||||
supports_preview_metadata: true,
|
||||
api_version: '1.0.0',
|
||||
capabilities: ['bulk_operations', 'async_nodes']
|
||||
})
|
||||
})
|
||||
|
||||
it('should return a copy of client feature flags', () => {
|
||||
// Temporarily restore the real implementation for this test
|
||||
vi.mocked(api.getClientFeatureFlags).mockRestore()
|
||||
|
||||
// Verify that modifications to returned object don't affect original
|
||||
const clientFlags1 = api.getClientFeatureFlags()
|
||||
const clientFlags2 = api.getClientFeatureFlags()
|
||||
|
||||
// Should be different objects
|
||||
expect(clientFlags1).not.toBe(clientFlags2)
|
||||
|
||||
// But with same content
|
||||
expect(clientFlags1).toEqual(clientFlags2)
|
||||
|
||||
// Modifying one should not affect the other
|
||||
clientFlags1.test_flag = true
|
||||
expect(api.getClientFeatureFlags()).not.toHaveProperty('test_flag')
|
||||
})
|
||||
})
|
||||
|
||||
describe('Integration with preview messages', () => {
|
||||
it('should affect preview message handling based on feature support', () => {
|
||||
// Test with metadata support
|
||||
api.serverFeatureFlags = { supports_preview_metadata: true }
|
||||
expect(api.serverSupportsFeature('supports_preview_metadata')).toBe(true)
|
||||
|
||||
// Test without metadata support
|
||||
api.serverFeatureFlags = {}
|
||||
expect(api.serverSupportsFeature('supports_preview_metadata')).toBe(false)
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -227,7 +227,7 @@ describe('useNodePricing', () => {
|
||||
])
|
||||
|
||||
const price = getNodeDisplayPrice(node)
|
||||
expect(price).toBe('$0.02/Run')
|
||||
expect(price).toBe('$0.020/Run')
|
||||
})
|
||||
|
||||
it('should return $0.018 for 512x512 size', () => {
|
||||
@@ -255,7 +255,7 @@ describe('useNodePricing', () => {
|
||||
const node = createMockNode('OpenAIDalle2', [])
|
||||
|
||||
const price = getNodeDisplayPrice(node)
|
||||
expect(price).toBe('$0.016-0.02/Run (varies with size)')
|
||||
expect(price).toBe('$0.016-0.02 x n/Run (varies with size & n)')
|
||||
})
|
||||
})
|
||||
|
||||
@@ -295,19 +295,19 @@ describe('useNodePricing', () => {
|
||||
const node = createMockNode('OpenAIGPTImage1', [])
|
||||
|
||||
const price = getNodeDisplayPrice(node)
|
||||
expect(price).toBe('$0.011-0.30/Run (varies with quality)')
|
||||
expect(price).toBe('$0.011-0.30 x n/Run (varies with quality & n)')
|
||||
})
|
||||
})
|
||||
|
||||
describe('dynamic pricing - IdeogramV3', () => {
|
||||
it('should return $0.08 for Quality rendering speed', () => {
|
||||
it('should return $0.09 for Quality rendering speed', () => {
|
||||
const { getNodeDisplayPrice } = useNodePricing()
|
||||
const node = createMockNode('IdeogramV3', [
|
||||
{ name: 'rendering_speed', value: 'Quality' }
|
||||
])
|
||||
|
||||
const price = getNodeDisplayPrice(node)
|
||||
expect(price).toBe('$0.08/Run')
|
||||
expect(price).toBe('$0.09/Run')
|
||||
})
|
||||
|
||||
it('should return $0.06 for Balanced rendering speed', () => {
|
||||
@@ -348,7 +348,7 @@ describe('useNodePricing', () => {
|
||||
])
|
||||
|
||||
const price = getNodeDisplayPrice(node)
|
||||
expect(price).toBe('$0.24/Run') // 0.08 * 3
|
||||
expect(price).toBe('$0.27/Run') // 0.09 * 3
|
||||
})
|
||||
|
||||
it('should multiply price by num_images for Turbo rendering speed', () => {
|
||||
@@ -894,4 +894,133 @@ describe('useNodePricing', () => {
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('OpenAI nodes dynamic pricing with n parameter', () => {
|
||||
it('should calculate dynamic pricing for OpenAIDalle2 based on size and n', () => {
|
||||
const { getNodeDisplayPrice } = useNodePricing()
|
||||
const node = createMockNode('OpenAIDalle2', [
|
||||
{ name: 'size', value: '1024x1024' },
|
||||
{ name: 'n', value: 3 }
|
||||
])
|
||||
|
||||
const price = getNodeDisplayPrice(node)
|
||||
expect(price).toBe('$0.060/Run') // 0.02 * 3
|
||||
})
|
||||
|
||||
it('should calculate dynamic pricing for OpenAIGPTImage1 based on quality and n', () => {
|
||||
const { getNodeDisplayPrice } = useNodePricing()
|
||||
const node = createMockNode('OpenAIGPTImage1', [
|
||||
{ name: 'quality', value: 'low' },
|
||||
{ name: 'n', value: 2 }
|
||||
])
|
||||
|
||||
const price = getNodeDisplayPrice(node)
|
||||
expect(price).toBe('$0.011-0.02 x 2/Run')
|
||||
})
|
||||
|
||||
it('should fall back to static display when n widget is missing for OpenAIDalle2', () => {
|
||||
const { getNodeDisplayPrice } = useNodePricing()
|
||||
const node = createMockNode('OpenAIDalle2', [
|
||||
{ name: 'size', value: '512x512' }
|
||||
])
|
||||
|
||||
const price = getNodeDisplayPrice(node)
|
||||
expect(price).toBe('$0.018/Run') // n defaults to 1
|
||||
})
|
||||
})
|
||||
|
||||
describe('KlingImageGenerationNode dynamic pricing with n parameter', () => {
|
||||
it('should calculate dynamic pricing for text-to-image with kling-v1', () => {
|
||||
const { getNodeDisplayPrice } = useNodePricing()
|
||||
const node = createMockNode('KlingImageGenerationNode', [
|
||||
{ name: 'model_name', value: 'kling-v1' },
|
||||
{ name: 'n', value: 4 }
|
||||
])
|
||||
|
||||
const price = getNodeDisplayPrice(node)
|
||||
expect(price).toBe('$0.0140/Run') // 0.0035 * 4
|
||||
})
|
||||
|
||||
it('should calculate dynamic pricing for text-to-image with kling-v1-5', () => {
|
||||
const { getNodeDisplayPrice } = useNodePricing()
|
||||
// Mock node without image input (text-to-image mode)
|
||||
const node = createMockNode('KlingImageGenerationNode', [
|
||||
{ name: 'model_name', value: 'kling-v1-5' },
|
||||
{ name: 'n', value: 2 }
|
||||
])
|
||||
|
||||
const price = getNodeDisplayPrice(node)
|
||||
expect(price).toBe('$0.0280/Run') // For kling-v1-5 text-to-image: 0.014 * 2
|
||||
})
|
||||
|
||||
it('should fall back to static display when model widget is missing', () => {
|
||||
const { getNodeDisplayPrice } = useNodePricing()
|
||||
const node = createMockNode('KlingImageGenerationNode', [])
|
||||
|
||||
const price = getNodeDisplayPrice(node)
|
||||
expect(price).toBe('$0.0035-0.028 x n/Run (varies with modality & model)')
|
||||
})
|
||||
})
|
||||
|
||||
describe('New Recraft nodes dynamic pricing', () => {
|
||||
it('should calculate dynamic pricing for RecraftGenerateImageNode', () => {
|
||||
const { getNodeDisplayPrice } = useNodePricing()
|
||||
const node = createMockNode('RecraftGenerateImageNode', [
|
||||
{ name: 'n', value: 3 }
|
||||
])
|
||||
|
||||
const price = getNodeDisplayPrice(node)
|
||||
expect(price).toBe('$0.12/Run') // 0.04 * 3
|
||||
})
|
||||
|
||||
it('should calculate dynamic pricing for RecraftVectorizeImageNode', () => {
|
||||
const { getNodeDisplayPrice } = useNodePricing()
|
||||
const node = createMockNode('RecraftVectorizeImageNode', [
|
||||
{ name: 'n', value: 5 }
|
||||
])
|
||||
|
||||
const price = getNodeDisplayPrice(node)
|
||||
expect(price).toBe('$0.05/Run') // 0.01 * 5
|
||||
})
|
||||
|
||||
it('should calculate dynamic pricing for RecraftGenerateVectorImageNode', () => {
|
||||
const { getNodeDisplayPrice } = useNodePricing()
|
||||
const node = createMockNode('RecraftGenerateVectorImageNode', [
|
||||
{ name: 'n', value: 2 }
|
||||
])
|
||||
|
||||
const price = getNodeDisplayPrice(node)
|
||||
expect(price).toBe('$0.16/Run') // 0.08 * 2
|
||||
})
|
||||
})
|
||||
|
||||
describe('Widget names for reactive updates', () => {
|
||||
it('should include n parameter for OpenAI nodes', () => {
|
||||
const { getRelevantWidgetNames } = useNodePricing()
|
||||
|
||||
expect(getRelevantWidgetNames('OpenAIDalle2')).toEqual(['size', 'n'])
|
||||
expect(getRelevantWidgetNames('OpenAIGPTImage1')).toEqual([
|
||||
'quality',
|
||||
'n'
|
||||
])
|
||||
})
|
||||
|
||||
it('should include n parameter for Kling and new Recraft nodes', () => {
|
||||
const { getRelevantWidgetNames } = useNodePricing()
|
||||
|
||||
expect(getRelevantWidgetNames('KlingImageGenerationNode')).toEqual([
|
||||
'modality',
|
||||
'model_name',
|
||||
'n'
|
||||
])
|
||||
expect(getRelevantWidgetNames('RecraftVectorizeImageNode')).toEqual(['n'])
|
||||
expect(getRelevantWidgetNames('RecraftGenerateImageNode')).toEqual(['n'])
|
||||
expect(getRelevantWidgetNames('RecraftGenerateVectorImageNode')).toEqual([
|
||||
'n'
|
||||
])
|
||||
expect(
|
||||
getRelevantWidgetNames('RecraftGenerateColorFromImageNode')
|
||||
).toEqual(['n'])
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
425
tests-ui/tests/composables/useSettingSearch.test.ts
Normal file
@@ -0,0 +1,425 @@
|
||||
import { createPinia, setActivePinia } from 'pinia'
|
||||
import { beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
import { nextTick } from 'vue'
|
||||
|
||||
import { useSettingSearch } from '@/composables/setting/useSettingSearch'
|
||||
import { st } from '@/i18n'
|
||||
import { getSettingInfo, useSettingStore } from '@/stores/settingStore'
|
||||
|
||||
// Mock dependencies
|
||||
vi.mock('@/i18n', () => ({
|
||||
st: vi.fn((_: string, fallback: string) => fallback)
|
||||
}))
|
||||
|
||||
vi.mock('@/stores/settingStore', () => ({
|
||||
useSettingStore: vi.fn(),
|
||||
getSettingInfo: vi.fn()
|
||||
}))
|
||||
|
||||
describe('useSettingSearch', () => {
|
||||
let mockSettingStore: any
|
||||
let mockSettings: any
|
||||
|
||||
beforeEach(() => {
|
||||
setActivePinia(createPinia())
|
||||
vi.clearAllMocks()
|
||||
|
||||
// Mock settings data
|
||||
mockSettings = {
|
||||
'Category.Setting1': {
|
||||
id: 'Category.Setting1',
|
||||
name: 'Setting One',
|
||||
type: 'text',
|
||||
defaultValue: 'default',
|
||||
category: ['Category', 'Basic']
|
||||
},
|
||||
'Category.Setting2': {
|
||||
id: 'Category.Setting2',
|
||||
name: 'Setting Two',
|
||||
type: 'boolean',
|
||||
defaultValue: false,
|
||||
category: ['Category', 'Advanced']
|
||||
},
|
||||
'Category.HiddenSetting': {
|
||||
id: 'Category.HiddenSetting',
|
||||
name: 'Hidden Setting',
|
||||
type: 'hidden',
|
||||
defaultValue: 'hidden',
|
||||
category: ['Category', 'Basic']
|
||||
},
|
||||
'Category.DeprecatedSetting': {
|
||||
id: 'Category.DeprecatedSetting',
|
||||
name: 'Deprecated Setting',
|
||||
type: 'text',
|
||||
defaultValue: 'deprecated',
|
||||
deprecated: true,
|
||||
category: ['Category', 'Advanced']
|
||||
},
|
||||
'Other.Setting3': {
|
||||
id: 'Other.Setting3',
|
||||
name: 'Other Setting',
|
||||
type: 'select',
|
||||
defaultValue: 'option1',
|
||||
category: ['Other', 'SubCategory']
|
||||
}
|
||||
}
|
||||
|
||||
// Mock setting store
|
||||
mockSettingStore = {
|
||||
settingsById: mockSettings
|
||||
}
|
||||
vi.mocked(useSettingStore).mockReturnValue(mockSettingStore)
|
||||
|
||||
// Mock getSettingInfo function
|
||||
vi.mocked(getSettingInfo).mockImplementation((setting: any) => {
|
||||
const parts = setting.category || setting.id.split('.')
|
||||
return {
|
||||
category: parts[0] ?? 'Other',
|
||||
subCategory: parts[1] ?? 'Other'
|
||||
}
|
||||
})
|
||||
|
||||
// Mock st function to return fallback value
|
||||
vi.mocked(st).mockImplementation((_: string, fallback: string) => fallback)
|
||||
})
|
||||
|
||||
describe('initialization', () => {
|
||||
it('initializes with default state', () => {
|
||||
const search = useSettingSearch()
|
||||
|
||||
expect(search.searchQuery.value).toBe('')
|
||||
expect(search.filteredSettingIds.value).toEqual([])
|
||||
expect(search.searchInProgress.value).toBe(false)
|
||||
expect(search.queryIsEmpty.value).toBe(true)
|
||||
expect(search.inSearch.value).toBe(false)
|
||||
expect(search.searchResultsCategories.value).toEqual(new Set())
|
||||
})
|
||||
})
|
||||
|
||||
describe('reactive properties', () => {
|
||||
it('queryIsEmpty computed property works correctly', () => {
|
||||
const search = useSettingSearch()
|
||||
|
||||
expect(search.queryIsEmpty.value).toBe(true)
|
||||
|
||||
search.searchQuery.value = 'test'
|
||||
expect(search.queryIsEmpty.value).toBe(false)
|
||||
|
||||
search.searchQuery.value = ''
|
||||
expect(search.queryIsEmpty.value).toBe(true)
|
||||
})
|
||||
|
||||
it('inSearch computed property works correctly', () => {
|
||||
const search = useSettingSearch()
|
||||
|
||||
// Empty query, not in search
|
||||
expect(search.inSearch.value).toBe(false)
|
||||
|
||||
// Has query but search in progress
|
||||
search.searchQuery.value = 'test'
|
||||
search.searchInProgress.value = true
|
||||
expect(search.inSearch.value).toBe(false)
|
||||
|
||||
// Has query and search complete
|
||||
search.searchInProgress.value = false
|
||||
expect(search.inSearch.value).toBe(true)
|
||||
})
|
||||
|
||||
it('searchResultsCategories computed property works correctly', () => {
|
||||
const search = useSettingSearch()
|
||||
|
||||
// No results
|
||||
expect(search.searchResultsCategories.value).toEqual(new Set())
|
||||
|
||||
// Add some filtered results
|
||||
search.filteredSettingIds.value = ['Category.Setting1', 'Other.Setting3']
|
||||
expect(search.searchResultsCategories.value).toEqual(
|
||||
new Set(['Category', 'Other'])
|
||||
)
|
||||
})
|
||||
|
||||
it('watches searchQuery and sets searchInProgress to true', async () => {
|
||||
const search = useSettingSearch()
|
||||
|
||||
expect(search.searchInProgress.value).toBe(false)
|
||||
|
||||
search.searchQuery.value = 'test'
|
||||
await nextTick()
|
||||
|
||||
expect(search.searchInProgress.value).toBe(true)
|
||||
})
|
||||
})
|
||||
|
||||
describe('handleSearch', () => {
|
||||
it('clears results when query is empty', () => {
|
||||
const search = useSettingSearch()
|
||||
search.filteredSettingIds.value = ['Category.Setting1']
|
||||
|
||||
search.handleSearch('')
|
||||
|
||||
expect(search.filteredSettingIds.value).toEqual([])
|
||||
})
|
||||
|
||||
it('filters settings by ID (case insensitive)', () => {
|
||||
const search = useSettingSearch()
|
||||
|
||||
search.handleSearch('category.setting1')
|
||||
|
||||
expect(search.filteredSettingIds.value).toContain('Category.Setting1')
|
||||
expect(search.filteredSettingIds.value).not.toContain('Other.Setting3')
|
||||
})
|
||||
|
||||
it('filters settings by name (case insensitive)', () => {
|
||||
const search = useSettingSearch()
|
||||
|
||||
search.handleSearch('setting one')
|
||||
|
||||
expect(search.filteredSettingIds.value).toContain('Category.Setting1')
|
||||
expect(search.filteredSettingIds.value).not.toContain('Category.Setting2')
|
||||
})
|
||||
|
||||
it('filters settings by category', () => {
|
||||
const search = useSettingSearch()
|
||||
|
||||
search.handleSearch('other')
|
||||
|
||||
expect(search.filteredSettingIds.value).toContain('Other.Setting3')
|
||||
expect(search.filteredSettingIds.value).not.toContain('Category.Setting1')
|
||||
})
|
||||
|
||||
it('excludes hidden settings from results', () => {
|
||||
const search = useSettingSearch()
|
||||
|
||||
search.handleSearch('hidden')
|
||||
|
||||
expect(search.filteredSettingIds.value).not.toContain(
|
||||
'Category.HiddenSetting'
|
||||
)
|
||||
})
|
||||
|
||||
it('excludes deprecated settings from results', () => {
|
||||
const search = useSettingSearch()
|
||||
|
||||
search.handleSearch('deprecated')
|
||||
|
||||
expect(search.filteredSettingIds.value).not.toContain(
|
||||
'Category.DeprecatedSetting'
|
||||
)
|
||||
})
|
||||
|
||||
it('sets searchInProgress to false after search', () => {
|
||||
const search = useSettingSearch()
|
||||
search.searchInProgress.value = true
|
||||
|
||||
search.handleSearch('test')
|
||||
|
||||
expect(search.searchInProgress.value).toBe(false)
|
||||
})
|
||||
|
||||
it('includes visible settings in results', () => {
|
||||
const search = useSettingSearch()
|
||||
|
||||
search.handleSearch('setting')
|
||||
|
||||
expect(search.filteredSettingIds.value).toEqual(
|
||||
expect.arrayContaining([
|
||||
'Category.Setting1',
|
||||
'Category.Setting2',
|
||||
'Other.Setting3'
|
||||
])
|
||||
)
|
||||
expect(search.filteredSettingIds.value).not.toContain(
|
||||
'Category.HiddenSetting'
|
||||
)
|
||||
expect(search.filteredSettingIds.value).not.toContain(
|
||||
'Category.DeprecatedSetting'
|
||||
)
|
||||
})
|
||||
|
||||
it('includes all visible settings in comprehensive search', () => {
|
||||
const search = useSettingSearch()
|
||||
|
||||
// Search for a partial match that should include multiple settings
|
||||
search.handleSearch('setting')
|
||||
|
||||
// Should find all visible settings (not hidden/deprecated)
|
||||
expect(search.filteredSettingIds.value.length).toBeGreaterThan(0)
|
||||
expect(search.filteredSettingIds.value).toEqual(
|
||||
expect.arrayContaining([
|
||||
'Category.Setting1',
|
||||
'Category.Setting2',
|
||||
'Other.Setting3'
|
||||
])
|
||||
)
|
||||
})
|
||||
|
||||
it('uses translated categories for search', () => {
|
||||
const search = useSettingSearch()
|
||||
|
||||
// Mock st to return translated category names
|
||||
vi.mocked(st).mockImplementation((key: string, fallback: string) => {
|
||||
if (key === 'settingsCategories.Category') {
|
||||
return 'Translated Category'
|
||||
}
|
||||
return fallback
|
||||
})
|
||||
|
||||
search.handleSearch('translated category')
|
||||
|
||||
expect(search.filteredSettingIds.value).toEqual(
|
||||
expect.arrayContaining(['Category.Setting1', 'Category.Setting2'])
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
describe('getSearchResults', () => {
|
||||
it('groups results by subcategory', () => {
|
||||
const search = useSettingSearch()
|
||||
search.filteredSettingIds.value = [
|
||||
'Category.Setting1',
|
||||
'Category.Setting2'
|
||||
]
|
||||
|
||||
const results = search.getSearchResults(null)
|
||||
|
||||
expect(results).toEqual([
|
||||
{
|
||||
label: 'Basic',
|
||||
settings: [mockSettings['Category.Setting1']]
|
||||
},
|
||||
{
|
||||
label: 'Advanced',
|
||||
settings: [mockSettings['Category.Setting2']]
|
||||
}
|
||||
])
|
||||
})
|
||||
|
||||
it('filters results by active category', () => {
|
||||
const search = useSettingSearch()
|
||||
search.filteredSettingIds.value = ['Category.Setting1', 'Other.Setting3']
|
||||
|
||||
const activeCategory = { label: 'Category' } as any
|
||||
const results = search.getSearchResults(activeCategory)
|
||||
|
||||
expect(results).toEqual([
|
||||
{
|
||||
label: 'Basic',
|
||||
settings: [mockSettings['Category.Setting1']]
|
||||
}
|
||||
])
|
||||
})
|
||||
|
||||
it('returns all results when no active category', () => {
|
||||
const search = useSettingSearch()
|
||||
search.filteredSettingIds.value = ['Category.Setting1', 'Other.Setting3']
|
||||
|
||||
const results = search.getSearchResults(null)
|
||||
|
||||
expect(results).toEqual([
|
||||
{
|
||||
label: 'Basic',
|
||||
settings: [mockSettings['Category.Setting1']]
|
||||
},
|
||||
{
|
||||
label: 'SubCategory',
|
||||
settings: [mockSettings['Other.Setting3']]
|
||||
}
|
||||
])
|
||||
})
|
||||
|
||||
it('returns empty array when no filtered results', () => {
|
||||
const search = useSettingSearch()
|
||||
search.filteredSettingIds.value = []
|
||||
|
||||
const results = search.getSearchResults(null)
|
||||
|
||||
expect(results).toEqual([])
|
||||
})
|
||||
|
||||
it('handles multiple settings in same subcategory', () => {
|
||||
const search = useSettingSearch()
|
||||
|
||||
// Add another setting to Basic subcategory
|
||||
mockSettings['Category.Setting4'] = {
|
||||
id: 'Category.Setting4',
|
||||
name: 'Setting Four',
|
||||
type: 'text',
|
||||
defaultValue: 'default',
|
||||
category: ['Category', 'Basic']
|
||||
}
|
||||
|
||||
search.filteredSettingIds.value = [
|
||||
'Category.Setting1',
|
||||
'Category.Setting4'
|
||||
]
|
||||
|
||||
const results = search.getSearchResults(null)
|
||||
|
||||
expect(results).toEqual([
|
||||
{
|
||||
label: 'Basic',
|
||||
settings: [
|
||||
mockSettings['Category.Setting1'],
|
||||
mockSettings['Category.Setting4']
|
||||
]
|
||||
}
|
||||
])
|
||||
})
|
||||
})
|
||||
|
||||
describe('edge cases', () => {
|
||||
it('handles empty settings store', () => {
|
||||
mockSettingStore.settingsById = {}
|
||||
const search = useSettingSearch()
|
||||
|
||||
search.handleSearch('test')
|
||||
|
||||
expect(search.filteredSettingIds.value).toEqual([])
|
||||
})
|
||||
|
||||
it('handles settings with undefined category', () => {
|
||||
mockSettings['NoCategorySetting'] = {
|
||||
id: 'NoCategorySetting',
|
||||
name: 'No Category',
|
||||
type: 'text',
|
||||
defaultValue: 'default'
|
||||
}
|
||||
|
||||
const search = useSettingSearch()
|
||||
|
||||
search.handleSearch('category')
|
||||
|
||||
expect(search.filteredSettingIds.value).toContain('NoCategorySetting')
|
||||
})
|
||||
|
||||
it('handles special characters in search query', () => {
|
||||
const search = useSettingSearch()
|
||||
|
||||
// Search for part of the ID that contains a dot
|
||||
search.handleSearch('category.setting')
|
||||
|
||||
expect(search.filteredSettingIds.value).toContain('Category.Setting1')
|
||||
})
|
||||
|
||||
it('handles very long search queries', () => {
|
||||
const search = useSettingSearch()
|
||||
const longQuery = 'a'.repeat(1000)
|
||||
|
||||
search.handleSearch(longQuery)
|
||||
|
||||
expect(search.filteredSettingIds.value).toEqual([])
|
||||
})
|
||||
|
||||
it('handles rapid consecutive searches', async () => {
|
||||
const search = useSettingSearch()
|
||||
|
||||
search.handleSearch('setting')
|
||||
search.handleSearch('other')
|
||||
search.handleSearch('category')
|
||||
|
||||
expect(search.filteredSettingIds.value).toEqual(
|
||||
expect.arrayContaining(['Category.Setting1', 'Category.Setting2'])
|
||||
)
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -61,6 +61,12 @@ describe('useReleaseStore', () => {
|
||||
vi.mocked(useSettingStore).mockReturnValue(mockSettingStore)
|
||||
vi.mocked(useSystemStatsStore).mockReturnValue(mockSystemStatsStore)
|
||||
|
||||
// Default showVersionUpdates to true
|
||||
mockSettingStore.get.mockImplementation((key: string) => {
|
||||
if (key === 'Comfy.Notification.ShowVersionUpdates') return true
|
||||
return null
|
||||
})
|
||||
|
||||
store = useReleaseStore()
|
||||
})
|
||||
|
||||
@@ -114,6 +120,107 @@ describe('useReleaseStore', () => {
|
||||
})
|
||||
})
|
||||
|
||||
describe('showVersionUpdates setting', () => {
|
||||
beforeEach(() => {
|
||||
store.releases = [mockRelease]
|
||||
})
|
||||
|
||||
describe('when notifications are enabled', () => {
|
||||
beforeEach(() => {
|
||||
mockSettingStore.get.mockImplementation((key: string) => {
|
||||
if (key === 'Comfy.Notification.ShowVersionUpdates') return true
|
||||
return null
|
||||
})
|
||||
})
|
||||
|
||||
it('should show toast for medium/high attention releases', async () => {
|
||||
const { compareVersions } = await import('@/utils/formatUtil')
|
||||
vi.mocked(compareVersions).mockReturnValue(1)
|
||||
|
||||
// Need multiple releases for hasMediumOrHighAttention to work
|
||||
const mediumRelease = {
|
||||
...mockRelease,
|
||||
id: 2,
|
||||
attention: 'medium' as const
|
||||
}
|
||||
store.releases = [mockRelease, mediumRelease]
|
||||
|
||||
expect(store.shouldShowToast).toBe(true)
|
||||
})
|
||||
|
||||
it('should show red dot for new versions', async () => {
|
||||
const { compareVersions } = await import('@/utils/formatUtil')
|
||||
vi.mocked(compareVersions).mockReturnValue(1)
|
||||
|
||||
expect(store.shouldShowRedDot).toBe(true)
|
||||
})
|
||||
|
||||
it('should show popup for latest version', async () => {
|
||||
mockSystemStatsStore.systemStats.system.comfyui_version = '1.2.0'
|
||||
const { compareVersions } = await import('@/utils/formatUtil')
|
||||
vi.mocked(compareVersions).mockReturnValue(0)
|
||||
|
||||
expect(store.shouldShowPopup).toBe(true)
|
||||
})
|
||||
|
||||
it('should fetch releases during initialization', async () => {
|
||||
mockReleaseService.getReleases.mockResolvedValue([mockRelease])
|
||||
|
||||
await store.initialize()
|
||||
|
||||
expect(mockReleaseService.getReleases).toHaveBeenCalledWith({
|
||||
project: 'comfyui',
|
||||
current_version: '1.0.0',
|
||||
form_factor: 'git-windows'
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('when notifications are disabled', () => {
|
||||
beforeEach(() => {
|
||||
mockSettingStore.get.mockImplementation((key: string) => {
|
||||
if (key === 'Comfy.Notification.ShowVersionUpdates') return false
|
||||
return null
|
||||
})
|
||||
})
|
||||
|
||||
it('should not show toast even with new version available', async () => {
|
||||
const { compareVersions } = await import('@/utils/formatUtil')
|
||||
vi.mocked(compareVersions).mockReturnValue(1)
|
||||
|
||||
expect(store.shouldShowToast).toBe(false)
|
||||
})
|
||||
|
||||
it('should not show red dot even with new version available', async () => {
|
||||
const { compareVersions } = await import('@/utils/formatUtil')
|
||||
vi.mocked(compareVersions).mockReturnValue(1)
|
||||
|
||||
expect(store.shouldShowRedDot).toBe(false)
|
||||
})
|
||||
|
||||
it('should not show popup even for latest version', async () => {
|
||||
mockSystemStatsStore.systemStats.system.comfyui_version = '1.2.0'
|
||||
const { compareVersions } = await import('@/utils/formatUtil')
|
||||
vi.mocked(compareVersions).mockReturnValue(0)
|
||||
|
||||
expect(store.shouldShowPopup).toBe(false)
|
||||
})
|
||||
|
||||
it('should skip fetching releases during initialization', async () => {
|
||||
await store.initialize()
|
||||
|
||||
expect(mockReleaseService.getReleases).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should not fetch releases when calling fetchReleases directly', async () => {
|
||||
await store.fetchReleases()
|
||||
|
||||
expect(mockReleaseService.getReleases).not.toHaveBeenCalled()
|
||||
expect(store.isLoading).toBe(false)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('release initialization', () => {
|
||||
it('should fetch releases successfully', async () => {
|
||||
mockReleaseService.getReleases.mockResolvedValue([mockRelease])
|
||||
@@ -184,6 +291,17 @@ describe('useReleaseStore', () => {
|
||||
|
||||
expect(mockSystemStatsStore.fetchSystemStats).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should not set loading state when notifications disabled', async () => {
|
||||
mockSettingStore.get.mockImplementation((key: string) => {
|
||||
if (key === 'Comfy.Notification.ShowVersionUpdates') return false
|
||||
return null
|
||||
})
|
||||
|
||||
await store.initialize()
|
||||
|
||||
expect(store.isLoading).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe('action handlers', () => {
|
||||
@@ -248,6 +366,7 @@ describe('useReleaseStore', () => {
|
||||
mockSettingStore.get.mockImplementation((key: string) => {
|
||||
if (key === 'Comfy.Release.Version') return null
|
||||
if (key === 'Comfy.Release.Status') return null
|
||||
if (key === 'Comfy.Notification.ShowVersionUpdates') return true
|
||||
return null
|
||||
})
|
||||
|
||||
@@ -267,7 +386,10 @@ describe('useReleaseStore', () => {
|
||||
it('should show red dot for new versions', async () => {
|
||||
const { compareVersions } = await import('@/utils/formatUtil')
|
||||
vi.mocked(compareVersions).mockReturnValue(1)
|
||||
mockSettingStore.get.mockReturnValue(null)
|
||||
mockSettingStore.get.mockImplementation((key: string) => {
|
||||
if (key === 'Comfy.Notification.ShowVersionUpdates') return true
|
||||
return null
|
||||
})
|
||||
|
||||
store.releases = [mockRelease]
|
||||
|
||||
@@ -276,7 +398,10 @@ describe('useReleaseStore', () => {
|
||||
|
||||
it('should show popup for latest version', async () => {
|
||||
mockSystemStatsStore.systemStats.system.comfyui_version = '1.2.0' // Same as release
|
||||
mockSettingStore.get.mockReturnValue(null)
|
||||
mockSettingStore.get.mockImplementation((key: string) => {
|
||||
if (key === 'Comfy.Notification.ShowVersionUpdates') return true
|
||||
return null
|
||||
})
|
||||
|
||||
const { compareVersions } = await import('@/utils/formatUtil')
|
||||
vi.mocked(compareVersions).mockReturnValue(0) // versions are equal (latest version)
|
||||
@@ -286,4 +411,37 @@ describe('useReleaseStore', () => {
|
||||
expect(store.shouldShowPopup).toBe(true)
|
||||
})
|
||||
})
|
||||
|
||||
describe('edge cases', () => {
|
||||
it('should handle missing system stats gracefully', async () => {
|
||||
mockSystemStatsStore.systemStats = null
|
||||
mockSettingStore.get.mockImplementation((key: string) => {
|
||||
if (key === 'Comfy.Notification.ShowVersionUpdates') return false
|
||||
return null
|
||||
})
|
||||
|
||||
await store.initialize()
|
||||
|
||||
// Should not fetch system stats when notifications disabled
|
||||
expect(mockSystemStatsStore.fetchSystemStats).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should handle concurrent fetchReleases calls', async () => {
|
||||
mockReleaseService.getReleases.mockImplementation(
|
||||
() =>
|
||||
new Promise((resolve) =>
|
||||
setTimeout(() => resolve([mockRelease]), 100)
|
||||
)
|
||||
)
|
||||
|
||||
// Start two concurrent calls
|
||||
const promise1 = store.fetchReleases()
|
||||
const promise2 = store.fetchReleases()
|
||||
|
||||
await Promise.all([promise1, promise2])
|
||||
|
||||
// Should only call API once due to loading check
|
||||
expect(mockReleaseService.getReleases).toHaveBeenCalledTimes(1)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
@@ -109,6 +109,241 @@ describe('useSettingStore', () => {
|
||||
})
|
||||
})
|
||||
|
||||
describe('getDefaultValue', () => {
|
||||
beforeEach(() => {
|
||||
// Set up installed version for most tests
|
||||
store.settingValues['Comfy.InstalledVersion'] = '1.30.0'
|
||||
})
|
||||
|
||||
it('should return regular default value when no defaultsByInstallVersion', () => {
|
||||
const setting: SettingParams = {
|
||||
id: 'test.setting',
|
||||
name: 'Test Setting',
|
||||
type: 'text',
|
||||
defaultValue: 'regular-default'
|
||||
}
|
||||
store.addSetting(setting)
|
||||
|
||||
const result = store.getDefaultValue('test.setting')
|
||||
expect(result).toBe('regular-default')
|
||||
})
|
||||
|
||||
it('should return versioned default when user version matches', () => {
|
||||
const setting: SettingParams = {
|
||||
id: 'test.setting',
|
||||
name: 'Test Setting',
|
||||
type: 'text',
|
||||
defaultValue: 'regular-default',
|
||||
defaultsByInstallVersion: {
|
||||
'1.21.3': 'version-1.21.3-default',
|
||||
'1.40.3': 'version-1.40.3-default'
|
||||
}
|
||||
}
|
||||
store.addSetting(setting)
|
||||
|
||||
const result = store.getDefaultValue('test.setting')
|
||||
// installedVersion is 1.30.0, so should get 1.21.3 default
|
||||
expect(result).toBe('version-1.21.3-default')
|
||||
})
|
||||
|
||||
it('should return latest versioned default when user version is higher', () => {
|
||||
store.settingValues['Comfy.InstalledVersion'] = '1.50.0'
|
||||
|
||||
const setting: SettingParams = {
|
||||
id: 'test.setting',
|
||||
name: 'Test Setting',
|
||||
type: 'text',
|
||||
defaultValue: 'regular-default',
|
||||
defaultsByInstallVersion: {
|
||||
'1.21.3': 'version-1.21.3-default',
|
||||
'1.40.3': 'version-1.40.3-default'
|
||||
}
|
||||
}
|
||||
store.addSetting(setting)
|
||||
|
||||
const result = store.getDefaultValue('test.setting')
|
||||
// installedVersion is 1.50.0, so should get 1.40.3 default
|
||||
expect(result).toBe('version-1.40.3-default')
|
||||
})
|
||||
|
||||
it('should return regular default when user version is lower than all versioned defaults', () => {
|
||||
store.settingValues['Comfy.InstalledVersion'] = '1.10.0'
|
||||
|
||||
const setting: SettingParams = {
|
||||
id: 'test.setting',
|
||||
name: 'Test Setting',
|
||||
type: 'text',
|
||||
defaultValue: 'regular-default',
|
||||
defaultsByInstallVersion: {
|
||||
'1.21.3': 'version-1.21.3-default',
|
||||
'1.40.3': 'version-1.40.3-default'
|
||||
}
|
||||
}
|
||||
store.addSetting(setting)
|
||||
|
||||
const result = store.getDefaultValue('test.setting')
|
||||
// installedVersion is 1.10.0, lower than all versioned defaults
|
||||
expect(result).toBe('regular-default')
|
||||
})
|
||||
|
||||
it('should return regular default when no installed version (existing users)', () => {
|
||||
// Clear installed version to simulate existing user
|
||||
delete store.settingValues['Comfy.InstalledVersion']
|
||||
|
||||
const setting: SettingParams = {
|
||||
id: 'test.setting',
|
||||
name: 'Test Setting',
|
||||
type: 'text',
|
||||
defaultValue: 'regular-default',
|
||||
defaultsByInstallVersion: {
|
||||
'1.21.3': 'version-1.21.3-default',
|
||||
'1.40.3': 'version-1.40.3-default'
|
||||
}
|
||||
}
|
||||
store.addSetting(setting)
|
||||
|
||||
const result = store.getDefaultValue('test.setting')
|
||||
// No installed version, should use backward compatibility
|
||||
expect(result).toBe('regular-default')
|
||||
})
|
||||
|
||||
it('should handle function-based versioned defaults', () => {
|
||||
const setting: SettingParams = {
|
||||
id: 'test.setting',
|
||||
name: 'Test Setting',
|
||||
type: 'text',
|
||||
defaultValue: 'regular-default',
|
||||
defaultsByInstallVersion: {
|
||||
'1.21.3': () => 'dynamic-version-1.21.3-default',
|
||||
'1.40.3': () => 'dynamic-version-1.40.3-default'
|
||||
}
|
||||
}
|
||||
store.addSetting(setting)
|
||||
|
||||
const result = store.getDefaultValue('test.setting')
|
||||
// installedVersion is 1.30.0, so should get 1.21.3 default (executed)
|
||||
expect(result).toBe('dynamic-version-1.21.3-default')
|
||||
})
|
||||
|
||||
it('should handle function-based regular defaults with versioned defaults', () => {
|
||||
store.settingValues['Comfy.InstalledVersion'] = '1.10.0'
|
||||
|
||||
const setting: SettingParams = {
|
||||
id: 'test.setting',
|
||||
name: 'Test Setting',
|
||||
type: 'text',
|
||||
defaultValue: () => 'dynamic-regular-default',
|
||||
defaultsByInstallVersion: {
|
||||
'1.21.3': 'version-1.21.3-default',
|
||||
'1.40.3': 'version-1.40.3-default'
|
||||
}
|
||||
}
|
||||
store.addSetting(setting)
|
||||
|
||||
const result = store.getDefaultValue('test.setting')
|
||||
// installedVersion is 1.10.0, should fallback to function-based regular default
|
||||
expect(result).toBe('dynamic-regular-default')
|
||||
})
|
||||
|
||||
it('should handle complex version comparison correctly', () => {
|
||||
const setting: SettingParams = {
|
||||
id: 'test.setting',
|
||||
name: 'Test Setting',
|
||||
type: 'text',
|
||||
defaultValue: 'regular-default',
|
||||
defaultsByInstallVersion: {
|
||||
'1.21.3': 'version-1.21.3-default',
|
||||
'1.21.10': 'version-1.21.10-default',
|
||||
'1.40.3': 'version-1.40.3-default'
|
||||
}
|
||||
}
|
||||
store.addSetting(setting)
|
||||
|
||||
// Test with 1.21.5 - should get 1.21.3 default
|
||||
store.settingValues['Comfy.InstalledVersion'] = '1.21.5'
|
||||
expect(store.getDefaultValue('test.setting')).toBe(
|
||||
'version-1.21.3-default'
|
||||
)
|
||||
|
||||
// Test with 1.21.15 - should get 1.21.10 default
|
||||
store.settingValues['Comfy.InstalledVersion'] = '1.21.15'
|
||||
expect(store.getDefaultValue('test.setting')).toBe(
|
||||
'version-1.21.10-default'
|
||||
)
|
||||
|
||||
// Test with 1.21.3 exactly - should get 1.21.3 default
|
||||
store.settingValues['Comfy.InstalledVersion'] = '1.21.3'
|
||||
expect(store.getDefaultValue('test.setting')).toBe(
|
||||
'version-1.21.3-default'
|
||||
)
|
||||
})
|
||||
|
||||
it('should work with get() method using versioned defaults', () => {
|
||||
const setting: SettingParams = {
|
||||
id: 'test.setting',
|
||||
name: 'Test Setting',
|
||||
type: 'text',
|
||||
defaultValue: 'regular-default',
|
||||
defaultsByInstallVersion: {
|
||||
'1.21.3': 'version-1.21.3-default',
|
||||
'1.40.3': 'version-1.40.3-default'
|
||||
}
|
||||
}
|
||||
store.addSetting(setting)
|
||||
|
||||
// get() should use getDefaultValue internally
|
||||
const result = store.get('test.setting')
|
||||
expect(result).toBe('version-1.21.3-default')
|
||||
})
|
||||
|
||||
it('should handle mixed function and static versioned defaults', () => {
|
||||
const setting: SettingParams = {
|
||||
id: 'test.setting',
|
||||
name: 'Test Setting',
|
||||
type: 'text',
|
||||
defaultValue: 'regular-default',
|
||||
defaultsByInstallVersion: {
|
||||
'1.21.3': () => 'dynamic-1.21.3-default',
|
||||
'1.40.3': 'static-1.40.3-default'
|
||||
}
|
||||
}
|
||||
store.addSetting(setting)
|
||||
|
||||
// Test with 1.30.0 - should get dynamic 1.21.3 default
|
||||
store.settingValues['Comfy.InstalledVersion'] = '1.30.0'
|
||||
expect(store.getDefaultValue('test.setting')).toBe(
|
||||
'dynamic-1.21.3-default'
|
||||
)
|
||||
|
||||
// Test with 1.50.0 - should get static 1.40.3 default
|
||||
store.settingValues['Comfy.InstalledVersion'] = '1.50.0'
|
||||
expect(store.getDefaultValue('test.setting')).toBe(
|
||||
'static-1.40.3-default'
|
||||
)
|
||||
})
|
||||
|
||||
it('should handle version sorting correctly', () => {
|
||||
const setting: SettingParams = {
|
||||
id: 'test.setting',
|
||||
name: 'Test Setting',
|
||||
type: 'text',
|
||||
defaultValue: 'regular-default',
|
||||
defaultsByInstallVersion: {
|
||||
'1.40.3': 'version-1.40.3-default',
|
||||
'1.21.3': 'version-1.21.3-default', // Unsorted order
|
||||
'1.35.0': 'version-1.35.0-default'
|
||||
}
|
||||
}
|
||||
store.addSetting(setting)
|
||||
|
||||
// Test with 1.37.0 - should get 1.35.0 default (highest version <= 1.37.0)
|
||||
store.settingValues['Comfy.InstalledVersion'] = '1.37.0'
|
||||
expect(store.getDefaultValue('test.setting')).toBe(
|
||||
'version-1.35.0-default'
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
describe('get and set', () => {
|
||||
it('should get default value when setting not exists', () => {
|
||||
const setting: SettingParams = {
|
||||
|
||||