Merge branch 'main' into clean-tsconfig
@@ -5,6 +5,10 @@ PLAYWRIGHT_TEST_URL=http://localhost:5173
# Proxy target of the local development server
# Note: localhost:8188 does not work.
# Cloud auto-detection: Setting this to any *.comfy.org URL automatically enables
# cloud mode (DISTRIBUTION=cloud) without needing to set DISTRIBUTION separately.
# Examples: https://testcloud.comfy.org/, https://stagingcloud.comfy.org/,
# https://pr-123.testenvs.comfy.org/, https://cloud.comfy.org/
DEV_SERVER_COMFYUI_URL=http://127.0.0.1:8188

# Allow dev server access from remote IP addresses.

@@ -26,15 +26,6 @@ jobs:
node-version: lts/*
cache: 'pnpm'

- name: Cache tool outputs
uses: actions/cache@v4
with:
path: |
.cache
key: electron-types-tools-cache-${{ runner.os }}-${{ hashFiles('**/pnpm-lock.yaml') }}
restore-keys: |
electron-types-tools-cache-${{ runner.os }}-

- name: Update electron types
run: pnpm install --workspace-root @comfyorg/comfyui-electron-types@latest

@@ -31,26 +31,9 @@ jobs:
node-version: lts/*
cache: 'pnpm'

- name: Cache tool outputs
uses: actions/cache@v4
with:
path: |
.cache
key: update-manager-tools-cache-${{ runner.os }}-${{ hashFiles('**/pnpm-lock.yaml') }}
restore-keys: |
update-manager-tools-cache-${{ runner.os }}-

- name: Install dependencies
run: pnpm install --frozen-lockfile

- name: Cache ComfyUI-Manager repository
uses: actions/cache@v4
with:
path: ComfyUI-Manager
key: comfyui-manager-repo-${{ runner.os }}-${{ github.run_id }}
restore-keys: |
comfyui-manager-repo-${{ runner.os }}-

- name: Checkout ComfyUI-Manager repository
uses: actions/checkout@v5
with:

@@ -30,26 +30,9 @@ jobs:
node-version: lts/*
cache: 'pnpm'

- name: Cache tool outputs
uses: actions/cache@v4
with:
path: |
.cache
key: update-registry-tools-cache-${{ runner.os }}-${{ hashFiles('**/pnpm-lock.yaml') }}
restore-keys: |
update-registry-tools-cache-${{ runner.os }}-

- name: Install dependencies
run: pnpm install --frozen-lockfile

- name: Cache comfy-api repository
uses: actions/cache@v4
with:
path: comfy-api
key: comfy-api-repo-${{ runner.os }}-${{ github.run_id }}
restore-keys: |
comfy-api-repo-${{ runner.os }}-

- name: Checkout comfy-api repository
uses: actions/checkout@v5
with:
19  .github/workflows/ci-lint-format.yaml  vendored
@@ -33,27 +33,15 @@ jobs:
node-version: 'lts/*'
cache: 'pnpm'

- name: Cache tool outputs
uses: actions/cache@v4
with:
path: |
.cache
.eslintcache
tsconfig.tsbuildinfo
.prettierCache
.knip-cache
key: lint-format-cache-${{ runner.os }}-${{ hashFiles('**/pnpm-lock.yaml') }}-${{ hashFiles('src/**/*.{ts,vue,js,mts}', '*.config.*', '.eslintrc.*', '.prettierrc.*', 'tsconfig.json') }}
restore-keys: |
lint-format-cache-${{ runner.os }}-${{ hashFiles('**/pnpm-lock.yaml') }}-
lint-format-cache-${{ runner.os }}-
ci-tools-cache-${{ runner.os }}-

- name: Install dependencies
run: pnpm install --frozen-lockfile

- name: Run ESLint with auto-fix
run: pnpm lint:fix

- name: Run Stylelint with auto-fix
run: pnpm stylelint:fix

- name: Run Prettier with auto-format
run: pnpm format

@@ -78,6 +66,7 @@ jobs:
- name: Final validation
run: |
pnpm lint
pnpm stylelint
pnpm format:check
pnpm knip
26  .github/workflows/ci-tests-storybook.yaml  vendored
@@ -50,19 +50,6 @@ jobs:
node-version: '20'
cache: 'pnpm'

- name: Cache tool outputs
uses: actions/cache@v4
with:
path: |
.cache
storybook-static
tsconfig.tsbuildinfo
key: storybook-cache-${{ runner.os }}-${{ hashFiles('**/pnpm-lock.yaml') }}-${{ hashFiles('src/**/*.{ts,vue,js}', '*.config.*', '.storybook/**/*') }}
restore-keys: |
storybook-cache-${{ runner.os }}-${{ hashFiles('**/pnpm-lock.yaml') }}-
storybook-cache-${{ runner.os }}-
storybook-tools-cache-${{ runner.os }}-

- name: Install dependencies
run: pnpm install --frozen-lockfile

@@ -115,19 +102,6 @@ jobs:
node-version: '20'
cache: 'pnpm'

- name: Cache tool outputs
uses: actions/cache@v4
with:
path: |
.cache
storybook-static
tsconfig.tsbuildinfo
key: storybook-cache-${{ runner.os }}-${{ hashFiles('**/pnpm-lock.yaml') }}-${{ hashFiles('src/**/*.{ts,vue,js}', '*.config.*', '.storybook/**/*') }}
restore-keys: |
storybook-cache-${{ runner.os }}-${{ hashFiles('**/pnpm-lock.yaml') }}-
storybook-cache-${{ runner.os }}-
storybook-tools-cache-${{ runner.os }}-

- name: Install dependencies
run: pnpm install --frozen-lockfile
13  .github/workflows/ci-tests-unit.yaml  vendored
@@ -29,19 +29,6 @@ jobs:
node-version: "lts/*"
cache: "pnpm"

- name: Cache tool outputs
uses: actions/cache@v4
with:
path: |
.cache
coverage
.vitest-cache
key: vitest-cache-${{ runner.os }}-${{ hashFiles('**/pnpm-lock.yaml') }}-${{ hashFiles('src/**/*.{ts,vue,js}', 'vitest.config.*', 'tsconfig.json') }}
restore-keys: |
vitest-cache-${{ runner.os }}-${{ hashFiles('**/pnpm-lock.yaml') }}-
vitest-cache-${{ runner.os }}-
test-tools-cache-${{ runner.os }}-

- name: Install dependencies
run: pnpm install --frozen-lockfile
@@ -12,11 +12,11 @@ concurrency:
cancel-in-progress: true

jobs:
test:
setup:
runs-on: ubuntu-latest
if: >
( github.event_name == 'pull_request' && github.event.label.name == 'New Browser Test Expectations' ) ||
( github.event.issue.pull_request &&
( github.event.issue.pull_request &&
github.event_name == 'issue_comment' &&
(
github.event.comment.author_association == 'OWNER' ||
@@ -24,12 +24,25 @@ jobs:
github.event.comment.author_association == 'COLLABORATOR'
) &&
startsWith(github.event.comment.body, '/update-playwright') )
outputs:
cache-key: ${{ steps.cache-key.outputs.key }}
pr-number: ${{ steps.pr-info.outputs.pr-number }}
branch: ${{ steps.pr-info.outputs.branch }}
comment-id: ${{ steps.find-update-comment.outputs.comment-id }}
steps:
- name: Get PR info
id: pr-info
run: |
echo "pr-number=${{ github.event.number || github.event.issue.number }}" >> $GITHUB_OUTPUT
echo "branch=$(gh pr view ${{ github.event.number || github.event.issue.number }} --repo ${{ github.repository }} --json headRefName --jq '.headRefName')" >> $GITHUB_OUTPUT
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

- name: Find Update Comment
uses: peter-evans/find-comment@b30e6a3c0ed37e7c023ccd3f1db5c6c0b0c23aad
id: "find-update-comment"
with:
issue-number: ${{ github.event.number || github.event.issue.number }}
issue-number: ${{ steps.pr-info.outputs.pr-number }}
comment-author: "github-actions[bot]"
body-includes: "Updating Playwright Expectations"
@@ -37,72 +50,260 @@ jobs:
uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9
with:
comment-id: ${{ steps.find-update-comment.outputs.comment-id }}
issue-number: ${{ github.event.number || github.event.issue.number }}
issue-number: ${{ steps.pr-info.outputs.pr-number }}
body: |
Updating Playwright Expectations
edit-mode: replace
reactions: eyes

- name: Get Branch SHA
id: "get-branch"
run: echo ::set-output name=branch::$(gh pr view $PR_NO --repo $REPO --json headRefName --jq '.headRefName')
env:
REPO: ${{ github.repository }}
PR_NO: ${{ github.event.number || github.event.issue.number }}
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

- name: Initial Checkout
- name: Checkout repository
uses: actions/checkout@v5
with:
ref: ${{ steps.get-branch.outputs.branch }}
- name: Setup Frontend
ref: ${{ steps.pr-info.outputs.branch }}
- name: Setup frontend
uses: ./.github/actions/setup-frontend
with:
include_build_step: true
- name: Setup ComfyUI Server
# Save expensive build artifacts (Python env, built frontend, node_modules)
# Source code will be checked out fresh in sharded jobs
- name: Generate cache key
id: cache-key
run: echo "key=$(date +%s)" >> $GITHUB_OUTPUT
- name: Save cache
uses: actions/cache/save@5a3ec84eff668545956fd18022155c47e93e2684
with:
path: |
ComfyUI
dist
key: comfyui-setup-${{ steps.cache-key.outputs.key }}

# Sharded snapshot updates
update-snapshots-sharded:
needs: setup
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
shardIndex: [1, 2, 3, 4]
shardTotal: [4]
steps:
# Checkout source code fresh (not cached)
- name: Checkout repository
uses: actions/checkout@v5
with:
ref: ${{ needs.setup.outputs.branch }}

# Restore expensive build artifacts from setup job
- name: Restore cached artifacts
uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684
with:
fail-on-cache-miss: true
path: |
ComfyUI
dist
key: comfyui-setup-${{ needs.setup.outputs.cache-key }}

- name: Setup ComfyUI server (from cache)
uses: ./.github/actions/setup-comfyui-server
with:
launch_server: true

- name: Setup nodejs, pnpm, reuse built frontend
uses: ./.github/actions/setup-frontend

- name: Setup Playwright
uses: ./.github/actions/setup-playwright
- name: Run Playwright tests and update snapshots

# Run sharded tests with snapshot updates
- name: Update snapshots (Shard ${{ matrix.shardIndex }}/${{ matrix.shardTotal }})
id: playwright-tests
run: pnpm exec playwright test --update-snapshots
run: pnpm exec playwright test --update-snapshots --shard=${{ matrix.shardIndex }}/${{ matrix.shardTotal }}
continue-on-error: true
- uses: actions/upload-artifact@v4

# Identify and stage only changed snapshot files
- name: Stage changed snapshot files
id: changed-snapshots
run: |
echo "=========================================="
echo "STAGING CHANGED SNAPSHOTS (Shard ${{ matrix.shardIndex }})"
echo "=========================================="

# Get list of changed snapshot files
changed_files=$(git diff --name-only browser_tests/ 2>/dev/null | grep -E '\-snapshots/' || echo "")

if [ -z "$changed_files" ]; then
echo "No snapshot changes in this shard"
echo "has-changes=false" >> $GITHUB_OUTPUT
exit 0
fi

echo "✓ Found changed files:"
echo "$changed_files"
file_count=$(echo "$changed_files" | wc -l)
echo "Count: $file_count"
echo "has-changes=true" >> $GITHUB_OUTPUT
echo ""

# Create staging directory
mkdir -p /tmp/changed_snapshots_shard

# Copy only changed files, preserving directory structure
# Strip 'browser_tests/' prefix to avoid double nesting
echo "Copying changed files to staging directory..."
while IFS= read -r file; do
# Remove 'browser_tests/' prefix
file_without_prefix="${file#browser_tests/}"
# Create parent directories
mkdir -p "/tmp/changed_snapshots_shard/$(dirname "$file_without_prefix")"
# Copy file
cp "$file" "/tmp/changed_snapshots_shard/$file_without_prefix"
echo "  → $file_without_prefix"
done <<< "$changed_files"

echo ""
echo "Staged files for upload:"
find /tmp/changed_snapshots_shard -type f

# Upload ONLY the changed files from this shard
- name: Upload changed snapshots
uses: actions/upload-artifact@v4
if: steps.changed-snapshots.outputs.has-changes == 'true'
with:
name: snapshots-shard-${{ matrix.shardIndex }}
path: /tmp/changed_snapshots_shard/
retention-days: 1

- name: Upload test report
uses: actions/upload-artifact@v4
if: always()
with:
name: playwright-report
name: playwright-report-shard-${{ matrix.shardIndex }}
path: ./playwright-report/
retention-days: 30
- name: Debugging info
# Merge snapshots and commit
merge-and-commit:
needs: [setup, update-snapshots-sharded]
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v5
with:
ref: ${{ needs.setup.outputs.branch }}

# Download all changed snapshot files from shards
- name: Download snapshot artifacts
uses: actions/download-artifact@v4
with:
pattern: snapshots-shard-*
path: ./downloaded-snapshots
merge-multiple: false

- name: List downloaded files
run: |
echo "PR: ${{ github.event.issue.number }}"
echo "Branch: ${{ steps.get-branch.outputs.branch }}"
git status
echo "=========================================="
echo "DOWNLOADED SNAPSHOT FILES"
echo "=========================================="
find ./downloaded-snapshots -type f
echo ""
echo "Total files: $(find ./downloaded-snapshots -type f | wc -l)"

# Merge only the changed files into browser_tests
- name: Merge changed snapshots
run: |
set -euo pipefail

echo "=========================================="
echo "MERGING CHANGED SNAPSHOTS"
echo "=========================================="

# Verify target directory exists
if [ ! -d "browser_tests" ]; then
echo "::error::Target directory 'browser_tests' does not exist"
exit 1
fi

merged_count=0

# For each shard's changed files, copy them directly
for shard_dir in ./downloaded-snapshots/snapshots-shard-*/; do
if [ ! -d "$shard_dir" ]; then
continue
fi

shard_name=$(basename "$shard_dir")
file_count=$(find "$shard_dir" -type f | wc -l)

if [ "$file_count" -eq 0 ]; then
echo "  $shard_name: no files"
continue
fi

echo "Processing $shard_name ($file_count file(s))..."

# Copy files directly, preserving directory structure
# Since files are already in correct structure (no browser_tests/ prefix), just copy them all
cp -v -r "$shard_dir"* browser_tests/ 2>&1 | sed 's/^/  /'

merged_count=$((merged_count + 1))
echo "  ✓ Merged"
echo ""
done

echo "=========================================="
echo "MERGE COMPLETE"
echo "=========================================="
echo "Shards merged: $merged_count"

- name: Show changes
run: |
echo "=========================================="
echo "CHANGES SUMMARY"
echo "=========================================="
echo ""
echo "Changed files in browser_tests:"
git diff --name-only browser_tests/ | head -20 || echo "No changes"
echo ""
echo "Total changes:"
git diff --name-only browser_tests/ | wc -l || echo "0"

- name: Commit updated expectations
id: commit
run: |
git config --global user.name 'github-actions'
git config --global user.email 'github-actions@github.com'
git add browser_tests
if git diff --cached --quiet; then

if git diff --quiet browser_tests/; then
echo "No changes to commit"
else
git commit -m "[automated] Update test expectations"
git push origin ${{ steps.get-branch.outputs.branch }}
echo "has-changes=false" >> $GITHUB_OUTPUT
exit 0
fi

echo "=========================================="
echo "COMMITTING CHANGES"
echo "=========================================="

echo "has-changes=true" >> $GITHUB_OUTPUT

git add browser_tests/
git commit -m "[automated] Update test expectations"

echo "Pushing to ${{ needs.setup.outputs.branch }}..."
git push origin ${{ needs.setup.outputs.branch }}

echo "✓ Commit and push successful"

- name: Add Done Reaction
uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9
if: github.event_name == 'issue_comment'
if: github.event_name == 'issue_comment' && steps.commit.outputs.has-changes == 'true'
with:
comment-id: ${{ steps.find-update-comment.outputs.comment-id }}
issue-number: ${{ github.event.number || github.event.issue.number }}
comment-id: ${{ needs.setup.outputs.comment-id }}
issue-number: ${{ needs.setup.outputs.pr-number }}
reactions: +1
reactions-edit-mode: replace

- name: Remove New Browser Test Expectations label
if: always() && github.event_name == 'pull_request'
run: gh pr edit ${{ github.event.pull_request.number }} --remove-label "New Browser Test Expectations"
run: gh pr edit ${{ needs.setup.outputs.pr-number }} --remove-label "New Browser Test Expectations"
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
14  .github/workflows/publish-desktop-ui.yaml  vendored
@@ -161,20 +161,6 @@ jobs:
echo "publish_dir=$PUBLISH_DIR" >> "$GITHUB_OUTPUT"
echo "name=$NAME" >> "$GITHUB_OUTPUT"

- name: Pack (preview only)
shell: bash
working-directory: ${{ steps.pkg.outputs.publish_dir }}
run: |
set -euo pipefail
npm pack --json | tee pack-result.json

- name: Upload package tarball artifact
uses: actions/upload-artifact@v4
with:
name: desktop-ui-npm-tarball-${{ inputs.version }}
path: ${{ steps.pkg.outputs.publish_dir }}/*.tgz
if-no-files-found: error

- name: Check if version already on npm
id: check_npm
env:

10  .github/workflows/release-draft-create.yaml  vendored
@@ -28,16 +28,6 @@ jobs:
node-version: 'lts/*'
cache: 'pnpm'

- name: Cache tool outputs
uses: actions/cache@v4
with:
path: |
.cache
tsconfig.tsbuildinfo
key: release-tools-cache-${{ runner.os }}-${{ hashFiles('**/pnpm-lock.yaml') }}
restore-keys: |
release-tools-cache-${{ runner.os }}-

- name: Get current version
id: current_version
run: echo "version=$(node -p "require('./package.json').version")" >> $GITHUB_OUTPUT

11  .github/workflows/release-pypi-dev.yaml  vendored
@@ -25,17 +25,6 @@ jobs:
node-version: 'lts/*'
cache: 'pnpm'

- name: Cache tool outputs
uses: actions/cache@v4
with:
path: |
.cache
dist
tsconfig.tsbuildinfo
key: dev-release-tools-cache-${{ runner.os }}-${{ hashFiles('**/pnpm-lock.yaml') }}
restore-keys: |
dev-release-tools-cache-${{ runner.os }}-

- name: Get current version
id: current_version
run: echo "version=$(node -p "require('./package.json').version")" >> $GITHUB_OUTPUT

1  .github/workflows/release-version-bump.yaml  vendored
@@ -59,7 +59,6 @@ jobs:
uses: actions/setup-node@v4
with:
node-version: lts/*
cache: 'pnpm'

- name: Bump version
id: bump-version
2  .gitignore  vendored
@@ -78,7 +78,7 @@ templates_repo/
vite.config.mts.timestamp-*.mjs

# Linux core dumps
./core
/core

*storybook.log
storybook-static

@@ -1,5 +1,5 @@
#!/usr/bin/env bash

# Run Knip with cache via package script
pnpm knip
pnpm knip 1>&2

@@ -7,12 +7,5 @@
"importOrder": ["^@core/(.*)$", "<THIRD_PARTY_MODULES>", "^@/(.*)$", "^[./]"],
"importOrderSeparation": true,
"importOrderSortSpecifiers": true,
"overrides": [
{
"files": "*.{js,cjs,mjs,ts,cts,mts,tsx,vue}",
"options": {
"plugins": ["@trivago/prettier-plugin-sort-imports"]
}
}
]
"plugins": ["@prettier/plugin-oxc", "@trivago/prettier-plugin-sort-imports"]
}
@@ -43,7 +43,7 @@ Have another idea? Drop into Discord or open an issue, and let's chat!
```

3. Configure environment (optional):
Create a `.env` file in the project root based on the provided [.env.example](.env.example) file.
Create a `.env` file in the project root based on the provided [.env_example](.env_example) file.

**Note about ports**: By default, the dev server expects the ComfyUI backend at `localhost:8188`. If your ComfyUI instance runs on a different port, update this in your `.env` file.

@@ -325,4 +325,4 @@ If you have questions about contributing:
- Ask in our [Discord](https://discord.com/invite/comfyorg)
- Open a new issue for clarification

Thank you for contributing to ComfyUI Frontend!
Thank you for contributing to ComfyUI Frontend!
@@ -2,7 +2,7 @@
<html lang="en">
<head>
<meta charset="UTF-8" />
<title>ComfyUI Desktop</title>
<title>ComfyUI</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=no" />
</head>
<body>

@@ -1,6 +1,6 @@
{
"name": "@comfyorg/desktop-ui",
"version": "0.0.1",
"version": "0.0.3",
"type": "module",
"nx": {
"tags": [
@@ -1,17 +1,17 @@
<template>
<div
class="grid grid-rows-[1fr_auto_auto_1fr] w-full max-w-3xl mx-auto h-[40rem] select-none"
class="mx-auto grid h-[40rem] w-full max-w-3xl grid-rows-[1fr_auto_auto_1fr] select-none"
>
<h2 class="font-inter font-bold text-3xl text-neutral-100 text-center">
<h2 class="text-center font-inter text-3xl font-bold text-neutral-100">
{{ $t('install.gpuPicker.title') }}
</h2>

<!-- GPU Selection buttons - takes up remaining space and centers content -->
<div class="flex-1 flex gap-8 justify-center items-center">
<div class="flex flex-1 items-center justify-center gap-8">
<!-- Apple Metal / NVIDIA -->
<HardwareOption
v-if="platform === 'darwin'"
:image-path="'/assets/images/apple-mps-logo.png'"
:image-path="'./assets/images/apple-mps-logo.png'"
placeholder-text="Apple Metal"
subtitle="Apple Metal"
:value="'mps'"
@@ -21,7 +21,7 @@
/>
<HardwareOption
v-else
:image-path="'/assets/images/nvidia-logo-square.jpg'"
:image-path="'./assets/images/nvidia-logo-square.jpg'"
placeholder-text="NVIDIA"
:subtitle="$t('install.gpuPicker.nvidiaSubtitle')"
:value="'nvidia'"
@@ -47,17 +47,17 @@
/>
</div>

<div class="pt-12 px-24 h-16">
<div class="h-16 px-24 pt-12">
<div v-show="showRecommendedBadge" class="flex items-center gap-2">
<Tag
:value="$t('install.gpuPicker.recommended')"
class="bg-neutral-300 text-neutral-900 rounded-full text-sm font-bold px-2 py-[1px]"
class="rounded-full bg-neutral-300 px-2 py-[1px] text-sm font-bold text-neutral-900"
/>
<i class="icon-[lucide--badge-check] text-neutral-300 text-lg" />
<i class="icon-[lucide--badge-check] text-lg text-neutral-300" />
</div>
</div>

<div class="text-neutral-300 px-24">
<div class="px-24 text-neutral-300">
<p v-show="descriptionText" class="leading-relaxed">
{{ descriptionText }}
</p>

@@ -66,17 +66,6 @@
@click="troubleshoot"
/>
</div>

<div class="text-center">
<button
v-if="!terminalVisible"
class="text-sm text-neutral-500 hover:text-neutral-300 transition-colors flex items-center gap-2 mx-auto"
@click="terminalVisible = true"
>
<i class="pi pi-search"></i>
{{ $t('serverStart.showTerminal') }}
</button>
</div>
</div>

<!-- Terminal Output (positioned at bottom when manually toggled in error state) -->
@@ -13,7 +13,7 @@ export class ComfyActionbar {

async isDocked() {
const className = await this.root.getAttribute('class')
return className?.includes('is-docked') ?? false
return className?.includes('static') ?? false
}
}

@@ -301,7 +301,9 @@ test.describe('Settings', () => {
})

test.describe('Support', () => {
test('Should open external zendesk link', async ({ comfyPage }) => {
test('Should open external zendesk link with OSS tag', async ({
comfyPage
}) => {
await comfyPage.setSetting('Comfy.UseNewMenu', 'Top')
const pagePromise = comfyPage.page.context().waitForEvent('page')
await comfyPage.menu.topbar.triggerTopbarCommand(['Help', 'Support'])
@@ -309,6 +311,10 @@ test.describe('Support', () => {

await newPage.waitForLoadState('networkidle')
await expect(newPage).toHaveURL(/.*support\.comfy\.org.*/)

const url = new URL(newPage.url())
expect(url.searchParams.get('tf_42243568391700')).toBe('oss')

await newPage.close()
})
})
(Binary diff: 21 Playwright screenshot snapshots regenerated; before/after file sizes changed only slightly, e.g. 58 KiB → 56 KiB.)
@@ -23,10 +23,10 @@ test.describe('Vue Nodes - LOD', () => {

const vueNodesContainer = comfyPage.vueNodes.nodes
const textboxesInNodes = vueNodesContainer.getByRole('textbox')
const buttonsInNodes = vueNodesContainer.getByRole('button')
const comboboxesInNodes = vueNodesContainer.getByRole('combobox')

await expect(textboxesInNodes.first()).toBeVisible()
await expect(buttonsInNodes.first()).toBeVisible()
await expect(comboboxesInNodes.first()).toBeVisible()

await comfyPage.zoom(120, 10)
await comfyPage.nextFrame()
@@ -34,7 +34,7 @@ test.describe('Vue Nodes - LOD', () => {
await expect(comfyPage.canvas).toHaveScreenshot('vue-nodes-lod-active.png')

await expect(textboxesInNodes.first()).toBeHidden()
await expect(buttonsInNodes.first()).toBeHidden()
await expect(comboboxesInNodes.first()).toBeHidden()

await comfyPage.zoom(-120, 10)
await comfyPage.nextFrame()
@@ -43,6 +43,6 @@ test.describe('Vue Nodes - LOD', () => {
'vue-nodes-lod-inactive.png'
)
await expect(textboxesInNodes.first()).toBeVisible()
await expect(buttonsInNodes.first()).toBeVisible()
await expect(comboboxesInNodes.first()).toBeVisible()
})
})
(Binary diff: 5 more screenshot snapshots regenerated; sizes changed slightly, e.g. 38 KiB → 34 KiB.)
@@ -6,6 +6,34 @@ interface ShimResult {
exports: string[]
}

const SKIP_WARNING_FILES = new Set(['scripts/app', 'scripts/api'])

/** Files that will be removed in v1.34 */
const DEPRECATED_FILES = [
'scripts/ui',
'extensions/core/maskEditorOld',
'extensions/core/groupNode'
] as const

function getWarningMessage(
fileKey: string,
shimFileName: string
): string | null {
if (SKIP_WARNING_FILES.has(fileKey)) {
return null
}

const isDeprecated = DEPRECATED_FILES.some((deprecatedPath) =>
fileKey.startsWith(deprecatedPath)
)

if (isDeprecated) {
return `[ComfyUI Deprecated] Importing from "${shimFileName}" is deprecated and will be removed in v1.34.`
}

return `[ComfyUI Notice] "${shimFileName}" is an internal module, not part of the public API. Future updates may break this import.`
}

function isLegacyFile(id: string): boolean {
return (
id.endsWith('.ts') &&
@@ -63,12 +91,22 @@ export function comfyAPIPlugin(isDev: boolean): Plugin {
const relativePath = path.relative(path.join(projectRoot, 'src'), id)
const shimFileName = relativePath.replace(/\.ts$/, '.js')

const shimComment = `// Shim for ${relativePath}\n`
let shimContent = `// Shim for ${relativePath}\n`

const fileKey = relativePath.replace(/\.ts$/, '').replace(/\\/g, '/')
const warningMessage = getWarningMessage(fileKey, shimFileName)

if (warningMessage) {
// It will only display once because it is at the root of the file.
shimContent += `console.warn('${warningMessage}');\n`
}

shimContent += result.exports.join('')

this.emitFile({
type: 'asset',
fileName: shimFileName,
source: shimComment + result.exports.join('')
source: shimContent
})
}
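For orientation, a shim emitted for one of the deprecated entries might look roughly like the following (illustrative sketch only — the re-export line and the window.comfyAPI shape are assumptions, not taken from this diff; only the warning string comes from the plugin above):

// Shim for scripts/ui.ts
console.warn('[ComfyUI Deprecated] Importing from "scripts/ui.js" is deprecated and will be removed in v1.34.');
export const ComfyUI = window.comfyAPI.ui.ComfyUI; // assumed re-export generated from the module's real exports

Because the warning sits at the top of the emitted file, it fires once per page load per shim, regardless of how many symbols a custom node imports from it.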
15  global.d.ts  vendored
@@ -4,12 +4,19 @@ declare const __SENTRY_DSN__: string
declare const __ALGOLIA_APP_ID__: string
declare const __ALGOLIA_API_KEY__: string
declare const __USE_PROD_CONFIG__: boolean
declare const __MIXPANEL_TOKEN__: string

type BuildFeatureFlags = {
REQUIRE_SUBSCRIPTION: boolean
interface Window {
__CONFIG__: {
mixpanel_token?: string
subscription_required?: boolean
server_health_alert?: {
message: string
tooltip?: string
severity?: 'info' | 'warning' | 'error'
badge?: string
}
}
}
declare const __BUILD_FLAGS__: BuildFeatureFlags

interface Navigator {
/**

@@ -34,9 +34,7 @@ const config: KnipConfig = {
'@primeuix/forms',
'@primeuix/styled',
'@primeuix/utils',
'@primevue/icons',
// Dev
'@trivago/prettier-plugin-sort-imports'
'@primevue/icons'
],
ignore: [
// Auto generated manager types
@@ -8,8 +8,10 @@ export default {
}

function formatAndEslint(fileNames) {
// Convert absolute paths to relative paths for better ESLint resolution
const relativePaths = fileNames.map((f) => f.replace(process.cwd() + '/', ''))
return [
`pnpm exec eslint --cache --fix ${fileNames.join(' ')}`,
`pnpm exec prettier --cache --write ${fileNames.join(' ')}`
`pnpm exec eslint --cache --fix ${relativePaths.join(' ')}`,
`pnpm exec prettier --cache --write ${relativePaths.join(' ')}`
]
}
@@ -1,7 +1,7 @@
{
"name": "@comfyorg/comfyui-frontend",
"private": true,
"version": "1.30.1",
"version": "1.30.3",
"type": "module",
"repository": "https://github.com/Comfy-Org/ComfyUI_frontend",
"homepage": "https://comfy.org",
@@ -55,6 +55,7 @@
"@nx/vite": "catalog:",
"@pinia/testing": "catalog:",
"@playwright/test": "catalog:",
"@prettier/plugin-oxc": "catalog:",
"@storybook/addon-docs": "catalog:",
"@storybook/vue3": "catalog:",
"@storybook/vue3-vite": "catalog:",
@@ -200,7 +200,7 @@
--node-stroke-executing: var(--color-blue-100);
--text-secondary: var(--color-stone-100);
--text-primary: var(--color-charcoal-700);
--input-surface: rgba(0, 0, 0, 0.15);
--input-surface: rgb(0 0 0 / 0.15);
}

.dark-theme {
@@ -247,7 +247,7 @@
--node-stroke-executing: var(--color-blue-100);
--text-secondary: var(--color-slate-100);
--text-primary: var(--color-pure-white);
--input-surface: rgba(130, 130, 130, 0.1);
--input-surface: rgb(130 130 130 / 0.1);
}

@theme inline {
@@ -258,9 +258,15 @@
--color-button-surface: var(--button-surface);
--color-button-surface-contrast: var(--button-surface-contrast);
--color-dialog-surface: var(--dialog-surface);
--color-interface-menu-component-surface-hovered: var(--interface-menu-component-surface-hovered);
--color-interface-menu-component-surface-selected: var(--interface-menu-component-surface-selected);
--color-interface-menu-keybind-surface-default: var(--interface-menu-keybind-surface-default);
--color-interface-menu-component-surface-hovered: var(
--interface-menu-component-surface-hovered
);
--color-interface-menu-component-surface-selected: var(
--interface-menu-component-surface-selected
);
--color-interface-menu-keybind-surface-default: var(
--interface-menu-keybind-surface-default
);
--color-interface-panel-surface: var(--interface-panel-surface);
--color-interface-stroke: var(--interface-stroke);
--color-nav-background: var(--nav-background);
@@ -324,7 +330,42 @@
}
}

/* Everything below here to be cleaned up over time. */

/* ===================== Scrollbar Utilities (Tailwind) =====================
Usage: Add `scrollbar-custom` class to scrollable containers.
The scrollbar styling adapts to light/dark theme automatically.
============================================================================ */

@utility scrollbar-custom {
overflow-y: auto;
/* Firefox */
scrollbar-width: thin;
scrollbar-color: var(--dialog-surface) transparent;

/* WebKit */
&::-webkit-scrollbar {
width: 10px;
height: 10px;
background-color: transparent;
}
&::-webkit-scrollbar-track {
background: transparent;
}
&::-webkit-scrollbar-thumb {
background: var(--dialog-surface);
border-radius: 9999px;
border: 2px solid transparent;
}
&::-webkit-scrollbar-thumb:hover {
background: var(--dialog-surface);
}
&::-webkit-scrollbar-corner {
background: transparent;
}
}
/* =================== End Custom Scrollbar (cross-browser) =================== */

/* Everthing below here to be cleaned up over time. */

body {
width: 100vw;
@@ -1139,7 +1180,7 @@ audio.comfy-audio.empty-audio-widget {
}

.isLOD .lg-node-header {
border-radius: 0px;
border-radius: 0;
pointer-events: none;
}
179  pnpm-lock.yaml  generated
(Generated lockfile diff, collapsed: registers '@prettier/plugin-oxc' ^0.0.4 in the catalog and root importer, and adds resolution and snapshot entries for oxc-parser 0.74.0 together with its optional per-platform '@oxc-parser/binding-*' packages.)
@@ -16,6 +16,7 @@ catalog:
'@nx/vite': 21.4.1
'@pinia/testing': ^0.1.5
'@playwright/test': ^1.52.0
'@prettier/plugin-oxc': ^0.0.4
'@primeuix/forms': 0.0.2
'@primeuix/styled': 0.3.2
'@primeuix/utils': ^0.3.2
@@ -54,18 +54,50 @@ self.addEventListener('fetch', (event) => {
headers.set(key, value)
}

return fetch(
// Fetch with manual redirect to handle cross-origin redirects (e.g., GCS signed URLs)
const response = await fetch(
new Request(event.request.url, {
method: event.request.method,
headers: headers,
mode: 'same-origin',
credentials: event.request.credentials,
cache: 'no-store',
redirect: event.request.redirect,
redirect: 'manual',
referrer: event.request.referrer,
integrity: event.request.integrity
})
)

// Handle redirects to external storage (e.g., GCS signed URLs)
if (response.type === 'opaqueredirect') {
// Opaqueredirect: redirect occurred but response is opaque (headers not accessible)
// Re-fetch the original /api/view URL with redirect: 'follow' and mode: 'no-cors'
// - mode: 'no-cors' allows cross-origin fetches without CORS headers (GCS doesn't have CORS)
// - Returns opaque response, which works fine for images/videos/audio
// - Browser will send auth headers to /api/view (same-origin)
// - Browser will receive 302 redirect to GCS
// - Browser will follow redirect using GCS signed URL authentication
return fetch(event.request.url, {
method: 'GET',
headers: headers,
redirect: 'follow',
mode: 'no-cors'
})
}

// Non-opaque redirect (status visible) - shouldn't normally happen with redirect: 'manual'
// but handle as fallback
if (response.status === 302 || response.status === 301) {
const location = response.headers.get('location')
if (location) {
// Follow redirect manually - do NOT include auth headers for external URLs
return fetch(location, {
method: 'GET',
redirect: 'follow'
})
}
}

return response
} catch (error) {
console.error('[Auth SW] Request failed:', error)
return fetch(event.request)
@@ -1,29 +1,64 @@
/**
* Utility functions for downloading files
*/
import { isCloud } from '@/platform/distribution/types'

// Constants
const DEFAULT_DOWNLOAD_FILENAME = 'download.png'

/**
* Trigger a download by creating a temporary anchor element
* @param href - The URL or blob URL to download
* @param filename - The filename to suggest to the browser
*/
function triggerLinkDownload(href: string, filename: string): void {
const link = document.createElement('a')
link.href = href
link.download = filename
link.style.display = 'none'

document.body.appendChild(link)
link.click()
document.body.removeChild(link)
}

/**
* Download a file from a URL by creating a temporary anchor element
* @param url - The URL of the file to download (must be a valid URL string)
* @param filename - Optional filename override (will use URL filename or default if not provided)
* @throws {Error} If the URL is invalid or empty
*/
export const downloadFile = (url: string, filename?: string): void => {
export function downloadFile(url: string, filename?: string): void {
if (!url || typeof url !== 'string' || url.trim().length === 0) {
throw new Error('Invalid URL provided for download')
}
const link = document.createElement('a')
link.href = url
link.download =

const inferredFilename =
filename || extractFilenameFromUrl(url) || DEFAULT_DOWNLOAD_FILENAME

// Trigger download
document.body.appendChild(link)
link.click()
document.body.removeChild(link)
if (isCloud) {
// Assets from cross-origin (e.g., GCS) cannot be downloaded this way
void downloadViaBlobFetch(url, inferredFilename).catch((error) => {
console.error('Failed to download file', error)
})
return
}

triggerLinkDownload(url, inferredFilename)
}

/**
* Download a Blob by creating a temporary object URL and anchor element
* @param filename - The filename to suggest to the browser
* @param blob - The Blob to download
*/
export function downloadBlob(filename: string, blob: Blob): void {
const url = URL.createObjectURL(blob)

triggerLinkDownload(url, filename)

// Revoke on the next microtask to give the browser time to start the download
queueMicrotask(() => URL.revokeObjectURL(url))
}

/**
@@ -39,3 +74,15 @@ const extractFilenameFromUrl = (url: string): string | null => {
return null
}
}

const downloadViaBlobFetch = async (
href: string,
filename: string
): Promise<void> => {
const response = await fetch(href)
if (!response.ok) {
throw new Error(`Failed to fetch ${href}: ${response.status}`)
}
const blob = await response.blob()
downloadBlob(filename, blob)
}
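A rough usage sketch of the download helpers above (the call sites and URLs are hypothetical, not part of this diff): on cloud builds downloadFile now fetches the asset and saves it through a blob object URL, while other builds still use the plain anchor click.

const json = JSON.stringify({ nodes: [] }) // hypothetical payload
downloadFile('https://example.com/outputs/image_0001.png') // cloud: fetch -> blob -> anchor; desktop/localhost: direct anchor
downloadFile('/api/view?filename=result.png', 'result.png') // explicit filename override
downloadBlob('workflow.json', new Blob([json], { type: 'application/json' })) // object URL revoked on the next microtask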
@@ -42,7 +42,7 @@
<slot name="topmenu" :sidebar-panel-visible="sidebarPanelVisible" />

<Splitter
class="splitter-overlay splitter-overlay-bottom mr-2 mb-2 ml-2 flex-1"
class="splitter-overlay splitter-overlay-bottom mr-1 mb-1 ml-1 flex-1"
layout="vertical"
:pt:gutter="
'rounded-tl-lg rounded-tr-lg ' +

@@ -1,11 +1,11 @@
<template>
<div v-if="!workspaceStore.focusMode" class="ml-2 flex pt-2">
<div v-if="!workspaceStore.focusMode" class="ml-2 flex pt-1">
<div class="min-w-0 flex-1">
<SubgraphBreadcrumb />
</div>

<div
class="actionbar-container pointer-events-auto mx-2 flex h-12 items-center rounded-lg px-2 shadow-md"
class="actionbar-container pointer-events-auto mx-1 flex h-12 items-center rounded-lg px-2 shadow-md"
>
<!-- Support for legacy topbar elements attached by custom scripts, hidden if no elements present -->
<div
@@ -13,8 +13,8 @@
class="[&:not(:has(*>*:not(:empty)))]:hidden"
></div>
<ComfyActionbar />
<LoginButton v-if="!isLoggedIn" />
<CurrentUserButton v-else class="shrink-0" />
<CurrentUserButton v-if="isLoggedIn" class="shrink-0" />
<LoginButton v-else-if="isDesktop" />
</div>
</div>
</template>
@@ -29,9 +29,11 @@ import LoginButton from '@/components/topbar/LoginButton.vue'
import { useCurrentUser } from '@/composables/auth/useCurrentUser'
import { app } from '@/scripts/app'
import { useWorkspaceStore } from '@/stores/workspaceStore'
import { isElectron } from '@/utils/envUtil'

const workspaceStore = useWorkspaceStore()
const { isLoggedIn } = useCurrentUser()
const isDesktop = isElectron()

// Maintain support for legacy topbar elements attached by custom scripts
const legacyCommandsContainerRef = ref<HTMLElement>()
@@ -2,10 +2,7 @@
|
||||
<div class="flex h-full items-center">
|
||||
<div
|
||||
v-if="isDragging && !isDocked"
|
||||
class="actionbar-drop-zone m-1.5 flex items-center justify-center self-stretch rounded-md"
|
||||
:class="{
|
||||
'drop-zone-active': isMouseOverDropZone
|
||||
}"
|
||||
:class="actionbarClass"
|
||||
@mouseenter="onMouseEnterDropZone"
|
||||
@mouseleave="onMouseLeaveDropZone"
|
||||
>
|
||||
@@ -13,18 +10,15 @@
|
||||
</div>
|
||||
|
||||
<Panel
|
||||
class="actionbar"
|
||||
class="pointer-events-auto z-1000"
|
||||
:style="style"
|
||||
:class="{
|
||||
fixed: !isDocked,
|
||||
'is-dragging': isDragging,
|
||||
'is-docked static mr-2 border-none bg-transparent p-0': isDocked
|
||||
:class="panelClass"
|
||||
:pt="{
|
||||
header: { class: 'hidden' },
|
||||
content: { class: isDocked ? 'p-0' : 'p-1' }
|
||||
}"
|
||||
>
|
||||
<div
|
||||
ref="panelRef"
|
||||
class="actionbar-content flex items-center select-none"
|
||||
>
|
||||
<div ref="panelRef" class="flex items-center select-none">
|
||||
<span
|
||||
ref="dragHandleRef"
|
||||
:class="
|
||||
@@ -251,45 +245,20 @@ watch(isDragging, (dragging) => {
|
||||
isMouseOverDropZone.value = false
|
||||
}
|
||||
})
|
||||
const actionbarClass = computed(() =>
|
||||
cn(
|
||||
'w-[265px] border-dashed border-blue-500 opacity-80',
|
||||
'm-1.5 flex items-center justify-center self-stretch',
|
||||
'rounded-md before:w-50 before:-ml-50 before:h-full',
|
||||
isMouseOverDropZone.value &&
|
||||
'border-[3px] opacity-100 scale-105 shadow-[0_0_20px] shadow-blue-500'
|
||||
)
|
||||
)
|
||||
const panelClass = computed(() =>
|
||||
cn(
|
||||
'actionbar pointer-events-auto z1000',
|
||||
isDragging.value && 'select-none pointer-events-none',
|
||||
isDocked.value ? 'p-0 static mr-2 border-none bg-transparent' : 'fixed'
|
||||
)
|
||||
)
|
||||
</script>
|
||||
|
||||
<style scoped>
|
||||
@reference '../../assets/css/style.css';
|
||||
|
||||
.actionbar {
|
||||
pointer-events: all;
|
||||
z-index: 1000;
|
||||
}
|
||||
|
||||
.actionbar-drop-zone {
|
||||
width: 265px;
|
||||
border: 2px dashed var(--p-primary-color);
|
||||
opacity: 0.8;
|
||||
}
|
||||
|
||||
.actionbar-drop-zone.drop-zone-active {
|
||||
background: var(--p-highlight-background-focus);
|
||||
border-color: var(--p-primary-color);
|
||||
border-width: 3px;
|
||||
box-shadow: 0 0 20px var(--p-primary-color);
|
||||
opacity: 1;
|
||||
transform: scale(1.05);
|
||||
}
|
||||
|
||||
.actionbar.is-dragging {
|
||||
user-select: none;
|
||||
pointer-events: none;
|
||||
}
|
||||
|
||||
:deep(.p-panel-content) {
|
||||
@apply p-1;
|
||||
}
|
||||
|
||||
.is-docked :deep(.p-panel-content) {
|
||||
@apply p-0;
|
||||
}
|
||||
|
||||
:deep(.p-panel-header) {
|
||||
display: none;
|
||||
}
|
||||
</style>
|
||||
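Note: the new actionbarClass/panelClass computeds above rely on a cn() helper whose import falls outside this hunk. A minimal sketch of the usual implementation (assumption: clsx + tailwind-merge, as is common in this stack; not taken from the repo):

import { clsx, type ClassValue } from 'clsx'
import { twMerge } from 'tailwind-merge'

// Merge conditional class inputs and resolve conflicting Tailwind utilities.
export function cn(...inputs: ClassValue[]): string {
  return twMerge(clsx(inputs))
}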
@@ -2,6 +2,6 @@ import { defineAsyncComponent } from 'vue'
|
||||
|
||||
import { isCloud } from '@/platform/distribution/types'
|
||||
|
||||
export default isCloud && __BUILD_FLAGS__.REQUIRE_SUBSCRIPTION
|
||||
export default isCloud && window.__CONFIG__?.subscription_required
|
||||
? defineAsyncComponent(() => import('./CloudRunButtonWrapper.vue'))
|
||||
: defineAsyncComponent(() => import('./ComfyQueueButton.vue'))
|
||||
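Note: the run-button selection above now reads a runtime flag instead of the build-time __BUILD_FLAGS__ constant. For `window.__CONFIG__?.subscription_required` to type-check under strict TypeScript, an ambient declaration along these lines is assumed to exist somewhere in the repo (the exact payload shape is a guess):

// Hypothetical global.d.ts entry; the real config injected by the cloud host may differ.
declare global {
  interface Window {
    __CONFIG__?: {
      subscription_required?: boolean
    }
  }
}

export {} // keeps this file a module so the global augmentation applies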
@@ -18,7 +18,7 @@
|
||||
class="w-fit rounded-lg p-0"
|
||||
:model="items"
|
||||
:pt="{ item: { class: 'pointer-events-auto' } }"
|
||||
aria-label="Graph navigation"
|
||||
:aria-label="$t('g.graphNavigation')"
|
||||
>
|
||||
<template #item="{ item }">
|
||||
<SubgraphBreadcrumbItem
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
value: item.label,
|
||||
showDelay: 512
|
||||
}"
|
||||
draggable="false"
|
||||
href="#"
|
||||
class="p-breadcrumb-item-link h-12 cursor-pointer px-2"
|
||||
:class="{
|
||||
|
||||
@@ -46,7 +46,12 @@
|
||||
: $t('manager.installAllMissingNodes')
|
||||
"
|
||||
/>
|
||||
<Button label="Open Manager" size="small" outlined @click="openManager" />
|
||||
<Button
|
||||
:label="$t('g.openManager')"
|
||||
size="small"
|
||||
outlined
|
||||
@click="openManager"
|
||||
/>
|
||||
</div>
|
||||
</template>
|
||||
|
||||
|
||||
@@ -89,7 +89,7 @@
|
||||
<img
|
||||
src="/assets/images/comfy-logo-mono.svg"
|
||||
class="mr-2 h-5 w-5"
|
||||
alt="Comfy"
|
||||
:alt="$t('g.comfy')"
|
||||
/>
|
||||
{{ t('auth.login.useApiKey') }}
|
||||
</Button>
|
||||
|
||||
@@ -89,7 +89,7 @@
|
||||
ref="keybindingInput"
|
||||
class="mb-2 text-center"
|
||||
:model-value="newBindingKeyCombo?.toString() ?? ''"
|
||||
placeholder="Press keys for new binding"
|
||||
:placeholder="$t('g.pressKeysForNewBinding')"
|
||||
autocomplete="off"
|
||||
fluid
|
||||
@keydown.stop.prevent="captureKeybinding"
|
||||
|
||||
@@ -27,28 +27,6 @@
|
||||
|
||||
<PasswordFields />
|
||||
|
||||
<!-- Personal Data Consent Checkbox -->
|
||||
<FormField
|
||||
v-slot="$field"
|
||||
name="personalDataConsent"
|
||||
class="flex items-center gap-2"
|
||||
>
|
||||
<Checkbox
|
||||
input-id="comfy-org-sign-up-personal-data-consent"
|
||||
:binary="true"
|
||||
:invalid="$field.invalid"
|
||||
/>
|
||||
<label
|
||||
for="comfy-org-sign-up-personal-data-consent"
|
||||
class="text-base font-medium opacity-80"
|
||||
>
|
||||
{{ t('auth.signup.personalDataConsentLabel') }}
|
||||
</label>
|
||||
<small v-if="$field.error" class="-mt-4 text-red-500">{{
|
||||
$field.error.message
|
||||
}}</small>
|
||||
</FormField>
|
||||
|
||||
<!-- Submit Button -->
|
||||
<Button
|
||||
type="submit"
|
||||
@@ -63,7 +41,6 @@ import type { FormSubmitEvent } from '@primevue/forms'
|
||||
import { Form, FormField } from '@primevue/forms'
|
||||
import { zodResolver } from '@primevue/forms/resolvers/zod'
|
||||
import Button from 'primevue/button'
|
||||
import Checkbox from 'primevue/checkbox'
|
||||
import InputText from 'primevue/inputtext'
|
||||
import { useI18n } from 'vue-i18n'
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
<div class="px-2 py-4">
|
||||
<img
|
||||
src="/assets/images/comfy-logo-single.svg"
|
||||
alt="ComfyOrg Logo"
|
||||
:alt="$t('g.comfyOrgLogoAlt')"
|
||||
width="32"
|
||||
height="32"
|
||||
/>
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
v-if="isNativeWindow() && workflowTabsPosition !== 'Topbar'"
|
||||
class="app-drag fixed top-0 left-0 z-10 h-[var(--comfy-topbar-height)] w-full"
|
||||
/>
|
||||
<div class="flex">
|
||||
<div class="flex h-full items-center">
|
||||
<WorkflowTabs />
|
||||
<TopbarBadges />
|
||||
</div>
|
||||
@@ -420,9 +420,7 @@ onMounted(async () => {
|
||||
throw error
|
||||
}
|
||||
}
|
||||
CORE_SETTINGS.forEach((setting) => {
|
||||
settingStore.addSetting(setting)
|
||||
})
|
||||
CORE_SETTINGS.forEach(settingStore.addSetting)
|
||||
|
||||
await newUserService().initializeIfNewUser(settingStore)
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
<template>
|
||||
<div
|
||||
v-if="visible"
|
||||
class="absolute right-2 bottom-[66px] z-1300 flex w-[250px] justify-center border-0! bg-inherit!"
|
||||
class="absolute right-0 bottom-[62px] z-1300 flex w-[250px] justify-center border-0! bg-inherit!"
|
||||
>
|
||||
<div
|
||||
class="w-4/5 rounded-lg border border-node-border bg-interface-panel-surface p-2 text-text-primary shadow-lg select-none"
|
||||
|
||||
@@ -243,7 +243,7 @@ const pt = computed(() => ({
|
||||
},
|
||||
listContainer: () => ({
|
||||
style: { maxHeight: listMaxHeight },
|
||||
class: 'overflow-y-auto scrollbar-hide'
|
||||
class: 'scrollbar-custom'
|
||||
}),
|
||||
list: {
|
||||
class: 'flex flex-col gap-0 p-0 m-0 list-none border-none text-sm'
|
||||
|
||||
@@ -159,7 +159,7 @@ const pt = computed(() => ({
|
||||
},
|
||||
listContainer: () => ({
|
||||
style: `max-height: ${listMaxHeight}`,
|
||||
class: 'overflow-y-auto scrollbar-hide'
|
||||
class: 'scrollbar-custom'
|
||||
}),
|
||||
list: {
|
||||
class:
|
||||
|
||||
@@ -35,6 +35,7 @@
|
||||
</Dialog>
|
||||
|
||||
<AutoCompletePlus
|
||||
ref="autoCompletePlus"
|
||||
:model-value="filters"
|
||||
class="comfy-vue-node-search-box z-10 grow"
|
||||
scroll-height="40vh"
|
||||
@@ -42,7 +43,6 @@
|
||||
:input-id="inputId"
|
||||
append-to="self"
|
||||
:suggestions="suggestions"
|
||||
:min-length="0"
|
||||
:delay="100"
|
||||
:loading="!nodeFrequencyStore.isLoaded"
|
||||
complete-on-focus
|
||||
@@ -106,6 +106,7 @@ const { filters, searchLimit = 64 } = defineProps<{
|
||||
searchLimit?: number
|
||||
}>()
|
||||
|
||||
const autoCompletePlus = ref()
|
||||
const nodeSearchFilterVisible = ref(false)
|
||||
const inputId = `comfy-vue-node-search-box-input-${Math.random()}`
|
||||
const suggestions = ref<ComfyNodeDefImpl[]>([])
|
||||
@@ -140,7 +141,13 @@ const reFocusInput = async () => {
|
||||
}
|
||||
}
|
||||
|
||||
onMounted(reFocusInput)
|
||||
onMounted(() => {
|
||||
inputElement ??= document.getElementById(inputId) as HTMLInputElement
|
||||
if (inputElement) inputElement.focus()
|
||||
autoCompletePlus.value.hide = () => search('')
|
||||
search('')
|
||||
autoCompletePlus.value.show()
|
||||
})
|
||||
const onAddFilter = (
|
||||
filterAndValue: FuseFilterWithValue<ComfyNodeDefImpl, string>
|
||||
) => {
|
||||
|
||||
@@ -180,7 +180,7 @@ onMounted(() => {
|
||||
* but need to reference sidebar dimensions for proper positioning.
|
||||
*/
|
||||
:root {
|
||||
--sidebar-padding: 8px;
|
||||
--sidebar-padding: 4px;
|
||||
--sidebar-icon-size: 1rem;
|
||||
|
||||
--sidebar-default-floating-width: 56px;
|
||||
|
||||
@@ -59,7 +59,7 @@
|
||||
|
||||
<teleport v-if="isHovered" to="#node-library-node-preview-container">
|
||||
<div class="node-lib-node-preview" :style="nodePreviewStyle">
|
||||
<NodePreview ref="previewRef" :node-def="nodeDef" />
|
||||
<NodePreview :node-def="nodeDef" />
|
||||
</div>
|
||||
</teleport>
|
||||
</div>
|
||||
@@ -132,11 +132,12 @@ function deleteBlueprint() {
|
||||
void useSubgraphStore().deleteBlueprint(props.node.data.name)
|
||||
}
|
||||
|
||||
const previewRef = ref<InstanceType<typeof NodePreview> | null>(null)
|
||||
const nodePreviewStyle = ref<CSSProperties>({
|
||||
position: 'absolute',
|
||||
position: 'fixed',
|
||||
top: '0px',
|
||||
left: '0px'
|
||||
left: '0px',
|
||||
pointerEvents: 'none',
|
||||
zIndex: 1001
|
||||
})
|
||||
|
||||
const handleNodeHover = async () => {
|
||||
@@ -144,19 +145,15 @@ const handleNodeHover = async () => {
|
||||
if (!hoverTarget) return
|
||||
|
||||
const targetRect = hoverTarget.getBoundingClientRect()
|
||||
const margin = 40
|
||||
|
||||
const previewHeight = previewRef.value?.$el.offsetHeight || 0
|
||||
const availableSpaceBelow = window.innerHeight - targetRect.bottom
|
||||
|
||||
nodePreviewStyle.value.top =
|
||||
previewHeight > availableSpaceBelow
|
||||
? `${Math.max(0, targetRect.top - (previewHeight - availableSpaceBelow) - 20)}px`
|
||||
: `${targetRect.top - 40}px`
|
||||
if (sidebarLocation.value === 'left') {
|
||||
nodePreviewStyle.value.left = `${targetRect.right}px`
|
||||
} else {
|
||||
nodePreviewStyle.value.left = `${targetRect.left - 400}px`
|
||||
}
|
||||
nodePreviewStyle.value.top = `${targetRect.top}px`
|
||||
nodePreviewStyle.value.left =
|
||||
sidebarLocation.value === 'left'
|
||||
? `${targetRect.right + margin}px`
|
||||
: `${targetRect.left - margin}px`
|
||||
nodePreviewStyle.value.transform =
|
||||
sidebarLocation.value === 'right' ? 'translateX(-100%)' : undefined
|
||||
}
|
||||
|
||||
const container = ref<HTMLElement | null>(null)
|
||||
|
||||
@@ -64,7 +64,7 @@ function updateToastPosition() {
|
||||
|
||||
styleElement.textContent = `
|
||||
.p-toast.p-component.p-toast-top-right {
|
||||
top: ${rect.top + 20}px !important;
|
||||
top: ${rect.top + 100}px !important;
|
||||
right: ${window.innerWidth - (rect.left + rect.width) + 20}px !important;
|
||||
}
|
||||
`
|
||||
|
||||
@@ -1,39 +1,83 @@
|
||||
<template>
|
||||
<div
|
||||
class="flex items-center gap-2 bg-comfy-menu-secondary"
|
||||
v-tooltip="badge.tooltip"
|
||||
class="flex h-full shrink-0 items-center gap-2 whitespace-nowrap"
|
||||
:class="[{ 'flex-row-reverse': reverseOrder }, noPadding ? '' : 'px-3']"
|
||||
:style="{ backgroundColor: 'var(--comfy-menu-bg)' }"
|
||||
>
|
||||
<i
|
||||
v-if="iconClass"
|
||||
:class="['shrink-0 text-base', iconClass, iconColorClass]"
|
||||
/>
|
||||
<div
|
||||
v-if="badge.label"
|
||||
class="rounded-full bg-white px-1.5 py-0.5 text-xxxs font-semibold text-black"
|
||||
:class="labelClass"
|
||||
class="shrink-0 rounded-full px-1.5 py-0.5 text-xxxs font-semibold"
|
||||
:class="labelClasses"
|
||||
>
|
||||
{{ badge.label }}
|
||||
</div>
|
||||
<div
|
||||
class="font-inter text-sm font-extrabold text-slate-100"
|
||||
:class="textClass"
|
||||
>
|
||||
<div class="font-inter text-sm font-extrabold" :class="textClasses">
|
||||
{{ badge.text }}
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
<script setup lang="ts">
|
||||
import { computed } from 'vue'
|
||||
|
||||
import type { TopbarBadge } from '@/types/comfy'
|
||||
|
||||
withDefaults(
|
||||
const props = withDefaults(
|
||||
defineProps<{
|
||||
badge: TopbarBadge
|
||||
reverseOrder?: boolean
|
||||
noPadding?: boolean
|
||||
labelClass?: string
|
||||
textClass?: string
|
||||
}>(),
|
||||
{
|
||||
reverseOrder: false,
|
||||
noPadding: false,
|
||||
labelClass: '',
|
||||
textClass: ''
|
||||
noPadding: false
|
||||
}
|
||||
)
|
||||
|
||||
const variant = computed(() => props.badge.variant ?? 'info')
|
||||
|
||||
const labelClasses = computed(() => {
|
||||
switch (variant.value) {
|
||||
case 'error':
|
||||
return 'bg-danger-100 text-white'
|
||||
case 'warning':
|
||||
return 'bg-warning-100 text-black'
|
||||
case 'info':
|
||||
default:
|
||||
return 'bg-white text-black'
|
||||
}
|
||||
})
|
||||
|
||||
const textClasses = computed(() => {
|
||||
switch (variant.value) {
|
||||
case 'error':
|
||||
return 'text-danger-100'
|
||||
case 'warning':
|
||||
return 'text-warning-100'
|
||||
case 'info':
|
||||
default:
|
||||
return 'text-slate-100'
|
||||
}
|
||||
})
|
||||
|
||||
const iconColorClass = computed(() => textClasses.value)
|
||||
|
||||
const iconClass = computed(() => {
|
||||
if (props.badge.icon) {
|
||||
return props.badge.icon
|
||||
}
|
||||
switch (variant.value) {
|
||||
case 'error':
|
||||
return 'pi pi-exclamation-circle'
|
||||
case 'warning':
|
||||
return 'pi pi-exclamation-triangle'
|
||||
case 'info':
|
||||
default:
|
||||
return undefined
|
||||
}
|
||||
})
|
||||
</script>
|
||||
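Note: TopbarBadge.vue now reads badge.variant, badge.tooltip, and badge.icon, so the TopbarBadge type in '@/types/comfy' presumably carries those optional fields. A hedged sketch of that shape, inferred from the usage above rather than from the actual type file:

// Assumed shape; field names inferred from the template and computeds above.
export interface TopbarBadge {
  text: string
  label?: string
  tooltip?: string
  icon?: string // e.g. a PrimeIcons class such as 'pi pi-cloud'
  variant?: 'info' | 'warning' | 'error'
}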
@@ -1,18 +1,18 @@
|
||||
<template>
|
||||
<div class="flex">
|
||||
<div v-if="notMobile" class="flex h-full shrink-0 items-center">
|
||||
<TopbarBadge
|
||||
v-for="badge in topbarBadgeStore.badges"
|
||||
:key="badge.text"
|
||||
:badge
|
||||
:reverse-order="reverseOrder"
|
||||
:no-padding="noPadding"
|
||||
:label-class="labelClass"
|
||||
:text-class="textClass"
|
||||
/>
|
||||
</div>
|
||||
</template>
|
||||
|
||||
<script lang="ts" setup>
|
||||
import { useBreakpoints } from '@vueuse/core'
|
||||
|
||||
import { useTopbarBadgeStore } from '@/stores/topbarBadgeStore'
|
||||
|
||||
import TopbarBadge from './TopbarBadge.vue'
|
||||
@@ -21,16 +21,16 @@ withDefaults(
|
||||
defineProps<{
|
||||
reverseOrder?: boolean
|
||||
noPadding?: boolean
|
||||
labelClass?: string
|
||||
textClass?: string
|
||||
}>(),
|
||||
{
|
||||
reverseOrder: false,
|
||||
noPadding: false,
|
||||
labelClass: '',
|
||||
textClass: ''
|
||||
noPadding: false
|
||||
}
|
||||
)
|
||||
|
||||
const BREAKPOINTS = { md: 880 }
|
||||
const breakpoints = useBreakpoints(BREAKPOINTS)
|
||||
const notMobile = breakpoints.greater('md')
|
||||
|
||||
const topbarBadgeStore = useTopbarBadgeStore()
|
||||
</script>
|
||||
|
||||
@@ -1,10 +1,12 @@
|
||||
import { FirebaseError } from 'firebase/app'
|
||||
import { AuthErrorCodes } from 'firebase/auth'
|
||||
import { ref } from 'vue'
|
||||
import { useRouter } from 'vue-router'
|
||||
|
||||
import { useErrorHandling } from '@/composables/useErrorHandling'
|
||||
import type { ErrorRecoveryStrategy } from '@/composables/useErrorHandling'
|
||||
import { t } from '@/i18n'
|
||||
import { isCloud } from '@/platform/distribution/types'
|
||||
import { useToastStore } from '@/platform/updates/common/toastStore'
|
||||
import { useDialogService } from '@/services/dialogService'
|
||||
import { useFirebaseAuthStore } from '@/stores/firebaseAuthStore'
|
||||
@@ -54,6 +56,16 @@ export const useFirebaseAuthActions = () => {
|
||||
detail: t('auth.signOut.successDetail'),
|
||||
life: 5000
|
||||
})
|
||||
|
||||
if (isCloud) {
|
||||
try {
|
||||
const router = useRouter()
|
||||
await router.push({ name: 'cloud-login' })
|
||||
} catch (error) {
|
||||
// Fallback for local development until the cloud login pages exist: reload instead.
|
||||
window.location.reload()
|
||||
}
|
||||
}
|
||||
}, reportError)
|
||||
|
||||
const sendPasswordReset = wrapWithErrorHandlingAsync(
|
||||
|
||||
@@ -20,6 +20,7 @@ import type { WidgetValue } from '@/types/simplifiedWidget'
|
||||
|
||||
import type {
|
||||
LGraph,
|
||||
LGraphBadge,
|
||||
LGraphNode,
|
||||
LGraphTriggerAction,
|
||||
LGraphTriggerEvent,
|
||||
@@ -51,6 +52,8 @@ export interface VueNodeData {
|
||||
mode: number
|
||||
selected: boolean
|
||||
executing: boolean
|
||||
apiNode?: boolean
|
||||
badges?: (LGraphBadge | (() => LGraphBadge))[]
|
||||
subgraphId?: string | null
|
||||
widgets?: SafeWidgetData[]
|
||||
inputs?: INodeInputSlot[]
|
||||
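Note: the new badges field allows either LGraphBadge values or lazy factories. A hypothetical consumer-side helper (not part of this diff; the import path mirrors other litegraph imports in this changeset) that resolves both forms before rendering:

import type { LGraphBadge } from '@/lib/litegraph/src/litegraph'

// Resolve lazy badge factories to plain values; illustrative only.
function resolveBadges(
  badges?: (LGraphBadge | (() => LGraphBadge))[]
): LGraphBadge[] {
  return (badges ?? []).map((badge) =>
    typeof badge === 'function' ? badge() : badge
  )
}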
@@ -117,7 +120,7 @@ export function useGraphNodeManager(graph: LGraph): GraphNodeManager {
|
||||
}
|
||||
|
||||
// Extract safe data from LiteGraph node for Vue consumption
|
||||
const extractVueNodeData = (node: LGraphNode): VueNodeData => {
|
||||
function extractVueNodeData(node: LGraphNode): VueNodeData {
|
||||
// Determine subgraph ID - null for root graph, string for subgraphs
|
||||
const subgraphId =
|
||||
node.graph && 'id' in node.graph && node.graph !== node.graph.rootGraph
|
||||
@@ -126,14 +129,6 @@ export function useGraphNodeManager(graph: LGraph): GraphNodeManager {
|
||||
// Extract safe widget data
|
||||
const slotMetadata = new Map<string, WidgetSlotMetadata>()
|
||||
|
||||
node.inputs?.forEach((input, index) => {
|
||||
if (!input?.widget?.name) return
|
||||
slotMetadata.set(input.widget.name, {
|
||||
index,
|
||||
linked: input.link != null
|
||||
})
|
||||
})
|
||||
|
||||
const reactiveWidgets = shallowReactive<IBaseWidget[]>(node.widgets ?? [])
|
||||
Object.defineProperty(node, 'widgets', {
|
||||
get() {
|
||||
@@ -144,8 +139,15 @@ export function useGraphNodeManager(graph: LGraph): GraphNodeManager {
|
||||
}
|
||||
})
|
||||
|
||||
const safeWidgets = reactiveComputed<SafeWidgetData[]>(
|
||||
() =>
|
||||
const safeWidgets = reactiveComputed<SafeWidgetData[]>(() => {
|
||||
node.inputs?.forEach((input, index) => {
|
||||
if (!input?.widget?.name) return
|
||||
slotMetadata.set(input.widget.name, {
|
||||
index,
|
||||
linked: input.link != null
|
||||
})
|
||||
})
|
||||
return (
|
||||
node.widgets?.map((widget) => {
|
||||
try {
|
||||
// TODO: Use widget.getReactiveData() once TypeScript types are updated
|
||||
@@ -183,7 +185,8 @@ export function useGraphNodeManager(graph: LGraph): GraphNodeManager {
|
||||
}
|
||||
}
|
||||
}) ?? []
|
||||
)
|
||||
)
|
||||
})
|
||||
|
||||
const nodeType =
|
||||
node.type ||
|
||||
@@ -192,6 +195,9 @@ export function useGraphNodeManager(graph: LGraph): GraphNodeManager {
|
||||
node.constructor?.name ||
|
||||
'Unknown'
|
||||
|
||||
const apiNode = node.constructor?.nodeData?.api_node ?? false
|
||||
const badges = node.badges
|
||||
|
||||
return {
|
||||
id: String(node.id),
|
||||
title: typeof node.title === 'string' ? node.title : '',
|
||||
@@ -200,6 +206,8 @@ export function useGraphNodeManager(graph: LGraph): GraphNodeManager {
|
||||
selected: node.selected || false,
|
||||
executing: false, // Will be updated separately based on execution state
|
||||
subgraphId,
|
||||
apiNode,
|
||||
badges,
|
||||
hasErrors: !!node.has_errors,
|
||||
widgets: safeWidgets,
|
||||
inputs: node.inputs ? [...node.inputs] : undefined,
|
||||
|
||||
@@ -23,6 +23,7 @@ import { useAssetBrowserDialog } from '@/platform/assets/composables/useAssetBro
|
||||
import { createModelNodeFromAsset } from '@/platform/assets/utils/createModelNodeFromAsset'
|
||||
import { isCloud } from '@/platform/distribution/types'
|
||||
import { useSettingStore } from '@/platform/settings/settingStore'
|
||||
import { SUPPORT_URL } from '@/platform/support/config'
|
||||
import { useTelemetry } from '@/platform/telemetry'
|
||||
import { useToastStore } from '@/platform/updates/common/toastStore'
|
||||
import { useWorkflowService } from '@/platform/workflow/core/services/workflowService'
|
||||
@@ -775,7 +776,7 @@ export function useCoreCommands(): ComfyCommand[] {
|
||||
label: 'Contact Support',
|
||||
versionAdded: '1.17.8',
|
||||
function: () => {
|
||||
window.open('https://support.comfy.org/', '_blank')
|
||||
window.open(SUPPORT_URL, '_blank')
|
||||
}
|
||||
},
|
||||
{
|
||||
|
||||
@@ -161,4 +161,5 @@ export function promoteRecommendedWidgets(subgraphNode: SubgraphNode) {
|
||||
const proxyWidgets: ProxyWidgetsProperty =
|
||||
filteredWidgets.map(widgetItemToProperty)
|
||||
subgraphNode.properties.proxyWidgets = proxyWidgets
|
||||
subgraphNode.computeSize(subgraphNode.size)
|
||||
}
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
import { t } from '@/i18n'
|
||||
import { useExtensionService } from '@/services/extensionService'
|
||||
|
||||
useExtensionService().registerExtension({
|
||||
name: 'Comfy.CloudBadge',
|
||||
topbarBadges: [
|
||||
{
|
||||
label: t('g.beta'),
|
||||
text: 'Comfy Cloud'
|
||||
}
|
||||
]
|
||||
})
|
||||
src/extensions/core/cloudBadges.ts (new file)
@@ -0,0 +1,36 @@
import { computed } from 'vue'

import { t } from '@/i18n'
import { remoteConfig } from '@/platform/remoteConfig/remoteConfig'
import { useExtensionService } from '@/services/extensionService'
import type { TopbarBadge } from '@/types/comfy'

const badges = computed<TopbarBadge[]>(() => {
  const result: TopbarBadge[] = []

  // Add server health alert first (if present)
  const alert = remoteConfig.value.server_health_alert
  if (alert) {
    result.push({
      text: alert.message,
      label: alert.badge,
      variant: alert.severity ?? 'error',
      tooltip: alert.tooltip
    })
  }

  // Always add cloud badge last (furthest right)
  result.push({
    label: t('g.beta'),
    text: 'Comfy Cloud'
  })

  return result
})

useExtensionService().registerExtension({
  name: 'Comfy.Cloud.Badges',
  get topbarBadges() {
    return badges.value
  }
})
|
||||
src/extensions/core/cloudRemoteConfig.ts (new file)
@@ -0,0 +1,15 @@
import { loadRemoteConfig } from '@/platform/remoteConfig/remoteConfig'
import { useExtensionService } from '@/services/extensionService'

/**
 * Cloud-only extension that polls for remote config updates
 * Initial config load happens in main.ts before any other imports
 */
useExtensionService().registerExtension({
  name: 'Comfy.Cloud.RemoteConfig',

  setup: async () => {
    // Poll for config updates every 30 seconds
    setInterval(() => void loadRemoteConfig(), 30000)
  }
})
|
||||
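Note: both new cloud extensions depend on '@/platform/remoteConfig/remoteConfig', which this diff does not include. A minimal sketch of what that module is assumed to export, a reactive ref plus a fetch-based loader; the endpoint and payload shape are guesses based on the usage above:

import { ref } from 'vue'

// Assumed module shape; the real config payload and endpoint may differ.
export interface RemoteConfig {
  server_health_alert?: {
    message: string
    badge?: string
    severity?: 'info' | 'warning' | 'error'
    tooltip?: string
  }
}

export const remoteConfig = ref<RemoteConfig>({})

export async function loadRemoteConfig(): Promise<void> {
  try {
    const res = await fetch('/api/remote-config') // hypothetical endpoint
    if (res.ok) remoteConfig.value = await res.json()
  } catch {
    // Keep the previous config on network failure; the 30s poller will retry.
  }
}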
@@ -4,8 +4,11 @@ import { useCurrentUser } from '@/composables/auth/useCurrentUser'
|
||||
import { useSubscription } from '@/platform/cloud/subscription/composables/useSubscription'
|
||||
import { useExtensionService } from '@/services/extensionService'
|
||||
|
||||
/**
|
||||
* Cloud-only extension that enforces active subscription requirement
|
||||
*/
|
||||
useExtensionService().registerExtension({
|
||||
name: 'Comfy.CloudSubscription',
|
||||
name: 'Comfy.Cloud.Subscription',
|
||||
|
||||
setup: async () => {
|
||||
const { isLoggedIn } = useCurrentUser()
|
||||
@@ -13,7 +16,6 @@ useExtensionService().registerExtension({
|
||||
|
||||
const checkSubscriptionStatus = () => {
|
||||
if (!isLoggedIn.value) return
|
||||
|
||||
void requireActiveSubscription()
|
||||
}
|
||||
|
||||
|
||||
@@ -24,10 +24,12 @@ import './uploadImage'
|
||||
import './webcamCapture'
|
||||
import './widgetInputs'
|
||||
|
||||
// Cloud-only extensions - tree-shaken in OSS builds
|
||||
if (isCloud) {
|
||||
import('./cloudBadge')
|
||||
await import('./cloudRemoteConfig')
|
||||
await import('./cloudBadges')
|
||||
|
||||
if (__BUILD_FLAGS__.REQUIRE_SUBSCRIPTION) {
|
||||
import('./cloudSubscription')
|
||||
if (window.__CONFIG__?.subscription_required) {
|
||||
await import('./cloudSubscription')
|
||||
}
|
||||
}
|
||||
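Note: the comment above promises that the cloud-only imports are tree-shaken in OSS builds, which only holds if isCloud reduces to a compile-time constant. A hypothetical definition consistent with that (the actual '@/platform/distribution/types' source is not shown in this diff):

// Hypothetical: an env-derived constant that the bundler can evaluate statically,
// so `if (isCloud) { ... }` is dropped entirely from OSS builds.
export const isCloud: boolean =
  import.meta.env.VITE_DISTRIBUTION === 'cloud'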
@@ -3,6 +3,7 @@ import { GLTFExporter } from 'three/examples/jsm/exporters/GLTFExporter'
|
||||
import { OBJExporter } from 'three/examples/jsm/exporters/OBJExporter'
|
||||
import { STLExporter } from 'three/examples/jsm/exporters/STLExporter'
|
||||
|
||||
import { downloadBlob } from '@/base/common/downloadUtil'
|
||||
import { t } from '@/i18n'
|
||||
import { useToastStore } from '@/platform/updates/common/toastStore'
|
||||
|
||||
@@ -38,13 +39,7 @@ export class ModelExporter {
|
||||
try {
|
||||
const response = await fetch(url)
|
||||
const blob = await response.blob()
|
||||
|
||||
const link = document.createElement('a')
|
||||
link.href = URL.createObjectURL(blob)
|
||||
link.download = desiredFilename
|
||||
link.click()
|
||||
|
||||
URL.revokeObjectURL(link.href)
|
||||
downloadBlob(desiredFilename, blob)
|
||||
} catch (error) {
|
||||
console.error('Error downloading from URL:', error)
|
||||
useToastStore().addAlert(t('toastMessages.failedToDownloadFile'))
|
||||
@@ -152,19 +147,11 @@ export class ModelExporter {
|
||||
|
||||
private static saveArrayBuffer(buffer: ArrayBuffer, filename: string): void {
|
||||
const blob = new Blob([buffer], { type: 'application/octet-stream' })
|
||||
const link = document.createElement('a')
|
||||
link.href = URL.createObjectURL(blob)
|
||||
link.download = filename
|
||||
link.click()
|
||||
URL.revokeObjectURL(link.href)
|
||||
downloadBlob(filename, blob)
|
||||
}
|
||||
|
||||
private static saveString(text: string, filename: string): void {
|
||||
const blob = new Blob([text], { type: 'text/plain' })
|
||||
const link = document.createElement('a')
|
||||
link.href = URL.createObjectURL(blob)
|
||||
link.download = filename
|
||||
link.click()
|
||||
URL.revokeObjectURL(link.href)
|
||||
downloadBlob(filename, blob)
|
||||
}
|
||||
}
|
||||
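Note: several hunks in this area (ModelExporter above, RecordingManager and the node-templates dialog below) replace hand-rolled anchor-element downloads with downloadBlob(filename, blob) from '@/base/common/downloadUtil'. The helper itself is not part of this diff; a plausible sketch of what it does:

// Assumed implementation of the shared helper; the real module may differ in details.
export function downloadBlob(filename: string, blob: Blob): void {
  const url = URL.createObjectURL(blob)
  const link = document.createElement('a')
  link.href = url
  link.download = filename
  link.style.display = 'none'
  document.body.appendChild(link)
  link.click()
  link.remove()
  URL.revokeObjectURL(url)
}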
@@ -1,5 +1,7 @@
|
||||
import * as THREE from 'three'
|
||||
|
||||
import { downloadBlob } from '@/base/common/downloadUtil'
|
||||
|
||||
import { type EventManagerInterface } from './interfaces'
|
||||
|
||||
export class RecordingManager {
|
||||
@@ -149,17 +151,7 @@ export class RecordingManager {
|
||||
|
||||
try {
|
||||
const blob = new Blob(this.recordedChunks, { type: 'video/webm' })
|
||||
|
||||
const url = URL.createObjectURL(blob)
|
||||
const a = document.createElement('a')
|
||||
document.body.appendChild(a)
|
||||
a.style.display = 'none'
|
||||
a.href = url
|
||||
a.download = filename
|
||||
a.click()
|
||||
|
||||
window.URL.revokeObjectURL(url)
|
||||
document.body.removeChild(a)
|
||||
downloadBlob(filename, blob)
|
||||
|
||||
this.eventManager.emitEvent('recordingExported', null)
|
||||
} catch (error) {
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import { downloadBlob } from '@/base/common/downloadUtil'
|
||||
import { t } from '@/i18n'
|
||||
import { LGraphCanvas } from '@/lib/litegraph/src/litegraph'
|
||||
import { useToastStore } from '@/platform/updates/common/toastStore'
|
||||
@@ -145,18 +146,7 @@ class ManageTemplates extends ComfyDialog {
|
||||
|
||||
const json = JSON.stringify({ templates: this.templates }, null, 2) // convert the data to a JSON string
|
||||
const blob = new Blob([json], { type: 'application/json' })
|
||||
const url = URL.createObjectURL(blob)
|
||||
const a = $el('a', {
|
||||
href: url,
|
||||
download: 'node_templates.json',
|
||||
style: { display: 'none' },
|
||||
parent: document.body
|
||||
})
|
||||
a.click()
|
||||
setTimeout(function () {
|
||||
a.remove()
|
||||
window.URL.revokeObjectURL(url)
|
||||
}, 0)
|
||||
downloadBlob('node_templates.json', blob)
|
||||
}
|
||||
|
||||
override show() {
|
||||
@@ -298,19 +288,9 @@ class ManageTemplates extends ComfyDialog {
|
||||
const blob = new Blob([json], {
|
||||
type: 'application/json'
|
||||
})
|
||||
const url = URL.createObjectURL(blob)
|
||||
const a = $el('a', {
|
||||
href: url,
|
||||
// @ts-expect-error fixme ts strict error
|
||||
download: (nameInput.value || t.name) + '.json',
|
||||
style: { display: 'none' },
|
||||
parent: document.body
|
||||
})
|
||||
a.click()
|
||||
setTimeout(function () {
|
||||
a.remove()
|
||||
window.URL.revokeObjectURL(url)
|
||||
}, 0)
|
||||
// @ts-expect-error fixme ts strict error
|
||||
const name = (nameInput.value || t.name) + '.json'
|
||||
downloadBlob(name, blob)
|
||||
}
|
||||
}),
|
||||
$el('button', {
|
||||
|
||||
@@ -3680,7 +3680,7 @@ export class LGraphNode
|
||||
}
|
||||
ctx.font = savedFont // Restore font after button measurements
|
||||
if (buttonsWidth > 0) {
|
||||
buttonsWidth += 10 // Extra margin before buttons
|
||||
buttonsWidth -= 20 // Reduce by empty padding
|
||||
availableWidth -= buttonsWidth
|
||||
}
|
||||
}
|
||||
|
||||
@@ -418,18 +418,6 @@ export class LLink implements LinkSegment, Serialisable<SerialisableLLink> {
|
||||
* If `input` or `output`, reroutes will not be automatically removed and will retain a connection to the input or output, respectively.
|
||||
*/
|
||||
disconnect(network: LinkNetwork, keepReroutes?: 'input' | 'output'): void {
|
||||
// Clean up the target node's input slot
|
||||
if (this.target_id !== -1) {
|
||||
const targetNode = network.getNodeById(this.target_id)
|
||||
if (targetNode) {
|
||||
const targetInput = targetNode.inputs?.[this.target_slot]
|
||||
if (targetInput && targetInput.link === this.id) {
|
||||
targetInput.link = null
|
||||
targetNode.setDirtyCanvas?.(true, false)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const reroutes = LLink.getReroutes(network, this)
|
||||
|
||||
const lastReroute = reroutes.at(-1)
|
||||
|
||||
@@ -137,7 +137,9 @@ export class ToInputFromIoNodeLink implements RenderLink {
|
||||
}
|
||||
disconnect(): boolean {
|
||||
if (!this.existingLink) return false
|
||||
this.existingLink.disconnect(this.network, 'input')
|
||||
const { input, inputNode } = this.existingLink.resolve(this.network)
|
||||
if (!inputNode || !input) return false
|
||||
this.node._disconnectNodeInput(inputNode, input, this.existingLink)
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1526,310 +1526,6 @@
|
||||
"searchPlaceholder": "بحث..."
|
||||
},
|
||||
"sorting": "ترتيب حسب",
|
||||
"template": {
|
||||
"3D": {
|
||||
"3d_hunyuan3d_image_to_model": "Hunyuan3D 2.0",
|
||||
"3d_hunyuan3d_multiview_to_model": "Hunyuan3D 2.0 MV",
|
||||
"3d_hunyuan3d_multiview_to_model_turbo": "Hunyuan3D 2.0 MV Turbo",
|
||||
"stable_zero123_example": "Stable Zero123"
|
||||
},
|
||||
"3D API": {
|
||||
"api_rodin_image_to_model": "Rodin: من صورة إلى نموذج",
|
||||
"api_rodin_multiview_to_model": "Rodin: من عدة زوايا إلى نموذج",
|
||||
"api_tripo_image_to_model": "Tripo: من صورة إلى نموذج",
|
||||
"api_tripo_multiview_to_model": "Tripo: من عدة زوايا إلى نموذج",
|
||||
"api_tripo_text_to_model": "Tripo: من نص إلى نموذج"
|
||||
},
|
||||
"Area Composition": {
|
||||
"area_composition": "تكوين المناطق",
|
||||
"area_composition_square_area_for_subject": "تكوين المناطق - مربع الموضوع"
|
||||
},
|
||||
"Audio": {
|
||||
"audio_ace_step_1_m2m_editing": "ACE-Step v1 تحرير M2M",
|
||||
"audio_ace_step_1_t2a_instrumentals": "ACE-Step v1 من نص إلى موسيقى آلية",
|
||||
"audio_ace_step_1_t2a_song": "ACE-Step v1 من نص إلى أغنية",
|
||||
"audio_stable_audio_example": "Stable Audio"
|
||||
},
|
||||
"Basics": {
|
||||
"default": "توليد الصور",
|
||||
"embedding_example": "تضمين",
|
||||
"gligen_textbox_example": "صندوق نص Gligen",
|
||||
"image2image": "صورة إلى صورة",
|
||||
"inpaint_example": "إعادة التلوين",
|
||||
"inpaint_model_outpainting": "التوسيع",
|
||||
"lora": "LoRA",
|
||||
"lora_multiple": "عدة LoRA"
|
||||
},
|
||||
"ControlNet": {
|
||||
"2_pass_pose_worship": "ControlNet الوضعية مرورين",
|
||||
"controlnet_example": "ControlNet الرسومات التخطيطية",
|
||||
"depth_controlnet": "ControlNet العمق",
|
||||
"depth_t2i_adapter": "محول T2I للعمق",
|
||||
"mixing_controlnets": "دمج ControlNet"
|
||||
},
|
||||
"Flux": {
|
||||
"flux_canny_model_example": "نموذج Flux كاني",
|
||||
"flux_depth_lora_example": "عمق Flux LoRA",
|
||||
"flux_dev_checkpoint_example": "Flux تطوير fp8",
|
||||
"flux_dev_full_text_to_image": "Flux تطوير كامل من نص إلى صورة",
|
||||
"flux_fill_inpaint_example": "Flux إعادة تلوين",
|
||||
"flux_fill_outpaint_example": "Flux توسيع",
|
||||
"flux_kontext_dev_basic": "Flux Kontext تطوير (أساسي)",
|
||||
"flux_kontext_dev_grouped": "Flux Kontext تطوير (مجموعات)",
|
||||
"flux_redux_model_example": "نموذج Flux Redux",
|
||||
"flux_schnell": "Flux سريع fp8",
|
||||
"flux_schnell_full_text_to_image": "Flux سريع كامل من نص إلى صورة"
|
||||
},
|
||||
"Image": {
|
||||
"hidream_e1_full": "HiDream E1 كامل",
|
||||
"hidream_i1_dev": "HiDream I1 تطوير",
|
||||
"hidream_i1_fast": "HiDream I1 سريع",
|
||||
"hidream_i1_full": "HiDream I1 كامل",
|
||||
"image_chroma_text_to_image": "Chroma من نص إلى صورة",
|
||||
"image_cosmos_predict2_2B_t2i": "Cosmos Predict2 2B من نص إلى صورة",
|
||||
"image_lotus_depth_v1_1": "Lotus عمق",
|
||||
"image_omnigen2_image_edit": "OmniGen2 تعديل الصور",
|
||||
"image_omnigen2_t2i": "OmniGen2 من نص إلى صورة",
|
||||
"sd3_5_large_blur": "SD3.5 ضباب كبير",
|
||||
"sd3_5_large_canny_controlnet_example": "SD3.5 كاني كبير مع ControlNet",
|
||||
"sd3_5_large_depth": "SD3.5 عمق كبير",
|
||||
"sd3_5_simple_example": "SD3.5 بسيط",
|
||||
"sdxl_refiner_prompt_example": "SDXL تحسين العبارات",
|
||||
"sdxl_revision_text_prompts": "SDXL مراجعة العبارات النصية",
|
||||
"sdxl_revision_zero_positive": "SDXL مراجعة صفر إيجابي",
|
||||
"sdxl_simple_example": "SDXL بسيط",
|
||||
"sdxlturbo_example": "SDXL Turbo"
|
||||
},
|
||||
"Image API": {
|
||||
"api_bfl_flux_1_kontext_max_image": "BFL Flux.1 Kontext ماكس",
|
||||
"api_bfl_flux_1_kontext_multiple_images_input": "BFL Flux.1 Kontext إدخال صور متعددة",
|
||||
"api_bfl_flux_1_kontext_pro_image": "BFL Flux.1 Kontext برو",
|
||||
"api_bfl_flux_pro_t2i": "BFL Flux[Pro]: من نص إلى صورة",
|
||||
"api_ideogram_v3_t2i": "Ideogram V3: من نص إلى صورة",
|
||||
"api_luma_photon_i2i": "Luma Photon: من صورة إلى صورة",
|
||||
"api_luma_photon_style_ref": "Luma Photon: مرجع الأسلوب",
|
||||
"api_openai_dall_e_2_inpaint": "OpenAI: Dall-E 2 إعادة التلوين",
|
||||
"api_openai_dall_e_2_t2i": "OpenAI: Dall-E 2 من نص إلى صورة",
|
||||
"api_openai_dall_e_3_t2i": "OpenAI: Dall-E 3 من نص إلى صورة",
|
||||
"api_openai_image_1_i2i": "OpenAI: GPT-Image-1 من صورة إلى صورة",
|
||||
"api_openai_image_1_inpaint": "OpenAI: GPT-Image-1 إعادة التلوين",
|
||||
"api_openai_image_1_multi_inputs": "OpenAI: GPT-Image-1 مدخلات متعددة",
|
||||
"api_openai_image_1_t2i": "OpenAI: GPT-Image-1 من نص إلى صورة",
|
||||
"api_recraft_image_gen_with_color_control": "Recraft: توليد الصور مع تحكم اللون",
|
||||
"api_recraft_image_gen_with_style_control": "Recraft: توليد الصور مع تحكم الأسلوب",
|
||||
"api_recraft_vector_gen": "Recraft: توليد المتجهات",
|
||||
"api_runway_reference_to_image": "Runway: من مرجع إلى صورة",
|
||||
"api_runway_text_to_image": "Runway: من نص إلى صورة",
|
||||
"api_stability_ai_i2i": "Stability AI: من صورة إلى صورة",
|
||||
"api_stability_ai_sd3_5_i2i": "Stability AI: SD3.5 من صورة إلى صورة",
|
||||
"api_stability_ai_sd3_5_t2i": "Stability AI: SD3.5 من نص إلى صورة",
|
||||
"api_stability_ai_stable_image_ultra_t2i": "Stability AI: صورة مستقرة ألترا من نص إلى صورة"
|
||||
},
|
||||
"LLM API": {
|
||||
"api_google_gemini": "Google Gemini: محادثة",
|
||||
"api_openai_chat": "OpenAI: محادثة"
|
||||
},
|
||||
"Upscaling": {
|
||||
"esrgan_example": "ESRGAN",
|
||||
"hiresfix_esrgan_workflow": "HiresFix ESRGAN سير العمل",
|
||||
"hiresfix_latent_workflow": "تكبير",
|
||||
"latent_upscale_different_prompt_model": "تكبير كامن مع نموذج عبارات مختلف"
|
||||
},
|
||||
"Video": {
|
||||
"hunyuan_video_text_to_video": "Hunyuan فيديو نص إلى فيديو",
|
||||
"image_to_video": "SVD من صورة إلى فيديو",
|
||||
"image_to_video_wan": "Wan 2.1 من صورة إلى فيديو",
|
||||
"ltxv_image_to_video": "LTXV من صورة إلى فيديو",
|
||||
"ltxv_text_to_video": "LTXV من نص إلى فيديو",
|
||||
"mochi_text_to_video_example": "Mochi من نص إلى فيديو",
|
||||
"text_to_video_wan": "Wan 2.1 من نص إلى فيديو",
|
||||
"txt_to_image_to_video": "SVD من نص إلى صورة إلى فيديو",
|
||||
"video_cosmos_predict2_2B_video2world_480p_16fps": "Cosmos Predict2 2B فيديو إلى العالم 480p 16fps",
|
||||
"video_wan2_1_fun_camera_v1_1_14B": "Wan 2.1 كاميرا ممتعة 14B",
|
||||
"video_wan2_1_fun_camera_v1_1_1_3B": "Wan 2.1 كاميرا ممتعة 1.3B",
|
||||
"video_wan_vace_14B_ref2v": "Wan VACE من المرجع إلى فيديو",
|
||||
"video_wan_vace_14B_t2v": "Wan VACE من نص إلى فيديو",
|
||||
"video_wan_vace_14B_v2v": "Wan VACE فيديو تحكم",
|
||||
"video_wan_vace_flf2v": "Wan VACE الإطار الأول والأخير",
|
||||
"video_wan_vace_inpainting": "Wan VACE إعادة التلوين الداخلي",
|
||||
"video_wan_vace_outpainting": "Wan VACE التوسيع الخارجي",
|
||||
"wan2_1_flf2v_720_f16": "Wan 2.1 FLF2V 720p F16",
|
||||
"wan2_1_fun_control": "Wan 2.1 تحكم نت",
|
||||
"wan2_1_fun_inp": "Wan 2.1 إعادة التلوين"
|
||||
},
|
||||
"Video API": {
|
||||
"api_hailuo_minimax_i2v": "MiniMax: من صورة إلى فيديو",
|
||||
"api_hailuo_minimax_t2v": "MiniMax: من نص إلى فيديو",
|
||||
"api_kling_effects": "Kling: تأثيرات الفيديو",
|
||||
"api_kling_flf": "Kling: FLF2V",
|
||||
"api_kling_i2v": "Kling: من صورة إلى فيديو",
|
||||
"api_luma_i2v": "Luma: من صورة إلى فيديو",
|
||||
"api_luma_t2v": "Luma: من نص إلى فيديو",
|
||||
"api_moonvalley_image_to_video": "Moonvalley: من صورة إلى فيديو",
|
||||
"api_moonvalley_text_to_video": "Moonvalley: من نص إلى فيديو",
|
||||
"api_pika_i2v": "Pika: من صورة إلى فيديو",
|
||||
"api_pika_scene": "Pika المشاهد: من صور إلى فيديو",
|
||||
"api_pixverse_i2v": "PixVerse: من صورة إلى فيديو",
|
||||
"api_pixverse_t2v": "PixVerse: من نص إلى فيديو",
|
||||
"api_pixverse_template_i2v": "PixVerse القوالب: من صورة إلى فيديو",
|
||||
"api_runway_first_last_frame": "Runway: الإطار الأول والأخير إلى فيديو",
|
||||
"api_runway_gen3a_turbo_image_to_video": "Runway: Gen3a Turbo من صورة إلى فيديو",
|
||||
"api_runway_gen4_turo_image_to_video": "Runway: Gen4 Turbo من صورة إلى فيديو",
|
||||
"api_veo2_i2v": "Veo2: من صورة إلى فيديو"
|
||||
}
|
||||
},
|
||||
"templateDescription": {
|
||||
"3D": {
|
||||
"3d_hunyuan3d_image_to_model": "إنشاء نماذج ثلاثية الأبعاد من صور فردية باستخدام Hunyuan3D 2.0.",
|
||||
"3d_hunyuan3d_multiview_to_model": "إنشاء نماذج ثلاثية الأبعاد من عدة زوايا باستخدام Hunyuan3D 2.0 MV.",
|
||||
"3d_hunyuan3d_multiview_to_model_turbo": "إنشاء نماذج ثلاثية الأبعاد من عدة زوايا باستخدام Hunyuan3D 2.0 MV Turbo.",
|
||||
"stable_zero123_example": "إنشاء مشاهد ثلاثية الأبعاد من صور فردية باستخدام Stable Zero123."
|
||||
},
|
||||
"3D API": {
|
||||
"api_rodin_image_to_model": "إنشاء نماذج ثلاثية الأبعاد مفصلة من صور فردية باستخدام Rodin AI.",
|
||||
"api_rodin_multiview_to_model": "نحت نماذج ثلاثية الأبعاد شاملة باستخدام إعادة بناء متعددة الزوايا من Rodin.",
|
||||
"api_tripo_image_to_model": "إنشاء أصول ثلاثية الأبعاد احترافية من صور ثنائية الأبعاد باستخدام محرك Tripo.",
|
||||
"api_tripo_multiview_to_model": "بناء نماذج ثلاثية الأبعاد من عدة زوايا باستخدام ماسح Tripo المتقدم.",
|
||||
"api_tripo_text_to_model": "تصميم أشياء ثلاثية الأبعاد من الوصف النصي باستخدام نمذجة Tripo المدفوعة بالنص."
|
||||
},
|
||||
"Area Composition": {
|
||||
"area_composition": "إنشاء صور عبر التحكم في التكوين ضمن مناطق محددة.",
|
||||
"area_composition_square_area_for_subject": "إنشاء صور بوضع ثابت للموضوع باستخدام تكوين المناطق."
|
||||
},
|
||||
"Audio": {
|
||||
"audio_ace_step_1_m2m_editing": "تحرير الأغاني الموجودة لتغيير الأسلوب والكلمات باستخدام ACE-Step v1 M2M.",
|
||||
"audio_ace_step_1_t2a_instrumentals": "إنشاء موسيقى آلية من نصوص باستخدام ACE-Step v1.",
|
||||
"audio_ace_step_1_t2a_song": "إنشاء أغاني بصوت غنائي من نصوص مع دعم التعدد اللغوي وتخصيص الأسلوب باستخدام ACE-Step v1.",
|
||||
"audio_stable_audio_example": "إنشاء صوت من نصوص باستخدام Stable Audio."
|
||||
},
|
||||
"Basics": {
|
||||
"default": "إنشاء صور من نصوص الإرشادات.",
|
||||
"embedding_example": "إنشاء صور باستخدام الانعكاس النصي لأنماط متسقة.",
|
||||
"gligen_textbox_example": "إنشاء صور مع وضع دقيق للأشياء باستخدام مربعات النص.",
|
||||
"image2image": "تحويل الصور الموجودة باستخدام نصوص الإرشادات.",
|
||||
"inpaint_example": "تعديل أجزاء محددة من الصور بسلاسة.",
|
||||
"inpaint_model_outpainting": "تمديد الصور خارج حدودها الأصلية.",
|
||||
"lora": "إنشاء صور باستخدام نماذج LoRA لأنماط أو مواضيع متخصصة.",
|
||||
"lora_multiple": "إنشاء صور عبر دمج عدة نماذج LoRA."
|
||||
},
|
||||
"ControlNet": {
|
||||
"2_pass_pose_worship": "إنشاء صور موجهة بإشارات وضعية باستخدام ControlNet.",
|
||||
"controlnet_example": "إنشاء صور موجهة برموز مرجعية مرسومة باستخدام ControlNet.",
|
||||
"depth_controlnet": "إنشاء صور موجهة بمعلومات العمق باستخدام ControlNet.",
|
||||
"depth_t2i_adapter": "إنشاء صور موجهة بمعلومات العمق باستخدام محول T2I.",
|
||||
"mixing_controlnets": "إنشاء صور بدمج عدة نماذج ControlNet."
|
||||
},
|
||||
"Flux": {
|
||||
"flux_canny_model_example": "إنشاء صور موجهة بالكشف عن الحواف باستخدام Flux Canny.",
|
||||
"flux_depth_lora_example": "إنشاء صور موجهة بمعلومات العمق باستخدام Flux LoRA.",
|
||||
"flux_dev_checkpoint_example": "إنشاء صور باستخدام نسخة Flux Dev fp8 المضغوطة. مناسبة لأجهزة ذات VRAM محدود، تتطلب ملف نموذج واحد فقط، لكن جودة الصورة أقل قليلاً من النسخة الكاملة.",
|
||||
"flux_dev_full_text_to_image": "إنشاء صور عالية الجودة مع نسخة Flux Dev الكاملة. تحتاج VRAM أكبر وعدة ملفات نموذج، لكنها توفر أفضل قدرة على اتباع النص وجودة الصور.",
|
||||
"flux_fill_inpaint_example": "ملء أجزاء مفقودة من الصور باستخدام Flux inpainting.",
|
||||
"flux_fill_outpaint_example": "تمديد الصور خارج الحدود باستخدام Flux outpainting.",
|
||||
"flux_kontext_dev_basic": "تعديل الصور باستخدام Flux Kontext مع رؤية كاملة للعُقد، مثالي لتعلم سير العمل.",
|
||||
"flux_kontext_dev_grouped": "نسخة مبسطة من Flux Kontext مع عُقد مجمعة لمساحة عمل أنظف.",
|
||||
"flux_redux_model_example": "إنشاء صور عبر نقل الأسلوب من صور مرجعية باستخدام Flux Redux.",
|
||||
"flux_schnell": "إنشاء صور بسرعة مع نسخة Flux Schnell fp8 المضغوطة. مثالية للأجهزة منخفضة الأداء، تتطلب 4 خطوات فقط لإنشاء الصور.",
|
||||
"flux_schnell_full_text_to_image": "إنشاء صور بسرعة مع نسخة Flux Schnell الكاملة. تستخدم ترخيص Apache2.0، تحتاج 4 خطوات فقط مع جودة صور جيدة."
|
||||
},
|
||||
"Image": {
|
||||
"hidream_e1_full": "تحرير الصور مع HiDream E1 - نموذج احترافي لتحرير الصور باستخدام اللغة الطبيعية.",
|
||||
"hidream_i1_dev": "إنشاء صور مع HiDream I1 Dev - نسخة متوازنة مع 28 خطوة استدلال، مناسبة لأجهزة متوسطة الأداء.",
|
||||
"hidream_i1_fast": "إنشاء صور بسرعة مع HiDream I1 Fast - نسخة خفيفة مع 16 خطوة استدلال، مثالية للمعاينات السريعة على أجهزة منخفضة الأداء.",
|
||||
"hidream_i1_full": "إنشاء صور مع HiDream I1 Full - نسخة كاملة مع 50 خطوة استدلال لأعلى جودة.",
|
||||
"image_chroma_text_to_image": "Chroma معدلة من Flux وتحوي بعض التغييرات في البنية.",
|
||||
"image_cosmos_predict2_2B_t2i": "إنشاء صور باستخدام Cosmos-Predict2 2B T2I بدقة فيزيائية عالية وتفاصيل غنية.",
|
||||
"image_lotus_depth_v1_1": "تشغيل Lotus Depth في ComfyUI لتقدير عمق أحادي بدون تدريب مسبق مع احتفاظ عالي بالتفاصيل.",
|
||||
"image_omnigen2_image_edit": "تحرير الصور باستخدام تعليمات اللغة الطبيعية مع دعم متقدم للصور والنصوص في OmniGen2.",
|
||||
"image_omnigen2_t2i": "إنشاء صور عالية الجودة من نصوص باستخدام نموذج OmniGen2 الموحد متعدد الأنماط 7B ذو البنية ذات المسارين.",
|
||||
"sd3_5_large_blur": "إنشاء صور موجهة باستخدام صور مرجعية ضبابية باستخدام SD 3.5.",
|
||||
"sd3_5_large_canny_controlnet_example": "إنشاء صور موجهة بالكشف عن الحواف باستخدام SD 3.5 Canny ControlNet.",
|
||||
"sd3_5_large_depth": "إنشاء صور موجهة بمعلومات العمق باستخدام SD 3.5.",
|
||||
"sd3_5_simple_example": "إنشاء صور باستخدام SD 3.5.",
|
||||
"sdxl_refiner_prompt_example": "تحسين صور SDXL باستخدام نماذج التكرير.",
|
||||
"sdxl_revision_text_prompts": "إنشاء صور بنقل مفاهيم من صور مرجعية باستخدام SDXL Revision.",
|
||||
"sdxl_revision_zero_positive": "إنشاء صور باستخدام نصوص وصور مرجعية مع SDXL Revision.",
|
||||
"sdxl_simple_example": "إنشاء صور عالية الجودة باستخدام SDXL.",
|
||||
"sdxlturbo_example": "إنشاء صور في خطوة واحدة باستخدام SDXL Turbo."
|
||||
},
|
||||
"Image API": {
|
||||
"api_bfl_flux_1_kontext_max_image": "تعديل الصور باستخدام Flux.1 Kontext max image.",
|
||||
"api_bfl_flux_1_kontext_multiple_images_input": "إدخال عدة صور وتعديلها باستخدام Flux.1 Kontext.",
|
||||
"api_bfl_flux_1_kontext_pro_image": "تعديل الصور باستخدام Flux.1 Kontext pro image.",
|
||||
"api_bfl_flux_pro_t2i": "إنشاء صور مع اتباع ممتاز للنص وجودة بصرية باستخدام FLUX.1 Pro.",
|
||||
"api_ideogram_v3_t2i": "إنشاء صور ذات جودة احترافية مع محاذاة ممتازة للنص، الواقعية الفوتوغرافية، ودعم النصوص باستخدام Ideogram V3.",
|
||||
"api_luma_photon_i2i": "توجيه إنشاء الصور باستخدام مزيج من الصور والنص.",
|
||||
"api_luma_photon_style_ref": "إنشاء صور بدمج مرجعيات الأسلوب مع تحكم دقيق باستخدام Luma Photon.",
|
||||
"api_openai_dall_e_2_inpaint": "تعديل الصور باستخدام تقنيات inpainting مع OpenAI Dall-E 2 API.",
|
||||
"api_openai_dall_e_2_t2i": "إنشاء صور من نصوص باستخدام OpenAI Dall-E 2 API.",
|
||||
"api_openai_dall_e_3_t2i": "إنشاء صور من نصوص باستخدام OpenAI Dall-E 3 API.",
|
||||
"api_openai_image_1_i2i": "إنشاء صور من صور مدخلة باستخدام OpenAI GPT Image 1 API.",
|
||||
"api_openai_image_1_inpaint": "تعديل الصور باستخدام تقنيات inpainting مع OpenAI GPT Image 1 API.",
|
||||
"api_openai_image_1_multi_inputs": "إنشاء صور من مدخلات متعددة باستخدام OpenAI GPT Image 1 API.",
|
||||
"api_openai_image_1_t2i": "إنشاء صور من نصوص باستخدام OpenAI GPT Image 1 API.",
|
||||
"api_recraft_image_gen_with_color_control": "إنشاء صور مع لوحات ألوان مخصصة ومرئيات خاصة بالعلامة التجارية باستخدام Recraft.",
|
||||
"api_recraft_image_gen_with_style_control": "التحكم في الأسلوب باستخدام أمثلة بصرية، محاذاة المواقع، وضبط دقيق للكائنات. تخزين ومشاركة الأنماط لضمان تناسق العلامة التجارية.",
|
||||
"api_recraft_vector_gen": "إنشاء صور فيكتور عالية الجودة من نصوص باستخدام مولد الفكتور AI الخاص بـ Recraft.",
|
||||
"api_runway_reference_to_image": "إنشاء صور جديدة بناءً على أنماط وتراكيب مرجعية باستخدام Runway AI.",
|
||||
"api_runway_text_to_image": "إنشاء صور عالية الجودة من نصوص باستخدام نموذج Runway AI.",
|
||||
"api_stability_ai_i2i": "تحويل الصور مع إنشاء عالي الجودة باستخدام Stability AI، مثالي للتحرير المهني ونقل الأسلوب.",
|
||||
"api_stability_ai_sd3_5_i2i": "إنشاء صور عالية الجودة مع اتباع ممتاز للنص. مثالي للاستخدامات المهنية بدقة 1 ميجابكسل.",
|
||||
"api_stability_ai_sd3_5_t2i": "إنشاء صور عالية الجودة مع اتباع ممتاز للنص. مثالي للاستخدامات المهنية بدقة 1 ميجابكسل.",
|
||||
"api_stability_ai_stable_image_ultra_t2i": "إنشاء صور عالية الجودة مع اتباع ممتاز للنص. مثالي للاستخدامات المهنية بدقة 1 ميجابكسل."
|
||||
},
|
||||
"LLM API": {
|
||||
"api_google_gemini": "اختبر الذكاء الاصطناعي متعدد الوسائط من Google مع قدرات التفكير لدى Gemini.",
|
||||
"api_openai_chat": "التفاعل مع نماذج اللغة المتقدمة من OpenAI للمحادثات الذكية."
|
||||
},
|
||||
"Upscaling": {
|
||||
"esrgan_example": "تكبير الصور باستخدام نماذج ESRGAN لتعزيز الجودة.",
|
||||
"hiresfix_esrgan_workflow": "تكبير الصور باستخدام نماذج ESRGAN خلال خطوات التوليد الوسيطة.",
|
||||
"hiresfix_latent_workflow": "تكبير الصور بتحسين الجودة في الفضاء الكامن.",
|
||||
"latent_upscale_different_prompt_model": "تكبير الصور مع تغيير العبارات المستخدمة عبر مراحل التوليد."
|
||||
},
|
||||
"Video": {
|
||||
"hunyuan_video_text_to_video": "إنشاء فيديوهات من نصوص باستخدام نموذج Hunyuan.",
|
||||
"image_to_video": "إنشاء فيديوهات من صور ثابتة.",
|
||||
"image_to_video_wan": "إنشاء فيديوهات من صور باستخدام Wan 2.1.",
|
||||
"ltxv_image_to_video": "إنشاء فيديوهات من صور ثابتة.",
|
||||
"ltxv_text_to_video": "إنشاء فيديوهات من نصوص.",
|
||||
"mochi_text_to_video_example": "إنشاء فيديوهات من نصوص باستخدام نموذج Mochi.",
|
||||
"text_to_video_wan": "إنشاء فيديوهات من نصوص باستخدام Wan 2.1.",
|
||||
"txt_to_image_to_video": "إنشاء فيديوهات عن طريق إنشاء صور من النصوص أولاً.",
|
||||
"video_cosmos_predict2_2B_video2world_480p_16fps": "إنشاء فيديوهات باستخدام Cosmos-Predict2 2B Video2World، بإنتاج محاكاة فيديو بدقة فيزيائية عالية وجودة فائقة ومتسقة.",
|
||||
"video_wan2_1_fun_camera_v1_1_14B": "إنشاء فيديوهات عالية الجودة مع تحكم متقدم بالكاميرا باستخدام النموذج الكامل 14B.",
|
||||
"video_wan2_1_fun_camera_v1_1_1_3B": "إنشاء فيديوهات ديناميكية مع حركات كاميرا سينمائية باستخدام نموذج Wan 2.1 Fun Camera 1.3B.",
|
||||
"video_wan_vace_14B_ref2v": "إنشاء فيديوهات تطابق أسلوب ومحتوى صورة مرجعية. مثالي لإنشاء فيديوهات متناسقة الأسلوب.",
|
||||
"video_wan_vace_14B_t2v": "تحويل أوصاف نصية إلى فيديوهات عالية الجودة. يدعم دقة 480p و720p مع نموذج VACE-14B.",
|
||||
"video_wan_vace_14B_v2v": "إنشاء فيديوهات بالتحكم في فيديوهات الإدخال والصور المرجعية باستخدام Wan VACE.",
|
||||
"video_wan_vace_flf2v": "إنشاء انتقالات فيديو سلسة عبر تحديد الإطارات الأولى والأخيرة. يدعم تسلسل إطارات مفتاحية مخصصة.",
|
||||
"video_wan_vace_inpainting": "تعديل مناطق محددة في الفيديو مع الحفاظ على المحتوى المحيط. مثالي لإزالة أو استبدال الأجسام.",
|
||||
"video_wan_vace_outpainting": "إنشاء فيديوهات ممتدة عبر توسيع حجم الفيديو باستخدام Wan VACE outpainting.",
|
||||
"wan2_1_flf2v_720_f16": "إنشاء فيديوهات بالتحكم في الإطارات الأولى والأخيرة باستخدام Wan 2.1 FLF2V.",
|
||||
"wan2_1_fun_control": "إنشاء فيديوهات موجهة بالتحكم بالوضع، العمق، والحواف باستخدام Wan 2.1 ControlNet.",
|
||||
"wan2_1_fun_inp": "إنشاء فيديوهات من الإطارات الأولى والأخيرة باستخدام Wan 2.1 inpainting."
|
||||
},
|
||||
"Video API": {
|
||||
"api_hailuo_minimax_i2v": "إنشاء فيديوهات مصقولة من الصور والنصوص مع دمج CGI باستخدام MiniMax.",
|
||||
"api_hailuo_minimax_t2v": "إنشاء فيديوهات عالية الجودة مباشرة من نصوص. استكشف قدرات MiniMax المتقدمة لإنشاء سرد بصري متنوع مع تأثيرات CGI احترافية وعناصر أسلوبية لإحياء وصفك.",
|
||||
"api_kling_effects": "إنشاء فيديوهات ديناميكية بتطبيق تأثيرات بصرية على الصور باستخدام Kling.",
|
||||
"api_kling_flf": "إنشاء فيديوهات عبر التحكم في الإطارات الأولى والأخيرة.",
|
||||
"api_kling_i2v": "إنشاء فيديوهات مع اتباع ممتاز للنصوص للحركات والتعابير وحركات الكاميرا باستخدام Kling.",
|
||||
"api_luma_i2v": "تحويل الصور الثابتة إلى رسوم متحركة عالية الجودة بشكل فوري.",
|
||||
"api_luma_t2v": "يمكن إنشاء فيديوهات عالية الجودة باستخدام نصوص بسيطة.",
|
||||
"api_moonvalley_image_to_video": "إنشاء فيديوهات سينمائية بدقة 1080p من صور عبر نموذج مدرب حصريًا على بيانات مرخصة.",
|
||||
"api_moonvalley_text_to_video": "إنشاء فيديوهات سينمائية بدقة 1080p من نصوص عبر نموذج مدرب حصريًا على بيانات مرخصة.",
|
||||
"api_pika_i2v": "إنشاء فيديوهات متحركة سلسة من صورة ثابتة واحدة باستخدام Pika AI.",
|
||||
"api_pika_scene": "إنشاء فيديوهات تدمج عدة صور مدخلة باستخدام Pika Scenes.",
|
||||
"api_pixverse_i2v": "إنشاء فيديوهات ديناميكية من الصور الثابتة مع الحركة والتأثيرات باستخدام PixVerse.",
|
||||
"api_pixverse_t2v": "إنشاء فيديوهات مع تفسير دقيق للنصوص وديناميكية فيديو مذهلة.",
|
||||
"api_pixverse_template_i2v": "إنشاء فيديوهات ديناميكية من الصور الثابتة مع الحركة والتأثيرات باستخدام PixVerse.",
|
||||
"api_runway_first_last_frame": "إنشاء انتقالات فيديو سلسة بين إطارين رئيسيين بدقة Runway.",
|
||||
"api_runway_gen3a_turbo_image_to_video": "إنشاء فيديوهات سينمائية من صور ثابتة باستخدام Runway Gen3a Turbo.",
|
||||
"api_runway_gen4_turo_image_to_video": "إنشاء فيديوهات ديناميكية من الصور باستخدام Runway Gen4 Turbo.",
|
||||
"api_veo2_i2v": "إنشاء فيديوهات من الصور باستخدام Google Veo2 API."
|
||||
}
|
||||
},
|
||||
"title": "ابدأ باستخدام قالب",
|
||||
"useCasesSelected": "{count} حالات استخدام"
|
||||
},
|
||||
|
||||
@@ -1449,86 +1449,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"FluxProCannyNode": {
|
||||
"description": "توليد صورة باستخدام صورة تحكم (كاني).",
|
||||
"display_name": "Flux.1 صورة تحكم كاني",
|
||||
"inputs": {
|
||||
"canny_high_threshold": {
|
||||
"name": "عتبة_كاني_العالية",
|
||||
"tooltip": "عتبة عالية لكشف حواف كاني؛ تُتجاهل إذا كانت skip_processing مفعلة."
|
||||
},
|
||||
"canny_low_threshold": {
|
||||
"name": "عتبة_كاني_المنخفضة",
|
||||
"tooltip": "عتبة منخفضة لكشف حواف كاني؛ تُتجاهل إذا كانت skip_processing مفعلة."
|
||||
},
|
||||
"control_after_generate": {
|
||||
"name": "التحكم بعد التوليد"
|
||||
},
|
||||
"control_image": {
|
||||
"name": "صورة_التحكم"
|
||||
},
|
||||
"guidance": {
|
||||
"name": "الإرشاد",
|
||||
"tooltip": "قوة الإرشاد لعملية توليد الصورة"
|
||||
},
|
||||
"prompt": {
|
||||
"name": "الوصف",
|
||||
"tooltip": "الوصف المطلوب لتوليد الصورة"
|
||||
},
|
||||
"prompt_upsampling": {
|
||||
"name": "تحسين_الوصف",
|
||||
"tooltip": "ما إذا كان يجب تحسين الوصف. إذا تم تفعيله، يتم تعديل الوصف تلقائيًا للحصول على توليد إبداعي أكثر، لكن النتائج غير حتمية (نفس البذرة لن تنتج نفس النتيجة بالضبط)."
|
||||
},
|
||||
"seed": {
|
||||
"name": "البذرة",
|
||||
"tooltip": "البذرة العشوائية المستخدمة لإنشاء الضجيج."
|
||||
},
|
||||
"skip_preprocessing": {
|
||||
"name": "تخطي_المعالجة_المسبقة",
|
||||
"tooltip": "ما إذا كان يجب تخطي المعالجة المسبقة؛ اجعلها True إذا كانت صورة التحكم معالجة بالفعل بالكاني، و False إذا كانت صورة خام."
|
||||
},
|
||||
"steps": {
|
||||
"name": "الخطوات",
|
||||
"tooltip": "عدد الخطوات في عملية توليد الصورة"
|
||||
}
|
||||
}
|
||||
},
|
||||
"FluxProDepthNode": {
|
||||
"description": "توليد صورة باستخدام صورة تحكم (العمق).",
|
||||
"display_name": "Flux.1 صورة تحكم العمق",
|
||||
"inputs": {
|
||||
"control_after_generate": {
|
||||
"name": "التحكم بعد التوليد"
|
||||
},
|
||||
"control_image": {
|
||||
"name": "صورة_التحكم"
|
||||
},
|
||||
"guidance": {
|
||||
"name": "الإرشاد",
|
||||
"tooltip": "قوة الإرشاد لعملية توليد الصورة"
|
||||
},
|
||||
"prompt": {
|
||||
"name": "الوصف",
|
||||
"tooltip": "الوصف المطلوب لتوليد الصورة"
|
||||
},
|
||||
"prompt_upsampling": {
|
||||
"name": "تحسين_الوصف",
|
||||
"tooltip": "ما إذا كان يجب تحسين الوصف. إذا تم تفعيله، يتم تعديل الوصف تلقائيًا للحصول على توليد إبداعي أكثر، لكن النتائج غير حتمية (نفس البذرة لن تنتج نفس النتيجة بالضبط)."
|
||||
},
|
||||
"seed": {
|
||||
"name": "البذرة",
|
||||
"tooltip": "البذرة العشوائية المستخدمة لإنشاء الضجيج."
|
||||
},
|
||||
"skip_preprocessing": {
|
||||
"name": "تخطي_المعالجة_المسبقة",
|
||||
"tooltip": "ما إذا كان يجب تخطي المعالجة المسبقة؛ اجعلها True إذا كانت صورة التحكم معالجة بالفعل بالعمق، و False إذا كانت صورة خام."
|
||||
},
|
||||
"steps": {
|
||||
"name": "الخطوات",
|
||||
"tooltip": "عدد الخطوات في عملية توليد الصورة"
|
||||
}
|
||||
}
|
||||
},
|
||||
"FluxProExpandNode": {
|
||||
"description": "توسيع الصورة بناءً على الوصف.",
|
||||
"display_name": "Flux.1 توسيع الصورة",
|
||||
|
||||
@@ -36,6 +36,8 @@
|
||||
"import": "Import",
|
||||
"loadAllFolders": "Load All Folders",
|
||||
"logoAlt": "ComfyUI Logo",
|
||||
"comfyOrgLogoAlt": "ComfyOrg Logo",
|
||||
"comfy": "Comfy",
|
||||
"refresh": "Refresh",
|
||||
"refreshNode": "Refresh Node",
|
||||
"terminal": "Terminal",
|
||||
@@ -60,6 +62,10 @@
|
||||
"icon": "Icon",
|
||||
"color": "Color",
|
||||
"error": "Error",
|
||||
"resizeFromBottomRight": "Resize from bottom-right corner",
|
||||
"resizeFromTopRight": "Resize from top-right corner",
|
||||
"resizeFromBottomLeft": "Resize from bottom-left corner",
|
||||
"resizeFromTopLeft": "Resize from top-left corner",
|
||||
"info": "Node Info",
|
||||
"bookmark": "Save to Library",
|
||||
"moreOptions": "More Options",
|
||||
@@ -88,6 +94,11 @@
|
||||
"no": "No",
|
||||
"cancel": "Cancel",
|
||||
"close": "Close",
|
||||
"pressKeysForNewBinding": "Press keys for new binding",
|
||||
"defaultBanner": "default banner",
|
||||
"enableOrDisablePack": "Enable or disable pack",
|
||||
"openManager": "Open Manager",
|
||||
"graphNavigation": "Graph navigation",
|
||||
"dropYourFileOr": "Drop your file or",
|
||||
"back": "Back",
|
||||
"next": "Next",
|
||||
@@ -734,310 +745,6 @@
|
||||
"LLM API": "LLM API",
|
||||
"All": "All Templates"
|
||||
},
|
||||
"templateDescription": {
|
||||
"Basics": {
|
||||
"default": "Generate images from text prompts.",
|
||||
"image2image": "Transform existing images using text prompts.",
|
||||
"lora": "Generate images with LoRA models for specialized styles or subjects.",
|
||||
"lora_multiple": "Generate images by combining multiple LoRA models.",
|
||||
"inpaint_example": "Edit specific parts of images seamlessly.",
|
||||
"inpaint_model_outpainting": "Extend images beyond their original boundaries.",
|
||||
"embedding_example": "Generate images using textual inversion for consistent styles.",
|
||||
"gligen_textbox_example": "Generate images with precise object placement using text boxes."
|
||||
},
|
||||
"Flux": {
|
||||
"flux_kontext_dev_basic": "Edit image using Flux Kontext with full node visibility, perfect for learning the workflow.",
|
||||
"flux_kontext_dev_grouped": "Streamlined version of Flux Kontext with grouped nodes for cleaner workspace.",
|
||||
"flux_dev_checkpoint_example": "Generate images using Flux Dev fp8 quantized version. Suitable for devices with limited VRAM, requires only one model file, but image quality is slightly lower than the full version.",
|
||||
"flux_schnell": "Quickly generate images with Flux Schnell fp8 quantized version. Ideal for low-end hardware, requires only 4 steps to generate images.",
|
||||
"flux_dev_full_text_to_image": "Generate high-quality images with Flux Dev full version. Requires larger VRAM and multiple model files, but provides the best prompt following capability and image quality.",
|
||||
"flux_schnell_full_text_to_image": "Generate images quickly with Flux Schnell full version. Uses Apache2.0 license, requires only 4 steps to generate images while maintaining good image quality.",
|
||||
"flux_fill_inpaint_example": "Fill missing parts of images using Flux inpainting.",
|
||||
"flux_fill_outpaint_example": "Extend images beyond boundaries using Flux outpainting.",
|
||||
"flux_canny_model_example": "Generate images guided by edge detection using Flux Canny.",
|
||||
"flux_depth_lora_example": "Generate images guided by depth information using Flux LoRA.",
|
||||
"flux_redux_model_example": "Generate images by transferring style from reference images using Flux Redux."
|
||||
},
|
||||
"Image": {
|
||||
"image_omnigen2_t2i": "Generate high-quality images from text prompts using OmniGen2's unified 7B multimodal model with dual-path architecture.",
|
||||
"image_omnigen2_image_edit": "Edit images with natural language instructions using OmniGen2's advanced image editing capabilities and text rendering support.",
|
||||
"image_cosmos_predict2_2B_t2i": "Generate images with Cosmos-Predict2 2B T2I, delivering physically accurate, high-fidelity, and detail-rich image generation.",
|
||||
"image_chroma_text_to_image": "Chroma is modified from flux and has some changes in the architecture.",
|
||||
"hidream_i1_dev": "Generate images with HiDream I1 Dev - Balanced version with 28 inference steps, suitable for medium-range hardware.",
|
||||
"hidream_i1_fast": "Generate images quickly with HiDream I1 Fast - Lightweight version with 16 inference steps, ideal for rapid previews on lower-end hardware.",
|
||||
"hidream_i1_full": "Generate images with HiDream I1 Full - Complete version with 50 inference steps for highest quality output.",
|
||||
"hidream_e1_full": "Edit images with HiDream E1 - Professional natural language image editing model.",
|
||||
"sd3_5_simple_example": "Generate images using SD 3.5.",
|
||||
"sd3_5_large_canny_controlnet_example": "Generate images guided by edge detection using SD 3.5 Canny ControlNet.",
|
||||
"sd3_5_large_depth": "Generate images guided by depth information using SD 3.5.",
|
||||
"sd3_5_large_blur": "Generate images guided by blurred reference images using SD 3.5.",
|
||||
"sdxl_simple_example": "Generate high-quality images using SDXL.",
|
||||
"sdxl_refiner_prompt_example": "Enhance SDXL images using refiner models.",
|
||||
"sdxl_revision_text_prompts": "Generate images by transferring concepts from reference images using SDXL Revision.",
|
||||
"sdxl_revision_zero_positive": "Generate images using both text prompts and reference images with SDXL Revision.",
|
||||
"sdxlturbo_example": "Generate images in a single step using SDXL Turbo.",
|
||||
"image_lotus_depth_v1_1": "Run Lotus Depth in ComfyUI for zero-shot, efficient monocular depth estimation with high detail retention."
|
||||
},
|
||||
"Video": {
|
||||
"video_cosmos_predict2_2B_video2world_480p_16fps": "Generate videos with Cosmos-Predict2 2B Video2World, generating physically accurate, high-fidelity, and consistent video simulations.",
|
||||
"video_wan_vace_14B_t2v": "Transform text descriptions into high-quality videos. Supports both 480p and 720p with VACE-14B model.",
|
||||
"video_wan_vace_14B_ref2v": "Create videos that match the style and content of a reference image. Perfect for style-consistent video generation.",
|
||||
"video_wan_vace_14B_v2v": "Generate videos by controlling input videos and reference images using Wan VACE.",
|
||||
"video_wan_vace_outpainting": "Generate extended videos by expanding video size using Wan VACE outpainting.",
|
||||
"video_wan_vace_flf2v": "Generate smooth video transitions by defining start and end frames. Supports custom keyframe sequences.",
|
||||
"video_wan_vace_inpainting": "Edit specific regions in videos while preserving surrounding content. Great for object removal or replacement.",
|
||||
"video_wan2_1_fun_camera_v1_1_1_3B": "Generate dynamic videos with cinematic camera movements using Wan 2.1 Fun Camera 1.3B model.",
|
||||
"video_wan2_1_fun_camera_v1_1_14B": "Generate high-quality videos with advanced camera control using the full 14B model",
|
||||
"text_to_video_wan": "Generate videos from text prompts using Wan 2.1.",
|
||||
"image_to_video_wan": "Generate videos from images using Wan 2.1.",
|
||||
"wan2_1_fun_inp": "Generate videos from start and end frames using Wan 2.1 inpainting.",
|
||||
"wan2_1_fun_control": "Generate videos guided by pose, depth, and edge controls using Wan 2.1 ControlNet.",
|
||||
"wan2_1_flf2v_720_f16": "Generate videos by controlling first and last frames using Wan 2.1 FLF2V.",
|
||||
"ltxv_text_to_video": "Generate videos from text prompts.",
|
||||
"ltxv_image_to_video": "Generate videos from still images.",
|
||||
"mochi_text_to_video_example": "Generate videos from text prompts using Mochi model.",
|
||||
"hunyuan_video_text_to_video": "Generate videos from text prompts using Hunyuan model.",
|
||||
"image_to_video": "Generate videos from still images.",
|
||||
"txt_to_image_to_video": "Generate videos by first creating images from text prompts."
|
||||
},
|
||||
"Image API": {
|
||||
"api_bfl_flux_1_kontext_multiple_images_input": "Input multiple images and edit them with Flux.1 Kontext.",
|
||||
"api_bfl_flux_1_kontext_pro_image": "Edit images with Flux.1 Kontext pro image.",
|
||||
"api_bfl_flux_1_kontext_max_image": "Edit images with Flux.1 Kontext max image.",
|
||||
"api_bfl_flux_pro_t2i": "Generate images with excellent prompt following and visual quality using FLUX.1 Pro.",
|
||||
"api_luma_photon_i2i": "Guide image generation using a combination of images and prompt.",
|
||||
"api_luma_photon_style_ref": "Generate images by blending style references with precise control using Luma Photon.",
|
||||
"api_recraft_image_gen_with_color_control": "Generate images with custom color palettes and brand-specific visuals using Recraft.",
|
||||
"api_recraft_image_gen_with_style_control": "Control style with visual examples, align positioning, and fine-tune objects. Store and share styles for perfect brand consistency.",
|
||||
"api_recraft_vector_gen": "Generate high-quality vector images from text prompts using Recraft's AI vector generator.",
|
||||
"api_runway_text_to_image": "Generate high-quality images from text prompts using Runway's AI model.",
|
||||
"api_runway_reference_to_image": "Generate new images based on reference styles and compositions with Runway's AI.",
|
||||
"api_stability_ai_stable_image_ultra_t2i": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.",
|
||||
"api_stability_ai_i2i": "Transform images with high-quality generation using Stability AI, perfect for professional editing and style transfer.",
|
||||
"api_stability_ai_sd3_5_t2i": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.",
|
||||
"api_stability_ai_sd3_5_i2i": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.",
|
||||
"api_ideogram_v3_t2i": "Generate professional-quality images with excellent prompt alignment, photorealism, and text rendering using Ideogram V3.",
|
||||
"api_openai_image_1_t2i": "Generate images from text prompts using OpenAI GPT Image 1 API.",
|
||||
"api_openai_image_1_i2i": "Generate images from input images using OpenAI GPT Image 1 API.",
|
||||
"api_openai_image_1_inpaint": "Edit images using inpainting with OpenAI GPT Image 1 API.",
|
||||
"api_openai_image_1_multi_inputs": "Generate images from multiple inputs using OpenAI GPT Image 1 API.",
|
||||
"api_openai_dall_e_2_t2i": "Generate images from text prompts using OpenAI Dall-E 2 API.",
|
||||
"api_openai_dall_e_2_inpaint": "Edit images using inpainting with OpenAI Dall-E 2 API.",
|
||||
"api_openai_dall_e_3_t2i": "Generate images from text prompts using OpenAI Dall-E 3 API."
|
||||
},
|
||||
"Video API": {
|
||||
"api_moonvalley_text_to_video": "Generate cinematic, 1080p videos from text prompts through a model trained exclusively on licensed data.",
|
||||
"api_moonvalley_image_to_video": "Generate cinematic, 1080p videos with an image through a model trained exclusively on licensed data.",
|
||||
"api_kling_i2v": "Generate videos with excellent prompt adherence for actions, expressions, and camera movements using Kling.",
|
||||
"api_kling_effects": "Generate dynamic videos by applying visual effects to images using Kling.",
|
||||
"api_kling_flf": "Generate videos through controlling the first and last frames.",
|
||||
"api_luma_i2v": "Take static images and instantly create magical high quality animations.",
|
||||
"api_luma_t2v": "High-quality videos can be generated using simple prompts.",
|
||||
"api_hailuo_minimax_t2v": "Generate high-quality videos directly from text prompts. Explore MiniMax's advanced AI capabilities to create diverse visual narratives with professional CGI effects and stylistic elements to bring your descriptions to life.",
|
||||
"api_hailuo_minimax_i2v": "Generate refined videos from images and text with CGI integration using MiniMax.",
|
||||
"api_pixverse_i2v": "Generate dynamic videos from static images with motion and effects using PixVerse.",
|
||||
"api_pixverse_template_i2v": "Generate dynamic videos from static images with motion and effects using PixVerse.",
|
||||
"api_pixverse_t2v": "Generate videos with accurate prompt interpretation and stunning video dynamics.",
|
||||
"api_runway_gen3a_turbo_image_to_video": "Generate cinematic videos from static images using Runway Gen3a Turbo.",
|
||||
"api_runway_gen4_turo_image_to_video": "Generate dynamic videos from images using Runway Gen4 Turbo.",
|
||||
"api_runway_first_last_frame": "Generate smooth video transitions between two keyframes with Runway's precision.",
|
||||
"api_pika_i2v": "Generate smooth animated videos from single static images using Pika AI.",
|
||||
"api_pika_scene": "Generate videos that incorporate multiple input images using Pika Scenes.",
|
||||
"api_veo2_i2v": "Generate videos from images using Google Veo2 API."
|
||||
},
|
||||
"3D API": {
|
||||
"api_rodin_image_to_model": "Generate detailed 3D models from single photos using Rodin AI.",
|
||||
"api_rodin_multiview_to_model": "Sculpt comprehensive 3D models using Rodin's multi-angle reconstruction.",
|
||||
"api_tripo_text_to_model": "Craft 3D objects from descriptions with Tripo's text-driven modeling.",
|
||||
"api_tripo_image_to_model": "Generate professional 3D assets from 2D images using Tripo engine.",
|
||||
"api_tripo_multiview_to_model": "Build 3D models from multiple angles with Tripo's advanced scanner."
|
||||
},
|
||||
"LLM API": {
|
||||
"api_openai_chat": "Engage with OpenAI's advanced language models for intelligent conversations.",
|
||||
"api_google_gemini": "Experience Google's multimodal AI with Gemini's reasoning capabilities."
|
||||
},
|
||||
"Upscaling": {
|
||||
"hiresfix_latent_workflow": "Upscale images by enhancing quality in latent space.",
|
||||
"esrgan_example": "Upscale images using ESRGAN models to enhance quality.",
|
||||
"hiresfix_esrgan_workflow": "Upscale images using ESRGAN models during intermediate generation steps.",
|
||||
"latent_upscale_different_prompt_model": "Upscale images while changing prompts across generation passes."
|
||||
},
|
||||
"ControlNet": {
|
||||
"controlnet_example": "Generate images guided by scribble reference images using ControlNet.",
|
||||
"2_pass_pose_worship": "Generate images guided by pose references using ControlNet.",
|
||||
"depth_controlnet": "Generate images guided by depth information using ControlNet.",
|
||||
"depth_t2i_adapter": "Generate images guided by depth information using T2I adapter.",
|
||||
"mixing_controlnets": "Generate images by combining multiple ControlNet models."
|
||||
},
|
||||
"Area Composition": {
|
||||
"area_composition": "Generate images by controlling composition with defined areas.",
|
||||
"area_composition_square_area_for_subject": "Generate images with consistent subject placement using area composition."
|
||||
},
|
||||
"3D": {
|
||||
"3d_hunyuan3d_image_to_model": "Generate 3D models from single images using Hunyuan3D 2.0.",
|
||||
"3d_hunyuan3d_multiview_to_model": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV.",
|
||||
"3d_hunyuan3d_multiview_to_model_turbo": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV Turbo.",
|
||||
"stable_zero123_example": "Generate 3D views from single images using Stable Zero123."
|
||||
},
|
||||
"Audio": {
|
||||
"audio_stable_audio_example": "Generate audio from text prompts using Stable Audio.",
|
||||
"audio_ace_step_1_t2a_instrumentals": "Generate instrumental music from text prompts using ACE-Step v1.",
|
||||
"audio_ace_step_1_t2a_song": "Generate songs with vocals from text prompts using ACE-Step v1, supporting multilingual and style customization.",
|
||||
"audio_ace_step_1_m2m_editing": "Edit existing songs to change style and lyrics using ACE-Step v1 M2M."
|
||||
}
|
||||
},
|
||||
"template": {
|
||||
"Basics": {
|
||||
"default": "Image Generation",
|
||||
"image2image": "Image to Image",
|
||||
"lora": "LoRA",
|
||||
"lora_multiple": "LoRA Multiple",
|
||||
"inpaint_example": "Inpaint",
|
||||
"inpaint_model_outpainting": "Outpaint",
|
||||
"embedding_example": "Embedding",
|
||||
"gligen_textbox_example": "Gligen Textbox"
|
||||
},
|
||||
"Flux": {
|
||||
"flux_kontext_dev_basic": "Flux Kontext Dev(Basic)",
|
||||
"flux_kontext_dev_grouped": "Flux Kontext Dev(Grouped)",
|
||||
"flux_dev_checkpoint_example": "Flux Dev fp8",
|
||||
"flux_schnell": "Flux Schnell fp8",
|
||||
"flux_dev_full_text_to_image": "Flux Dev full text to image",
|
||||
"flux_schnell_full_text_to_image": "Flux Schnell full text to image",
|
||||
"flux_fill_inpaint_example": "Flux Inpaint",
|
||||
"flux_fill_outpaint_example": "Flux Outpaint",
|
||||
"flux_canny_model_example": "Flux Canny Model",
|
||||
"flux_depth_lora_example": "Flux Depth LoRA",
|
||||
"flux_redux_model_example": "Flux Redux Model"
|
||||
},
|
||||
"Image": {
|
||||
"image_omnigen2_t2i": "OmniGen2 Text to Image",
|
||||
"image_omnigen2_image_edit": "OmniGen2 Image Edit",
|
||||
"image_cosmos_predict2_2B_t2i": "Cosmos Predict2 2B T2I",
|
||||
"image_chroma_text_to_image": "Chroma text to image",
|
||||
"hidream_i1_dev": "HiDream I1 Dev",
|
||||
"hidream_i1_fast": "HiDream I1 Fast",
|
||||
"hidream_i1_full": "HiDream I1 Full",
|
||||
"hidream_e1_full": "HiDream E1 Full",
|
||||
"sd3_5_simple_example": "SD3.5 Simple",
|
||||
"sd3_5_large_canny_controlnet_example": "SD3.5 Large Canny ControlNet",
|
||||
"sd3_5_large_depth": "SD3.5 Large Depth",
|
||||
"sd3_5_large_blur": "SD3.5 Large Blur",
|
||||
"sdxl_simple_example": "SDXL Simple",
|
||||
"sdxl_refiner_prompt_example": "SDXL Refiner Prompt",
|
||||
"sdxl_revision_text_prompts": "SDXL Revision Text Prompts",
|
||||
"sdxl_revision_zero_positive": "SDXL Revision Zero Positive",
|
||||
"sdxlturbo_example": "SDXL Turbo",
|
||||
"image_lotus_depth_v1_1": "Lotus Depth"
|
||||
},
|
||||
"Video": {
|
||||
"video_cosmos_predict2_2B_video2world_480p_16fps": "Cosmos Predict2 2B Video2World 480p 16fps",
|
||||
"video_wan_vace_14B_t2v": "Wan VACE Text to Video",
|
||||
"video_wan_vace_14B_ref2v": "Wan VACE Reference to Video",
|
||||
"video_wan_vace_14B_v2v": "Wan VACE Control Video",
|
||||
"video_wan_vace_outpainting": "Wan VACE Outpainting",
|
||||
"video_wan_vace_flf2v": "Wan VACE First-Last Frame",
|
||||
"video_wan_vace_inpainting": "Wan VACE Inpainting",
|
||||
"video_wan2_1_fun_camera_v1_1_1_3B": "Wan 2.1 Fun Camera 1.3B",
|
||||
"video_wan2_1_fun_camera_v1_1_14B": "Wan 2.1 Fun Camera 14B",
|
||||
"text_to_video_wan": "Wan 2.1 Text to Video",
|
||||
"image_to_video_wan": "Wan 2.1 Image to Video",
|
||||
"wan2_1_fun_inp": "Wan 2.1 Inpainting",
|
||||
"wan2_1_fun_control": "Wan 2.1 ControlNet",
|
||||
"wan2_1_flf2v_720_f16": "Wan 2.1 FLF2V 720p F16",
|
||||
"ltxv_text_to_video": "LTXV Text to Video",
|
||||
"ltxv_image_to_video": "LTXV Image to Video",
|
||||
"mochi_text_to_video_example": "Mochi Text to Video",
|
||||
"hunyuan_video_text_to_video": "Hunyuan Video Text to Video",
|
||||
"image_to_video": "SVD Image to Video",
|
||||
"txt_to_image_to_video": "SVD Text to Image to Video"
|
||||
},
|
||||
"Image API": {
|
||||
"api_bfl_flux_1_kontext_multiple_images_input": "BFL Flux.1 Kontext Multiple Image Input",
|
||||
"api_bfl_flux_1_kontext_pro_image": "BFL Flux.1 Kontext Pro",
|
||||
"api_bfl_flux_1_kontext_max_image": "BFL Flux.1 Kontext Max",
|
||||
"api_bfl_flux_pro_t2i": "BFL Flux[Pro]: Text to Image",
|
||||
"api_luma_photon_i2i": "Luma Photon: Image to Image",
|
||||
"api_luma_photon_style_ref": "Luma Photon: Style Reference",
|
||||
"api_recraft_image_gen_with_color_control": "Recraft: Color Control Image Generation",
|
||||
"api_recraft_image_gen_with_style_control": "Recraft: Style Control Image Generation",
|
||||
"api_recraft_vector_gen": "Recraft: Vector Generation",
|
||||
"api_runway_text_to_image": "Runway: Text to Image",
|
||||
"api_runway_reference_to_image": "Runway: Reference to Image",
|
||||
"api_stability_ai_stable_image_ultra_t2i": "Stability AI: Stable Image Ultra Text to Image",
|
||||
"api_stability_ai_i2i": "Stability AI: Image to Image",
|
||||
"api_stability_ai_sd3_5_t2i": "Stability AI: SD3.5 Text to Image",
|
||||
"api_stability_ai_sd3_5_i2i": "Stability AI: SD3.5 Image to Image",
|
||||
"api_ideogram_v3_t2i": "Ideogram V3: Text to Image",
|
||||
"api_openai_image_1_t2i": "OpenAI: GPT-Image-1 Text to Image",
|
||||
"api_openai_image_1_i2i": "OpenAI: GPT-Image-1 Image to Image",
|
||||
"api_openai_image_1_inpaint": "OpenAI: GPT-Image-1 Inpaint",
|
||||
"api_openai_image_1_multi_inputs": "OpenAI: GPT-Image-1 Multi Inputs",
|
||||
"api_openai_dall_e_2_t2i": "OpenAI: Dall-E 2 Text to Image",
|
||||
"api_openai_dall_e_2_inpaint": "OpenAI: Dall-E 2 Inpaint",
|
||||
"api_openai_dall_e_3_t2i": "OpenAI: Dall-E 3 Text to Image"
|
||||
},
|
||||
"Video API": {
|
||||
"api_moonvalley_text_to_video": "Moonvalley: Text to Video",
|
||||
"api_moonvalley_image_to_video": "Moonvalley: Image to Video",
|
||||
"api_kling_i2v": "Kling: Image to Video",
|
||||
"api_kling_effects": "Kling: Video Effects",
|
||||
"api_kling_flf": "Kling: FLF2V",
|
||||
"api_luma_i2v": "Luma: Image to Video",
|
||||
"api_luma_t2v": "Luma: Text to Video",
|
||||
"api_hailuo_minimax_t2v": "MiniMax: Text to Video",
|
||||
"api_hailuo_minimax_i2v": "MiniMax: Image to Video",
|
||||
"api_pixverse_i2v": "PixVerse: Image to Video",
|
||||
"api_pixverse_template_i2v": "PixVerse Templates: Image to Video",
|
||||
"api_pixverse_t2v": "PixVerse: Text to Video",
|
||||
"api_runway_gen3a_turbo_image_to_video": "Runway: Gen3a Turbo Image to Video",
|
||||
"api_runway_gen4_turo_image_to_video": "Runway: Gen4 Turbo Image to Video",
|
||||
"api_runway_first_last_frame": "Runway: First Last Frame to Video",
|
||||
"api_pika_i2v": "Pika: Image to Video",
|
||||
"api_pika_scene": "Pika Scenes: Images to Video",
|
||||
"api_veo2_i2v": "Veo2: Image to Video"
|
||||
},
|
||||
"3D API": {
|
||||
"api_rodin_image_to_model": "Rodin: Image to Model",
|
||||
"api_rodin_multiview_to_model": "Rodin: Multiview to Model",
|
||||
"api_tripo_text_to_model": "Tripo: Text to Model",
|
||||
"api_tripo_image_to_model": "Tripo: Image to Model",
|
||||
"api_tripo_multiview_to_model": "Tripo: Multiview to Model"
|
||||
},
|
||||
"LLM API": {
|
||||
"api_openai_chat": "OpenAI: Chat",
|
||||
"api_google_gemini": "Google Gemini: Chat"
|
||||
},
|
||||
"Upscaling": {
|
||||
"hiresfix_latent_workflow": "Upscale",
|
||||
"esrgan_example": "ESRGAN",
|
||||
"hiresfix_esrgan_workflow": "HiresFix ESRGAN Workflow",
|
||||
"latent_upscale_different_prompt_model": "Latent Upscale Different Prompt Model"
|
||||
},
|
||||
"ControlNet": {
|
||||
"controlnet_example": "Scribble ControlNet",
|
||||
"2_pass_pose_worship": "Pose ControlNet 2 Pass",
|
||||
"depth_controlnet": "Depth ControlNet",
|
||||
"depth_t2i_adapter": "Depth T2I Adapter",
|
||||
"mixing_controlnets": "Mixing ControlNets"
|
||||
},
|
||||
"Area Composition": {
|
||||
"area_composition": "Area Composition",
|
||||
"area_composition_square_area_for_subject": "Area Composition Square Area for Subject"
|
||||
},
|
||||
"3D": {
|
||||
"3d_hunyuan3d_image_to_model": "Hunyuan3D 2.0",
|
||||
"3d_hunyuan3d_multiview_to_model": "Hunyuan3D 2.0 MV",
|
||||
"3d_hunyuan3d_multiview_to_model_turbo": "Hunyuan3D 2.0 MV Turbo",
|
||||
"stable_zero123_example": "Stable Zero123"
|
||||
},
|
||||
"Audio": {
|
||||
"audio_stable_audio_example": "Stable Audio",
|
||||
"audio_ace_step_1_t2a_instrumentals": "ACE-Step v1 Text to Instrumentals Music",
|
||||
"audio_ace_step_1_t2a_song": "ACE Step v1 Text to Song",
|
||||
"audio_ace_step_1_m2m_editing": "ACE Step v1 M2M Editing"
|
||||
}
|
||||
},
|
||||
"categories": "Categories",
|
||||
"resetFilters": "Clear Filters",
|
||||
"sorting": "Sort by",
|
||||
@@ -1948,7 +1655,7 @@
|
||||
"benefit2": "Up to 30 min runtime per job"
|
||||
},
|
||||
"required": {
|
||||
"title": "Subscribe to",
|
||||
"title": "Subscribe to",
|
||||
"waitingForSubscription": "Complete your subscription in the new tab. We'll automatically detect when you're done!",
|
||||
"subscribe": "Subscribe"
|
||||
},
|
||||
|
||||
@@ -2369,96 +2369,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"FluxProCannyNode": {
|
||||
"display_name": "Flux.1 Canny Control Image",
|
||||
"description": "Generate image using a control image (canny).",
|
||||
"inputs": {
|
||||
"control_image": {
|
||||
"name": "control_image"
|
||||
},
|
||||
"prompt": {
|
||||
"name": "prompt",
|
||||
"tooltip": "Prompt for the image generation"
|
||||
},
|
||||
"prompt_upsampling": {
|
||||
"name": "prompt_upsampling",
|
||||
"tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result)."
|
||||
},
|
||||
"canny_low_threshold": {
|
||||
"name": "canny_low_threshold",
|
||||
"tooltip": "Low threshold for Canny edge detection; ignored if skip_processing is True"
|
||||
},
|
||||
"canny_high_threshold": {
|
||||
"name": "canny_high_threshold",
|
||||
"tooltip": "High threshold for Canny edge detection; ignored if skip_processing is True"
|
||||
},
|
||||
"skip_preprocessing": {
|
||||
"name": "skip_preprocessing",
|
||||
"tooltip": "Whether to skip preprocessing; set to True if control_image already is canny-fied, False if it is a raw image."
|
||||
},
|
||||
"guidance": {
|
||||
"name": "guidance",
|
||||
"tooltip": "Guidance strength for the image generation process"
|
||||
},
|
||||
"steps": {
|
||||
"name": "steps",
|
||||
"tooltip": "Number of steps for the image generation process"
|
||||
},
|
||||
"seed": {
|
||||
"name": "seed",
|
||||
"tooltip": "The random seed used for creating the noise."
|
||||
},
|
||||
"control_after_generate": {
|
||||
"name": "control after generate"
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"0": {
|
||||
"tooltip": null
|
||||
}
|
||||
}
|
||||
},
|
||||
"FluxProDepthNode": {
|
||||
"display_name": "Flux.1 Depth Control Image",
|
||||
"description": "Generate image using a control image (depth).",
|
||||
"inputs": {
|
||||
"control_image": {
|
||||
"name": "control_image"
|
||||
},
|
||||
"prompt": {
|
||||
"name": "prompt",
|
||||
"tooltip": "Prompt for the image generation"
|
||||
},
|
||||
"prompt_upsampling": {
|
||||
"name": "prompt_upsampling",
|
||||
"tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result)."
|
||||
},
|
||||
"skip_preprocessing": {
|
||||
"name": "skip_preprocessing",
|
||||
"tooltip": "Whether to skip preprocessing; set to True if control_image already is depth-ified, False if it is a raw image."
|
||||
},
|
||||
"guidance": {
|
||||
"name": "guidance",
|
||||
"tooltip": "Guidance strength for the image generation process"
|
||||
},
|
||||
"steps": {
|
||||
"name": "steps",
|
||||
"tooltip": "Number of steps for the image generation process"
|
||||
},
|
||||
"seed": {
|
||||
"name": "seed",
|
||||
"tooltip": "The random seed used for creating the noise."
|
||||
},
|
||||
"control_after_generate": {
|
||||
"name": "control after generate"
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"0": {
|
||||
"tooltip": null
|
||||
}
|
||||
}
|
||||
},
|
||||
"FluxProExpandNode": {
|
||||
"display_name": "Flux.1 Expand Image",
|
||||
"description": "Outpaints image based on prompt.",
|
||||
@@ -11662,10 +11572,12 @@
|
||||
},
|
||||
"outputs": {
|
||||
"0": {
|
||||
"name": "model_file"
|
||||
"name": "model_file",
|
||||
"tooltip": null
|
||||
},
|
||||
"1": {
|
||||
"name": "model task_id"
|
||||
"name": "model task_id",
|
||||
"tooltip": null
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -11718,10 +11630,12 @@
|
||||
},
|
||||
"outputs": {
|
||||
"0": {
|
||||
"name": "model_file"
|
||||
"name": "model_file",
|
||||
"tooltip": null
|
||||
},
|
||||
"1": {
|
||||
"name": "model task_id"
|
||||
"name": "model task_id",
|
||||
"tooltip": null
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -11736,10 +11650,12 @@
|
||||
},
|
||||
"outputs": {
|
||||
"0": {
|
||||
"name": "model_file"
|
||||
"name": "model_file",
|
||||
"tooltip": null
|
||||
},
|
||||
"1": {
|
||||
"name": "model task_id"
|
||||
"name": "model task_id",
|
||||
"tooltip": null
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -11755,10 +11671,12 @@
|
||||
},
|
||||
"outputs": {
|
||||
"0": {
|
||||
"name": "model_file"
|
||||
"name": "model_file",
|
||||
"tooltip": null
|
||||
},
|
||||
"1": {
|
||||
"name": "retarget task_id"
|
||||
"name": "retarget task_id",
|
||||
"tooltip": null
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -11771,10 +11689,12 @@
|
||||
},
|
||||
"outputs": {
|
||||
"0": {
|
||||
"name": "model_file"
|
||||
"name": "model_file",
|
||||
"tooltip": null
|
||||
},
|
||||
"1": {
|
||||
"name": "rig task_id"
|
||||
"name": "rig task_id",
|
||||
"tooltip": null
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -11820,10 +11740,12 @@
|
||||
},
|
||||
"outputs": {
|
||||
"0": {
|
||||
"name": "model_file"
|
||||
"name": "model_file",
|
||||
"tooltip": null
|
||||
},
|
||||
"1": {
|
||||
"name": "model task_id"
|
||||
"name": "model task_id",
|
||||
"tooltip": null
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -11851,10 +11773,12 @@
|
||||
},
|
||||
"outputs": {
|
||||
"0": {
|
||||
"name": "model_file"
|
||||
"name": "model_file",
|
||||
"tooltip": null
|
||||
},
|
||||
"1": {
|
||||
"name": "model task_id"
|
||||
"name": "model task_id",
|
||||
"tooltip": null
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
@@ -1523,310 +1523,6 @@
|
||||
"searchPlaceholder": "Buscar..."
|
||||
},
|
||||
"sorting": "Ordenar por",
|
||||
"template": {
|
||||
"3D": {
|
||||
"3d_hunyuan3d_image_to_model": "Hunyuan3D 2.0",
|
||||
"3d_hunyuan3d_multiview_to_model": "Hunyuan3D 2.0 MV",
|
||||
"3d_hunyuan3d_multiview_to_model_turbo": "Hunyuan3D 2.0 MV Turbo",
|
||||
"stable_zero123_example": "Estable Zero123"
|
||||
},
|
||||
"3D API": {
|
||||
"api_rodin_image_to_model": "Rodin: Imagen a Modelo",
|
||||
"api_rodin_multiview_to_model": "Rodin: Multivista a Modelo",
|
||||
"api_tripo_image_to_model": "Tripo: Imagen a Modelo",
|
||||
"api_tripo_multiview_to_model": "Tripo: Multivista a Modelo",
|
||||
"api_tripo_text_to_model": "Tripo: Texto a Modelo"
|
||||
},
|
||||
"Area Composition": {
|
||||
"area_composition": "Composición de Área",
|
||||
"area_composition_square_area_for_subject": "Composición de Área Cuadrada para el Sujeto"
|
||||
},
|
||||
"Audio": {
|
||||
"audio_ace_step_1_m2m_editing": "ACE Step v1 Edición M2M",
|
||||
"audio_ace_step_1_t2a_instrumentals": "ACE-Step v1 Texto a Música Instrumental",
|
||||
"audio_ace_step_1_t2a_song": "ACE Step v1 Texto a Canción",
|
||||
"audio_stable_audio_example": "Stable Audio"
|
||||
},
|
||||
"Basics": {
|
||||
"default": "Generación de Imagen",
|
||||
"embedding_example": "Incrustación",
|
||||
"gligen_textbox_example": "Caja de Texto Gligen",
|
||||
"image2image": "Imagen a Imagen",
|
||||
"inpaint_example": "Inpaint",
|
||||
"inpaint_model_outpainting": "Outpaint",
|
||||
"lora": "LoRA",
|
||||
"lora_multiple": "LoRA Múltiple"
|
||||
},
|
||||
"ControlNet": {
|
||||
"2_pass_pose_worship": "ControlNet de Pose 2 Pasadas",
|
||||
"controlnet_example": "ControlNet de Garabato",
|
||||
"depth_controlnet": "ControlNet de Profundidad",
|
||||
"depth_t2i_adapter": "Adaptador de Profundidad T2I",
|
||||
"mixing_controlnets": "Mezcla de ControlNets"
|
||||
},
|
||||
"Flux": {
|
||||
"flux_canny_model_example": "Flux Canny Model",
|
||||
"flux_depth_lora_example": "Flux Depth LoRA",
|
||||
"flux_dev_checkpoint_example": "Flux Dev fp8",
|
||||
"flux_dev_full_text_to_image": "Flux Dev texto a imagen completo",
|
||||
"flux_fill_inpaint_example": "Flux Inpaint",
|
||||
"flux_fill_outpaint_example": "Flux Outpaint",
|
||||
"flux_kontext_dev_basic": "Flux Kontext Dev (Básico)",
|
||||
"flux_kontext_dev_grouped": "Flux Kontext Dev (Agrupado)",
|
||||
"flux_redux_model_example": "Flux Redux Model",
|
||||
"flux_schnell": "Flux Schnell fp8",
|
||||
"flux_schnell_full_text_to_image": "Flux Schnell texto a imagen completo"
|
||||
},
|
||||
"Image": {
|
||||
"hidream_e1_full": "HiDream E1 Completo",
|
||||
"hidream_i1_dev": "HiDream I1 Dev",
|
||||
"hidream_i1_fast": "HiDream I1 Rápido",
|
||||
"hidream_i1_full": "HiDream I1 Completo",
|
||||
"image_chroma_text_to_image": "Chroma texto a imagen",
|
||||
"image_cosmos_predict2_2B_t2i": "Cosmos Predict2 2B T2I",
|
||||
"image_lotus_depth_v1_1": "Lotus Depth",
|
||||
"image_omnigen2_image_edit": "OmniGen2 Edición de Imagen",
|
||||
"image_omnigen2_t2i": "OmniGen2 Texto a Imagen",
|
||||
"sd3_5_large_blur": "SD3.5 Grande Desenfoque",
|
||||
"sd3_5_large_canny_controlnet_example": "SD3.5 Grande Canny ControlNet",
|
||||
"sd3_5_large_depth": "SD3.5 Grande Profundidad",
|
||||
"sd3_5_simple_example": "SD3.5 Simple",
|
||||
"sdxl_refiner_prompt_example": "SDXL Refinador de Solicitud",
|
||||
"sdxl_revision_text_prompts": "SDXL Revisión de Solicitud de Texto",
|
||||
"sdxl_revision_zero_positive": "SDXL Revisión Cero Positivo",
|
||||
"sdxl_simple_example": "SDXL Simple",
|
||||
"sdxlturbo_example": "SDXL Turbo"
|
||||
},
|
||||
"Image API": {
|
||||
"api_bfl_flux_1_kontext_max_image": "BFL Flux.1 Kontext Max",
|
||||
"api_bfl_flux_1_kontext_multiple_images_input": "BFL Flux.1 Kontext Entrada de Múltiples Imágenes",
|
||||
"api_bfl_flux_1_kontext_pro_image": "BFL Flux.1 Kontext Pro",
|
||||
"api_bfl_flux_pro_t2i": "BFL Flux[Pro]: Texto a Imagen",
|
||||
"api_ideogram_v3_t2i": "Ideogram V3: Texto a Imagen",
|
||||
"api_luma_photon_i2i": "Luma Photon: Imagen a Imagen",
|
||||
"api_luma_photon_style_ref": "Luma Photon: Referencia de Estilo",
|
||||
"api_openai_dall_e_2_inpaint": "OpenAI: Dall-E 2 Rellenar",
|
||||
"api_openai_dall_e_2_t2i": "OpenAI: Dall-E 2 Texto a Imagen",
|
||||
"api_openai_dall_e_3_t2i": "OpenAI: Dall-E 3 Texto a Imagen",
|
||||
"api_openai_image_1_i2i": "OpenAI: GPT-Image-1 Imagen a Imagen",
|
||||
"api_openai_image_1_inpaint": "OpenAI: GPT-Image-1 Rellenar",
|
||||
"api_openai_image_1_multi_inputs": "OpenAI: GPT-Image-1 Múltiples Entradas",
|
||||
"api_openai_image_1_t2i": "OpenAI: GPT-Image-1 Texto a Imagen",
|
||||
"api_recraft_image_gen_with_color_control": "Recraft: Generación de Imagen con Control de Color",
|
||||
"api_recraft_image_gen_with_style_control": "Recraft: Generación de Imagen con Control de Estilo",
|
||||
"api_recraft_vector_gen": "Recraft: Generación de Vectores",
|
||||
"api_runway_reference_to_image": "Runway: Referencia a Imagen",
|
||||
"api_runway_text_to_image": "Runway: Texto a Imagen",
|
||||
"api_stability_ai_i2i": "Stability AI: Imagen a Imagen",
|
||||
"api_stability_ai_sd3_5_i2i": "Stability AI: SD3.5 Imagen a Imagen",
|
||||
"api_stability_ai_sd3_5_t2i": "Stability AI: SD3.5 Texto a Imagen",
|
||||
"api_stability_ai_stable_image_ultra_t2i": "Stability AI: Stable Image Ultra Texto a Imagen"
|
||||
},
|
||||
"LLM API": {
|
||||
"api_google_gemini": "Google Gemini: Chat",
|
||||
"api_openai_chat": "OpenAI: Chat"
|
||||
},
|
||||
"Upscaling": {
|
||||
"esrgan_example": "ESRGAN",
|
||||
"hiresfix_esrgan_workflow": "Flujo de Trabajo HiresFix ESRGAN",
|
||||
"hiresfix_latent_workflow": "Ampliación",
|
||||
"latent_upscale_different_prompt_model": "Ampliación Latente Modelo de Solicitud Diferente"
|
||||
},
|
||||
"Video": {
|
||||
"hunyuan_video_text_to_video": "Hunyuan Video Texto a Video",
|
||||
"image_to_video": "SVD Imagen a Video",
|
||||
"image_to_video_wan": "Wan 2.1 Imagen a Video",
|
||||
"ltxv_image_to_video": "LTXV Imagen a Video",
|
||||
"ltxv_text_to_video": "LTXV Texto a Video",
|
||||
"mochi_text_to_video_example": "Mochi Texto a Video",
|
||||
"text_to_video_wan": "Wan 2.1 Texto a Video",
|
||||
"txt_to_image_to_video": "SVD Texto a Imagen a Video",
|
||||
"video_cosmos_predict2_2B_video2world_480p_16fps": "Cosmos Predict2 2B Video2World 480p 16fps",
|
||||
"video_wan2_1_fun_camera_v1_1_14B": "Wan 2.1 Fun Camera 14B",
|
||||
"video_wan2_1_fun_camera_v1_1_1_3B": "Wan 2.1 Fun Camera 1.3B",
|
||||
"video_wan_vace_14B_ref2v": "Wan VACE Referencia a Video",
|
||||
"video_wan_vace_14B_t2v": "Wan VACE Texto a Video",
|
||||
"video_wan_vace_14B_v2v": "Wan VACE Control Video",
|
||||
"video_wan_vace_flf2v": "Wan VACE Primer-Ultimo Fotograma",
|
||||
"video_wan_vace_inpainting": "Wan VACE Inpainting",
|
||||
"video_wan_vace_outpainting": "Wan VACE Outpainting",
|
||||
"wan2_1_flf2v_720_f16": "Wan 2.1 FLF2V 720p F16",
|
||||
"wan2_1_fun_control": "Wan 2.1 ControlNet",
|
||||
"wan2_1_fun_inp": "Wan 2.1 Inpainting"
|
||||
},
|
||||
"Video API": {
|
||||
"api_hailuo_minimax_i2v": "MiniMax: Imagen a Video",
|
||||
"api_hailuo_minimax_t2v": "MiniMax: Texto a Video",
|
||||
"api_kling_effects": "Kling: Efectos de Video",
|
||||
"api_kling_flf": "Kling: FLF2V",
|
||||
"api_kling_i2v": "Kling: Imagen a Video",
|
||||
"api_luma_i2v": "Luma: Imagen a Video",
|
||||
"api_luma_t2v": "Luma: Texto a Video",
|
||||
"api_moonvalley_image_to_video": "Moonvalley: Imagen a Video",
|
||||
"api_moonvalley_text_to_video": "Moonvalley: Texto a Video",
|
||||
"api_pika_i2v": "Pika: Imagen a Video",
|
||||
"api_pika_scene": "Pika Escenas: Imágenes a Video",
|
||||
"api_pixverse_i2v": "PixVerse: Imagen a Video",
|
||||
"api_pixverse_t2v": "PixVerse: Texto a Video",
|
||||
"api_pixverse_template_i2v": "PixVerse Plantillas: Imagen a Video",
|
||||
"api_runway_first_last_frame": "Runway: Primer Último Fotograma a Video",
|
||||
"api_runway_gen3a_turbo_image_to_video": "Runway: Gen3a Turbo Imagen a Video",
|
||||
"api_runway_gen4_turo_image_to_video": "Runway: Gen4 Turbo Imagen a Video",
|
||||
"api_veo2_i2v": "Veo2: Imagen a Video"
|
||||
}
|
||||
},
|
||||
"templateDescription": {
|
||||
"3D": {
|
||||
"3d_hunyuan3d_image_to_model": "Genera modelos 3D a partir de imágenes individuales usando Hunyuan3D 2.0.",
|
||||
"3d_hunyuan3d_multiview_to_model": "Genera modelos 3D a partir de múltiples vistas usando Hunyuan3D 2.0 MV.",
|
||||
"3d_hunyuan3d_multiview_to_model_turbo": "Genera modelos 3D a partir de múltiples vistas usando Hunyuan3D 2.0 MV Turbo.",
|
||||
"stable_zero123_example": "Genera vistas 3D a partir de imágenes individuales usando Stable Zero123."
|
||||
},
|
||||
"3D API": {
|
||||
"api_rodin_image_to_model": "Genera modelos 3D detallados a partir de una sola foto usando Rodin AI.",
|
||||
"api_rodin_multiview_to_model": "Esculpe modelos 3D completos usando reconstrucción multivista de Rodin.",
|
||||
"api_tripo_image_to_model": "Genera activos 3D profesionales a partir de imágenes 2D usando el motor Tripo.",
|
||||
"api_tripo_multiview_to_model": "Construye modelos 3D a partir de múltiples ángulos con el escáner avanzado de Tripo.",
|
||||
"api_tripo_text_to_model": "Crea objetos 3D a partir de descripciones con modelado basado en texto de Tripo."
|
||||
},
|
||||
"Area Composition": {
|
||||
"area_composition": "Genera imágenes controlando la composición con áreas definidas.",
|
||||
"area_composition_square_area_for_subject": "Genera imágenes con colocación consistente del sujeto usando composición de áreas."
|
||||
},
|
||||
"Audio": {
|
||||
"audio_ace_step_1_m2m_editing": "Edita canciones existentes para cambiar el estilo y la letra usando ACE-Step v1 M2M.",
|
||||
"audio_ace_step_1_t2a_instrumentals": "Genera música instrumental a partir de texto usando ACE-Step v1.",
|
||||
"audio_ace_step_1_t2a_song": "Genera canciones con voz a partir de texto usando ACE-Step v1, soportando múltiples idiomas y personalización de estilo.",
|
||||
"audio_stable_audio_example": "Genera audio a partir de descripciones de texto usando Stable Audio."
|
||||
},
|
||||
"Basics": {
|
||||
"default": "Genera imágenes a partir de descripciones de texto.",
|
||||
"embedding_example": "Genera imágenes usando inversión textual para estilos consistentes.",
|
||||
"gligen_textbox_example": "Genera imágenes con colocación precisa de objetos usando cajas de texto.",
|
||||
"image2image": "Transforma imágenes existentes usando indicaciones de texto.",
|
||||
"inpaint_example": "Edita partes específicas de imágenes de manera fluida.",
|
||||
"inpaint_model_outpainting": "Extiende imágenes más allá de sus límites originales.",
|
||||
"lora": "Genera imágenes con modelos LoRA para estilos o temas especializados.",
|
||||
"lora_multiple": "Genera imágenes combinando múltiples modelos LoRA."
|
||||
},
|
||||
"ControlNet": {
|
||||
"2_pass_pose_worship": "Genera imágenes guiadas por referencias de pose usando ControlNet.",
|
||||
"controlnet_example": "Genera imágenes guiadas por imágenes de garabato usando ControlNet.",
|
||||
"depth_controlnet": "Genera imágenes guiadas por información de profundidad usando ControlNet.",
|
||||
"depth_t2i_adapter": "Genera imágenes guiadas por información de profundidad usando el adaptador T2I.",
|
||||
"mixing_controlnets": "Genera imágenes combinando múltiples modelos ControlNet."
|
||||
},
|
||||
"Flux": {
|
||||
"flux_canny_model_example": "Genera imágenes guiadas por detección de bordes usando Flux Canny.",
|
||||
"flux_depth_lora_example": "Genera imágenes guiadas por información de profundidad usando Flux LoRA.",
|
||||
"flux_dev_checkpoint_example": "Genera imágenes usando la versión cuantizada fp8 de Flux Dev. Ideal para dispositivos con poca VRAM, solo requiere un archivo de modelo, pero la calidad es ligeramente inferior a la versión completa.",
|
||||
"flux_dev_full_text_to_image": "Genera imágenes de alta calidad con la versión completa de Flux Dev. Requiere más VRAM y múltiples archivos de modelo, pero ofrece la mejor adherencia a la indicación y calidad de imagen.",
|
||||
"flux_fill_inpaint_example": "Rellena partes faltantes de imágenes usando inpainting de Flux.",
|
||||
"flux_fill_outpaint_example": "Extiende imágenes más allá de los límites usando outpainting de Flux.",
|
||||
"flux_kontext_dev_basic": "Edita imágenes usando Flux Kontext con visibilidad total de nodos, ideal para aprender el flujo de trabajo.",
|
||||
"flux_kontext_dev_grouped": "Versión simplificada de Flux Kontext con nodos agrupados para un espacio de trabajo más limpio.",
|
||||
"flux_redux_model_example": "Genera imágenes transfiriendo el estilo de imágenes de referencia usando Flux Redux.",
|
||||
"flux_schnell": "Genera imágenes rápidamente con la versión cuantizada fp8 de Flux Schnell. Perfecto para hardware de gama baja, solo requiere 4 pasos.",
|
||||
"flux_schnell_full_text_to_image": "Genera imágenes rápidamente con la versión completa de Flux Schnell. Licencia Apache2.0, solo requiere 4 pasos manteniendo buena calidad."
|
||||
},
|
||||
"Image": {
|
||||
"hidream_e1_full": "Edita imágenes con HiDream E1 - Modelo profesional de edición de imágenes por lenguaje natural.",
|
||||
"hidream_i1_dev": "Genera imágenes con HiDream I1 Dev - Versión equilibrada con 28 pasos de inferencia, adecuada para hardware medio.",
|
||||
"hidream_i1_fast": "Genera imágenes rápidamente con HiDream I1 Fast - Versión ligera con 16 pasos, ideal para previsualizaciones rápidas.",
|
||||
"hidream_i1_full": "Genera imágenes con HiDream I1 Full - Versión completa con 50 pasos para la máxima calidad.",
|
||||
"image_chroma_text_to_image": "Chroma está modificado de Flux y tiene algunos cambios en la arquitectura.",
|
||||
"image_cosmos_predict2_2B_t2i": "Genera imágenes con Cosmos-Predict2 2B T2I, logrando generación física precisa, alta fidelidad y gran detalle.",
|
||||
"image_lotus_depth_v1_1": "Ejecuta Lotus Depth en ComfyUI para estimación de profundidad monocular eficiente y detallada.",
|
||||
"image_omnigen2_image_edit": "Edita imágenes con instrucciones en lenguaje natural usando las avanzadas capacidades de edición de imagen y soporte de texto de OmniGen2.",
|
||||
"image_omnigen2_t2i": "Genera imágenes de alta calidad a partir de texto usando el modelo multimodal unificado 7B de OmniGen2 con arquitectura de doble vía.",
|
||||
"sd3_5_large_blur": "Genera imágenes guiadas por imágenes de referencia borrosas usando SD 3.5.",
|
||||
"sd3_5_large_canny_controlnet_example": "Genera imágenes guiadas por detección de bordes usando SD 3.5 Canny ControlNet.",
|
||||
"sd3_5_large_depth": "Genera imágenes guiadas por información de profundidad usando SD 3.5.",
|
||||
"sd3_5_simple_example": "Genera imágenes usando SD 3.5.",
|
||||
"sdxl_refiner_prompt_example": "Mejora imágenes SDXL usando modelos refinadores.",
|
||||
"sdxl_revision_text_prompts": "Genera imágenes transfiriendo conceptos de imágenes de referencia usando SDXL Revision.",
|
||||
"sdxl_revision_zero_positive": "Genera imágenes usando tanto indicaciones de texto como imágenes de referencia con SDXL Revision.",
|
||||
"sdxl_simple_example": "Genera imágenes de alta calidad usando SDXL.",
|
||||
"sdxlturbo_example": "Genera imágenes en un solo paso usando SDXL Turbo."
|
||||
},
|
||||
"Image API": {
|
||||
"api_bfl_flux_1_kontext_max_image": "Edita imágenes con Flux.1 Kontext max image.",
|
||||
"api_bfl_flux_1_kontext_multiple_images_input": "Introduce múltiples imágenes y edítalas con Flux.1 Kontext.",
|
||||
"api_bfl_flux_1_kontext_pro_image": "Edita imágenes con Flux.1 Kontext pro image.",
|
||||
"api_bfl_flux_pro_t2i": "Genera imágenes con excelente seguimiento de indicaciones y calidad visual usando FLUX.1 Pro.",
|
||||
"api_ideogram_v3_t2i": "Genera imágenes de calidad profesional con excelente alineación de indicaciones, fotorrealismo y renderizado de texto usando Ideogram V3.",
|
||||
"api_luma_photon_i2i": "Guía la generación de imágenes usando una combinación de imágenes e indicaciones.",
|
||||
"api_luma_photon_style_ref": "Genera imágenes combinando referencias de estilo con control preciso usando Luma Photon.",
|
||||
"api_openai_dall_e_2_inpaint": "Edita imágenes usando inpainting con la API OpenAI Dall-E 2.",
|
||||
"api_openai_dall_e_2_t2i": "Genera imágenes a partir de texto usando la API OpenAI Dall-E 2.",
|
||||
"api_openai_dall_e_3_t2i": "Genera imágenes a partir de texto usando la API OpenAI Dall-E 3.",
|
||||
"api_openai_image_1_i2i": "Genera imágenes a partir de imágenes usando la API OpenAI GPT Image 1.",
|
||||
"api_openai_image_1_inpaint": "Edita imágenes usando inpainting con la API OpenAI GPT Image 1.",
|
||||
"api_openai_image_1_multi_inputs": "Genera imágenes a partir de múltiples entradas usando la API OpenAI GPT Image 1.",
|
||||
"api_openai_image_1_t2i": "Genera imágenes a partir de texto usando la API OpenAI GPT Image 1.",
|
||||
"api_recraft_image_gen_with_color_control": "Genera imágenes con paletas de color personalizadas y visuales de marca usando Recraft.",
|
||||
"api_recraft_image_gen_with_style_control": "Controla el estilo con ejemplos visuales, alinea la posición y ajusta objetos. Guarda y comparte estilos para consistencia de marca.",
|
||||
"api_recraft_vector_gen": "Genera imágenes vectoriales de alta calidad a partir de texto usando el generador de vectores IA de Recraft.",
|
||||
"api_runway_reference_to_image": "Genera nuevas imágenes basadas en estilos y composiciones de referencia con Runway.",
|
||||
"api_runway_text_to_image": "Genera imágenes de alta calidad a partir de texto usando el modelo IA de Runway.",
|
||||
"api_stability_ai_i2i": "Transforma imágenes con generación de alta calidad usando Stability AI, ideal para edición profesional y transferencia de estilo.",
|
||||
"api_stability_ai_sd3_5_i2i": "Genera imágenes de alta calidad con excelente adherencia a la indicación. Perfecto para uso profesional a 1 megapíxel.",
|
||||
"api_stability_ai_sd3_5_t2i": "Genera imágenes de alta calidad con excelente adherencia a la indicación. Perfecto para uso profesional a 1 megapíxel.",
|
||||
"api_stability_ai_stable_image_ultra_t2i": "Genera imágenes de alta calidad con excelente adherencia a la indicación. Perfecto para uso profesional a 1 megapíxel."
|
||||
},
|
||||
"LLM API": {
|
||||
"api_google_gemini": "Experimenta la IA multimodal de Google con las capacidades de razonamiento de Gemini.",
|
||||
"api_openai_chat": "Interactúa con los avanzados modelos de lenguaje de OpenAI para conversaciones inteligentes."
|
||||
},
|
||||
"Upscaling": {
|
||||
"esrgan_example": "Escala imágenes usando modelos ESRGAN para mejorar la calidad.",
|
||||
"hiresfix_esrgan_workflow": "Escala imágenes usando modelos ESRGAN durante pasos intermedios.",
|
||||
"hiresfix_latent_workflow": "Escala imágenes mejorando la calidad en el espacio latente.",
|
||||
"latent_upscale_different_prompt_model": "Escala imágenes cambiando las indicaciones entre pasadas."
|
||||
},
|
||||
"Video": {
|
||||
"hunyuan_video_text_to_video": "Genera videos a partir de texto usando el modelo Hunyuan.",
|
||||
"image_to_video": "Genera videos a partir de imágenes fijas.",
|
||||
"image_to_video_wan": "Genera videos a partir de imágenes usando Wan 2.1.",
|
||||
"ltxv_image_to_video": "Genera videos a partir de imágenes fijas.",
|
||||
"ltxv_text_to_video": "Genera videos a partir de texto.",
|
||||
"mochi_text_to_video_example": "Genera videos a partir de texto usando el modelo Mochi.",
|
||||
"text_to_video_wan": "Genera videos a partir de texto usando Wan 2.1.",
|
||||
"txt_to_image_to_video": "Genera videos creando primero imágenes a partir de texto.",
|
||||
"video_cosmos_predict2_2B_video2world_480p_16fps": "Genera videos con Cosmos-Predict2 2B Video2World, logrando simulaciones físicas precisas, alta fidelidad y consistencia.",
|
||||
"video_wan2_1_fun_camera_v1_1_14B": "Genera videos de alta calidad con control avanzado de cámara usando el modelo completo de 14B.",
|
||||
"video_wan2_1_fun_camera_v1_1_1_3B": "Genera videos dinámicos con movimientos de cámara cinematográficos usando Wan 2.1 Fun Camera 1.3B.",
|
||||
"video_wan_vace_14B_ref2v": "Crea videos que coinciden con el estilo y contenido de una imagen de referencia.",
|
||||
"video_wan_vace_14B_t2v": "Transforma descripciones de texto en videos de alta calidad. Soporta 480p y 720p con el modelo VACE-14B.",
|
||||
"video_wan_vace_14B_v2v": "Genera videos controlando videos de entrada e imágenes de referencia usando Wan VACE.",
|
||||
"video_wan_vace_flf2v": "Genera transiciones suaves definiendo fotogramas iniciales y finales. Soporta secuencias de fotogramas personalizadas.",
|
||||
"video_wan_vace_inpainting": "Edita regiones específicas en videos preservando el contenido circundante.",
|
||||
"video_wan_vace_outpainting": "Genera videos extendidos expandiendo el tamaño usando Wan VACE outpainting.",
|
||||
"wan2_1_flf2v_720_f16": "Genera videos controlando primer y último fotograma usando Wan 2.1 FLF2V.",
|
||||
"wan2_1_fun_control": "Genera videos guiados por pose, profundidad y bordes usando Wan 2.1 ControlNet.",
|
||||
"wan2_1_fun_inp": "Genera videos a partir de fotogramas iniciales y finales usando Wan 2.1 inpainting."
|
||||
},
|
||||
"Video API": {
|
||||
"api_hailuo_minimax_i2v": "Genera videos refinados a partir de imágenes y texto con integración CGI usando MiniMax.",
|
||||
"api_hailuo_minimax_t2v": "Genera videos de alta calidad directamente desde texto. Explora las capacidades avanzadas de IA de MiniMax para crear narrativas visuales diversas con efectos CGI profesionales.",
|
||||
"api_kling_effects": "Genera videos dinámicos aplicando efectos visuales a imágenes usando Kling.",
|
||||
"api_kling_flf": "Genera videos controlando los primeros y últimos fotogramas.",
|
||||
"api_kling_i2v": "Genera videos con excelente adherencia a la indicación para acciones, expresiones y movimientos de cámara usando Kling.",
|
||||
"api_luma_i2v": "Convierte imágenes estáticas en animaciones mágicas de alta calidad al instante.",
|
||||
"api_luma_t2v": "Genera videos de alta calidad usando indicaciones simples.",
|
||||
"api_moonvalley_image_to_video": "Genera videos cinematográficos 1080p a partir de una imagen usando un modelo entrenado solo con datos licenciados.",
|
||||
"api_moonvalley_text_to_video": "Genera videos cinematográficos 1080p a partir de texto usando un modelo entrenado solo con datos licenciados.",
|
||||
"api_pika_i2v": "Genera videos animados suaves a partir de imágenes estáticas usando Pika AI.",
|
||||
"api_pika_scene": "Genera videos que incorporan múltiples imágenes de entrada usando Pika Scenes.",
|
||||
"api_pixverse_i2v": "Genera videos dinámicos a partir de imágenes estáticas con movimiento y efectos usando PixVerse.",
|
||||
"api_pixverse_t2v": "Genera videos con interpretación precisa de indicaciones y dinámica visual impresionante.",
|
||||
"api_pixverse_template_i2v": "Genera videos dinámicos a partir de imágenes estáticas con movimiento y efectos usando PixVerse.",
|
||||
"api_runway_first_last_frame": "Genera transiciones de video suaves entre dos fotogramas clave con precisión de Runway.",
|
||||
"api_runway_gen3a_turbo_image_to_video": "Genera videos cinematográficos a partir de imágenes estáticas usando Runway Gen3a Turbo.",
|
||||
"api_runway_gen4_turo_image_to_video": "Genera videos dinámicos a partir de imágenes usando Runway Gen4 Turbo.",
|
||||
"api_veo2_i2v": "Genera videos a partir de imágenes usando la API Google Veo2."
|
||||
}
|
||||
},
|
||||
"title": "Comienza con una Plantilla",
|
||||
"useCasesSelected": "{count} casos de uso"
|
||||
},
|
||||
|
||||