Compare commits

...

7 Commits

Author SHA1 Message Date
snomiao
4bdfb57f9b Merge main into bl-selective-snapshot-update
Resolved conflict in pr-update-playwright-expectations.yaml by keeping the detailed comments from the feature branch while adopting the updated workflow name from main.
2025-10-19 03:46:37 +00:00
bymyself
ebd20a815e Merge latest main (v1.29.2) into bl-selective-snapshot-update 2025-10-14 11:56:10 -07:00
bymyself
9616b91700 Merge latest main into bl-selective-snapshot-update
Resolved conflicts by:
- Keeping our selective snapshot update logic in workflows
- Accepting main's Vue node refactoring changes
2025-10-14 11:03:41 -07:00
bymyself
939d1a0e44 Merge branch 'main' into bl-selective-snapshot-update
Resolved conflicts in update-playwright-expectations.yaml by:
- Keeping main's improvements: concurrency control, comment reactions, better branch checkout
- Keeping our selective snapshot update logic with validation
- Keeping our workflow summary generation
- Combined both sets of improvements for a robust solution

Fixed eslint configuration issue where vite.config.mts was in both allowDefaultProject and tsconfig.json
2025-10-12 20:27:25 -07:00
bymyself
df6723415b Address review comments and improve workflow
- Add workflow documentation explaining selective update strategy
- Improve logging with clear output formatting (no emojis)
- Add GitHub Actions workflow summary with file change details
- Fix command injection vulnerability by validating test paths with regex
- Add error handling for JSON.parse with descriptive messages
- Replace non-null assertion with safer null checking pattern
- Add explicit error handling for TypeScript script execution
2025-10-12 16:00:38 -07:00
Benjamin Lu
83ff415815 Merge remote-tracking branch 'origin/main' into bl-selective-snapshot-update 2025-10-07 19:29:01 -07:00
Benjamin Lu
87d3111d5c Only update snapshots of failed 2025-10-06 19:07:22 -07:00
7 changed files with 1531 additions and 14 deletions

View File

@@ -143,7 +143,7 @@ jobs:
merge-reports:
needs: [playwright-tests-chromium-sharded]
runs-on: ubuntu-latest
if: ${{ !cancelled() }}
if: ${{ always() && !cancelled() }}
steps:
- name: Checkout repository
uses: actions/checkout@v5
@@ -169,6 +169,26 @@ jobs:
PLAYWRIGHT_JSON_OUTPUT_NAME=playwright-report/report.json \
pnpm exec playwright merge-reports --reporter=json ./all-blob-reports
- name: Build failed screenshot manifest
if: ${{ needs.playwright-tests-chromium-sharded.result == 'failure' }}
run: |
set -euo pipefail
if ! pnpm tsx scripts/cicd/build-failed-screenshot-manifest.ts; then
echo "ERROR: Failed to generate screenshot manifest"
echo "This may indicate an issue with the Playwright JSON report or the manifest script"
exit 1
fi
working-directory: ComfyUI_frontend
- name: Upload failed screenshot manifest
if: ${{ needs.playwright-tests-chromium-sharded.result == 'failure' }}
uses: actions/upload-artifact@v4
with:
name: failed-screenshot-tests
path: ComfyUI_frontend/ci-rerun/*.txt
retention-days: 7
if-no-files-found: ignore
- name: Upload HTML report
uses: actions/upload-artifact@v4
with:

View File

@@ -1,4 +1,12 @@
# Setting test expectation screenshots for Playwright
#
# This workflow uses a selective snapshot update strategy:
# 1. When tests fail in CI, they generate a manifest of failed test locations (file:line)
# 2. This workflow downloads that manifest from the failed test run artifacts
# 3. Only the failed tests are re-run with --update-snapshots (much faster than running all tests)
# 4. Updated snapshots are committed back to the PR branch
#
# Trigger: Add label "New Browser Test Expectations" OR comment "/update-playwright" on PR
name: "PR: Update Playwright Expectations"
on:
@@ -16,7 +24,7 @@ jobs:
runs-on: ubuntu-latest
if: >
( github.event_name == 'pull_request' && github.event.label.name == 'New Browser Test Expectations' ) ||
( github.event.issue.pull_request &&
( github.event.issue.pull_request &&
github.event_name == 'issue_comment' &&
(
github.event.comment.author_association == 'OWNER' ||
@@ -55,43 +63,211 @@ jobs:
uses: actions/checkout@v5
with:
ref: ${{ steps.get-branch.outputs.branch }}
- name: Setup Frontend
uses: ./.github/actions/setup-frontend
with:
include_build_step: true
- name: Setup ComfyUI Server
uses: ./.github/actions/setup-comfyui-server
with:
launch_server: true
- name: Setup Playwright
uses: ./.github/actions/setup-playwright
- name: Run Playwright tests and update snapshots
- name: Locate failed screenshot manifest artifact
id: locate-manifest
uses: actions/github-script@v8
with:
script: |
const { owner, repo } = context.repo
let headSha = ''
if (context.eventName === 'pull_request') {
headSha = context.payload.pull_request.head.sha
} else if (context.eventName === 'issue_comment') {
const prNumber = context.payload.issue.number
const pr = await github.rest.pulls.get({ owner, repo, pull_number: prNumber })
headSha = pr.data.head.sha
}
if (!headSha) {
core.setOutput('run_id', '')
core.setOutput('has_manifest', 'false')
return
}
const { data } = await github.rest.actions.listWorkflowRuns({
owner,
repo,
workflow_id: 'tests-ci.yaml',
head_sha: headSha,
event: 'pull_request',
per_page: 1,
})
const run = data.workflow_runs?.[0]
let has = 'false'
let runId = ''
if (run) {
runId = String(run.id)
const { data: { artifacts = [] } } = await github.rest.actions.listWorkflowRunArtifacts({
owner,
repo,
run_id: run.id,
per_page: 100,
})
if (artifacts.some(a => a.name === 'failed-screenshot-tests' && !a.expired)) has = 'true'
}
core.setOutput('run_id', runId)
core.setOutput('has_manifest', has)
- name: Download failed screenshot manifest
if: steps.locate-manifest.outputs.has_manifest == 'true'
uses: actions/download-artifact@v4
with:
run-id: ${{ steps.locate-manifest.outputs.run_id }}
name: failed-screenshot-tests
path: ci-rerun
- name: Re-run failed screenshot tests and update snapshots
id: playwright-tests
run: pnpm exec playwright test --update-snapshots
continue-on-error: true
run: |
set -euo pipefail
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Selective Snapshot Update"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Check if manifest exists
if [ ! -d ci-rerun ]; then
echo "ERROR: No manifest found in ci-rerun/ directory"
echo " This means no failed screenshot tests were detected in the latest CI run."
echo " Please ensure tests have been run and failures were recorded."
exit 1
fi
shopt -s nullglob
files=(ci-rerun/*.txt)
if [ ${#files[@]} -eq 0 ]; then
echo "ERROR: No manifest files found in ci-rerun/"
echo " Expected files like: chromium.txt, chromium-2x.txt, mobile-chrome.txt"
exit 1
fi
echo "Found ${#files[@]} project manifest(s):"
for f in "${files[@]}"; do
project="$(basename "$f" .txt)"
count=$(grep -c . "$f" 2>/dev/null || echo "0")
echo " - $project: $count failed test(s)"
done
echo ""
# Re-run tests per project
total_tests=0
for f in "${files[@]}"; do
project="$(basename "$f" .txt)"
mapfile -t lines < "$f"
filtered=( )
# Validate and sanitize test paths to prevent command injection
for l in "${lines[@]}"; do
# Skip empty lines
[ -z "$l" ] && continue
# Validate format: must be browser_tests/...spec.ts:number
if [[ "$l" =~ ^browser_tests/.+\.spec\.ts:[0-9]+$ ]]; then
filtered+=("$l")
else
echo "WARNING: Skipping invalid test path: $l"
fi
done
if [ ${#filtered[@]} -eq 0 ]; then
echo "WARNING: Skipping $project (no valid tests in manifest)"
continue
fi
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Updating snapshots for project: $project"
echo " Re-running ${#filtered[@]} failed test(s)..."
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
PLAYWRIGHT_JSON_OUTPUT_NAME=playwright-report/report.json \
pnpm exec playwright test --project="$project" --update-snapshots \
--reporter=line --reporter=html \
"${filtered[@]}"
total_tests=$((total_tests + ${#filtered[@]}))
echo ""
done
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Completed snapshot updates for $total_tests test(s)"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
- uses: actions/upload-artifact@v4
if: always()
with:
name: playwright-report
path: ./playwright-report/
retention-days: 30
- name: Debugging info
run: |
echo "PR: ${{ github.event.issue.number }}"
echo "Branch: ${{ steps.get-branch.outputs.branch }}"
git status
- name: Commit updated expectations
id: commit
run: |
git config --global user.name 'github-actions'
git config --global user.email 'github-actions@github.com'
git add browser_tests
if git diff --cached --quiet; then
echo "No changes to commit"
echo "No expectation updates detected; skipping commit."
echo "changed=false" >> $GITHUB_OUTPUT
else
# Count changed snapshots
changed_count=$(git diff --cached --name-only browser_tests | wc -l)
echo "changed=true" >> $GITHUB_OUTPUT
echo "count=$changed_count" >> $GITHUB_OUTPUT
git commit -m "[automated] Update test expectations"
git push origin ${{ steps.get-branch.outputs.branch }}
fi
- name: Generate workflow summary
if: always()
run: |
echo "## Snapshot Update Summary" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
if [ "${{ steps.commit.outputs.changed }}" = "true" ]; then
echo "**${{ steps.commit.outputs.count }} snapshot(s) updated**" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "<details>" >> $GITHUB_STEP_SUMMARY
echo "<summary>View updated files</summary>" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
git diff HEAD~1 --name-only browser_tests 2>/dev/null || echo "No git history available" >> $GITHUB_STEP_SUMMARY
echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
echo "</details>" >> $GITHUB_STEP_SUMMARY
elif [ "${{ steps.commit.outputs.changed }}" = "false" ]; then
echo "No snapshot changes detected" >> $GITHUB_STEP_SUMMARY
else
echo "WARNING: Snapshot update may have failed - check logs above" >> $GITHUB_STEP_SUMMARY
fi
echo "" >> $GITHUB_STEP_SUMMARY
echo "---" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Strategy:** Selective snapshot update (only failed tests re-run)" >> $GITHUB_STEP_SUMMARY
- name: Add Done Reaction
uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9
if: github.event_name == 'issue_comment'

View File

@@ -0,0 +1,755 @@
# Playwright Selective Test Rerun Alternatives
This document analyzes alternatives for selectively re-running only failed Playwright tests for snapshot updates, comparing native Playwright features with the current custom manifest approach used in this project.
## Table of Contents
- [Current Approach](#current-approach)
- [Native Playwright Features](#native-playwright-features)
- [Playwright Reporter Options](#playwright-reporter-options)
- [GitHub Actions Integration Patterns](#github-actions-integration-patterns)
- [Third-Party Solutions](#third-party-solutions)
- [Comparison and Recommendations](#comparison-and-recommendations)
---
## Current Approach
### Implementation
The project currently uses a **custom manifest-based approach** that:
1. **Generates a manifest** of failed screenshot tests after CI runs
- Script: `scripts/cicd/build-failed-screenshot-manifest.ts`
- Parses JSON report to find tests with failed screenshot assertions
- Creates per-project text files: `ci-rerun/{project}.txt`
- Format: `file_path:line_number` (e.g., `browser_tests/menu.spec.ts:42`)
2. **Stores manifest as GitHub artifact**
- Artifact name: `failed-screenshot-tests`
- Retention: 7 days
- Only uploaded when chromium sharded tests fail
3. **Downloads manifest in update workflow**
- Workflow: `.github/workflows/update-playwright-expectations.yaml`
- Triggered by: PR label "New Browser Test Expectations" or `/update-playwright` comment
- Falls back to full test suite if manifest not found
4. **Re-runs only failed tests**
```bash
for f in ci-rerun/*.txt; do
project="$(basename "$f" .txt)"
mapfile -t lines < "$f"
# Filter empty lines
pnpm exec playwright test --project="$project" --update-snapshots "${filtered[@]}"
done
```
### Advantages
- ✅ Works across workflow runs and different trigger mechanisms
- ✅ Survives beyond single workflow execution
- ✅ Precise control over which tests to re-run
- ✅ Supports multiple projects with separate manifests
- ✅ Works with sharded test runs (merged report)
- ✅ Platform-agnostic approach (works on any CI/CD platform)
### Disadvantages
- ❌ Custom implementation requires maintenance
- ❌ Requires parsing JSON report format (could break with Playwright updates)
- ❌ Additional artifact storage needed
- ❌ More complex than native solutions
---
## Native Playwright Features
### 1. `--last-failed` CLI Flag
**Availability:** Playwright v1.44.0+ (May 2024)
#### How It Works
```bash
# First run - execute all tests
npx playwright test
# Second run - only re-run failed tests
npx playwright test --last-failed
```
Playwright maintains a `.last-run.json` file in the `test-results/` directory that tracks failed tests.
#### CLI Examples
```bash
# Run only failed tests from last run
npx playwright test --last-failed
# Update snapshots for only failed tests
npx playwright test --last-failed --update-snapshots
# Combine with project filtering
npx playwright test --last-failed --project=chromium
# Debug failed tests
npx playwright test --last-failed --debug
```
#### File Location and Format
- **Location:** `test-results/.last-run.json`
- **Format:** JSON object containing failed test information
- **Structure:** Contains a `failedTests: []` array with test identifiers
- **Persistence:** Cleared when all tests pass on subsequent run
#### Advantages
- ✅ Built into Playwright (no custom code)
- ✅ Simple CLI flag
- ✅ Automatically maintained by Playwright
- ✅ Works with all Playwright features (debug, UI mode, etc.)
#### Limitations
- ❌ **Not designed for CI/CD distributed testing** (per Playwright maintainers)
- ❌ **Intended for local development only** ("inner loop scenario")
- ❌ Cleared on new test runs (doesn't persist across clean environments)
- ❌ **GitHub Actions starts with clean environment** - `.last-run.json` not available on retry
- ❌ **Doesn't work with sharded tests** - each shard creates its own `.last-run.json`
- ❌ No native way to merge `.last-run.json` across shards
- ❌ Not designed for cross-workflow persistence
#### CI/CD Workaround (Not Recommended)
To use `--last-failed` in GitHub Actions, you would need to:
```yaml
- name: Run Playwright tests
id: playwright-test
run: npx playwright test
- name: Upload last run state
if: failure()
uses: actions/upload-artifact@v4
with:
name: last-run-state
path: test-results/.last-run.json
# In retry workflow:
- name: Download last run state
uses: actions/download-artifact@v4
with:
name: last-run-state
path: test-results/
- name: Rerun failed tests
run: npx playwright test --last-failed --update-snapshots
```
**Why This Isn't Ideal:**
- Playwright maintainers explicitly state this is not the intended use case
- Doesn't work well with sharded tests (multiple `.last-run.json` files)
- Requires manual artifact management
- More complex than the current custom approach for this use case
### 2. File:Line Syntax for Specific Tests
Playwright supports running tests at specific line numbers:
```bash
# Run a specific test at line 42
npx playwright test tests/example.spec.ts:42
# Multiple tests
npx playwright test tests/file1.spec.ts:10 tests/file2.spec.ts:25
# With snapshot updates
npx playwright test tests/example.spec.ts:42 --update-snapshots
# With project selection
npx playwright test --project=chromium tests/example.spec.ts:42
```
This is **exactly the format** the current custom manifest uses, making it compatible with Playwright's native CLI.
### 3. Test Filtering Options
```bash
# Filter by grep pattern
npx playwright test -g "screenshot"
# Inverse grep
npx playwright test --grep-invert "mobile"
# By project
npx playwright test --project=chromium
# Multiple projects
npx playwright test --project=chromium --project=firefox
# Specific directory
npx playwright test tests/screenshots/
```
---
## Playwright Reporter Options
### 1. JSON Reporter
**Purpose:** Machine-readable test results
#### Configuration
```typescript
// playwright.config.ts
export default defineConfig({
reporter: [
['json', { outputFile: 'results.json' }]
]
})
```
Or via environment variable:
```bash
PLAYWRIGHT_JSON_OUTPUT_NAME=results.json npx playwright test --reporter=json
```
#### Output Structure
```json
{
"stats": {
"expected": 100,
"unexpected": 5,
"flaky": 2,
"skipped": 3
},
"suites": [
{
"title": "Test Suite",
"specs": [
{
"file": "browser_tests/example.spec.ts",
"line": 42,
"tests": [
{
"projectId": "chromium",
"results": [
{
"status": "failed",
"attachments": [
{ "contentType": "image/png" }
]
}
]
}
]
}
]
}
]
}
```
**This is the format** the current `build-failed-screenshot-manifest.ts` script parses.
#### Advantages
- ✅ Stable, documented JSON schema (`@playwright/test/reporter`)
- ✅ Includes all test metadata (file, line, project, status, attachments)
- ✅ Can be used programmatically
- ✅ Supports multiple reporters simultaneously
#### Current Project Usage
```yaml
# In tests-ci.yaml
PLAYWRIGHT_JSON_OUTPUT_NAME=playwright-report/report.json \
pnpm exec playwright test --project=${{ matrix.browser }} \
--reporter=list \
--reporter=html \
--reporter=json
```
### 2. Blob Reporter
**Purpose:** Merging sharded test reports
#### Configuration
```typescript
// playwright.config.ts
export default defineConfig({
reporter: process.env.CI ? 'blob' : 'html'
})
```
#### Usage with Sharding
```bash
# Run sharded test with blob output
npx playwright test --shard=1/4 --reporter=blob
# Merge blob reports
npx playwright merge-reports --reporter=html ./all-blob-reports
npx playwright merge-reports --reporter=json ./all-blob-reports
```
#### Current Project Usage
```yaml
# Sharded chromium tests
- run: pnpm exec playwright test --project=chromium --shard=${{ matrix.shardIndex }}/${{ matrix.shardTotal }} --reporter=blob
env:
PLAYWRIGHT_BLOB_OUTPUT_DIR: ../blob-report
# Merge reports job
- run: |
pnpm exec playwright merge-reports --reporter=html ./all-blob-reports
PLAYWRIGHT_JSON_OUTPUT_NAME=playwright-report/report.json \
pnpm exec playwright merge-reports --reporter=json ./all-blob-reports
```
#### Advantages
- ✅ Designed for distributed testing
- ✅ Can merge into any reporter format (HTML, JSON, etc.)
- ✅ Preserves all test information across shards
#### Blob Reporter and `--last-failed`
- ❌ Blob reports **do not contain** a merged `.last-run.json`
- ❌ Each shard creates its own `.last-run.json` that isn't included in blob
- ❌ GitHub issue [#30924](https://github.com/microsoft/playwright/issues/30924) requests this feature (currently unsupported)
### 3. Multiple Reporters
You can use multiple reporters simultaneously:
```typescript
export default defineConfig({
reporter: [
['list'], // Terminal output
['html'], // Browse results
['json', { outputFile: 'results.json' }], // Programmatic parsing
['junit', { outputFile: 'results.xml' }] // CI integration
]
})
```
Or via CLI:
```bash
npx playwright test --reporter=list --reporter=html --reporter=json
```
---
## GitHub Actions Integration Patterns
### Pattern 1: Comment-Triggered Workflow (JupyterLab Approach)
**Example:** [jupyterlab/jupyterlab-git](https://github.com/jupyterlab/jupyterlab-git/blob/main/.github/workflows/update-integration-tests.yml)
```yaml
name: Update Playwright Snapshots
on:
issue_comment:
types: [created, edited]
permissions:
contents: write
pull-requests: write
jobs:
update-snapshots:
# Only run for authorized users on PRs with specific comment
if: >
(github.event.issue.author_association == 'OWNER' ||
github.event.issue.author_association == 'COLLABORATOR' ||
github.event.issue.author_association == 'MEMBER'
) && github.event.issue.pull_request &&
contains(github.event.comment.body, 'please update snapshots')
runs-on: ubuntu-latest
steps:
- name: React to the triggering comment
run: gh api repos/${{ github.repository }}/issues/comments/${{ github.event.comment.id }}/reactions --raw-field 'content=+1'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Checkout
uses: actions/checkout@v4
with:
token: ${{ secrets.GITHUB_TOKEN }}
- name: Checkout PR branch
run: gh pr checkout ${{ github.event.issue.number }}
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Setup and run tests
run: |
npm ci
npx playwright install --with-deps
npx playwright test --update-snapshots
- name: Commit and push
run: |
git config user.name 'github-actions'
git config user.email 'github-actions@github.com'
git add .
git diff --cached --quiet || git commit -m "Update snapshots"
git push
```
#### Advantages
- ✅ Simple comment-based trigger
- ✅ Visual feedback (reaction on comment)
- ✅ Authorization checks built-in
- ✅ Auto-commits to PR branch
#### Limitations
- ❌ Runs **all** tests with `--update-snapshots` (not selective)
- ❌ No integration with failed test information from CI
### Pattern 2: Label-Based Trigger + Manifest (Current Approach)
```yaml
name: Update Playwright Expectations
on:
pull_request:
types: [labeled]
issue_comment:
types: [created]
jobs:
test:
if: >
( github.event_name == 'pull_request' &&
github.event.label.name == 'New Browser Test Expectations' ) ||
( github.event.issue.pull_request &&
startsWith(github.event.comment.body, '/update-playwright') )
steps:
# ... setup steps ...
- name: Locate failed screenshot manifest artifact
id: locate-manifest
uses: actions/github-script@v8
with:
script: |
const { owner, repo } = context.repo
let headSha = ''
if (context.eventName === 'pull_request') {
headSha = context.payload.pull_request.head.sha
} else if (context.eventName === 'issue_comment') {
const prNumber = context.payload.issue.number
const pr = await github.rest.pulls.get({ owner, repo, pull_number: prNumber })
headSha = pr.data.head.sha
}
const { data } = await github.rest.actions.listWorkflowRuns({
owner, repo,
workflow_id: 'tests-ci.yaml',
head_sha: headSha,
per_page: 1,
})
const run = data.workflow_runs?.[0]
let has = 'false'
if (run) {
const { data: { artifacts = [] } } = await github.rest.actions.listWorkflowRunArtifacts({
owner, repo, run_id: run.id
})
if (artifacts.some(a => a.name === 'failed-screenshot-tests' && !a.expired))
has = 'true'
}
core.setOutput('has_manifest', has)
- name: Download failed screenshot manifest
if: steps.locate-manifest.outputs.has_manifest == 'true'
uses: actions/download-artifact@v4
with:
run-id: ${{ steps.locate-manifest.outputs.run_id }}
name: failed-screenshot-tests
path: ComfyUI_frontend/ci-rerun
- name: Re-run failed screenshot tests
run: |
if [ ! -d ci-rerun ]; then
echo "No manifest found; running full suite"
pnpm exec playwright test --update-snapshots
exit 0
fi
for f in ci-rerun/*.txt; do
project="$(basename "$f" .txt)"
mapfile -t lines < "$f"
filtered=()
for l in "${lines[@]}"; do
[ -n "$l" ] && filtered+=("$l")
done
if [ ${#filtered[@]} -gt 0 ]; then
echo "Re-running ${#filtered[@]} tests for project $project"
pnpm exec playwright test --project="$project" --update-snapshots "${filtered[@]}"
fi
done
```
#### Advantages
- ✅ **Selective** - only re-runs failed screenshot tests
- ✅ Works across different trigger mechanisms (label or comment)
- ✅ Fallback to full suite if manifest not found
- ✅ Per-project manifests support multiple browser configurations
- ✅ Handles sharded tests via merged report
### Pattern 3: WordPress/Openverse Approach (Always Update)
Proposed pattern (not fully implemented):
1. CI always runs with `--update-snapshots` flag
2. If snapshots change, create/update a secondary branch
3. Open PR targeting the original PR branch
4. Developer reviews snapshot changes before merging
#### Advantages
- ✅ Always generates correct snapshots
- ✅ Snapshot changes are visible in separate PR
- ✅ No test failures due to mismatched snapshots
#### Limitations
- ❌ Creates multiple PRs
- ❌ More complex merge workflow
- ❌ Potential for snapshot changes to mask real issues
### Pattern 4: Manual Workflow Dispatch
```yaml
name: Update Snapshots
on:
workflow_dispatch:
inputs:
update-snapshots:
description: 'Update snapshots'
type: boolean
default: false
test-pattern:
description: 'Test pattern (optional)'
type: string
required: false
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Setup
run: |
npm ci
npx playwright install --with-deps
- name: Run tests
run: |
if [ "${{ inputs.update-snapshots }}" = "true" ]; then
FLAGS="--update-snapshots"
fi
PATTERN="${{ inputs.test-pattern }}"
npx playwright test ${PATTERN} ${FLAGS}
```
#### Advantages
- ✅ Full manual control
- ✅ Can specify test patterns
- ✅ Simple to understand
#### Limitations
- ❌ Requires manual triggering
- ❌ Not integrated with CI failures
---
## Third-Party Solutions
### Currents.dev - Last Failed GitHub Action
**Repository:** [currents-dev/playwright-last-failed](https://github.com/currents-dev/playwright-last-failed)
#### Purpose
Helps run last failed Playwright tests using Currents' cloud-based caching service.
#### Usage
```yaml
- name: Playwright Last Failed action
id: last-failed-action
uses: currents-dev/playwright-last-failed@v1
with:
pw-output-dir: test-results
matrix-index: ${{ matrix.shard }}
matrix-total: ${{ strategy.job-total }}
```
#### How It Works
- Uses Currents' cloud service to persist failed test information
- Supports sharded tests via matrix parameters
- Enables selective rerun of failed tests across workflow retries
#### Advantages
- ✅ Works with sharded tests
- ✅ Persists across workflow runs
- ✅ Supports GitHub Actions retry mechanism
- ✅ Handles distributed testing
#### Limitations
- ❌ **Requires Currents subscription** (third-party paid service)
- ❌ Dependency on external service
- ❌ Data sent to third-party cloud
- ❌ Additional cost
- ❌ Vendor lock-in
#### Recommendation
**Not suitable for this project** due to:
- External service dependency
- Cost implications
- The current custom solution is already working well
---
## Comparison and Recommendations
### Feature Matrix
| Feature | Current Approach | `--last-failed` | Currents | Comment Trigger Only |
|---------|-----------------|-----------------|----------|---------------------|
| Works with sharded tests | ✅ Yes | ❌ No | ✅ Yes | ✅ Yes |
| Persists across workflows | ✅ Yes | ❌ No | ✅ Yes | N/A |
| Selective reruns | ✅ Yes | ✅ Yes | ✅ Yes | ❌ No (runs all) |
| No external dependencies | ✅ Yes | ✅ Yes | ❌ No | ✅ Yes |
| Simple implementation | ⚠️ Medium | ✅ Simple | ✅ Simple | ✅ Simple |
| Maintenance overhead | ⚠️ Medium | ✅ Low | ✅ Low | ✅ Low |
| Works in CI/CD | ✅ Yes | ⚠️ Workaround | ✅ Yes | ✅ Yes |
| Cost | ✅ Free | ✅ Free | ❌ Paid | ✅ Free |
| Supports multiple projects | ✅ Yes | ✅ Yes | ✅ Yes | ✅ Yes |
### Why `--last-failed` Isn't Suitable (Currently)
1. **Not designed for CI/CD:** Playwright maintainers explicitly state it's for "inner loop scenario (local development)"
2. **Doesn't work with sharded tests:** Each shard creates its own `.last-run.json` with no native merge
3. **Clean environment issue:** GitHub Actions starts fresh, losing `.last-run.json`
4. **Feature request pending:** GitHub issue [#30924](https://github.com/microsoft/playwright/issues/30924) requests blob report integration (not yet implemented)
### Recommendations
#### Short Term: Keep Current Approach
**Verdict: The current custom manifest approach is the best solution for this project's needs.**
**Reasons:**
1. ✅ **Works perfectly with sharded tests** - merges results across 8 shards
2. ✅ **Persists across workflows** - artifact storage for 7 days
3. ✅ **Selective reruns** - only failed screenshot tests
4. ✅ **No external dependencies** - fully self-contained
5. ✅ **Uses stable Playwright JSON format** - typed via `@playwright/test/reporter`
6. ✅ **Already working well** - proven in production
**Minor Improvements:**
```typescript
// Add version check to warn if JSON schema changes
import { version } from '@playwright/test/package.json'
if (major(version) !== 1) {
console.warn('Playwright major version changed - verify JSON schema compatibility')
}
// Add more robust error handling
try {
const report: JSONReport = JSON.parse(raw)
} catch (error) {
const detail = error instanceof Error ? error.message : String(error)
throw new Error(`Failed to parse Playwright JSON report: ${detail}`)
}
// Consider adding tests for the manifest builder
// e.g., tests/cicd/build-failed-screenshot-manifest.test.ts
```
#### Long Term: Monitor Playwright Development
**Watch for these features:**
1. **Blob report + `.last-run.json` merge** - GitHub issue [#30924](https://github.com/microsoft/playwright/issues/30924)
2. **Native CI/CD support for `--last-failed`** - may never happen (by design)
3. **Report merging improvements** - GitHub issue [#33094](https://github.com/microsoft/playwright/issues/33094)
**Migration path if native support improves:**
```yaml
# Future potential approach (if Playwright adds this feature)
- name: Merge reports with last-run
run: |
npx playwright merge-reports --reporter=html ./all-blob-reports
npx playwright merge-reports --reporter=last-failed ./all-blob-reports
- name: Upload merged last-run
uses: actions/upload-artifact@v4
with:
name: last-run-state
path: test-results/.last-run.json
# In update workflow
- name: Download last-run state
uses: actions/download-artifact@v4
with:
name: last-run-state
path: test-results/
- name: Update snapshots for failed tests
run: npx playwright test --last-failed --update-snapshots
```
**However, this is speculative** - Playwright maintainers have indicated `--last-failed` is not intended for CI/CD.
#### Alternative: Simplify to Full Suite Reruns
If the custom manifest becomes too complex to maintain, consider:
```yaml
- name: Re-run ALL screenshot tests
run: |
# Simple grep-based filtering for screenshot tests
npx playwright test -g "screenshot" --update-snapshots
```
**Trade-offs:**
- ✅ Much simpler
- ✅ No custom scripts
- ❌ Slower (runs all screenshot tests, not just failed ones)
- ❌ Potentially updates snapshots that weren't actually failing
---
## Conclusion
The current custom manifest approach is **well-designed** and **appropriate** for this project's requirements:
1. **Handles sharded tests** - critical for CI performance
2. **Selective reruns** - saves time and resources
3. **Stable implementation** - uses documented Playwright JSON schema
4. **No external dependencies** - fully controlled
While `--last-failed` is a nice feature for **local development**, Playwright's own documentation and maintainer comments confirm it's **not suitable for distributed CI/CD testing**, which is exactly what this project needs.
The only potentially better solution (Currents) requires a paid external service, which adds cost and complexity without significant benefits over the current approach.
**Recommendation: Keep the current implementation**, with minor improvements to error handling and documentation. Monitor Playwright development for native improvements, but don't expect `--last-failed` to become a viable alternative for this use case.
---
## References
### Official Playwright Documentation
- [Command Line](https://playwright.dev/docs/test-cli)
- [Reporters](https://playwright.dev/docs/test-reporters)
- [Test Sharding](https://playwright.dev/docs/test-sharding)
- [CI/CD Setup](https://playwright.dev/docs/ci-intro)
### Community Resources
- [Playwright Solutions: How to Run Failures Only](https://playwrightsolutions.com/how-to-run-failures-only-from-the-last-playwright-run/)
- [Medium: How to Run Only Last Failed Tests](https://medium.com/@testerstalk/how-to-run-only-last-failed-tests-in-playwright-e5e41472594a)
- [Medium: Streamlining Visual Regression Testing](https://medium.com/@haleywardo/streamlining-playwright-visual-regression-testing-with-github-actions-e077fd33c27c)
### GitHub Issues
- [#30924 - Last-failed with blob reports](https://github.com/microsoft/playwright/issues/30924)
- [#33094 - Merging main run with --last-failed](https://github.com/microsoft/playwright/issues/33094)
- [#28254 - Feature request for --last-failed](https://github.com/microsoft/playwright/issues/28254)
### Example Implementations
- [JupyterLab Git - Update Integration Tests](https://github.com/jupyterlab/jupyterlab-git/blob/main/.github/workflows/update-integration-tests.yml)
- [WordPress Openverse - Issue #4535](https://github.com/WordPress/openverse/issues/4535)
### Third-Party Tools
- [Currents - Playwright Last Failed Action](https://github.com/currents-dev/playwright-last-failed)
- [Currents - Re-run Only Failed Tests](https://docs.currents.dev/guides/re-run-only-failed-tests)

View File

@@ -0,0 +1,482 @@
# Snapshot Update from Actual Files (Fast Approach)
**Date:** 2025-10-08
**Status:** Proposed Optimization
## Overview
When Playwright snapshot tests fail, Playwright **already generates the new ("actual") snapshots**. Instead of re-running tests with `--update-snapshots`, we can extract these actual snapshots from the `test-results/` directory and copy them to overwrite the expected snapshots.
**Performance improvement:** ~2-5 minutes → **~10-30 seconds**
## How Playwright Stores Snapshots
### Expected (Baseline) Snapshots
Stored in: `<test-file>-snapshots/<snapshot-name>-<project>-<platform>.png`
**Example:**
```
browser_tests/tests/interaction.spec.ts-snapshots/default-chromium-linux.png
```
### Failed Test Artifacts
When a snapshot test fails, Playwright creates:
```
test-results/<test-hash>/
├── <snapshot-name>-actual.png # The NEW screenshot
├── <snapshot-name>-expected.png # Copy of baseline
└── <snapshot-name>-diff.png # Visual diff
```
**Example:**
```
test-results/interaction-default-chromium-67af3c/
├── default-1-actual.png
├── default-1-expected.png
└── default-1-diff.png
```
## Current Approach vs. Proposed Approach
### Current: Re-run Tests with `--update-snapshots`
```yaml
# Current workflow (.github/workflows/update-playwright-expectations.yaml)
- name: Re-run failed screenshot tests and update snapshots
run: |
# Download manifest of failed tests
# For each project: chromium, chromium-2x, etc.
# Run: playwright test --project="$project" --update-snapshots test1.spec.ts:42 test2.spec.ts:87 ...
```
**Time:** ~2-5 minutes (depends on # of failed tests)
**Why slow:**
- Re-executes tests (browser startup, navigation, interactions)
- Waits for elements, animations, etc.
- Generates HTML report
- Each test takes 5-15 seconds
### Proposed: Copy Actual → Expected
```yaml
# Proposed workflow
- name: Download test artifacts (includes test-results/)
- name: Copy actual snapshots to expected locations
run: pnpm tsx scripts/cicd/update-snapshots-from-actuals.ts
- name: Commit and push
```
**Time:** ~10-30 seconds (just file operations)
**Why fast:**
- No test execution
- No browser startup
- Just file copying
- Parallel file operations
## Implementation Plan
### Step 1: Modify tests-ci.yaml
Currently, test artifacts upload only the `playwright-report/` directory.
**Add test-results/ to artifacts:**
```yaml
# .github/workflows/tests-ci.yaml
- uses: actions/upload-artifact@v4
if: always()
with:
name: playwright-results-${{ matrix.browser }} # New artifact
path: |
ComfyUI_frontend/test-results/**/*-actual.png
ComfyUI_frontend/test-results/**/*-expected.png
ComfyUI_frontend/test-results/**/*-diff.png
retention-days: 7
```
**Optimization:** Only upload actual snapshots for failed tests (saves artifact storage)
### Step 2: Create Script to Map Actuals → Expected
**File:** `scripts/cicd/update-snapshots-from-actuals.ts`
```typescript
import type { JSONReport, JSONReportTestResult } from '@playwright/test/reporter'
import fs from 'node:fs'
import fsp from 'node:fs/promises'
import path from 'node:path'
interface SnapshotMapping {
actualPath: string // test-results/.../snapshot-1-actual.png
expectedPath: string // browser_tests/tests/foo.spec.ts-snapshots/snapshot-chromium-linux.png
testFile: string
testName: string
project: string
}
async function main() {
const reportPath = path.join('playwright-report', 'report.json')
if (!fs.existsSync(reportPath)) {
console.log('No report.json found - no failed tests to update')
return
}
const raw = await fsp.readFile(reportPath, 'utf8')
const report: JSONReport = JSON.parse(raw)
const mappings: SnapshotMapping[] = []
// Parse JSON report to extract snapshot paths
function collectFailedSnapshots(suite: any) {
if (!suite) return
for (const childSuite of suite.suites ?? []) {
collectFailedSnapshots(childSuite)
}
for (const spec of suite.specs ?? []) {
for (const test of spec.tests) {
const lastResult = test.results[test.results.length - 1]
if (lastResult?.status !== 'failed') continue
// Check if test has image attachments (indicates screenshot test)
const imageAttachments = lastResult.attachments.filter(
(att: any) => att?.contentType?.startsWith('image/')
)
if (imageAttachments.length === 0) continue
// Extract snapshot mapping from attachments
for (const attachment of imageAttachments) {
const attachmentPath = attachment.path
if (!attachmentPath || !attachmentPath.includes('-actual.png')) {
continue
}
// Parse test-results path to determine expected location
// test-results/interaction-default-chromium-67af3c/default-1-actual.png
// → browser_tests/tests/interaction.spec.ts-snapshots/default-chromium-linux.png
const actualPath = attachmentPath
const expectedPath = inferExpectedPath(actualPath, spec.file, test.projectId)
if (expectedPath) {
mappings.push({
actualPath,
expectedPath,
testFile: spec.file,
testName: test.annotations[0]?.description || test.title,
project: test.projectId
})
}
}
}
}
}
collectFailedSnapshots(report)
if (mappings.length === 0) {
console.log('No failed snapshot tests found')
return
}
console.log(`Found ${mappings.length} snapshots to update`)
// Copy actual → expected
let successCount = 0
let errorCount = 0
for (const mapping of mappings) {
try {
if (!fs.existsSync(mapping.actualPath)) {
console.warn(`⚠️ Actual file not found: ${mapping.actualPath}`)
errorCount++
continue
}
// Ensure expected directory exists
const expectedDir = path.dirname(mapping.expectedPath)
await fsp.mkdir(expectedDir, { recursive: true })
// Copy actual → expected
await fsp.copyFile(mapping.actualPath, mapping.expectedPath)
console.log(`✓ Updated: ${path.basename(mapping.expectedPath)}`)
successCount++
} catch (error) {
console.error(`✗ Failed to update ${mapping.expectedPath}:`, error)
errorCount++
}
}
console.log(`\n✅ Successfully updated ${successCount} snapshots`)
if (errorCount > 0) {
console.log(`⚠️ Failed to update ${errorCount} snapshots`)
process.exit(1)
}
}
/**
* Infer the expected snapshot path from the actual path
*
* Actual: test-results/interaction-default-chromium-67af3c/default-1-actual.png
* Expected: browser_tests/tests/interaction.spec.ts-snapshots/default-chromium-linux.png
*/
function inferExpectedPath(actualPath: string, testFile: string, projectId: string): string | null {
try {
// Extract snapshot name from actual path
// "default-1-actual.png" → "default"
const actualFilename = path.basename(actualPath)
const snapshotName = actualFilename.replace(/-\d+-actual\.png$/, '')
// Determine platform (linux, darwin, win32)
const platform = process.platform === 'linux' ? 'linux'
: process.platform === 'darwin' ? 'darwin'
: 'win32'
// Build expected path
const testDir = path.dirname(testFile)
const testBasename = path.basename(testFile)
const snapshotsDir = path.join(testDir, `${testBasename}-snapshots`)
const expectedFilename = `${snapshotName}-${projectId}-${platform}.png`
return path.join(snapshotsDir, expectedFilename)
} catch (error) {
console.error(`Failed to infer expected path for ${actualPath}:`, error)
return null
}
}
main().catch((err) => {
console.error('Failed to update snapshots:', err)
process.exit(1)
})
```
### Step 3: Better Approach - Use Playwright's Attachment Metadata
The JSON reporter actually includes the **expected snapshot path** in the attachments!
**Simplified script:**
```typescript
async function main() {
const report: JSONReport = JSON.parse(await fsp.readFile('playwright-report/report.json', 'utf8'))
const updates: Array<{ actual: string; expected: string }> = []
for (const result of getAllTestResults(report)) {
if (result.status !== 'failed') continue
for (const attachment of result.attachments) {
// Playwright includes both actual and expected in attachments
if (attachment.name?.includes('-actual') && attachment.path) {
const actualPath = attachment.path
// Find corresponding expected attachment
const expectedAttachment = result.attachments.find(
att => att.name === attachment.name.replace('-actual', '-expected')
)
if (expectedAttachment?.path) {
// The expected path in attachment points to the test-results copy
// But we can infer the real expected path from the attachment metadata
const expectedPath = inferRealExpectedPath(expectedAttachment)
updates.push({ actual: actualPath, expected: expectedPath })
}
}
}
}
// Copy files
for (const { actual, expected } of updates) {
await fsp.copyFile(actual, expected)
console.log(`✓ Updated: ${path.relative(process.cwd(), expected)}`)
}
}
```
### Step 4: Update GitHub Actions Workflow
```yaml
# .github/workflows/update-playwright-expectations.yaml
name: Update Playwright Expectations
on:
issue_comment:
types: [created]
jobs:
update:
if: |
github.event.issue.pull_request &&
contains(github.event.comment.body, '/update-snapshots') &&
contains(fromJSON('["OWNER", "MEMBER", "COLLABORATOR"]'),
github.event.comment.author_association)
runs-on: ubuntu-latest
steps:
- name: React to comment
uses: actions/github-script@v8
with:
script: |
github.rest.reactions.createForIssueComment({
comment_id: context.payload.comment.id,
content: '+1'
})
- name: Checkout PR
run: gh pr checkout ${{ github.event.issue.number }}
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Setup Frontend
uses: ./.github/actions/setup-frontend
- name: Get latest failed test run
id: get-run
uses: actions/github-script@v8
with:
script: |
const pr = await github.rest.pulls.get({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: context.payload.issue.number
})
const runs = await github.rest.actions.listWorkflowRuns({
owner: context.repo.owner,
repo: context.repo.repo,
workflow_id: 'tests-ci.yaml',
head_sha: pr.data.head.sha,
per_page: 1
})
core.setOutput('run_id', runs.data.workflow_runs[0]?.id || '')
- name: Download test results
uses: actions/download-artifact@v4
with:
run-id: ${{ steps.get-run.outputs.run_id }}
pattern: playwright-results-*
path: ComfyUI_frontend/test-results
merge-multiple: true
- name: Download JSON report
uses: actions/download-artifact@v4
with:
run-id: ${{ steps.get-run.outputs.run_id }}
pattern: playwright-report-*
path: ComfyUI_frontend/playwright-report
merge-multiple: true
- name: Update snapshots from actuals
working-directory: ComfyUI_frontend
run: pnpm tsx scripts/cicd/update-snapshots-from-actuals.ts
- name: Commit and push
working-directory: ComfyUI_frontend
run: |
git config user.name 'github-actions'
git config user.email 'github-actions@github.com'
git add browser_tests/**/*-snapshots/*.png
if git diff --cached --quiet; then
echo "No snapshot changes detected"
else
git commit -m "[automated] Update test expectations"
git push
fi
```
## Performance Comparison
### Current Approach: Re-run Tests
| Step | Time |
|------|------|
| Download manifest | 5s |
| Install Playwright browsers | 20s |
| Re-run 50 failed tests | 2-3 min |
| Generate report | 10s |
| Commit and push | 10s |
| **Total** | **~3-4 min** |
### Proposed Approach: Copy Actuals
| Step | Time |
|------|------|
| Download test-results artifacts | 10s |
| Download JSON report | 2s |
| Run copy script | 5s |
| Commit and push | 10s |
| **Total** | **~30s** |
**Speedup: 6-8x faster**
## Advantages
**Much faster** - No test re-execution
**Simpler** - No need for manifest generation
**Fewer dependencies** - No Playwright browser install needed
**Less resource usage** - No ComfyUI server, no browser processes
**More reliable** - File operations are deterministic
**Already tested** - The snapshots were generated during the actual test run
## Disadvantages / Edge Cases
**New snapshots** - If a test creates a snapshot for the first time, there's no existing expected file. This is rare and can be handled by fallback to re-running.
**Deleted tests** - Old snapshots won't be cleaned up automatically. Could add a cleanup step.
**Multiple projects** - Each project (chromium, chromium-2x, mobile-chrome) generates separate actuals. The script needs to handle all of them.
**Artifact storage** - Storing test-results/ increases artifact size. Mitigation: Only upload `-actual.png` files, not traces/videos.
## Hybrid Approach (Recommended)
Use the fast copy approach **with fallback**:
```yaml
- name: Update snapshots
run: |
# Try fast approach first
if pnpm tsx scripts/cicd/update-snapshots-from-actuals.ts; then
echo "✓ Updated snapshots from actuals"
else
echo "⚠ Fast update failed, falling back to re-running tests"
# Fallback to current approach
pnpm exec playwright test --update-snapshots --project=chromium ...
fi
```
## Implementation Checklist
- [ ] Create `scripts/cicd/update-snapshots-from-actuals.ts`
- [ ] Update `tests-ci.yaml` to upload `test-results/` artifacts
- [ ] Update `update-playwright-expectations.yaml` to use new script
- [ ] Add fallback logic for edge cases
- [ ] Test with actual PR
- [ ] Update documentation
- [ ] Consider switching from label trigger → comment trigger (`/update-snapshots`)
## Related Links
- **Playwright snapshot docs:** https://playwright.dev/docs/test-snapshots
- **JSON reporter types:** `@playwright/test/reporter`
- **GitHub Actions artifacts:** https://docs.github.com/en/actions/using-workflows/storing-workflow-data-as-artifacts
- **Issue #22064:** Playwright feature request for better snapshot file alignment
## Conclusion
This approach is **significantly faster** and **simpler** than re-running tests. The main trade-off is artifact storage size, but this can be mitigated by only uploading actual snapshots (not traces/videos).
**Recommendation:** Implement this as the primary approach with fallback to re-running tests for edge cases.

View File

@@ -0,0 +1,87 @@
import type {
JSONReport,
JSONReportSpec,
JSONReportSuite,
JSONReportTestResult
} from '@playwright/test/reporter'
import fs from 'node:fs'
import fsp from 'node:fs/promises'
import path from 'node:path'
// CLI arguments (everything after the node executable and script path).
const cliArgs = process.argv.slice(2)

/**
 * Read the value that follows a CLI flag, e.g. `--report path/to/report.json`.
 * Returns `fallback` when the flag is absent or is the last token (no value).
 */
const getArg = (flag: string, fallback: string): string => {
  const flagIndex = cliArgs.indexOf(flag)
  const valueIndex = flagIndex + 1
  return flagIndex !== -1 && valueIndex < cliArgs.length
    ? cliArgs[valueIndex]
    : fallback
}
/**
 * Parse a Playwright JSON report and emit, per project, a manifest of
 * `<file>:<line>` locations for tests whose most recent attempt failed
 * with at least one image attachment (i.e. failed screenshot assertions).
 *
 * CLI flags:
 *   --report <path>  Playwright JSON report (default: playwright-report/report.json)
 *   --out <dir>      Directory for per-project manifests (default: ci-rerun)
 *
 * Output: one `<outDir>/<projectId>.txt` per affected project, one location
 * per line, with a trailing newline.
 *
 * @throws Error when the report file is missing or is not valid JSON.
 */
async function main() {
  // Defaults mirror the workflow layout
  const reportPath = getArg(
    '--report',
    path.join('playwright-report', 'report.json')
  )
  const outDir = getArg('--out', path.join('ci-rerun'))
  if (!fs.existsSync(reportPath)) {
    throw new Error(`Report not found at ${reportPath}`)
  }
  const raw = await fsp.readFile(reportPath, 'utf8')
  let report: JSONReport
  try {
    report = JSON.parse(raw)
  } catch (error) {
    throw new Error(
      `Failed to parse Playwright JSON report at ${reportPath}. ` +
        `The report file may be corrupted or incomplete. ` +
        `Error: ${error instanceof Error ? error.message : String(error)}`
    )
  }

  // A result counts as a screenshot failure when it carries at least one
  // image attachment (actual/expected/diff screenshots).
  const hasScreenshotSignal = (r: JSONReportTestResult) => {
    return r.attachments.some((att) => att?.contentType?.startsWith('image/'))
  }

  // projectId -> set of "<file>:<line>" locations (Set dedupes repeats).
  const out = new Map<string, Set<string>>()

  const collectFailedScreenshots = (suite?: JSONReportSuite) => {
    if (!suite) return
    for (const childSuite of suite.suites ?? []) {
      collectFailedScreenshots(childSuite)
    }
    const specs: JSONReportSpec[] = suite.specs ?? []
    for (const spec of specs) {
      const loc = `${spec.file}:${spec.line}`
      for (const test of spec.tests) {
        // Only the final result matters: earlier entries are retries.
        const last = test.results[test.results.length - 1]
        const failedScreenshot =
          last && last.status === 'failed' && hasScreenshotSignal(last)
        if (!failedScreenshot) continue
        // Single-lookup get-or-create instead of has()/get() double lookup.
        let projectSet = out.get(test.projectId)
        if (!projectSet) {
          projectSet = new Set()
          out.set(test.projectId, projectSet)
        }
        projectSet.add(loc)
      }
    }
  }

  for (const suite of report.suites ?? []) collectFailedScreenshots(suite)

  await fsp.mkdir(outDir, { recursive: true })
  for (const [project, set] of out.entries()) {
    const manifestPath = path.join(outDir, `${project}.txt`)
    await fsp.writeFile(manifestPath, Array.from(set).join('\n') + '\n', 'utf8')
  }
}
// Entry point: log the failure and exit non-zero so the CI job fails.
void main().catch((error: unknown) => {
  console.error('Manifest generation failed:', error)
  process.exit(1)
})

View File

@@ -1,11 +1,8 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
import { layoutStore } from '@/renderer/core/layout/store/layoutStore'
import {
type LayoutChange,
LayoutSource,
type NodeLayout
} from '@/renderer/core/layout/types'
import { LayoutSource } from '@/renderer/core/layout/types'
import type { LayoutChange, NodeLayout } from '@/renderer/core/layout/types'
describe('layoutStore CRDT operations', () => {
beforeEach(() => {

View File

@@ -1,10 +1,10 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
import { computed, shallowRef } from 'vue'
import {
type GraphNodeManager,
type VueNodeData,
useGraphNodeManager
import { useGraphNodeManager } from '@/composables/graph/useGraphNodeManager'
import type {
GraphNodeManager,
VueNodeData
} from '@/composables/graph/useGraphNodeManager'
import { useVueNodeLifecycle } from '@/composables/graph/useVueNodeLifecycle'
import type {