Merge branch 'main' into sno-api-changelog

Resolve conflict in knip.config.ts by keeping demo-snapshots/** ignore
entry alongside new main entries.
This commit is contained in:
snomiao
2026-03-13 01:59:34 +00:00
2708 changed files with 388950 additions and 81444 deletions

View File

@@ -2,10 +2,7 @@
import { execSync } from 'child_process'
import * as fs from 'fs'
import { globSync } from 'glob'
interface LocaleData {
[key: string]: any
}
import type { LocaleData } from './i18n-types'
// Configuration
const SOURCE_PATTERNS = ['src/**/*.{js,ts,vue}', '!src/locales/**/*']
@@ -45,7 +42,7 @@ function getStagedLocaleFiles(): string[] {
}
// Extract all keys from a nested object
function extractKeys(obj: any, prefix = ''): string[] {
function extractKeys(obj: LocaleData, prefix = ''): string[] {
const keys: string[] = []
for (const [key, value] of Object.entries(obj)) {
@@ -166,17 +163,17 @@ async function checkNewUnusedKeys() {
// Report results
if (unusedNewKeys.length > 0) {
console.log('\n⚠ Warning: Found unused NEW i18n keys:\n')
console.warn('\n⚠ Warning: Found unused NEW i18n keys:\n')
for (const key of unusedNewKeys.sort()) {
console.log(` - ${key}`)
console.warn(` - ${key}`)
}
console.log(`\n✨ Total unused new keys: ${unusedNewKeys.length}`)
console.log(
console.warn(`\n✨ Total unused new keys: ${unusedNewKeys.length}`)
console.warn(
'\nThese keys were added but are not used anywhere in the codebase.'
)
console.log('Consider using them or removing them in a future update.')
console.warn('Consider using them or removing them in a future update.')
// Changed from process.exit(1) to process.exit(0) for warning only
process.exit(0)

19
scripts/cicd/check-shell.sh Executable file
View File

@@ -0,0 +1,19 @@
#!/usr/bin/env bash
# Run shellcheck over every shell script tracked by git.
set -euo pipefail

# Operate from the repository root so tracked paths resolve correctly.
repo_root="$(git rev-parse --show-toplevel)"
cd "$repo_root"

# Bail out early with a distinct exit code when shellcheck is unavailable.
if ! command -v shellcheck >/dev/null 2>&1; then
  echo "Error: shellcheck is required but not installed" >&2
  exit 127
fi

# All tracked *.sh files, one per array element.
mapfile -t scripts < <(git ls-files -- '*.sh')

if [[ ${#scripts[@]} -eq 0 ]]; then
  echo 'No shell scripts found.'
  exit 0
fi

shellcheck --format=gcc "${scripts[@]}"

14
scripts/cicd/check-yaml.sh Executable file
View File

@@ -0,0 +1,14 @@
#!/usr/bin/env bash
# Run yamllint over every YAML file tracked by git.
set -euo pipefail

# Operate from the repository root so tracked paths resolve correctly.
ROOT_DIR="$(git rev-parse --show-toplevel)"
cd "$ROOT_DIR"

# Fail fast with a clear message when the linter is missing, instead of a
# bare "command not found" — consistent with check-shell.sh's shellcheck guard.
if ! command -v yamllint >/dev/null 2>&1; then
  echo "Error: yamllint is required but not installed" >&2
  exit 127
fi

# All tracked *.yml / *.yaml files, one per array element.
mapfile -t yaml_files < <(git ls-files '*.yml' '*.yaml')
if [[ ${#yaml_files[@]} -eq 0 ]]; then
  echo "No YAML files found to lint"
  exit 0
fi
yamllint --config-file .yamllint "${yaml_files[@]}"

View File

@@ -10,37 +10,158 @@ interface TestStats {
finished?: number
}
/** One attempt (retry) of a single Playwright test. */
interface TestResult {
  /** Attempt status, e.g. 'passed', 'failed', 'timedOut'. */
  status: string
  duration?: number
  /** Populated when the attempt failed. */
  error?: {
    message?: string
    stack?: string
  }
  /** Artifacts recorded for this attempt (e.g. trace zips). */
  attachments?: Array<{
    name: string
    path?: string
    contentType: string
  }>
}

/** A single test with all of its attempts. */
interface TestCase {
  title: string
  ok: boolean
  /** Overall outcome across retries, e.g. 'expected', 'unexpected', 'flaky'. */
  outcome: string
  results: TestResult[]
}

/** A (possibly nested) suite grouping tests from one spec file. */
interface Suite {
  title: string
  file: string
  suites?: Suite[]
  tests?: TestCase[]
}

/** Full Playwright JSON report: aggregate stats plus the suite tree. */
interface FullReportData {
  stats?: TestStats
  suites?: Suite[]
}

/** Minimal report shape when only aggregate stats are needed. */
interface ReportData {
  stats?: TestStats
}

/** Summary of one failed/flaky test, used for the PR comment. */
interface FailedTest {
  name: string
  file: string
  /** Trace-viewer link, when a trace was recorded and a base URL was given. */
  traceUrl?: string
  error?: string
}

/** Aggregated counts across the whole run, with optional failure details. */
interface TestCounts {
  passed: number
  failed: number
  flaky: number
  skipped: number
  total: number
  failures?: FailedTest[]
}
/**
* Extract failed test details from Playwright report
*/
function extractFailedTests(
reportData: FullReportData,
baseUrl?: string
): FailedTest[] {
const failures: FailedTest[] = []
function processTest(test: TestCase, file: string, suitePath: string[]) {
// Check if test failed or is flaky
const hasFailed = test.results.some(
(r) => r.status === 'failed' || r.status === 'timedOut'
)
const isFlaky = test.outcome === 'flaky'
if (hasFailed || isFlaky) {
const fullTestName = [...suitePath, test.title]
.filter(Boolean)
.join(' ')
const failedResult = test.results.find(
(r) => r.status === 'failed' || r.status === 'timedOut'
)
// Find trace attachment
let traceUrl: string | undefined
if (failedResult?.attachments) {
const traceAttachment = failedResult.attachments.find(
(a) => a.name === 'trace' && a.contentType === 'application/zip'
)
if (traceAttachment?.path) {
// Convert local path to URL path
const tracePath = traceAttachment.path.replace(/\\/g, '/')
const traceFile = path.basename(tracePath)
if (baseUrl) {
// Construct trace viewer URL
const traceDataUrl = `${baseUrl}/data/${traceFile}`
traceUrl = `${baseUrl}/trace/?trace=${encodeURIComponent(traceDataUrl)}`
}
}
}
failures.push({
name: fullTestName,
file: file,
traceUrl,
error: failedResult?.error?.message
})
}
}
function processSuite(suite: Suite, parentPath: string[] = []) {
const suitePath = suite.title ? [...parentPath, suite.title] : parentPath
// Process tests in this suite
if (suite.tests) {
for (const test of suite.tests) {
processTest(test, suite.file, suitePath)
}
}
// Recursively process nested suites
if (suite.suites) {
for (const childSuite of suite.suites) {
processSuite(childSuite, suitePath)
}
}
}
if (reportData.suites) {
for (const suite of reportData.suites) {
processSuite(suite)
}
}
return failures
}
/**
* Extract test counts from Playwright HTML report
* @param reportDir - Path to the playwright-report directory
* @returns Test counts { passed, failed, flaky, skipped, total }
* @param baseUrl - Base URL of the deployed report (for trace links)
* @returns Test counts { passed, failed, flaky, skipped, total, failures }
*/
function extractTestCounts(reportDir: string): TestCounts {
function extractTestCounts(reportDir: string, baseUrl?: string): TestCounts {
const counts: TestCounts = {
passed: 0,
failed: 0,
flaky: 0,
skipped: 0,
total: 0
total: 0,
failures: []
}
try {
// First, try to find report.json which Playwright generates with JSON reporter
const jsonReportFile = path.join(reportDir, 'report.json')
if (fs.existsSync(jsonReportFile)) {
const reportJson: ReportData = JSON.parse(
const reportJson: FullReportData = JSON.parse(
fs.readFileSync(jsonReportFile, 'utf-8')
)
if (reportJson.stats) {
@@ -54,6 +175,12 @@ function extractTestCounts(reportDir: string): TestCounts {
counts.failed = stats.unexpected || 0
counts.flaky = stats.flaky || 0
counts.skipped = stats.skipped || 0
// Extract detailed failure information
if (counts.failed > 0 || counts.flaky > 0) {
counts.failures = extractFailedTests(reportJson, baseUrl)
}
return counts
}
}
@@ -169,15 +296,18 @@ function extractTestCounts(reportDir: string): TestCounts {
// Main execution
const reportDir = process.argv[2]
const baseUrl = process.argv[3] // Optional: base URL for trace links
if (!reportDir) {
console.error('Usage: extract-playwright-counts.ts <report-directory>')
console.error(
'Usage: extract-playwright-counts.ts <report-directory> [base-url]'
)
process.exit(1)
}
const counts = extractTestCounts(reportDir)
const counts = extractTestCounts(reportDir, baseUrl)
// Output as JSON for easy parsing in shell script
console.log(JSON.stringify(counts))
process.stdout.write(JSON.stringify(counts) + '\n')
export { extractTestCounts }
export { extractTestCounts, extractFailedTests }

View File

@@ -2,7 +2,7 @@
set -e
# Deploy Playwright test reports to Cloudflare Pages and comment on PR
# Usage: ./pr-playwright-deploy-and-comment.sh <pr_number> <branch_name> <status> [start_time]
# Usage: ./pr-playwright-deploy-and-comment.sh <pr_number> <branch_name> <status>
# Input validation
# Validate PR number is numeric
@@ -31,8 +31,6 @@ case "$STATUS" in
;;
esac
START_TIME="${4:-$(date -u '+%m/%d/%Y, %I:%M:%S %p')}"
# Required environment variables
: "${GITHUB_TOKEN:?GITHUB_TOKEN is required}"
: "${GITHUB_REPOSITORY:?GITHUB_REPOSITORY is required}"
@@ -74,7 +72,7 @@ deploy_report() {
# Project name with dots converted to dashes for Cloudflare
sanitized_browser=$(echo "$browser" | sed 's/\./-/g')
sanitized_browser="${browser//./-}"
project="comfyui-playwright-${sanitized_browser}"
echo "Deploying $browser to project $project on branch $branch..." >&2
@@ -134,25 +132,9 @@ post_comment() {
# Main execution
if [ "$STATUS" = "starting" ]; then
# Post starting comment
comment=$(cat <<EOF
$COMMENT_MARKER
## 🎭 Playwright Test Results
<img alt='loading' src='https://github.com/user-attachments/assets/755c86ee-e445-4ea8-bc2c-cca85df48686' width='14px' height='14px'/> **Tests are starting...**
⏰ Started at: $START_TIME UTC
### 🚀 Running Tests
- 🧪 **chromium**: Running tests...
- 🧪 **chromium-0.5x**: Running tests...
- 🧪 **chromium-2x**: Running tests...
- 🧪 **mobile-chrome**: Running tests...
---
⏱️ Please wait while tests are running...
EOF
)
# Post concise starting comment
comment="$COMMENT_MARKER
## 🎭 Playwright: ⏳ Running..."
post_comment "$comment"
else
@@ -189,7 +171,8 @@ else
if command -v tsx > /dev/null 2>&1 && [ -f "$EXTRACT_SCRIPT" ]; then
echo "Extracting counts from $REPORT_DIR using $EXTRACT_SCRIPT" >&2
counts=$(tsx "$EXTRACT_SCRIPT" "$REPORT_DIR" 2>&1 || echo '{}')
# Pass the base URL so we can generate trace links
counts=$(tsx "$EXTRACT_SCRIPT" "$REPORT_DIR" "$url" 2>&1 || echo '{}')
echo "Extracted counts for $browser: $counts" >&2
echo "$counts" > "$temp_dir/$i.counts"
else
@@ -208,7 +191,7 @@ else
# Wait for all deployments to complete
for pid in $pids; do
wait $pid
wait "$pid"
done
# Collect URLs and counts in order
@@ -254,9 +237,9 @@ else
total_tests=0
# Parse counts and calculate totals
IFS='|'
set -- $all_counts
for counts_json; do
IFS='|' read -r -a counts_array <<< "$all_counts"
for counts_json in "${counts_array[@]}"; do
[ -z "$counts_json" ] && continue
if [ "$counts_json" != "{}" ] && [ -n "$counts_json" ]; then
# Parse JSON counts using simple grep/sed if jq is not available
if command -v jq > /dev/null 2>&1; then
@@ -283,54 +266,77 @@ else
done
unset IFS
# Determine overall status
# Determine overall status (flaky tests are treated as passing)
if [ $total_failed -gt 0 ]; then
status_icon="❌"
status_text="Some tests failed"
elif [ $total_flaky -gt 0 ]; then
status_icon="⚠️"
status_text="Tests passed with flaky tests"
elif [ $total_tests -gt 0 ]; then
status_icon="✅"
status_text="All tests passed!"
else
status_icon="🕵🏻"
status_text="No test results found"
fi
# Generate completion comment
# Build flaky indicator if any (small subtext, no warning icon)
flaky_note=""
if [ $total_flaky -gt 0 ]; then
flaky_note=" · $total_flaky flaky"
fi
# Generate compact single-line comment
comment="$COMMENT_MARKER
## 🎭 Playwright Test Results
## 🎭 Playwright: $status_icon $total_passed passed, $total_failed failed$flaky_note"
$status_icon **$status_text**
⏰ Completed at: $(date -u '+%m/%d/%Y, %I:%M:%S %p') UTC"
# Add summary counts if we have test data
if [ $total_tests -gt 0 ]; then
# Extract and display failed tests from all browsers (flaky tests are treated as passing)
if [ $total_failed -gt 0 ]; then
comment="$comment
### 📈 Summary
- **Total Tests:** $total_tests
- **Passed:** $total_passed
- **Failed:** $total_failed $([ $total_failed -gt 0 ] && echo '❌' || echo '')
- **Flaky:** $total_flaky $([ $total_flaky -gt 0 ] && echo '⚠️' || echo '')
- **Skipped:** $total_skipped $([ $total_skipped -gt 0 ] && echo '⏭️' || echo '')"
### ❌ Failed Tests"
# Process each browser's failures
for counts_json in "${counts_array[@]}"; do
[ -z "$counts_json" ] || [ "$counts_json" = "{}" ] && continue
if command -v jq > /dev/null 2>&1; then
# Extract failures array from JSON
failures=$(echo "$counts_json" | jq -r '.failures // [] | .[]? | "\(.name)|\(.file)|\(.traceUrl // "")"')
if [ -n "$failures" ]; then
while IFS='|' read -r test_name test_file trace_url; do
[ -z "$test_name" ] && continue
# Convert file path to GitHub URL (relative to repo root)
github_file_url="https://github.com/$GITHUB_REPOSITORY/blob/$GITHUB_SHA/$test_file"
# Build the failed test line
test_line="- [$test_name]($github_file_url)"
if [ -n "$trace_url" ] && [ "$trace_url" != "null" ]; then
test_line="$test_line: [View trace]($trace_url)"
fi
comment="$comment
$test_line"
done <<< "$failures"
fi
fi
done
fi
# Add browser reports in collapsible section
comment="$comment
### 📊 Test Reports by Browser"
<details>
<summary>📊 Browser Reports</summary>
"
# Add browser results with individual counts
# Add browser results
i=0
IFS='|'
set -- $all_counts
for counts_json; do
# Get browser name
browser=$(echo "$BROWSERS" | cut -d' ' -f$((i + 1)))
# Get URL at position i
url=$(echo "$urls" | cut -d' ' -f$((i + 1)))
IFS=' ' read -r -a browser_array <<< "$BROWSERS"
IFS=' ' read -r -a url_array <<< "$urls"
for counts_json in "${counts_array[@]}"; do
[ -z "$counts_json" ] && { i=$((i + 1)); continue; }
browser="${browser_array[$i]:-}"
url="${url_array[$i]:-}"
if [ "$url" != "failed" ] && [ -n "$url" ]; then
# Parse individual browser counts
@@ -350,7 +356,7 @@ $status_icon **$status_text**
fi
if [ -n "$b_total" ] && [ "$b_total" != "0" ]; then
counts_str=" $b_passed / ❌ $b_failed / ⚠️ $b_flaky / ⏭️ $b_skipped"
counts_str=" ($b_passed / ❌ $b_failed / ⚠️ $b_flaky / ⏭️ $b_skipped)"
else
counts_str=""
fi
@@ -359,10 +365,10 @@ $status_icon **$status_text**
fi
comment="$comment
- **${browser}**: [View Report](${url})${counts_str}"
- **${browser}**: [View Report](${url})${counts_str}"
else
comment="$comment
- **${browser}**: Deployment failed"
- **${browser}**: Deployment failed"
fi
i=$((i + 1))
done
@@ -370,8 +376,7 @@ $status_icon **$status_text**
comment="$comment
---
🎉 Click on the links above to view detailed test results for each browser configuration."
</details>"
post_comment "$comment"
fi
fi

View File

@@ -2,7 +2,7 @@
set -e
# Deploy Storybook to Cloudflare Pages and comment on PR
# Usage: ./pr-storybook-deploy-and-comment.sh <pr_number> <branch_name> <status> [start_time]
# Usage: ./pr-storybook-deploy-and-comment.sh <pr_number> <branch_name> <status>
# Input validation
# Validate PR number is numeric
@@ -31,7 +31,6 @@ case "$STATUS" in
;;
esac
START_TIME="${4:-$(date -u '+%m/%d/%Y, %I:%M:%S %p')}"
# Required environment variables
: "${GITHUB_TOKEN:?GITHUB_TOKEN is required}"
@@ -120,50 +119,9 @@ post_comment() {
# Main execution
if [ "$STATUS" = "starting" ]; then
# Check if this is a version-bump branch
IS_VERSION_BUMP="false"
if echo "$BRANCH_NAME" | grep -q "^version-bump-"; then
IS_VERSION_BUMP="true"
fi
# Post starting comment with appropriate message
if [ "$IS_VERSION_BUMP" = "true" ]; then
comment=$(cat <<EOF
$COMMENT_MARKER
## 🎨 Storybook Build Status
<img alt='loading' src='https://github.com/user-attachments/assets/755c86ee-e445-4ea8-bc2c-cca85df48686' width='14px' height='14px'/> **Build is starting...**
⏰ Started at: $START_TIME UTC
### 🚀 Building Storybook
- 📦 Installing dependencies...
- 🔧 Building Storybook components...
- 🎨 Running Chromatic visual tests...
---
⏱️ Please wait while the Storybook build is in progress...
EOF
)
else
comment=$(cat <<EOF
$COMMENT_MARKER
## 🎨 Storybook Build Status
<img alt='loading' src='https://github.com/user-attachments/assets/755c86ee-e445-4ea8-bc2c-cca85df48686' width='14px' height='14px'/> **Build is starting...**
⏰ Started at: $START_TIME UTC
### 🚀 Building Storybook
- 📦 Installing dependencies...
- 🔧 Building Storybook components...
- 🌐 Preparing deployment to Cloudflare Pages...
---
⏱️ Please wait while the Storybook build is in progress...
EOF
)
fi
# Post starting comment
comment="$COMMENT_MARKER
## 🎨 Storybook: <img alt='loading' src='https://github.com/user-attachments/assets/755c86ee-e445-4ea8-bc2c-cca85df48686' width='14px' height='14px'/> Building..."
post_comment "$comment"
elif [ "$STATUS" = "completed" ]; then
@@ -192,56 +150,57 @@ elif [ "$STATUS" = "completed" ]; then
WORKFLOW_CONCLUSION="${WORKFLOW_CONCLUSION:-success}"
WORKFLOW_URL="${WORKFLOW_URL:-}"
# Generate completion comment based on conclusion
# Generate compact header based on conclusion
if [ "$WORKFLOW_CONCLUSION" = "success" ]; then
status_icon="✅"
status_text="Build completed successfully!"
footer_text="🎉 Your Storybook is ready for review!"
status_text="Built"
elif [ "$WORKFLOW_CONCLUSION" = "skipped" ]; then
status_icon="⏭️"
status_text="Build skipped."
footer_text=" Chromatic was skipped for this PR."
status_text="Skipped"
elif [ "$WORKFLOW_CONCLUSION" = "cancelled" ]; then
status_icon="🚫"
status_text="Build cancelled."
footer_text=" The Chromatic run was cancelled."
status_text="Cancelled"
else
status_icon="❌"
status_text="Build failed!"
footer_text="⚠️ Please check the workflow logs for error details."
status_text="Failed"
fi
comment="$COMMENT_MARKER
## 🎨 Storybook Build Status
$status_icon **$status_text**
# Build compact header with optional storybook link
header="## 🎨 Storybook: $status_icon $status_text"
if [ "$deployment_url" != "Not deployed" ] && [ "$deployment_url" != "Deployment failed" ] && [ "$WORKFLOW_CONCLUSION" = "success" ]; then
header="$header$deployment_url"
fi
# Build details section
details="<details>
<summary>Details</summary>
⏰ Completed at: $(date -u '+%m/%d/%Y, %I:%M:%S %p') UTC
### 🔗 Links
**Links**
- [📊 View Workflow Run]($WORKFLOW_URL)"
# Add deployment status
if [ "$deployment_url" != "Not deployed" ]; then
if [ "$deployment_url" = "Deployment failed" ]; then
comment="$comment
details="$details
- ❌ Storybook deployment failed"
elif [ "$WORKFLOW_CONCLUSION" = "success" ]; then
comment="$comment
- 🎨 $deployment_url"
else
comment="$comment
- ⚠️ Build failed - $deployment_url"
elif [ "$WORKFLOW_CONCLUSION" != "success" ]; then
details="$details
- ⚠️ Build failed — $deployment_url"
fi
elif [ "$WORKFLOW_CONCLUSION" != "success" ]; then
comment="$comment
details="$details
- ⏭️ Storybook deployment skipped (build did not succeed)"
fi
comment="$comment
---
$footer_text"
details="$details
</details>"
comment="$COMMENT_MARKER
$header
$details"
post_comment "$comment"
fi

View File

@@ -0,0 +1,270 @@
#!/usr/bin/env tsx
import { execSync } from 'child_process'
import fs from 'fs'
import path from 'path'
/**
 * Release decision payload, emitted as JSON for the GitHub Actions workflow.
 * Keys are snake_case because the workflow consumes them verbatim.
 */
interface ReleaseInfo {
  /** Frontend version currently pinned in ComfyUI's requirements.txt. */
  current_version: string
  /** Minor line being targeted (the N in core/1.N). */
  target_minor: number
  /** Version a new release should publish as. */
  target_version: string
  /** Frontend release branch, e.g. "core/1.25". */
  target_branch: string
  /** True when the target branch has commits past the latest tag. */
  needs_release: boolean
  /** Newest v1.<target_minor>.* tag, or null when none exists. */
  latest_patch_tag: string | null
  branch_head_sha: string | null
  tag_commit_sha: string | null
  /** GitHub compare URL between current and target versions. */
  diff_url: string
  /** Populated by the workflow if a release PR is actually opened. */
  release_pr_url: string | null
}
/**
 * Run a shell command and return its trimmed stdout.
 * Failures never throw: the error is logged to stderr and '' is returned,
 * so callers can treat an empty string as "command failed / no output".
 */
function exec(command: string, cwd?: string): string {
  try {
    const stdout = execSync(command, {
      cwd,
      encoding: 'utf-8',
      stdio: ['pipe', 'pipe', 'pipe']
    })
    return stdout.trim()
  } catch (err) {
    const reason = err instanceof Error ? err.message : String(err)
    const where = cwd ? ` in directory: ${cwd}` : ''
    console.error(`Command failed: ${command}${where}\nError: ${reason}`)
    return ''
  }
}
/**
 * Read the pinned comfyui-frontend-package version from a requirements file.
 * Accepts ==, >=, <=, ~=, > and < specifiers.
 * Returns null (after logging to stderr) when the file is missing or no
 * matching requirement line is found.
 */
function parseRequirementsVersion(requirementsPath: string): string | null {
  if (!fs.existsSync(requirementsPath)) {
    console.error(`Requirements file not found: ${requirementsPath}`)
    return null
  }
  const requirementPattern =
    /comfyui-frontend-package\s*(?:==|>=|<=|~=|>|<)\s*([0-9]+\.[0-9]+\.[0-9]+)/
  const match = fs
    .readFileSync(requirementsPath, 'utf-8')
    .match(requirementPattern)
  if (match) {
    return match[1]
  }
  console.error(
    'Could not find comfyui-frontend-package version in requirements.txt'
  )
  return null
}
/**
 * True when `version` is exactly three dot-separated non-negative integers
 * (e.g. "1.2.3") with no leading zeros, signs, whitespace, or exponents.
 */
function isValidSemver(version: string): boolean {
  if (!version || typeof version !== 'string') return false
  const parts = version.split('.')
  if (parts.length !== 3) return false
  // Round-tripping through Number rejects anything that is not a
  // canonical non-negative integer ("01", " 1", "+1", "1e2", ...).
  const isCanonicalInt = (part: string): boolean => {
    const num = Number(part)
    return Number.isFinite(num) && num >= 0 && String(num) === part
  }
  return parts.every(isCanonicalInt)
}
/**
 * Find the newest v1.<minor>.* tag in the repo.
 * Returns null when no such tag exists, or when the newest one is not a
 * plain vX.Y.Z tag (e.g. carries a pre-release suffix).
 */
function getLatestPatchTag(repoPath: string, minor: number): string | null {
  // Make sure the local tag list is up to date before inspecting it.
  exec('git fetch --tags', repoPath)

  // git's version sort puts the highest tag first; take only that one.
  const latestTag = exec(
    `git tag -l 'v1.${minor}.*' --sort=-version:refname | head -n 1`,
    repoPath
  )
  if (!latestTag) return null

  // Only accept strict vX.Y.Z tags.
  if (!/^v\d+\.\d+\.\d+$/.test(latestTag)) {
    console.error(
      `Latest tag for minor version ${minor} is not valid semver: ${latestTag}`
    )
    return null
  }
  return latestTag
}
/**
 * Resolve the ComfyUI release information
 *
 * Compares the frontend version pinned in ComfyUI's requirements.txt against
 * the frontend repo's core/1.<minor> branches and v1.<minor>.* tags to decide
 * whether a release is needed and which version it should publish as.
 *
 * @param comfyuiRepoPath - Checkout of the ComfyUI repo (source of requirements.txt)
 * @param frontendRepoPath - Checkout of the frontend repo (source of branches/tags)
 * @returns Release info for the workflow, or null on any validation failure
 */
function resolveRelease(
  comfyuiRepoPath: string,
  frontendRepoPath: string
): ReleaseInfo | null {
  // Parse current version from ComfyUI requirements.txt
  const requirementsPath = path.join(comfyuiRepoPath, 'requirements.txt')
  const currentVersion = parseRequirementsVersion(requirementsPath)
  if (!currentVersion) {
    return null
  }
  // Validate version format
  if (!isValidSemver(currentVersion)) {
    console.error(
      `Invalid semantic version format: ${currentVersion}. Expected format: X.Y.Z`
    )
    return null
  }
  // Only the minor component is used below; major/patch are intentionally unused.
  const [major, currentMinor, patch] = currentVersion.split('.').map(Number)
  // Fetch all branches
  exec('git fetch origin', frontendRepoPath)
  // Try next minor first, fall back to current minor if not available
  let targetMinor = currentMinor + 1
  let targetBranch = `core/1.${targetMinor}`
  // exec() returns '' on failure, so a missing branch yields a falsy value.
  const nextMinorExists = exec(
    `git rev-parse --verify origin/${targetBranch}`,
    frontendRepoPath
  )
  if (!nextMinorExists) {
    // Fall back to current minor for patch releases
    targetMinor = currentMinor
    targetBranch = `core/1.${targetMinor}`
    const currentMinorExists = exec(
      `git rev-parse --verify origin/${targetBranch}`,
      frontendRepoPath
    )
    if (!currentMinorExists) {
      console.error(
        `Neither core/1.${currentMinor + 1} nor core/1.${currentMinor} branches exist in frontend repo`
      )
      return null
    }
    console.error(
      `Next minor branch core/1.${currentMinor + 1} not found, falling back to core/1.${currentMinor} for patch release`
    )
  }
  // Get latest patch tag for target minor
  const latestPatchTag = getLatestPatchTag(frontendRepoPath, targetMinor)
  let needsRelease = false
  let branchHeadSha: string | null = null
  let tagCommitSha: string | null = null
  let targetVersion = currentVersion
  if (latestPatchTag) {
    // Get commit SHA for the tag
    tagCommitSha = exec(`git rev-list -n 1 ${latestPatchTag}`, frontendRepoPath)
    // Get commit SHA for branch head
    branchHeadSha = exec(
      `git rev-parse origin/${targetBranch}`,
      frontendRepoPath
    )
    // Check if there are commits between tag and branch head
    const commitsBetween = exec(
      `git rev-list ${latestPatchTag}..origin/${targetBranch} --count`,
      frontendRepoPath
    )
    const commitCount = parseInt(commitsBetween, 10)
    // A release is only needed when the branch has moved past the last tag.
    needsRelease = !isNaN(commitCount) && commitCount > 0
    // Parse existing patch number and increment if needed
    const tagVersion = latestPatchTag.replace('v', '')
    // Validate tag version format
    if (!isValidSemver(tagVersion)) {
      console.error(
        `Invalid tag version format: ${tagVersion}. Expected format: X.Y.Z`
      )
      return null
    }
    const [, , existingPatch] = tagVersion.split('.').map(Number)
    // Validate existingPatch is a valid number
    if (!Number.isFinite(existingPatch) || existingPatch < 0) {
      console.error(`Invalid patch number in tag: ${existingPatch}`)
      return null
    }
    if (needsRelease) {
      targetVersion = `1.${targetMinor}.${existingPatch + 1}`
    } else {
      targetVersion = tagVersion
    }
  } else {
    // No tags exist for this minor version, need to create v1.{targetMinor}.0
    needsRelease = true
    targetVersion = `1.${targetMinor}.0`
    branchHeadSha = exec(
      `git rev-parse origin/${targetBranch}`,
      frontendRepoPath
    )
  }
  const diffUrl = `https://github.com/Comfy-Org/ComfyUI_frontend/compare/v${currentVersion}...v${targetVersion}`
  return {
    current_version: currentVersion,
    target_minor: targetMinor,
    target_version: targetVersion,
    target_branch: targetBranch,
    needs_release: needsRelease,
    latest_patch_tag: latestPatchTag,
    branch_head_sha: branchHeadSha,
    tag_commit_sha: tagCommitSha,
    diff_url: diffUrl,
    release_pr_url: null // Will be populated by workflow if release is triggered
  }
}
// Main execution
// CLI: resolve-comfyui-release.ts <comfyui-repo-path> [frontend-repo-path]
// frontend-repo-path defaults to the current working directory.
const comfyuiRepoPath = process.argv[2]
const frontendRepoPath = process.argv[3] || process.cwd()
if (!comfyuiRepoPath) {
  console.error(
    'Usage: resolve-comfyui-release.ts <comfyui-repo-path> [frontend-repo-path]'
  )
  process.exit(1)
}
const releaseInfo = resolveRelease(comfyuiRepoPath, frontendRepoPath)
if (!releaseInfo) {
  console.error('Failed to resolve release information')
  process.exit(1)
}
// Output as JSON for GitHub Actions
// (stdout carries only the JSON payload; all diagnostics go to stderr above)
console.log(JSON.stringify(releaseInfo, null, 2))
export { resolveRelease }

View File

@@ -1,5 +1,8 @@
import * as fs from 'fs'
// Import Vite define shim to make __DISTRIBUTION__ and other define variables available
import './vite-define-shim'
import { DESKTOP_DIALOGS } from '../apps/desktop-ui/src/constants/desktopDialogs'
import { comfyPageFixture as test } from '../browser_tests/fixtures/ComfyPage'
import {
@@ -8,7 +11,7 @@ import {
} from '../packages/shared-frontend-utils/src/formatUtil'
import { CORE_MENU_COMMANDS } from '../src/constants/coreMenuCommands'
import { SERVER_CONFIG_ITEMS } from '../src/constants/serverConfig'
import type { FormItem, SettingParams } from '../src/platform/settings/types'
import type { SettingParams } from '../src/platform/settings/types'
import type { ComfyCommandImpl } from '../src/stores/commandStore'
const localePath = './src/locales/en/main.json'
@@ -120,8 +123,8 @@ test('collect-i18n-general', async ({ comfyPage }) => {
SERVER_CONFIG_ITEMS.map((config) => [
normalizeI18nKey(config.id),
{
name: (config as unknown as FormItem).name,
tooltip: (config as unknown as FormItem).tooltip
name: config.name,
tooltip: config.tooltip
}
])
)

View File

@@ -7,6 +7,7 @@ import {
writeFileSync
} from 'fs'
import { dirname, join } from 'path'
import type { LocaleData } from './i18n-types'
// Ensure directories exist
function ensureDir(dir: string) {
@@ -41,8 +42,8 @@ function getAllJsonFiles(dir: string): string[] {
}
// Find additions in new object compared to base
function findAdditions(base: any, updated: any): Record<string, any> {
const additions: Record<string, any> = {}
function findAdditions(base: LocaleData, updated: LocaleData): LocaleData {
const additions: LocaleData = {}
for (const key in updated) {
if (!(key in base)) {
@@ -74,7 +75,7 @@ function capture(srcLocaleDir: string, tempBaseDir: string) {
ensureDir(dirname(targetPath))
writeFileSync(targetPath, readFileSync(file, 'utf8'))
}
console.log('Captured current locale files to temp/base/')
console.warn('Captured current locale files to temp/base/')
}
// Diff command
@@ -94,7 +95,7 @@ function diff(srcLocaleDir: string, tempBaseDir: string, tempDiffDir: string) {
if (Object.keys(additions).length > 0) {
ensureDir(dirname(diffPath))
writeFileSync(diffPath, JSON.stringify(additions, null, 2))
console.log(`Wrote diff to ${diffPath}`)
console.warn(`Wrote diff to ${diffPath}`)
}
}
}
@@ -116,9 +117,9 @@ switch (command) {
// Remove temp directory recursively
if (existsSync('temp')) {
rmSync('temp', { recursive: true, force: true })
console.log('Removed temp directory')
console.warn('Removed temp directory')
}
break
default:
console.log('Please specify either "capture" or "diff" command')
console.error('Please specify either "capture" or "diff" command')
}

5
scripts/i18n-types.ts Normal file
View File

@@ -0,0 +1,5 @@
/**
 * Shared types for i18n-related scripts
 */

/**
 * Recursive shape of a parsed locale JSON file: each key maps to either a
 * translated string (leaf) or a nested namespace of further keys.
 */
export type LocaleData = { [key: string]: string | LocaleData }

311
scripts/perf-report.ts Normal file
View File

@@ -0,0 +1,311 @@
import { existsSync, readFileSync, readdirSync } from 'node:fs'
import { join } from 'node:path'
import type { MetricStats } from './perf-stats'
import {
classifyChange,
computeStats,
formatSignificance,
isNoteworthy,
zScore
} from './perf-stats'
/** One measured sample of a single perf test. */
interface PerfMeasurement {
  /** Test name; used as the grouping key across samples and runs. */
  name: string
  durationMs: number
  /** Style recalculation count and total duration. */
  styleRecalcs: number
  styleRecalcDurationMs: number
  /** Layout pass count and total duration. */
  layouts: number
  layoutDurationMs: number
  taskDurationMs: number
  /** JS heap growth over the measurement, in bytes. */
  heapDeltaBytes: number
}

/** One full perf run: CI metadata plus every measurement it produced. */
interface PerfReport {
  timestamp: string
  gitSha: string
  branch: string
  measurements: PerfMeasurement[]
}
// Well-known artifact locations for the current run, the baseline copy,
// and the directory of historical runs.
const CURRENT_PATH = 'test-results/perf-metrics.json'
const BASELINE_PATH = 'temp/perf-baseline/perf-metrics.json'
const HISTORY_DIR = 'temp/perf-history'

/** The subset of PerfMeasurement fields surfaced in report tables. */
type MetricKey = 'styleRecalcs' | 'layouts' | 'taskDurationMs'

// Display metadata for each reported metric (unit 'ms' appends a suffix).
const REPORTED_METRICS: { key: MetricKey; label: string; unit: string }[] = [
  { key: 'styleRecalcs', label: 'style recalcs', unit: '' },
  { key: 'layouts', label: 'layouts', unit: '' },
  { key: 'taskDurationMs', label: 'task duration', unit: 'ms' }
]
/**
 * Bucket measurements by test name, preserving encounter order in each bucket.
 */
function groupByName(
  measurements: PerfMeasurement[]
): Map<string, PerfMeasurement[]> {
  const grouped = new Map<string, PerfMeasurement[]>()
  for (const measurement of measurements) {
    const bucket = grouped.get(measurement.name)
    if (bucket) {
      bucket.push(measurement)
    } else {
      grouped.set(measurement.name, [measurement])
    }
  }
  return grouped
}
/**
 * Load every parseable perf-metrics.json under HISTORY_DIR.
 * Malformed files are skipped with a warning instead of aborting the report.
 */
function loadHistoricalReports(): PerfReport[] {
  if (!existsSync(HISTORY_DIR)) return []
  const loaded: PerfReport[] = []
  for (const entry of readdirSync(HISTORY_DIR)) {
    const metricsPath = join(HISTORY_DIR, entry, 'perf-metrics.json')
    if (!existsSync(metricsPath)) continue
    try {
      const parsed = JSON.parse(
        readFileSync(metricsPath, 'utf-8')
      ) as PerfReport
      loaded.push(parsed)
    } catch {
      console.warn(`Skipping malformed perf history: ${metricsPath}`)
    }
  }
  return loaded
}
/**
 * Summarize one metric of one test across historical runs.
 * Each historical report contributes a single value — the mean over its
 * samples for that test — and the per-run means are fed to computeStats.
 */
function getHistoricalStats(
  reports: PerfReport[],
  testName: string,
  metric: MetricKey
): MetricStats {
  const perRunMeans: number[] = []
  for (const report of reports) {
    const samples = groupByName(report.measurements).get(testName)
    if (!samples) continue
    const total = samples.reduce((sum, s) => sum + s[metric], 0)
    perRunMeans.push(total / samples.length)
  }
  return computeStats(perRunMeans)
}
/** Coefficient of variation as a percentage (0 when the mean is non-positive). */
function computeCV(stats: MetricStats): number {
  if (stats.mean > 0) {
    return (stats.stddev / stats.mean) * 100
  }
  return 0
}
/** Render a rounded metric value, appending "ms" for duration metrics. */
function formatValue(value: number, unit: string): string {
  const rounded = value.toFixed(0)
  return unit === 'ms' ? `${rounded}ms` : rounded
}
/** Render a percent delta with an explicit sign, or an em dash when unknown. */
function formatDelta(pct: number | null): string {
  if (pct === null) return '—'
  return pct >= 0 ? `+${pct.toFixed(0)}%` : `${pct.toFixed(0)}%`
}
/** Arithmetic mean of one metric across a sample set. */
function meanMetric(samples: PerfMeasurement[], key: MetricKey): number {
  let total = 0
  for (const sample of samples) {
    total += sample[key]
  }
  return total / samples.length
}
/** Human-readable byte count (B / KB / MB), sign preserved. */
function formatBytes(bytes: number): string {
  const magnitude = Math.abs(bytes)
  if (magnitude < 1024) return `${bytes} B`
  if (magnitude < 1024 * 1024) return `${(bytes / 1024).toFixed(1)} KB`
  return `${(bytes / (1024 * 1024)).toFixed(1)} MB`
}
/**
 * Render the full comparison report: PR metrics vs baseline, with
 * significance flags derived from historical variance on main.
 * The exact markdown strings/ordering are consumed downstream, so keep
 * them stable.
 *
 * @param prGroups - PR measurements grouped by test name
 * @param baseline - Baseline (main branch) report to compare against
 * @param historical - Prior main-branch reports used for variance/z-scores
 * @returns Markdown lines for the PR comment
 */
function renderFullReport(
  prGroups: Map<string, PerfMeasurement[]>,
  baseline: PerfReport,
  historical: PerfReport[]
): string[] {
  const lines: string[] = []
  const baselineGroups = groupByName(baseline.measurements)
  const tableHeader = [
    '| Metric | Baseline | PR (n=3) | Δ | Sig |',
    '|--------|----------|----------|---|-----|'
  ]
  // Rows flagged as noteworthy regressions, and every row regardless.
  const flaggedRows: string[] = []
  const allRows: string[] = []
  for (const [testName, prSamples] of prGroups) {
    const baseSamples = baselineGroups.get(testName)
    for (const { key, label, unit } of REPORTED_METRICS) {
      const prValues = prSamples.map((s) => s[key])
      const prMean = prValues.reduce((a, b) => a + b, 0) / prValues.length
      const histStats = getHistoricalStats(historical, testName, key)
      const cv = computeCV(histStats)
      // No baseline counterpart for this test: report the metric as "new".
      if (!baseSamples?.length) {
        allRows.push(
          `| ${testName}: ${label} | — | ${formatValue(prMean, unit)} | new | — |`
        )
        continue
      }
      const baseVal = meanMetric(baseSamples, key)
      // null (rendered as —) when baseline is 0 but the PR value is not.
      const deltaPct =
        baseVal === 0
          ? prMean === 0
            ? 0
            : null
          : ((prMean - baseVal) / baseVal) * 100
      const z = zScore(prMean, histStats)
      const sig = classifyChange(z, cv)
      const row = `| ${testName}: ${label} | ${formatValue(baseVal, unit)} | ${formatValue(prMean, unit)} | ${formatDelta(deltaPct)} | ${formatSignificance(sig, z)} |`
      allRows.push(row)
      if (isNoteworthy(sig)) {
        flaggedRows.push(row)
      }
    }
  }
  // Headline: regression summary table, or an all-clear line.
  if (flaggedRows.length > 0) {
    lines.push(
      `⚠️ **${flaggedRows.length} regression${flaggedRows.length > 1 ? 's' : ''} detected**`,
      '',
      ...tableHeader,
      ...flaggedRows,
      ''
    )
  } else {
    lines.push('No regressions detected.', '')
  }
  // Collapsible table containing every metric row.
  lines.push(
    `<details><summary>All metrics</summary>`,
    '',
    ...tableHeader,
    ...allRows,
    '',
    '</details>',
    ''
  )
  // Collapsible historical-variance table (metrics need at least 2 runs).
  lines.push(
    `<details><summary>Historical variance (last ${historical.length} runs)</summary>`,
    '',
    '| Metric | μ | σ | CV |',
    '|--------|---|---|-----|'
  )
  for (const [testName] of prGroups) {
    for (const { key, label, unit } of REPORTED_METRICS) {
      const stats = getHistoricalStats(historical, testName, key)
      if (stats.n < 2) continue
      const cv = computeCV(stats)
      lines.push(
        `| ${testName}: ${label} | ${formatValue(stats.mean, unit)} | ${formatValue(stats.stddev, unit)} | ${cv.toFixed(1)}% |`
      )
    }
  }
  lines.push('', '</details>')
  return lines
}
/**
 * Render a reduced report used while historical variance data is still
 * being collected (fewer than 2 recorded main-branch runs): baseline vs
 * PR means with a delta column, but no significance column.
 */
function renderColdStartReport(
  prGroups: Map<string, PerfMeasurement[]>,
  baseline: PerfReport,
  historicalCount: number
): string[] {
  const out: string[] = []
  const baseGroups = groupByName(baseline.measurements)
  out.push(
    `> Collecting baseline variance data (${historicalCount}/5 runs). Significance will appear after 2 main branch runs.`,
    '',
    '| Metric | Baseline | PR | Δ |',
    '|--------|----------|-----|---|'
  )
  for (const [testName, samples] of prGroups) {
    const baseSamples = baseGroups.get(testName)
    for (const metric of REPORTED_METRICS) {
      const { key, label, unit } = metric
      // Mean of the PR samples for this metric.
      let total = 0
      for (const sample of samples) {
        total += sample[key]
      }
      const prMean = total / samples.length
      // No baseline data for this test → show the value as "new".
      if (!baseSamples?.length) {
        out.push(
          `| ${testName}: ${label} | — | ${formatValue(prMean, unit)} | new |`
        )
        continue
      }
      const baseVal = meanMetric(baseSamples, key)
      // Percent delta; null when baseline is 0 but PR is not (undefined).
      let deltaPct: number | null
      if (baseVal !== 0) {
        deltaPct = ((prMean - baseVal) / baseVal) * 100
      } else {
        deltaPct = prMean === 0 ? 0 : null
      }
      out.push(
        `| ${testName}: ${label} | ${formatValue(baseVal, unit)} | ${formatValue(prMean, unit)} | ${formatDelta(deltaPct)} |`
      )
    }
  }
  return out
}
/**
 * Render absolute PR values only — used when no baseline report exists
 * to compare against.
 */
function renderNoBaselineReport(
  prGroups: Map<string, PerfMeasurement[]>
): string[] {
  const out: string[] = [
    'No baseline found — showing absolute values.\n',
    '| Metric | Value |',
    '|--------|-------|'
  ]
  for (const [testName, samples] of prGroups) {
    const count = samples.length
    // Average of one numeric field across the PR samples.
    const average = (pick: (s: PerfMeasurement) => number): number => {
      let total = 0
      for (const sample of samples) {
        total += pick(sample)
      }
      return total / count
    }
    out.push(
      `| ${testName}: style recalcs | ${average((s) => s.styleRecalcs).toFixed(0)} |`,
      `| ${testName}: layouts | ${average((s) => s.layouts).toFixed(0)} |`,
      `| ${testName}: task duration | ${average((s) => s.taskDurationMs).toFixed(0)}ms |`,
      `| ${testName}: heap delta | ${formatBytes(average((s) => s.heapDeltaBytes))} |`
    )
  }
  return out
}
/**
 * Entry point: load the current perf report, choose the richest
 * rendering mode available (full / cold-start / no-baseline) based on
 * what baseline and historical data exist, and print the markdown
 * report to stdout. Exits 0 early when no current metrics were found.
 */
function main() {
  if (!existsSync(CURRENT_PATH)) {
    process.stdout.write(
      '## ⚡ Performance Report\n\nNo perf metrics found. Perf tests may not have run.\n'
    )
    process.exit(0)
  }
  const current: PerfReport = JSON.parse(readFileSync(CURRENT_PATH, 'utf-8'))
  const baseline: PerfReport | null = existsSync(BASELINE_PATH)
    ? JSON.parse(readFileSync(BASELINE_PATH, 'utf-8'))
    : null
  const historical = loadHistoricalReports()
  const prGroups = groupByName(current.measurements)

  // Pick the report body: full report needs a baseline plus at least
  // 2 historical runs for variance; otherwise degrade gracefully.
  let body: string[]
  if (!baseline) {
    body = renderNoBaselineReport(prGroups)
  } else if (historical.length >= 2) {
    body = renderFullReport(prGroups, baseline, historical)
  } else {
    body = renderColdStartReport(prGroups, baseline, historical.length)
  }

  const output = [
    '## ⚡ Performance Report\n',
    ...body,
    '\n<details><summary>Raw data</summary>\n',
    '```json',
    JSON.stringify(current, null, 2),
    '```',
    '\n</details>'
  ]
  process.stdout.write(output.join('\n') + '\n')
}
main()

133
scripts/perf-stats.test.ts Normal file
View File

@@ -0,0 +1,133 @@
import { describe, expect, it } from 'vitest'
import {
classifyChange,
computeStats,
formatSignificance,
isNoteworthy,
zScore
} from './perf-stats'
describe('computeStats', () => {
  it('returns zeros for empty array', () => {
    expect(computeStats([])).toEqual({ mean: 0, stddev: 0, min: 0, max: 0, n: 0 })
  })

  it('returns value with zero stddev for single element', () => {
    expect(computeStats([42])).toEqual({
      mean: 42,
      stddev: 0,
      min: 42,
      max: 42,
      n: 1
    })
  })

  it('computes correct stats for known values', () => {
    // [2, 4, 4, 4, 5, 5, 7, 9]: mean = 5, sample variance ≈ 4.57,
    // sample stddev ≈ 2.14
    const { mean, stddev, min, max, n } = computeStats([2, 4, 4, 4, 5, 5, 7, 9])
    expect(mean).toBe(5)
    expect(stddev).toBeCloseTo(2.138, 2)
    expect(min).toBe(2)
    expect(max).toBe(9)
    expect(n).toBe(8)
  })

  it('uses sample stddev (n-1 denominator)', () => {
    // [10, 20]: mean = 15, variance = (25 + 25) / 1 = 50, stddev ≈ 7.07
    const result = computeStats([10, 20])
    expect(result.mean).toBe(15)
    expect(result.stddev).toBeCloseTo(7.071, 2)
    expect(result.n).toBe(2)
  })

  it('handles identical values', () => {
    const result = computeStats([5, 5, 5, 5])
    expect(result.mean).toBe(5)
    expect(result.stddev).toBe(0)
  })
})
describe('zScore', () => {
  it('returns null when stddev is 0', () => {
    // Identical samples → stddev 0 → z-score undefined.
    expect(zScore(10, computeStats([5, 5, 5]))).toBeNull()
  })

  it('returns null when n < 2', () => {
    expect(zScore(10, computeStats([5]))).toBeNull()
  })

  it('computes correct z-score', () => {
    const stats = { mean: 100, stddev: 10, min: 80, max: 120, n: 5 }
    const cases: Array<[number, number]> = [
      [120, 2],
      [80, -2],
      [100, 0]
    ]
    for (const [value, expected] of cases) {
      expect(zScore(value, stats)).toBe(expected)
    }
  })
})
describe('classifyChange', () => {
  it('returns noisy when CV > 50%', () => {
    expect(classifyChange(3, 60)).toBe('noisy')
    expect(classifyChange(-3, 51)).toBe('noisy')
  })

  it('does not classify as noisy when CV is exactly 50%', () => {
    // 50% is inclusive: classification falls through to the z-score.
    expect(classifyChange(3, 50)).toBe('regression')
    expect(classifyChange(-3, 50)).toBe('improvement')
  })

  it('returns neutral when z is null', () => {
    expect(classifyChange(null, 10)).toBe('neutral')
  })

  it('returns regression when z > 2', () => {
    for (const z of [2.1, 5]) {
      expect(classifyChange(z, 10)).toBe('regression')
    }
  })

  it('returns improvement when z < -2', () => {
    for (const z of [-2.1, -5]) {
      expect(classifyChange(z, 10)).toBe('improvement')
    }
  })

  it('returns neutral when z is within [-2, 2]', () => {
    // Boundaries ±2 are inclusive of neutral.
    for (const z of [0, 1.9, -1.9, 2, -2]) {
      expect(classifyChange(z, 10)).toBe('neutral')
    }
  })
})
describe('formatSignificance', () => {
it('formats regression with z-score and emoji', () => {
expect(formatSignificance('regression', 3.2)).toBe('⚠️ z=3.2')
})
it('formats improvement with z-score without emoji', () => {
expect(formatSignificance('improvement', -2.5)).toBe('z=-2.5')
})
it('formats noisy as descriptive text', () => {
expect(formatSignificance('noisy', null)).toBe('variance too high')
})
it('formats neutral with z-score without emoji', () => {
expect(formatSignificance('neutral', 0.5)).toBe('z=0.5')
})
it('formats neutral without z-score as dash', () => {
expect(formatSignificance('neutral', null)).toBe('—')
})
})
describe('isNoteworthy', () => {
  it('returns true for regressions', () => {
    expect(isNoteworthy('regression')).toBe(true)
  })

  it('returns false for non-regressions', () => {
    for (const sig of ['improvement', 'neutral', 'noisy'] as const) {
      expect(isNoteworthy(sig)).toBe(false)
    }
  })
})

63
scripts/perf-stats.ts Normal file
View File

@@ -0,0 +1,63 @@
/**
 * Summary statistics for a set of historical samples of one metric.
 */
export interface MetricStats {
  /** Arithmetic mean of the samples. */
  mean: number
  /** Sample standard deviation (n-1 denominator); 0 when n < 2. */
  stddev: number
  /** Smallest sample. */
  min: number
  /** Largest sample. */
  max: number
  /** Number of samples the stats were computed from. */
  n: number
}
/**
 * Compute summary statistics (mean, sample stddev, min, max, count)
 * for a list of samples. Empty input yields all-zero stats; a single
 * sample yields that value with zero stddev.
 */
export function computeStats(values: number[]): MetricStats {
  const count = values.length
  if (count === 0) {
    return { mean: 0, stddev: 0, min: 0, max: 0, n: 0 }
  }
  if (count === 1) {
    const only = values[0]
    return { mean: only, stddev: 0, min: only, max: only, n: 1 }
  }
  let total = 0
  for (const v of values) {
    total += v
  }
  const mean = total / count
  // Sample variance: divide by (count - 1), not count.
  let squaredError = 0
  for (const v of values) {
    squaredError += (v - mean) ** 2
  }
  return {
    mean,
    stddev: Math.sqrt(squaredError / (count - 1)),
    min: Math.min(...values),
    max: Math.max(...values),
    n: count
  }
}
/**
 * Standard score of `value` against historical stats, or null when a
 * z-score is undefined (zero variance or fewer than 2 samples).
 */
export function zScore(value: number, stats: MetricStats): number | null {
  const { mean, stddev, n } = stats
  if (n < 2 || stddev === 0) {
    return null
  }
  return (value - mean) / stddev
}
export type Significance = 'regression' | 'improvement' | 'neutral' | 'noisy'
/**
 * Classify a z-score as regression / improvement / neutral, unless the
 * metric's historical coefficient of variation exceeds 50%, in which
 * case it is too noisy to classify at all.
 */
export function classifyChange(
  z: number | null,
  historicalCV: number
): Significance {
  if (historicalCV > 50) {
    return 'noisy'
  }
  // No z-score, or within the ±2 band (inclusive) → not significant.
  if (z === null || (z >= -2 && z <= 2)) {
    return 'neutral'
  }
  return z > 2 ? 'regression' : 'improvement'
}
/**
 * Format a significance classification for display in a report table.
 *
 * @param sig - Classification produced by classifyChange.
 * @param z - The z-score backing the classification, or null when it
 *   could not be computed (e.g. zero historical variance).
 * @returns A short cell string such as '⚠️ z=3.2', 'z=-2.5',
 *   'variance too high' or '—'.
 */
export function formatSignificance(
  sig: Significance,
  z: number | null
): string {
  switch (sig) {
    case 'regression':
      // Fix: guard instead of asserting with `z!` — a null z here used
      // to throw a TypeError at runtime; now it degrades to the emoji.
      return z !== null ? `⚠️ z=${z.toFixed(1)}` : '⚠️'
    case 'improvement':
      return z !== null ? `z=${z.toFixed(1)}` : '—'
    case 'noisy':
      return 'variance too high'
    case 'neutral':
      return z !== null ? `z=${z.toFixed(1)}` : '—'
  }
}
/**
 * Whether a classification should be surfaced prominently in the
 * report. Only regressions are noteworthy.
 */
export function isNoteworthy(sig: Significance): boolean {
  switch (sig) {
    case 'regression':
      return true
    default:
      return false
  }
}

View File

@@ -175,15 +175,9 @@ async function buildBundleReport() {
* @returns {string}
*/
function renderReport(report) {
const parts = ['## Bundle Size Report\n']
parts.push(renderSummary(report))
const parts = [renderCompactHeader(report)]
if (report.categories.length > 0) {
const glance = renderCategoryGlance(report)
if (glance) {
parts.push('\n' + glance)
}
parts.push('\n' + renderCategoryDetails(report))
}
@@ -195,6 +189,24 @@ function renderReport(report) {
)
}
/**
 * Render compact single-line header with key metrics: the overall
 * gzipped bundle size, plus a diff indicator (via formatDiffIndicator)
 * when a baseline exists to compare against.
 * @param {BundleReport} report
 * @returns {string}
 */
function renderCompactHeader(report) {
  const { overall, hasBaseline } = report
  const gzipSize = prettyBytes(overall.metrics.current.gzip)
  let header = `## 📦 Bundle: ${gzipSize} gzip`
  if (hasBaseline) {
    header += ` ${formatDiffIndicator(overall.metrics.diff.gzip)}`
  }
  return header
}
/**
* Render overall summary bullets
* @param {BundleReport} report
@@ -310,7 +322,16 @@ function renderCategoryGlance(report) {
* @returns {string}
*/
function renderCategoryDetails(report) {
const lines = ['<details>', '<summary>Per-category breakdown</summary>', '']
const lines = ['<details>', '<summary>Details</summary>', '']
lines.push(renderSummary(report))
lines.push('')
const glance = renderCategoryGlance(report)
if (glance) {
lines.push(glance)
lines.push('')
}
for (const category of report.categories) {
lines.push(renderCategoryBlock(category, report.hasBaseline))

View File

@@ -8,7 +8,5 @@
"noUnusedParameters": true,
"resolveJsonModule": true
},
"include": [
"**/*.ts"
]
}
"include": ["**/*.ts"]
}

View File

@@ -0,0 +1,49 @@
/**
 * Shim for Vite define variables to make them available during Playwright test execution
 * This file should be imported before any code that uses Vite define variables
 */

// Define global constants that Vite would normally replace at build time
declare global {
  const __COMFYUI_FRONTEND_VERSION__: string
  const __SENTRY_ENABLED__: boolean
  const __SENTRY_DSN__: string
  const __ALGOLIA_APP_ID__: string
  const __ALGOLIA_API_KEY__: string
  const __USE_PROD_CONFIG__: boolean
  const __DISTRIBUTION__: 'desktop' | 'localhost' | 'cloud'
  const __IS_NIGHTLY__: boolean
}

// Writable runtime view of globalThis for the keys declared above,
// plus an optional `window` slot for the Node shim assigned below.
type GlobalWithDefines = typeof globalThis & {
  __COMFYUI_FRONTEND_VERSION__: string
  __SENTRY_ENABLED__: boolean
  __SENTRY_DSN__: string
  __ALGOLIA_APP_ID__: string
  __ALGOLIA_API_KEY__: string
  __USE_PROD_CONFIG__: boolean
  __DISTRIBUTION__: 'desktop' | 'localhost' | 'cloud'
  __IS_NIGHTLY__: boolean
  window?: Record<string, unknown>
}

const globalWithDefines = globalThis as GlobalWithDefines

// Set default values for Playwright test environment
// Version falls back to '1.0.0' when npm_package_version is unset.
globalWithDefines.__COMFYUI_FRONTEND_VERSION__ =
  process.env.npm_package_version || '1.0.0'
globalWithDefines.__SENTRY_ENABLED__ = false
globalWithDefines.__SENTRY_DSN__ = ''
globalWithDefines.__ALGOLIA_APP_ID__ = ''
globalWithDefines.__ALGOLIA_API_KEY__ = ''
globalWithDefines.__USE_PROD_CONFIG__ = false
globalWithDefines.__DISTRIBUTION__ = 'localhost'
globalWithDefines.__IS_NIGHTLY__ = false

// Provide a minimal window shim for Node environment
// This is needed for code that checks window existence during imports
if (typeof window === 'undefined') {
  globalWithDefines.window = {}
}

export {}

View File

@@ -1,9 +1,14 @@
import zipdir from 'zip-dir'
zipdir('./dist', { saveTo: './dist.zip' }, function (err, buffer) {
const sourceDir = process.argv[2] || './dist'
const outputPath = process.argv[3] || './dist.zip'
zipdir(sourceDir, { saveTo: outputPath }, function (err, buffer) {
if (err) {
console.error('Error zipping "dist" directory:', err)
console.error(`Error zipping "${sourceDir}" directory:`, err)
} else {
console.log('Successfully zipped "dist" directory.')
process.stdout.write(
`Successfully zipped "${sourceDir}" directory to "${outputPath}".\n`
)
}
})