Skip to content

deps-dev(deps-dev): Bump typescript-eslint from 8.43.0 to 8.44.0 #28

deps-dev(deps-dev): Bump typescript-eslint from 8.43.0 to 8.44.0

deps-dev(deps-dev): Bump typescript-eslint from 8.43.0 to 8.44.0 #28

Workflow file for this run

name: Performance Benchmarks

# Triggers: source/benchmark changes, weekly schedule, and manual dispatch.
on:
  push:
    branches: [main]
    paths:
      - 'src/**'
      - '__tests__/**/*.bench.ts'
      - 'package.json'
      - 'pnpm-lock.yaml'
  pull_request:
    paths:
      - 'src/**'
      - '__tests__/**/*.bench.ts'
      - 'package.json'
      - 'pnpm-lock.yaml'
  schedule:
    # Run performance benchmarks weekly to track trends
    - cron: '0 6 * * 2' # Every Tuesday at 6 AM UTC
  workflow_dispatch:

permissions:
  contents: write
  pages: write
  id-token: write
  pull-requests: write

# One run per ref; superseded runs are cancelled.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

env:
  FORCE_COLOR: 1
  # Performance regression threshold (%)
  REGRESSION_THRESHOLD: 5
jobs:
  benchmark:
    name: Performance Benchmarks (Node.js ${{ matrix.node-version }})
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        node-version: [22, 24]
    # NOTE(review): a matrix job exposes a single shared set of job outputs;
    # each leg overwrites `regression-detected`, so downstream jobs only see
    # the value written by whichever leg finishes last — per-version results
    # are NOT available via this output.
    outputs:
      regression-detected: ${{ steps.regression-check.outputs.regression }}
    steps:
      - name: Checkout code
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
        with:
          # Full history (fetch-depth: 0) so baseline comparisons can see past commits
          fetch-depth: 0

      - name: Setup pnpm
        uses: pnpm/action-setup@f2b2b233b538f500472c7274c7012f57857d8ce0 # v4.1.0

      - name: Setup Node.js ${{ matrix.node-version }}
        uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0
        with:
          node-version: ${{ matrix.node-version }}

      - name: Setup mise
        uses: jdx/mise-action@5ac50f778e26fac95da98d50503682459e86d566 # v3.2.0
        with:
          install: true
          cache: true

      - name: Install dependencies
        run: pnpm install --frozen-lockfile

      - name: Build project
        run: |
          pnpm run build
          pnpm run package

      - name: Run performance benchmarks
        run: |
          echo "Running performance benchmarks for Node.js ${{ matrix.node-version }}..."
          # Run benchmarks and capture output; on failure, write a fallback
          # JSON document so later steps always have a parseable file.
          pnpm run bench --reporter=json > "benchmark-results-node${{ matrix.node-version }}.json" || {
            echo "⚠️ Benchmarks failed, creating fallback results"
            {
              echo "{"
              echo " \"benchmarks\": [],"
              echo " \"errors\": [\"Benchmark suite not yet implemented\"],"
              echo " \"timestamp\": \"$(date -u +%Y-%m-%dT%H:%M:%SZ)\""
              echo "}"
            } > "benchmark-results-node${{ matrix.node-version }}.json"
          }
          # Also generate human-readable output
          pnpm run bench --reporter=verbose > "benchmark-results-node${{ matrix.node-version }}.txt" || {
            echo "Benchmark suite not yet implemented" > "benchmark-results-node${{ matrix.node-version }}.txt"
          }

      - name: Analyze bundle performance
        run: |
          echo "Analyzing bundle performance for Node.js ${{ matrix.node-version }}..."
          # Bundle size analysis (BSD `stat -f%z` first, GNU `stat -c%s` fallback)
          BUNDLE_SIZE=$(stat -f%z dist/index.js 2>/dev/null || stat -c%s dist/index.js)
          BUNDLE_SIZE_KB=$((BUNDLE_SIZE / 1024))
          BUNDLE_SIZE_GZIP=$(gzip -c dist/index.js | wc -c | tr -d ' ')
          BUNDLE_SIZE_GZIP_KB=$((BUNDLE_SIZE_GZIP / 1024))
          NODE_VERSION="${{ matrix.node-version }}"
          # Create performance metrics.
          # NOTE(review): the health thresholds below (688128 B = 672 KB,
          # 716800 B = 700 KB) disagree with the emitted "limits" object
          # (512000/400000) — confirm which baseline is authoritative.
          cat > performance-metrics-node${{ matrix.node-version }}.json << EOF
          {
            "timestamp": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
            "nodeVersion": "$NODE_VERSION",
            "bundle": {
              "size": $BUNDLE_SIZE,
              "sizeKB": $BUNDLE_SIZE_KB,
              "gzipSize": $BUNDLE_SIZE_GZIP,
              "gzipSizeKB": $BUNDLE_SIZE_GZIP_KB
            },
            "limits": {
              "githubActionsSizeLimit": 512000,
              "recommended": 400000
            },
            "performance": {
              "bundleHealthy": $([ "$BUNDLE_SIZE" -lt 688128 ] && echo "true" || echo "false"),
              "withinLimits": $([ "$BUNDLE_SIZE" -lt 716800 ] && echo "true" || echo "false")
            }
          }
          EOF
          echo "📦 Bundle Analysis for Node.js $NODE_VERSION:"
          echo " Size: ${BUNDLE_SIZE_KB} KB (${BUNDLE_SIZE} bytes)"
          echo " Gzipped: ${BUNDLE_SIZE_GZIP_KB} KB (${BUNDLE_SIZE_GZIP} bytes)"
          echo " Current Baseline: 672 KB"
          echo " Status: $([ $BUNDLE_SIZE_KB -le 672 ] && echo "✅ Excellent" || [ $BUNDLE_SIZE_KB -lt 700 ] && echo "⚠️ Acceptable" || echo "❌ Regression")"

      - name: Load baseline performance data
        id: load-baseline
        run: |
          echo "Loading baseline performance data..."
          # Try to get the published baseline from GitHub Pages; fall back to
          # seeding a fresh baseline from the current run's metrics.
          if curl -s -f "https://santosr2.github.io/conditional-paths-action/performance/performance-baseline-node${{ matrix.node-version }}.json" > baseline-node${{ matrix.node-version }}.json 2>/dev/null; then
            echo "✅ Loaded baseline from GitHub Pages"
            echo "baseline-available=true" >> "$GITHUB_OUTPUT"
          else
            echo "⚠️ No baseline available, will create one"
            echo "baseline-available=false" >> "$GITHUB_OUTPUT"
            # Create initial baseline
            cp performance-metrics-node${{ matrix.node-version }}.json baseline-node${{ matrix.node-version }}.json
          fi

      - name: Compare performance with baseline
        id: regression-check
        run: |
          echo "Comparing performance with baseline for Node.js ${{ matrix.node-version }}..."
          # Extract current and baseline bundle sizes
          current_size=$(jq '.bundle.size' performance-metrics-node${{ matrix.node-version }}.json)
          baseline_size=$(jq '.bundle.size' baseline-node${{ matrix.node-version }}.json)
          # Calculate percentage change (bc for fixed-point arithmetic)
          if [ "$baseline_size" -gt 0 ]; then
            size_change=$(echo "scale=2; (($current_size - $baseline_size) * 100) / $baseline_size" | bc -l)
            echo "📊 Performance Comparison:"
            echo " Baseline: $baseline_size bytes"
            echo " Current: $current_size bytes"
            echo " Change: ${size_change}%"
            # Check for regression against REGRESSION_THRESHOLD (workflow-level env)
            if (( $(echo "$size_change > $REGRESSION_THRESHOLD" | bc -l) )); then
              echo "❌ Performance regression detected: ${size_change}% increase"
              echo "regression=true" >> "$GITHUB_OUTPUT"
              echo "regression-details=Bundle size increased by ${size_change}% (threshold: ${REGRESSION_THRESHOLD}%)" >> "$GITHUB_OUTPUT"
            else
              echo "✅ Performance within acceptable limits"
              echo "regression=false" >> "$GITHUB_OUTPUT"
            fi
          else
            echo "⚠️ Invalid baseline, creating new baseline"
            echo "regression=false" >> "$GITHUB_OUTPUT"
          fi

      - name: Generate performance report
        run: |
          echo "Generating performance report for Node.js ${{ matrix.node-version }}..."
          # Create comprehensive performance report. The heredoc delimiter is
          # quoted ('EOF') so the shell expands nothing; the ${{ }} expression
          # is substituted earlier by the Actions template engine.
          cat > performance-report-node${{ matrix.node-version }}.md << 'EOF'
          # 📊 Performance Report - Node.js ${{ matrix.node-version }}
          ## 📦 Bundle Analysis
          EOF
          # Add bundle metrics
          bundle_size_kb=$(jq '.bundle.sizeKB' performance-metrics-node${{ matrix.node-version }}.json)
          gzip_size_kb=$(jq '.bundle.gzipSizeKB' performance-metrics-node${{ matrix.node-version }}.json)
          is_healthy=$(jq '.performance.bundleHealthy' performance-metrics-node${{ matrix.node-version }}.json)
          cat >> performance-report-node${{ matrix.node-version }}.md << EOF
          | Metric | Value | Status |
          |--------|-------|--------|
          | Bundle Size | ${bundle_size_kb} KB | $([ "$is_healthy" = "true" ] && echo "✅ Good" || echo "⚠️ Needs attention") |
          | Gzipped Size | ${gzip_size_kb} KB | - |
          | Current Baseline | 672 KB | $([ "$bundle_size_kb" -le 672 ] && echo "✅ At baseline" || echo "⚠️ Above baseline") |
          ## 🏃 Benchmark Results
          EOF
          # Add benchmark results if available
          if jq -e '.benchmarks | length > 0' benchmark-results-node${{ matrix.node-version }}.json > /dev/null 2>&1; then
            {
              echo "Benchmark results available:"
              echo '```'
              cat benchmark-results-node${{ matrix.node-version }}.txt
              echo '```'
            } >> performance-report-node${{ matrix.node-version }}.md
          else
            {
              echo "📝 Benchmark suite not yet implemented. To add benchmarks:"
              echo ""
              echo "1. Create \`__tests__/*.bench.ts\` files using Vitest"
              echo "2. Add benchmark functions with \`bench()\`"
              echo "3. Run \`pnpm run bench\` locally to test"
            } >> performance-report-node${{ matrix.node-version }}.md
          fi
          {
            echo ""
            echo "*Report generated: $(date -u)*"
          } >> performance-report-node${{ matrix.node-version }}.md

      - name: Upload performance artifacts
        uses: actions/upload-artifact@de65e23aa2b7e23d713bb51fbfcb6d502f8667d8 # v4.6.2
        with:
          name: performance-node${{ matrix.node-version }}-${{ github.sha }}
          path: |
            performance-metrics-node${{ matrix.node-version }}.json
            benchmark-results-node${{ matrix.node-version }}.json
            benchmark-results-node${{ matrix.node-version }}.txt
            performance-report-node${{ matrix.node-version }}.md
            baseline-node${{ matrix.node-version }}.json
          retention-days: 30
performance-gate:
name: Performance Regression Gate
runs-on: ubuntu-latest
needs: benchmark
if: always()
steps:
- name: Check for performance regressions
run: |
regression_22="${{ needs.benchmark.outputs.regression-detected }}"
regression_24="${{ needs.benchmark.outputs.regression-detected }}"
echo "Performance regression check:"
echo " Node.js 22: $regression_22"
echo " Node.js 24: $regression_24"
if [ "$regression_22" = "true" ] || [ "$regression_24" = "true" ]; then
echo "❌ Performance regression detected!"
echo ""
echo "A significant performance regression has been detected."
echo "This typically means the bundle size has increased beyond the threshold."
echo ""
echo "To resolve:"
echo "1. Review recent changes that might affect bundle size"
echo "2. Consider optimizing imports or dependencies"
echo "3. Run 'pnpm run package' locally and check dist/index.js size"
echo "4. If the increase is intentional, update the baseline"
exit 1
else
echo "✅ No performance regressions detected"
fi
publish-performance:
name: Publish Performance Reports
runs-on: ubuntu-latest
needs: [benchmark, performance-gate]
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
permissions:
pages: write
id-token: write
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}/performance/
steps:
- name: Checkout code
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Download performance artifacts
uses: actions/download-artifact@abefc31eafcfbdf6c5336127c1346fdae79ff41c # v5.0.0
with:
pattern: 'performance-node*-${{ github.sha }}'
merge-multiple: true
- name: Create performance site
run: |
mkdir -p _site/performance
# Create main performance page
cat > _site/performance/index.html << 'EOF'
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Performance Reports - conditional-paths-action</title>
<style>
body { font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; margin: 40px; line-height: 1.6; }
.matrix { display: grid; grid-template-columns: 1fr 1fr; gap: 20px; }
.node-card { border: 1px solid #e1e4e8; border-radius: 8px; padding: 20px; }
.metric { display: flex; justify-content: space-between; margin: 10px 0; }
.status-good { color: #28a745; }
.status-warning { color: #ffc107; }
.status-error { color: #dc3545; }
.back { margin-bottom: 20px; }
.back a { color: #0366d6; text-decoration: none; }
</style>
</head>
<body>
<div class="back">
<a href="../">← Back to Documentation</a>
</div>
<h1>⚡ Performance Reports</h1>
<p>Real-time performance metrics and benchmarks for conditional-paths-action</p>
<h2>📊 Node.js Compatibility Matrix</h2>
<div class="matrix">
<div class="node-card">
<h3>Node.js 22 (Development)</h3>
<div id="node22-metrics">Loading...</div>
</div>
<div class="node-card">
<h3>Node.js 24 (Runtime)</h3>
<div id="node24-metrics">Loading...</div>
</div>
</div>
<h2>📈 Performance Trends</h2>
<p>Performance metrics are tracked over time to detect regressions and improvements.</p>
<h2>🎯 Performance Targets</h2>
<ul>
<li><strong>Bundle Size:</strong> ≤ 672 KB (excellent/baseline), < 700 KB (acceptable)</li>
<li><strong>Regression Threshold:</strong> < 5% increase from baseline</li>
<li><strong>Current Baseline:</strong> 672 KB</li>
</ul>
<script>
Promise.all([
fetch('performance-metrics-node22.json').then(r => r.json()).catch(() => null),
fetch('performance-metrics-node24.json').then(r => r.json()).catch(() => null)
]).then(([metrics22, metrics24]) => {
if (metrics22) {
document.getElementById('node22-metrics').innerHTML = formatMetrics(metrics22);
}
if (metrics24) {
document.getElementById('node24-metrics').innerHTML = formatMetrics(metrics24);
}
});
function formatMetrics(metrics) {
const bundleHealthy = metrics.performance?.bundleHealthy;
const statusClass = bundleHealthy ? 'status-good' : 'status-warning';
return `
<div class="metric">
<span>Bundle Size:</span>
<span class="${statusClass}">${metrics.bundle.sizeKB} KB</span>
</div>
<div class="metric">
<span>Gzipped:</span>
<span>${metrics.bundle.gzipSizeKB} KB</span>
</div>
<div class="metric">
<span>Status:</span>
<span class="${statusClass}">${bundleHealthy ? '✅ Healthy' : '⚠️ Needs attention'}</span>
</div>
<div class="metric">
<span>Last Updated:</span>
<span>${new Date(metrics.timestamp).toLocaleDateString()}</span>
</div>
`;
}
</script>
</body>
</html>
EOF
# Copy performance data files
cp performance-metrics-node*.json _site/performance/ 2>/dev/null || true
cp benchmark-results-node*.json _site/performance/ 2>/dev/null || true
cp performance-report-node*.md _site/performance/ 2>/dev/null || true
- name: Setup performance baselines
run: |
# Copy baseline files to the site for future comparisons via GitHub Pages
cp performance-metrics-node22.json _site/performance/performance-baseline-node22.json
cp performance-metrics-node24.json _site/performance/performance-baseline-node24.json
echo "✅ Performance baselines prepared for GitHub Pages deployment"
- name: Setup GitHub Pages
uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b # v5.2.0
- name: Upload performance site to Pages
uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b # v3.1.0
with:
path: _site/
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e # v4.1.0
pr-comment:
name: Comment Performance Results
runs-on: ubuntu-latest
needs: [benchmark, performance-gate]
if: github.event_name == 'pull_request'
steps:
- name: Download performance artifacts
uses: actions/download-artifact@abefc31eafcfbdf6c5336127c1346fdae79ff41c # v5.0.0
with:
pattern: 'performance-node*-${{ github.sha }}'
merge-multiple: true
- name: Comment performance results on PR
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
const fs = require('fs');
let comment = '## ⚡ Performance Analysis Results\n\n';
comment += '<!-- performance-bot-comment -->\n\n'; // Unique identifier for finding this comment
// Load performance data for both Node versions
let node22Data, node24Data;
try {
node22Data = JSON.parse(fs.readFileSync('performance-metrics-node22.json', 'utf8'));
} catch (e) {
console.log('Node.js 22 performance data not available');
}
try {
node24Data = JSON.parse(fs.readFileSync('performance-metrics-node24.json', 'utf8'));
} catch (e) {
console.log('Node.js 24 performance data not available');
}
comment += '| Node.js Version | Bundle Size | Status | Change |\n';
comment += '|-----------------|-------------|---------|--------|\n';
if (node22Data) {
const healthy22 = node22Data.performance?.bundleHealthy ? '✅' : '⚠️';
comment += `| 22 (Development) | ${node22Data.bundle.sizeKB} KB | ${healthy22} | ${'${{ needs.benchmark.outputs.regression-detected }}' === 'true' ? '📈 Regression' : '✅ OK'} |\n`;
}
if (node24Data) {
const healthy24 = node24Data.performance?.bundleHealthy ? '✅' : '⚠️';
comment += `| 24 (Runtime) | ${node24Data.bundle.sizeKB} KB | ${healthy24} | ${'${{ needs.benchmark.outputs.regression-detected }}' === 'true' ? '📈 Regression' : '✅ OK'} |\n`;
}
comment += '\n';
const hasRegression = '${{ needs.benchmark.outputs.regression-detected }}' === 'true' ||
'${{ needs.benchmark.outputs.regression-detected }}' === 'true';
if (hasRegression) {
comment += '⚠️ **Performance regression detected!** Bundle size increased beyond the acceptable threshold.\n\n';
} else {
comment += '✅ **No performance regressions detected.** Bundle size is within acceptable limits.\n\n';
}
comment += '### 📊 Thresholds\n';
comment += '- **Excellent/Baseline**: ≤ 672 KB\n';
comment += '- **Acceptable**: < 700 KB\n';
comment += '- **Regression Alert**: > 5% increase\n\n';
comment += '*Performance data updated on every commit. [View detailed reports →](https://santosr2.github.io/conditional-paths-action/performance/)*';
// Find existing comment to update
const { data: comments } = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.payload.pull_request.number
});
const botComment = comments.find(comment =>
comment.body.includes('<!-- performance-bot-comment -->')
);
if (botComment) {
// Update existing comment
await github.rest.issues.updateComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: botComment.id,
body: comment
});
console.log('✅ Updated existing performance comment');
} else {
// Create new comment only if none exists
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.payload.pull_request.number,
body: comment
});
console.log('✅ Created new performance comment');
}