Skip to content

ci: add Dependabot, MegaLinter workflows #2

ci: add Dependabot, MegaLinter workflows

ci: add Dependabot, MegaLinter workflows #2

Workflow file for this run

---
# Runs the benchmark suite on every PR (and on demand), compares against a
# cached baseline, uploads raw results, and posts a summary comment on the PR.
name: Benchmarks

on:
  pull_request:
    branches: [main]
  # Allow manual trigger
  workflow_dispatch:
    inputs:
      save_baseline:
        description: 'Save results as new baseline'
        required: false
        default: 'false'

# Least-privilege token: the github-script step needs write access to create
# the PR comment (issues.createComment); everything else is read-only.
permissions:
  contents: read
  issues: write
  pull-requests: write

jobs:
  benchmark:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python 3.11
        uses: actions/setup-python@v5
        with:
          python-version: "3.11"
          cache: 'pip'
          cache-dependency-path: assistant/requirements.txt

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest pytest-asyncio pytest-benchmark
          pip install -r assistant/requirements.txt

      # Restore-only on purpose: plain actions/cache@v4 also SAVES at job end
      # under the restore key, which would let any PR run overwrite the shared
      # baseline. Saving is handled explicitly by the cache/save step below.
      - name: Download baseline (if exists)
        uses: actions/cache/restore@v4
        with:
          path: assistant/memory/benchmarks/baseline.json
          key: benchmark-baseline-${{ github.base_ref || 'main' }}
          restore-keys: |
            benchmark-baseline-

      - name: Run benchmarks
        working-directory: assistant
        run: |
          python -m benchmarks --compare --ci --threshold 20
        env:
          # Dummy key so the app's config loads; benchmarks must not hit the API.
          OPENAI_API_KEY: "test-key-not-real"

      - name: Save baseline (manual trigger only)
        if: github.event.inputs.save_baseline == 'true'
        working-directory: assistant
        run: |
          python -m benchmarks --save-baseline

      - name: Upload benchmark results
        uses: actions/upload-artifact@v4
        if: always()  # keep results even when the comparison step fails
        with:
          name: benchmark-results
          path: |
            assistant/memory/benchmarks/latest.json
            assistant/memory/benchmarks/history.json
          retention-days: 30

      # Only runs on a workflow_dispatch from main (pull_request refs are
      # refs/pull/...), so PR runs can never publish a new baseline.
      - name: Save baseline cache (on main branch)
        if: github.ref == 'refs/heads/main' && success()
        uses: actions/cache/save@v4
        with:
          path: assistant/memory/benchmarks/baseline.json
          key: benchmark-baseline-main-${{ github.sha }}

      - name: Comment PR with results
        if: github.event_name == 'pull_request' && always()
        uses: actions/github-script@v7
        with:
          script: |
            const fs = require('fs');
            let body = '## Performance Benchmark Results\n\n';
            try {
              const results = JSON.parse(fs.readFileSync('assistant/memory/benchmarks/latest.json', 'utf8'));
              if (results.benchmarks && results.benchmarks.length > 0) {
                body += '| Benchmark | Mean | StdDev | Rounds |\n';
                body += '|-----------|------|--------|--------|\n';
                results.benchmarks.slice(0, 15).forEach(bench => {
                  const mean = (bench.stats.mean * 1000).toFixed(3);
                  const stddev = (bench.stats.stddev * 1000).toFixed(3);
                  body += `| ${bench.name} | ${mean}ms | ±${stddev}ms | ${bench.stats.rounds} |\n`;
                });
                if (results.benchmarks.length > 15) {
                  body += `\n_...and ${results.benchmarks.length - 15} more benchmarks_\n`;
                }
              } else {
                body += '_No benchmark results found_\n';
              }
            } catch (e) {
              body += `_Could not parse benchmark results: ${e.message}_\n`;
            }
            body += '\n---\n_Generated by benchmark workflow_';
            github.rest.issues.createComment({
              issue_number: context.issue.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body: body
            });