Datasets:
Add professional benchmark suite
Browse files- README.md +41 -0
- cli.ts +163 -0
- config/defaults.ts +40 -0
- fixtures/headless-offline.stream.jsonl +4 -0
- fixtures/headless-offline.txt +1 -0
- harness/process.ts +66 -0
- harness/runner.ts +75 -0
- observability/adapters.ts +68 -0
- reporting/report.ts +167 -0
- scenarios/correctnessTools.ts +168 -0
- scenarios/headless.ts +201 -0
- scenarios/restoration.ts +48 -0
- scenarios/startupCommand.ts +199 -0
- types.ts +126 -0
- utils/files.ts +17 -0
- utils/stats.ts +30 -0
README.md
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Benchmark Suite / 基准测试套件
|
| 2 |
+
|
| 3 |
+
Professional benchmark harness for this restored Claude Code repository.
|
| 4 |
+
|
| 5 |
+
面向本还原版 Claude Code 仓库的专业级 Benchmark 框架。
|
| 6 |
+
|
| 7 |
+
## Commands / 命令
|
| 8 |
+
|
| 9 |
+
- `bun run bench:smoke` - quick hybrid pass for local validation
|
| 10 |
+
快速混合模式验证(本地冒烟)
|
| 11 |
+
- `bun run bench:full` - full hybrid suite
|
| 12 |
+
完整混合模式基准
|
| 13 |
+
- `bun run bench:offline` - deterministic offline-only run
|
| 14 |
+
仅离线可复现模式
|
| 15 |
+
- `bun run bench:online-sample` - online-only sampled run (requires credentials)
|
| 16 |
+
仅在线抽样模式(需要凭据)
|
| 17 |
+
- `bun run bench:compare --baseline=<path> --current=<path>` - compare two JSON reports
|
| 18 |
+
对比两份 JSON 报告
|
| 19 |
+
|
| 20 |
+
## Scenario Mapping / 场景映射
|
| 21 |
+
|
| 22 |
+
- `B01` version fast path / 版本快路径
|
| 23 |
+
- `B02` startup phase profiling / 启动分段剖析
|
| 24 |
+
- `B03` bare-mode delta / bare 模式差分
|
| 25 |
+
- `B04` command catalog load / 命令目录加载
|
| 26 |
+
- `B05` headless single round (offline fixture + online sample) / 无头单轮(离线夹具 + 在线抽检)
|
| 27 |
+
- `B06` headless streaming format and timing checks / 无头流式格式与时延检查
|
| 28 |
+
- `B07` slash/queue batching correctness / 斜杠命令与队列批处理正确性
|
| 29 |
+
- `B08` tool orchestration pipeline behavior / 工具编排流水线行为
|
| 30 |
+
- `B09` restoration health gate via `dev-entry` / 通过 `dev-entry` 的恢复健康门禁
|
| 31 |
+
|
| 32 |
+
## Outputs / 输出
|
| 33 |
+
|
| 34 |
+
Reports are written to `benchmark/results/`.
|
| 35 |
+
|
| 36 |
+
报告会写入 `benchmark/results/` 目录:
|
| 37 |
+
|
| 38 |
+
- `benchmark-<run>-<timestamp>.json`
|
| 39 |
+
- `latest.json`
|
| 40 |
+
- `latest-summary.md`
|
| 41 |
+
- `comparison.md` (when running compare / 运行 compare 时生成)
|
cli.ts
ADDED
|
@@ -0,0 +1,163 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import { mkdir } from 'node:fs/promises'
|
| 2 |
+
import { basename, isAbsolute, join, resolve } from 'node:path'
|
| 3 |
+
import { createConfig } from './config/defaults.js'
|
| 4 |
+
import { runScenarios } from './harness/runner.js'
|
| 5 |
+
import {
|
| 6 |
+
collectBootstrapObservability,
|
| 7 |
+
collectProcessObservability,
|
| 8 |
+
} from './observability/adapters.js'
|
| 9 |
+
import {
|
| 10 |
+
buildBenchmarkReport,
|
| 11 |
+
buildComparisonText,
|
| 12 |
+
} from './reporting/report.js'
|
| 13 |
+
import { createCorrectnessAndToolScenarios } from './scenarios/correctnessTools.js'
|
| 14 |
+
import { createHeadlessScenarios } from './scenarios/headless.js'
|
| 15 |
+
import { createRestorationScenario } from './scenarios/restoration.js'
|
| 16 |
+
import { createStartupAndCommandScenarios } from './scenarios/startupCommand.js'
|
| 17 |
+
import type { BenchmarkMode, BenchmarkReport, BenchmarkRunKind } from './types.js'
|
| 18 |
+
import { readJsonFile, writeJsonFile, writeTextFile } from './utils/files.js'
|
| 19 |
+
|
| 20 |
+
function parseArg(name: string): string | undefined {
|
| 21 |
+
const hit = process.argv.find(value => value.startsWith(`${name}=`))
|
| 22 |
+
if (!hit) return undefined
|
| 23 |
+
return hit.slice(name.length + 1)
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
function resolveMode(): BenchmarkMode {
|
| 27 |
+
const raw = parseArg('--mode')
|
| 28 |
+
if (raw === 'online' || raw === 'offline' || raw === 'hybrid') return raw
|
| 29 |
+
return 'hybrid'
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
function resolveRunKind(): BenchmarkRunKind {
|
| 33 |
+
const raw = parseArg('--run')
|
| 34 |
+
if (raw === 'smoke' || raw === 'full') return raw
|
| 35 |
+
return 'smoke'
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
function resolveOutputDir(rootDir: string): string {
|
| 39 |
+
const input = parseArg('--out')
|
| 40 |
+
if (!input) return join(rootDir, 'benchmark', 'results')
|
| 41 |
+
return isAbsolute(input) ? input : resolve(rootDir, input)
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
async function runBench(): Promise<void> {
|
| 45 |
+
const rootDir = process.cwd()
|
| 46 |
+
const nowIso = new Date().toISOString()
|
| 47 |
+
const mode = resolveMode()
|
| 48 |
+
const runKind = resolveRunKind()
|
| 49 |
+
const config = createConfig(mode, runKind)
|
| 50 |
+
const outputDir = resolveOutputDir(rootDir)
|
| 51 |
+
await mkdir(outputDir, { recursive: true })
|
| 52 |
+
const context = {
|
| 53 |
+
rootDir,
|
| 54 |
+
mode,
|
| 55 |
+
runKind,
|
| 56 |
+
nowIso,
|
| 57 |
+
outputDir,
|
| 58 |
+
}
|
| 59 |
+
const scenarios = [
|
| 60 |
+
...createStartupAndCommandScenarios(config),
|
| 61 |
+
...createHeadlessScenarios(config),
|
| 62 |
+
...createCorrectnessAndToolScenarios(config),
|
| 63 |
+
createRestorationScenario(config),
|
| 64 |
+
]
|
| 65 |
+
const summaries = await runScenarios({
|
| 66 |
+
scenarios,
|
| 67 |
+
context,
|
| 68 |
+
config,
|
| 69 |
+
})
|
| 70 |
+
const report = buildBenchmarkReport({
|
| 71 |
+
rootDir,
|
| 72 |
+
generatedAt: nowIso,
|
| 73 |
+
config,
|
| 74 |
+
scenarios: summaries,
|
| 75 |
+
observability: {
|
| 76 |
+
bootstrapState: collectBootstrapObservability(),
|
| 77 |
+
process: collectProcessObservability(),
|
| 78 |
+
},
|
| 79 |
+
})
|
| 80 |
+
const reportPath = join(
|
| 81 |
+
outputDir,
|
| 82 |
+
`benchmark-${runKind}-${nowIso.replace(/[:.]/gu, '-')}.json`,
|
| 83 |
+
)
|
| 84 |
+
await writeJsonFile(reportPath, report)
|
| 85 |
+
const latestPath = join(outputDir, 'latest.json')
|
| 86 |
+
await writeJsonFile(latestPath, report)
|
| 87 |
+
await writeTextFile(
|
| 88 |
+
join(outputDir, 'latest-summary.md'),
|
| 89 |
+
renderSummaryMarkdown(report, basename(reportPath)),
|
| 90 |
+
)
|
| 91 |
+
process.stdout.write(`Benchmark report written: ${reportPath}\n`)
|
| 92 |
+
process.stdout.write(
|
| 93 |
+
`Quality gate: ${report.qualityGate.passed ? 'PASS' : 'FAIL'}\n`,
|
| 94 |
+
)
|
| 95 |
+
if (!report.qualityGate.passed) {
|
| 96 |
+
for (const reason of report.qualityGate.reasons) {
|
| 97 |
+
process.stdout.write(`- ${reason}\n`)
|
| 98 |
+
}
|
| 99 |
+
process.exitCode = 1
|
| 100 |
+
}
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
function renderSummaryMarkdown(
|
| 104 |
+
report: BenchmarkReport,
|
| 105 |
+
fileName: string,
|
| 106 |
+
): string {
|
| 107 |
+
const lines: string[] = []
|
| 108 |
+
lines.push('# Benchmark Summary')
|
| 109 |
+
lines.push('')
|
| 110 |
+
lines.push(`- Report: ${fileName}`)
|
| 111 |
+
lines.push(`- Mode: ${report.mode}`)
|
| 112 |
+
lines.push(`- Run: ${report.runKind}`)
|
| 113 |
+
lines.push(
|
| 114 |
+
`- Overall score: ${report.aggregate.score.total.toFixed(2)} (latency ${report.aggregate.score.latency.toFixed(2)}, stability ${report.aggregate.score.stability.toFixed(2)}, quality ${report.aggregate.score.quality.toFixed(2)}, cost ${report.aggregate.score.cost.toFixed(2)})`,
|
| 115 |
+
)
|
| 116 |
+
lines.push(`- Success rate: ${report.aggregate.successRate.toFixed(2)}%`)
|
| 117 |
+
lines.push('')
|
| 118 |
+
lines.push('## Scenario Results')
|
| 119 |
+
for (const scenario of report.scenarios) {
|
| 120 |
+
const p95 = scenario.durationMs?.p95 ?? 0
|
| 121 |
+
lines.push(
|
| 122 |
+
`- ${scenario.id} ${scenario.name}: success ${scenario.successRate.toFixed(2)}%, p95 ${p95.toFixed(2)}ms`,
|
| 123 |
+
)
|
| 124 |
+
}
|
| 125 |
+
lines.push('')
|
| 126 |
+
lines.push(
|
| 127 |
+
`- Quality gate: ${report.qualityGate.passed ? 'PASS' : 'FAIL'} (${report.qualityGate.reasons.length} issue(s))`,
|
| 128 |
+
)
|
| 129 |
+
if (!report.qualityGate.passed) {
|
| 130 |
+
for (const reason of report.qualityGate.reasons) {
|
| 131 |
+
lines.push(` - ${reason}`)
|
| 132 |
+
}
|
| 133 |
+
}
|
| 134 |
+
lines.push('')
|
| 135 |
+
return `${lines.join('\n')}\n`
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
async function runCompare(): Promise<void> {
|
| 139 |
+
const rootDir = process.cwd()
|
| 140 |
+
const outputDir = resolveOutputDir(rootDir)
|
| 141 |
+
const baselinePath = parseArg('--baseline')
|
| 142 |
+
const currentPath = parseArg('--current')
|
| 143 |
+
if (!baselinePath || !currentPath) {
|
| 144 |
+
throw new Error('--baseline and --current are required for compare mode')
|
| 145 |
+
}
|
| 146 |
+
const baseline = await readJsonFile<BenchmarkReport>(
|
| 147 |
+
isAbsolute(baselinePath) ? baselinePath : resolve(rootDir, baselinePath),
|
| 148 |
+
)
|
| 149 |
+
const current = await readJsonFile<BenchmarkReport>(
|
| 150 |
+
isAbsolute(currentPath) ? currentPath : resolve(rootDir, currentPath),
|
| 151 |
+
)
|
| 152 |
+
const text = buildComparisonText(baseline, current)
|
| 153 |
+
const outPath = join(outputDir, 'comparison.md')
|
| 154 |
+
await writeTextFile(outPath, text)
|
| 155 |
+
process.stdout.write(`Comparison written: ${outPath}\n`)
|
| 156 |
+
}
|
| 157 |
+
|
| 158 |
+
const command = parseArg('--command') ?? 'run'
|
| 159 |
+
if (command === 'compare') {
|
| 160 |
+
await runCompare()
|
| 161 |
+
} else {
|
| 162 |
+
await runBench()
|
| 163 |
+
}
|
config/defaults.ts
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import type { BenchmarkConfig, BenchmarkMode, BenchmarkRunKind } from '../types.js'
|
| 2 |
+
|
| 3 |
+
export function createConfig(
|
| 4 |
+
mode: BenchmarkMode,
|
| 5 |
+
runKind: BenchmarkRunKind,
|
| 6 |
+
): BenchmarkConfig {
|
| 7 |
+
const isSmoke = runKind === 'smoke'
|
| 8 |
+
return {
|
| 9 |
+
mode,
|
| 10 |
+
runKind,
|
| 11 |
+
iterations: {
|
| 12 |
+
startup: isSmoke ? 2 : 8,
|
| 13 |
+
commandLoad: isSmoke ? 3 : 16,
|
| 14 |
+
queueCorrectness: isSmoke ? 4 : 24,
|
| 15 |
+
toolPipeline: isSmoke ? 3 : 20,
|
| 16 |
+
},
|
| 17 |
+
timeoutsMs: {
|
| 18 |
+
command: isSmoke ? 20_000 : 60_000,
|
| 19 |
+
onlineHeadless: isSmoke ? 30_000 : 120_000,
|
| 20 |
+
},
|
| 21 |
+
weights: {
|
| 22 |
+
latency: 0.3,
|
| 23 |
+
stability: 0.3,
|
| 24 |
+
quality: 0.25,
|
| 25 |
+
cost: 0.15,
|
| 26 |
+
},
|
| 27 |
+
thresholds: {
|
| 28 |
+
minimumSuccessRatePct: isSmoke ? 85 : 95,
|
| 29 |
+
maximumP95MsByCategory: {
|
| 30 |
+
startup: isSmoke ? 20_000 : 8_000,
|
| 31 |
+
commands: isSmoke ? 15_000 : 5_000,
|
| 32 |
+
headless: isSmoke ? 60_000 : 25_000,
|
| 33 |
+
correctness: isSmoke ? 5_000 : 2_000,
|
| 34 |
+
tools: isSmoke ? 5_000 : 2_000,
|
| 35 |
+
restoration: isSmoke ? 20_000 : 10_000,
|
| 36 |
+
},
|
| 37 |
+
maximumMissingImports: 0,
|
| 38 |
+
},
|
| 39 |
+
}
|
| 40 |
+
}
|
fixtures/headless-offline.stream.jsonl
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{"type":"session.start","session_id":"offline-bench"}
|
| 2 |
+
{"type":"assistant.delta","text":"Hello"}
|
| 3 |
+
{"type":"assistant.delta","text":" world"}
|
| 4 |
+
{"type":"assistant.final","text":"Hello world","usage":{"input_tokens":8,"output_tokens":3}}
|
fixtures/headless-offline.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
Hello world
|
harness/process.ts
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import { spawn } from 'node:child_process'
|
| 2 |
+
|
| 3 |
+
export type CommandResult = {
|
| 4 |
+
code: number | null
|
| 5 |
+
signal: NodeJS.Signals | null
|
| 6 |
+
stdout: string
|
| 7 |
+
stderr: string
|
| 8 |
+
durationMs: number
|
| 9 |
+
timedOut: boolean
|
| 10 |
+
}
|
| 11 |
+
|
| 12 |
+
export function runCommand(
|
| 13 |
+
command: string,
|
| 14 |
+
args: string[],
|
| 15 |
+
options: {
|
| 16 |
+
cwd: string
|
| 17 |
+
env?: Record<string, string | undefined>
|
| 18 |
+
timeoutMs: number
|
| 19 |
+
stdinText?: string
|
| 20 |
+
},
|
| 21 |
+
): Promise<CommandResult> {
|
| 22 |
+
return new Promise(resolve => {
|
| 23 |
+
const startedAt = performance.now()
|
| 24 |
+
const child = spawn(command, args, {
|
| 25 |
+
cwd: options.cwd,
|
| 26 |
+
env: {
|
| 27 |
+
...process.env,
|
| 28 |
+
...options.env,
|
| 29 |
+
},
|
| 30 |
+
stdio: 'pipe',
|
| 31 |
+
windowsHide: true,
|
| 32 |
+
})
|
| 33 |
+
let stdout = ''
|
| 34 |
+
let stderr = ''
|
| 35 |
+
let timedOut = false
|
| 36 |
+
|
| 37 |
+
child.stdout.on('data', chunk => {
|
| 38 |
+
stdout += String(chunk)
|
| 39 |
+
})
|
| 40 |
+
child.stderr.on('data', chunk => {
|
| 41 |
+
stderr += String(chunk)
|
| 42 |
+
})
|
| 43 |
+
|
| 44 |
+
if (options.stdinText !== undefined) {
|
| 45 |
+
child.stdin.write(options.stdinText)
|
| 46 |
+
}
|
| 47 |
+
child.stdin.end()
|
| 48 |
+
|
| 49 |
+
const timer = setTimeout(() => {
|
| 50 |
+
timedOut = true
|
| 51 |
+
child.kill()
|
| 52 |
+
}, options.timeoutMs)
|
| 53 |
+
|
| 54 |
+
child.on('close', (code, signal) => {
|
| 55 |
+
clearTimeout(timer)
|
| 56 |
+
resolve({
|
| 57 |
+
code,
|
| 58 |
+
signal,
|
| 59 |
+
stdout,
|
| 60 |
+
stderr,
|
| 61 |
+
durationMs: performance.now() - startedAt,
|
| 62 |
+
timedOut,
|
| 63 |
+
})
|
| 64 |
+
})
|
| 65 |
+
})
|
| 66 |
+
}
|
harness/runner.ts
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import { buildDistribution } from '../utils/stats.js'
|
| 2 |
+
import type {
|
| 3 |
+
BenchmarkConfig,
|
| 4 |
+
RunContext,
|
| 5 |
+
Scenario,
|
| 6 |
+
ScenarioSummary,
|
| 7 |
+
SingleExecution,
|
| 8 |
+
} from '../types.js'
|
| 9 |
+
|
| 10 |
+
function normalizeError(error: unknown): string {
|
| 11 |
+
if (error instanceof Error) return `${error.name}: ${error.message}`
|
| 12 |
+
return String(error)
|
| 13 |
+
}
|
| 14 |
+
|
| 15 |
+
function summarize(
|
| 16 |
+
scenario: Scenario,
|
| 17 |
+
executions: SingleExecution[],
|
| 18 |
+
): ScenarioSummary {
|
| 19 |
+
const successRuns = executions.filter(item => item.ok && !item.skipped).length
|
| 20 |
+
const failedRuns = executions.filter(item => !item.ok && !item.skipped).length
|
| 21 |
+
const skippedRuns = executions.filter(item => item.skipped).length
|
| 22 |
+
const measured = executions.filter(item => !item.skipped).map(item => item.durationMs)
|
| 23 |
+
return {
|
| 24 |
+
id: scenario.id,
|
| 25 |
+
name: scenario.name,
|
| 26 |
+
category: scenario.category,
|
| 27 |
+
description: scenario.description,
|
| 28 |
+
tags: scenario.tags,
|
| 29 |
+
totalRuns: executions.length,
|
| 30 |
+
skippedRuns,
|
| 31 |
+
successRuns,
|
| 32 |
+
failedRuns,
|
| 33 |
+
successRate:
|
| 34 |
+
executions.length === 0
|
| 35 |
+
? 0
|
| 36 |
+
: (successRuns / Math.max(1, successRuns + failedRuns)) * 100,
|
| 37 |
+
durationMs: buildDistribution(measured),
|
| 38 |
+
examples: executions
|
| 39 |
+
.map(item => item.details)
|
| 40 |
+
.filter(Boolean)
|
| 41 |
+
.slice(0, 3) as Array<Record<string, unknown>>,
|
| 42 |
+
errors: executions
|
| 43 |
+
.map(item => item.error)
|
| 44 |
+
.filter((value): value is string => Boolean(value))
|
| 45 |
+
.slice(0, 8),
|
| 46 |
+
}
|
| 47 |
+
}
|
| 48 |
+
|
| 49 |
+
export async function runScenarios({
|
| 50 |
+
scenarios,
|
| 51 |
+
context,
|
| 52 |
+
}: {
|
| 53 |
+
scenarios: Scenario[]
|
| 54 |
+
context: RunContext
|
| 55 |
+
config: BenchmarkConfig
|
| 56 |
+
}): Promise<ScenarioSummary[]> {
|
| 57 |
+
const results: ScenarioSummary[] = []
|
| 58 |
+
for (const scenario of scenarios) {
|
| 59 |
+
try {
|
| 60 |
+
const executions = await scenario.run(context)
|
| 61 |
+
results.push(summarize(scenario, executions))
|
| 62 |
+
} catch (error) {
|
| 63 |
+
results.push(
|
| 64 |
+
summarize(scenario, [
|
| 65 |
+
{
|
| 66 |
+
ok: false,
|
| 67 |
+
durationMs: 0,
|
| 68 |
+
error: normalizeError(error),
|
| 69 |
+
},
|
| 70 |
+
]),
|
| 71 |
+
)
|
| 72 |
+
}
|
| 73 |
+
}
|
| 74 |
+
return results
|
| 75 |
+
}
|
observability/adapters.ts
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import {
|
| 2 |
+
getModelUsage,
|
| 3 |
+
getTotalAPIDuration,
|
| 4 |
+
getTotalAPIDurationWithoutRetries,
|
| 5 |
+
getTotalCacheCreationInputTokens,
|
| 6 |
+
getTotalCacheReadInputTokens,
|
| 7 |
+
getTotalCost,
|
| 8 |
+
getTotalDuration,
|
| 9 |
+
getTotalInputTokens,
|
| 10 |
+
getTotalLinesAdded,
|
| 11 |
+
getTotalLinesRemoved,
|
| 12 |
+
getTotalOutputTokens,
|
| 13 |
+
getTotalWebSearchRequests,
|
| 14 |
+
} from '../../src/cost-tracker.js'
|
| 15 |
+
import {
|
| 16 |
+
getTotalToolDuration,
|
| 17 |
+
getTurnClassifierCount,
|
| 18 |
+
getTurnClassifierDurationMs,
|
| 19 |
+
getTurnHookCount,
|
| 20 |
+
getTurnHookDurationMs,
|
| 21 |
+
getTurnToolCount,
|
| 22 |
+
getTurnToolDurationMs,
|
| 23 |
+
} from '../../src/bootstrap/state.js'
|
| 24 |
+
|
| 25 |
+
export function collectBootstrapObservability(): Record<string, unknown> {
|
| 26 |
+
return {
|
| 27 |
+
session: {
|
| 28 |
+
totalWallDurationMs: getTotalDuration(),
|
| 29 |
+
totalApiDurationMs: getTotalAPIDuration(),
|
| 30 |
+
totalApiDurationWithoutRetriesMs: getTotalAPIDurationWithoutRetries(),
|
| 31 |
+
totalToolDurationMs: getTotalToolDuration(),
|
| 32 |
+
totalCostUsd: getTotalCost(),
|
| 33 |
+
},
|
| 34 |
+
tokens: {
|
| 35 |
+
input: getTotalInputTokens(),
|
| 36 |
+
output: getTotalOutputTokens(),
|
| 37 |
+
cacheReadInput: getTotalCacheReadInputTokens(),
|
| 38 |
+
cacheCreationInput: getTotalCacheCreationInputTokens(),
|
| 39 |
+
webSearchRequests: getTotalWebSearchRequests(),
|
| 40 |
+
},
|
| 41 |
+
edits: {
|
| 42 |
+
linesAdded: getTotalLinesAdded(),
|
| 43 |
+
linesRemoved: getTotalLinesRemoved(),
|
| 44 |
+
},
|
| 45 |
+
perTurn: {
|
| 46 |
+
hookDurationMs: getTurnHookDurationMs(),
|
| 47 |
+
hookCount: getTurnHookCount(),
|
| 48 |
+
toolDurationMs: getTurnToolDurationMs(),
|
| 49 |
+
toolCount: getTurnToolCount(),
|
| 50 |
+
classifierDurationMs: getTurnClassifierDurationMs(),
|
| 51 |
+
classifierCount: getTurnClassifierCount(),
|
| 52 |
+
},
|
| 53 |
+
modelUsage: getModelUsage(),
|
| 54 |
+
}
|
| 55 |
+
}
|
| 56 |
+
|
| 57 |
+
export function collectProcessObservability(): Record<string, unknown> {
|
| 58 |
+
const memory = process.memoryUsage()
|
| 59 |
+
const cpu = process.cpuUsage()
|
| 60 |
+
return {
|
| 61 |
+
pid: process.pid,
|
| 62 |
+
platform: process.platform,
|
| 63 |
+
node: process.version,
|
| 64 |
+
memory,
|
| 65 |
+
cpu,
|
| 66 |
+
uptimeSec: process.uptime(),
|
| 67 |
+
}
|
| 68 |
+
}
|
reporting/report.ts
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import type {
|
| 2 |
+
BenchmarkConfig,
|
| 3 |
+
BenchmarkReport,
|
| 4 |
+
QualityGateResult,
|
| 5 |
+
ScenarioSummary,
|
| 6 |
+
} from '../types.js'
|
| 7 |
+
import { clamp } from '../utils/stats.js'
|
| 8 |
+
|
| 9 |
+
function average(values: number[]): number {
|
| 10 |
+
if (values.length === 0) return 0
|
| 11 |
+
return values.reduce((sum, value) => sum + value, 0) / values.length
|
| 12 |
+
}
|
| 13 |
+
|
| 14 |
+
function computeScore(
|
| 15 |
+
summaries: ScenarioSummary[],
|
| 16 |
+
config: BenchmarkConfig,
|
| 17 |
+
): BenchmarkReport['aggregate']['score'] {
|
| 18 |
+
const p95s = summaries
|
| 19 |
+
.map(summary => summary.durationMs?.p95)
|
| 20 |
+
.filter((value): value is number => typeof value === 'number' && value > 0)
|
| 21 |
+
const avgP95 = average(p95s)
|
| 22 |
+
const latency = clamp(100 - avgP95 / 20, 0, 100)
|
| 23 |
+
const stability = clamp(
|
| 24 |
+
average(summaries.map(summary => summary.successRate)),
|
| 25 |
+
0,
|
| 26 |
+
100,
|
| 27 |
+
)
|
| 28 |
+
const qualityFailures = summaries.filter(summary =>
|
| 29 |
+
summary.tags.includes('correctness'),
|
| 30 |
+
).reduce((sum, item) => sum + item.failedRuns, 0)
|
| 31 |
+
const quality = clamp(100 - qualityFailures * 10, 0, 100)
|
| 32 |
+
const costProxy = clamp(100 - avgP95 / 40, 0, 100)
|
| 33 |
+
const total =
|
| 34 |
+
latency * config.weights.latency +
|
| 35 |
+
stability * config.weights.stability +
|
| 36 |
+
quality * config.weights.quality +
|
| 37 |
+
costProxy * config.weights.cost
|
| 38 |
+
return {
|
| 39 |
+
latency,
|
| 40 |
+
stability,
|
| 41 |
+
quality,
|
| 42 |
+
cost: costProxy,
|
| 43 |
+
total,
|
| 44 |
+
}
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
export function evaluateQualityGate(
|
| 48 |
+
summaries: ScenarioSummary[],
|
| 49 |
+
config: BenchmarkConfig,
|
| 50 |
+
): QualityGateResult {
|
| 51 |
+
const reasons: string[] = []
|
| 52 |
+
for (const summary of summaries) {
|
| 53 |
+
if (summary.successRate < config.thresholds.minimumSuccessRatePct) {
|
| 54 |
+
reasons.push(
|
| 55 |
+
`${summary.id} successRate ${summary.successRate.toFixed(2)}% < ${config.thresholds.minimumSuccessRatePct}%`,
|
| 56 |
+
)
|
| 57 |
+
}
|
| 58 |
+
const maxP95 = config.thresholds.maximumP95MsByCategory[summary.category]
|
| 59 |
+
if (
|
| 60 |
+
maxP95 !== undefined &&
|
| 61 |
+
summary.durationMs &&
|
| 62 |
+
summary.durationMs.p95 > maxP95
|
| 63 |
+
) {
|
| 64 |
+
reasons.push(
|
| 65 |
+
`${summary.id} p95 ${summary.durationMs.p95.toFixed(2)}ms > ${maxP95}ms`,
|
| 66 |
+
)
|
| 67 |
+
}
|
| 68 |
+
if (summary.id === 'B09') {
|
| 69 |
+
const missing =
|
| 70 |
+
summary.examples[0]?.missingRelativeImports as number | undefined
|
| 71 |
+
if (
|
| 72 |
+
typeof missing === 'number' &&
|
| 73 |
+
missing > config.thresholds.maximumMissingImports
|
| 74 |
+
) {
|
| 75 |
+
reasons.push(
|
| 76 |
+
`B09 missing_relative_imports=${missing} exceeds ${config.thresholds.maximumMissingImports}`,
|
| 77 |
+
)
|
| 78 |
+
}
|
| 79 |
+
}
|
| 80 |
+
}
|
| 81 |
+
return {
|
| 82 |
+
passed: reasons.length === 0,
|
| 83 |
+
reasons,
|
| 84 |
+
}
|
| 85 |
+
}
|
| 86 |
+
|
| 87 |
+
export function buildBenchmarkReport({
|
| 88 |
+
rootDir,
|
| 89 |
+
generatedAt,
|
| 90 |
+
config,
|
| 91 |
+
scenarios,
|
| 92 |
+
observability,
|
| 93 |
+
}: {
|
| 94 |
+
rootDir: string
|
| 95 |
+
generatedAt: string
|
| 96 |
+
config: BenchmarkConfig
|
| 97 |
+
scenarios: ScenarioSummary[]
|
| 98 |
+
observability: {
|
| 99 |
+
bootstrapState?: Record<string, unknown>
|
| 100 |
+
process?: Record<string, unknown>
|
| 101 |
+
}
|
| 102 |
+
}): BenchmarkReport {
|
| 103 |
+
const totalRuns = scenarios.reduce((sum, item) => sum + item.totalRuns, 0)
|
| 104 |
+
const failedScenarios = scenarios.filter(item => item.failedRuns > 0).length
|
| 105 |
+
const skippedScenarios = scenarios.filter(
|
| 106 |
+
item => item.skippedRuns === item.totalRuns,
|
| 107 |
+
).length
|
| 108 |
+
const successRate =
|
| 109 |
+
scenarios.length === 0
|
| 110 |
+
? 0
|
| 111 |
+
: average(scenarios.map(item => item.successRate))
|
| 112 |
+
const score = computeScore(scenarios, config)
|
| 113 |
+
const qualityGate = evaluateQualityGate(scenarios, config)
|
| 114 |
+
return {
|
| 115 |
+
version: 1,
|
| 116 |
+
generatedAt,
|
| 117 |
+
rootDir,
|
| 118 |
+
mode: config.mode,
|
| 119 |
+
runKind: config.runKind,
|
| 120 |
+
config,
|
| 121 |
+
scenarios,
|
| 122 |
+
aggregate: {
|
| 123 |
+
totalScenarios: scenarios.length,
|
| 124 |
+
failedScenarios,
|
| 125 |
+
skippedScenarios,
|
| 126 |
+
totalRuns,
|
| 127 |
+
successRate,
|
| 128 |
+
score,
|
| 129 |
+
},
|
| 130 |
+
observability,
|
| 131 |
+
qualityGate,
|
| 132 |
+
}
|
| 133 |
+
}
|
| 134 |
+
|
| 135 |
+
export function buildComparisonText(
|
| 136 |
+
baseline: BenchmarkReport,
|
| 137 |
+
current: BenchmarkReport,
|
| 138 |
+
): string {
|
| 139 |
+
const lines: string[] = []
|
| 140 |
+
lines.push('# Benchmark Comparison')
|
| 141 |
+
lines.push('')
|
| 142 |
+
lines.push(`- Baseline: ${baseline.generatedAt}`)
|
| 143 |
+
lines.push(`- Current: ${current.generatedAt}`)
|
| 144 |
+
lines.push(
|
| 145 |
+
`- Total score: ${baseline.aggregate.score.total.toFixed(2)} -> ${current.aggregate.score.total.toFixed(2)}`,
|
| 146 |
+
)
|
| 147 |
+
lines.push(
|
| 148 |
+
`- Success rate: ${baseline.aggregate.successRate.toFixed(2)}% -> ${current.aggregate.successRate.toFixed(2)}%`,
|
| 149 |
+
)
|
| 150 |
+
lines.push('')
|
| 151 |
+
lines.push('## Scenario deltas')
|
| 152 |
+
const baselineMap = new Map(baseline.scenarios.map(item => [item.id, item]))
|
| 153 |
+
for (const scenario of current.scenarios) {
|
| 154 |
+
const older = baselineMap.get(scenario.id)
|
| 155 |
+
if (!older) {
|
| 156 |
+
lines.push(`- ${scenario.id}: new scenario`)
|
| 157 |
+
continue
|
| 158 |
+
}
|
| 159 |
+
const oldP95 = older.durationMs?.p95 ?? 0
|
| 160 |
+
const newP95 = scenario.durationMs?.p95 ?? 0
|
| 161 |
+
const delta = newP95 - oldP95
|
| 162 |
+
lines.push(
|
| 163 |
+
`- ${scenario.id}: p95 ${oldP95.toFixed(2)}ms -> ${newP95.toFixed(2)}ms (delta ${delta.toFixed(2)}ms), success ${older.successRate.toFixed(2)}% -> ${scenario.successRate.toFixed(2)}%`,
|
| 164 |
+
)
|
| 165 |
+
}
|
| 166 |
+
return `${lines.join('\n')}\n`
|
| 167 |
+
}
|
scenarios/correctnessTools.ts
ADDED
|
@@ -0,0 +1,168 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import type { ToolUseBlock } from '@anthropic-ai/sdk/resources/index.mjs'
|
| 2 |
+
import { getDefaultAppState } from '../../src/state/AppStateStore.js'
|
| 3 |
+
import { runTools } from '../../src/services/tools/toolOrchestration.js'
|
| 4 |
+
import type { CanUseToolFn } from '../../src/hooks/useCanUseTool.js'
|
| 5 |
+
import { enqueue, resetCommandQueue } from '../../src/utils/messageQueueManager.js'
|
| 6 |
+
import { processQueueIfReady } from '../../src/utils/queueProcessor.js'
|
| 7 |
+
import { createAssistantMessage } from '../../src/utils/messages.js'
|
| 8 |
+
import type { ToolUseContext } from '../../src/Tool.js'
|
| 9 |
+
import type { BenchmarkConfig, RunContext, Scenario, SingleExecution } from '../types.js'
|
| 10 |
+
|
| 11 |
+
/**
 * B07: verifies the command queue's batching semantics. Each iteration
 * resets the queue, enqueues one slash command, two normal prompts, and one
 * bash command, then drains the queue and checks the batch boundaries:
 * the slash command alone, the two normal prompts together, the bash
 * command alone. Timing covers only the drain loop.
 */
async function runQueueCorrectness(
  _context: RunContext,
  config: BenchmarkConfig,
): Promise<SingleExecution[]> {
  const runs: SingleExecution[] = []
  for (let i = 0; i < config.iterations.queueCorrectness; i++) {
    resetCommandQueue()
    const executedBatches: string[][] = []
    enqueue({ value: '/help', mode: 'prompt', priority: 'next' })
    enqueue({ value: 'first normal', mode: 'prompt', priority: 'next' })
    enqueue({ value: 'second normal', mode: 'prompt', priority: 'next' })
    enqueue({ value: 'echo hi', mode: 'bash', priority: 'next' })
    const startedAt = performance.now()
    // Drain the queue one batch at a time; each call that returns
    // processed=true delivered one batch to executeInput.
    // NOTE(review): executeInput is async but its promise is not awaited by
    // this loop — confirm processQueueIfReady handles that synchronously.
    while (
      processQueueIfReady({
        executeInput: async commands => {
          executedBatches.push(commands.map(command => String(command.value)))
        },
      }).processed
    ) {
      // keep draining
    }
    const durationMs = performance.now() - startedAt
    // Expected batch layout: [0] slash command, [1] normal prompts, [2] bash.
    const slashBatch = executedBatches[0] ?? []
    const normalBatch = executedBatches[1] ?? []
    const bashBatch = executedBatches[2] ?? []
    const ok =
      slashBatch.length === 1 &&
      slashBatch[0] === '/help' &&
      normalBatch.length === 2 &&
      bashBatch.length === 1
    runs.push({
      ok,
      durationMs,
      details: {
        executedBatches,
      },
      error: ok ? undefined : 'queue processor did not preserve expected batching semantics',
    })
  }
  // Leave the shared queue clean for whatever runs after this scenario.
  resetCommandQueue()
  return runs
}
|
| 54 |
+
|
| 55 |
+
/**
 * Builds the smallest ToolUseContext that the tool-orchestration pipeline
 * will accept: no commands, no tools, no MCP clients, non-interactive, with
 * thinking disabled. App state and the in-progress tool-use ID set are held
 * in closure variables so the setter callbacks behave like real state.
 */
function createMinimalToolUseContext(): ToolUseContext {
  let appState = getDefaultAppState()
  let inProgress = new Set<string>()
  return {
    options: {
      commands: [],
      debug: false,
      mainLoopModel: 'claude-sonnet-4-5',
      tools: [],
      verbose: false,
      thinkingConfig: {
        type: 'disabled',
      },
      mcpClients: [],
      mcpResources: {},
      isNonInteractiveSession: true,
      agentDefinitions: { activeAgents: [], allAgents: [] },
    },
    abortController: new AbortController(),
    // NOTE(review): cast to an empty readFileState — assumes the pipeline
    // under test never reads file state from this context; confirm.
    readFileState: {} as ToolUseContext['readFileState'],
    getAppState: () => appState,
    setAppState: updater => {
      appState = updater(appState)
    },
    setInProgressToolUseIDs: updater => {
      inProgress = updater(inProgress)
    },
    // Benchmarks do not observe these updates, so they are no-ops.
    setResponseLength: _updater => {},
    updateFileHistoryState: _updater => {},
    updateAttributionState: _updater => {},
    messages: [],
  }
}
|
| 88 |
+
|
| 89 |
+
async function runToolPipeline(
|
| 90 |
+
_context: RunContext,
|
| 91 |
+
config: BenchmarkConfig,
|
| 92 |
+
): Promise<SingleExecution[]> {
|
| 93 |
+
const runs: SingleExecution[] = []
|
| 94 |
+
for (let i = 0; i < config.iterations.toolPipeline; i++) {
|
| 95 |
+
const toolUseContext = createMinimalToolUseContext()
|
| 96 |
+
const canUseTool: CanUseToolFn = async (_tool, input) => ({
|
| 97 |
+
behavior: 'allow',
|
| 98 |
+
updatedInput: input,
|
| 99 |
+
})
|
| 100 |
+
const toolUseMessages: ToolUseBlock[] = [
|
| 101 |
+
{
|
| 102 |
+
id: `tool-use-${i}-1`,
|
| 103 |
+
name: 'NonExistentToolA',
|
| 104 |
+
input: {},
|
| 105 |
+
type: 'tool_use',
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
id: `tool-use-${i}-2`,
|
| 109 |
+
name: 'NonExistentToolB',
|
| 110 |
+
input: {},
|
| 111 |
+
type: 'tool_use',
|
| 112 |
+
},
|
| 113 |
+
]
|
| 114 |
+
const assistantMessages = toolUseMessages.map(toolUse =>
|
| 115 |
+
createAssistantMessage({
|
| 116 |
+
content: [toolUse],
|
| 117 |
+
}),
|
| 118 |
+
)
|
| 119 |
+
const startedAt = performance.now()
|
| 120 |
+
let yieldedMessages = 0
|
| 121 |
+
for await (const update of runTools(
|
| 122 |
+
toolUseMessages,
|
| 123 |
+
assistantMessages,
|
| 124 |
+
canUseTool,
|
| 125 |
+
toolUseContext,
|
| 126 |
+
)) {
|
| 127 |
+
if (update.message) {
|
| 128 |
+
yieldedMessages++
|
| 129 |
+
}
|
| 130 |
+
}
|
| 131 |
+
const durationMs = performance.now() - startedAt
|
| 132 |
+
const ok = yieldedMessages >= toolUseMessages.length
|
| 133 |
+
runs.push({
|
| 134 |
+
ok,
|
| 135 |
+
durationMs,
|
| 136 |
+
details: {
|
| 137 |
+
yieldedMessages,
|
| 138 |
+
},
|
| 139 |
+
error: ok ? undefined : 'tool orchestration produced fewer messages than expected',
|
| 140 |
+
})
|
| 141 |
+
}
|
| 142 |
+
return runs
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
export function createCorrectnessAndToolScenarios(
|
| 146 |
+
config: BenchmarkConfig,
|
| 147 |
+
): Scenario[] {
|
| 148 |
+
return [
|
| 149 |
+
{
|
| 150 |
+
id: 'B07',
|
| 151 |
+
name: 'Slash and Queue Correctness',
|
| 152 |
+
category: 'correctness',
|
| 153 |
+
description:
|
| 154 |
+
'Validate slash command isolation, normal prompt batching, and bash single-item behavior.',
|
| 155 |
+
tags: ['queue', 'slash', 'correctness'],
|
| 156 |
+
run: context => runQueueCorrectness(context, config),
|
| 157 |
+
},
|
| 158 |
+
{
|
| 159 |
+
id: 'B08',
|
| 160 |
+
name: 'Tool Pipeline Execution',
|
| 161 |
+
category: 'tools',
|
| 162 |
+
description:
|
| 163 |
+
'Measure runTools orchestration overhead and ensure deterministic tool-result plumbing.',
|
| 164 |
+
tags: ['tools', 'orchestration', 'pipeline'],
|
| 165 |
+
run: context => runToolPipeline(context, config),
|
| 166 |
+
},
|
| 167 |
+
]
|
| 168 |
+
}
|
scenarios/headless.ts
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import { readFile } from 'node:fs/promises'
|
| 2 |
+
import { join } from 'node:path'
|
| 3 |
+
import type { BenchmarkConfig, RunContext, Scenario, SingleExecution } from '../types.js'
|
| 4 |
+
import { runCommand } from '../harness/process.js'
|
| 5 |
+
|
| 6 |
+
function hasOnlineCredential(): boolean {
|
| 7 |
+
return Boolean(
|
| 8 |
+
process.env.ANTHROPIC_API_KEY ||
|
| 9 |
+
process.env.AWS_ACCESS_KEY_ID ||
|
| 10 |
+
process.env.AZURE_OPENAI_API_KEY,
|
| 11 |
+
)
|
| 12 |
+
}
|
| 13 |
+
|
| 14 |
+
async function offlineSingleRound(context: RunContext): Promise<SingleExecution[]> {
|
| 15 |
+
const startedAt = performance.now()
|
| 16 |
+
const output = await readFile(
|
| 17 |
+
join(context.rootDir, 'benchmark', 'fixtures', 'headless-offline.txt'),
|
| 18 |
+
'utf8',
|
| 19 |
+
)
|
| 20 |
+
const ok = output.trim().length > 0
|
| 21 |
+
return [
|
| 22 |
+
{
|
| 23 |
+
ok,
|
| 24 |
+
durationMs: performance.now() - startedAt,
|
| 25 |
+
details: {
|
| 26 |
+
outputLength: output.trim().length,
|
| 27 |
+
},
|
| 28 |
+
error: ok ? undefined : 'offline fixture output is empty',
|
| 29 |
+
},
|
| 30 |
+
]
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
async function offlineStreaming(context: RunContext): Promise<SingleExecution[]> {
|
| 34 |
+
const startedAt = performance.now()
|
| 35 |
+
const raw = await readFile(
|
| 36 |
+
join(context.rootDir, 'benchmark', 'fixtures', 'headless-offline.stream.jsonl'),
|
| 37 |
+
'utf8',
|
| 38 |
+
)
|
| 39 |
+
const lines = raw
|
| 40 |
+
.split(/\r?\n/)
|
| 41 |
+
.map(line => line.trim())
|
| 42 |
+
.filter(Boolean)
|
| 43 |
+
const parsed = lines.map(line => JSON.parse(line) as Record<string, unknown>)
|
| 44 |
+
const valid = parsed.every(item => typeof item.type === 'string')
|
| 45 |
+
const firstEventDelayMs = lines.length > 0 ? 0 : -1
|
| 46 |
+
return [
|
| 47 |
+
{
|
| 48 |
+
ok: valid && lines.length > 0,
|
| 49 |
+
durationMs: performance.now() - startedAt,
|
| 50 |
+
details: {
|
| 51 |
+
eventCount: lines.length,
|
| 52 |
+
firstEventDelayMs,
|
| 53 |
+
},
|
| 54 |
+
error: valid ? undefined : 'stream-json fixture contains invalid events',
|
| 55 |
+
},
|
| 56 |
+
]
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
async function onlineSingleRound(
|
| 60 |
+
context: RunContext,
|
| 61 |
+
config: BenchmarkConfig,
|
| 62 |
+
): Promise<SingleExecution[]> {
|
| 63 |
+
if (!hasOnlineCredential()) {
|
| 64 |
+
return [
|
| 65 |
+
{
|
| 66 |
+
ok: true,
|
| 67 |
+
durationMs: 0,
|
| 68 |
+
skipped: true,
|
| 69 |
+
skipReason: 'no online credentials found',
|
| 70 |
+
},
|
| 71 |
+
]
|
| 72 |
+
}
|
| 73 |
+
const result = await runCommand(
|
| 74 |
+
'bun',
|
| 75 |
+
['run', './src/bootstrap-entry.ts', '-p', 'Reply with exactly: benchmark_ok'],
|
| 76 |
+
{
|
| 77 |
+
cwd: context.rootDir,
|
| 78 |
+
timeoutMs: config.timeoutsMs.onlineHeadless,
|
| 79 |
+
},
|
| 80 |
+
)
|
| 81 |
+
const text = result.stdout.trim()
|
| 82 |
+
const ok = result.code === 0 && /benchmark_ok/iu.test(text)
|
| 83 |
+
return [
|
| 84 |
+
{
|
| 85 |
+
ok,
|
| 86 |
+
durationMs: result.durationMs,
|
| 87 |
+
details: {
|
| 88 |
+
exitCode: result.code,
|
| 89 |
+
outputPreview: text.slice(0, 120),
|
| 90 |
+
},
|
| 91 |
+
error: ok ? undefined : `online -p failed (${String(result.code)})`,
|
| 92 |
+
},
|
| 93 |
+
]
|
| 94 |
+
}
|
| 95 |
+
|
| 96 |
+
async function onlineStreaming(
|
| 97 |
+
context: RunContext,
|
| 98 |
+
config: BenchmarkConfig,
|
| 99 |
+
): Promise<SingleExecution[]> {
|
| 100 |
+
if (!hasOnlineCredential()) {
|
| 101 |
+
return [
|
| 102 |
+
{
|
| 103 |
+
ok: true,
|
| 104 |
+
durationMs: 0,
|
| 105 |
+
skipped: true,
|
| 106 |
+
skipReason: 'no online credentials found',
|
| 107 |
+
},
|
| 108 |
+
]
|
| 109 |
+
}
|
| 110 |
+
const stdinText =
|
| 111 |
+
'{"type":"user","text":"Reply in one short sentence."}\n{"type":"input_end"}\n'
|
| 112 |
+
const result = await runCommand(
|
| 113 |
+
'bun',
|
| 114 |
+
[
|
| 115 |
+
'run',
|
| 116 |
+
'./src/bootstrap-entry.ts',
|
| 117 |
+
'-p',
|
| 118 |
+
'--input-format=stream-json',
|
| 119 |
+
'--output-format=stream-json',
|
| 120 |
+
],
|
| 121 |
+
{
|
| 122 |
+
cwd: context.rootDir,
|
| 123 |
+
timeoutMs: config.timeoutsMs.onlineHeadless,
|
| 124 |
+
stdinText,
|
| 125 |
+
},
|
| 126 |
+
)
|
| 127 |
+
const lines = result.stdout
|
| 128 |
+
.split(/\r?\n/)
|
| 129 |
+
.map(line => line.trim())
|
| 130 |
+
.filter(Boolean)
|
| 131 |
+
let parsedCount = 0
|
| 132 |
+
for (const line of lines) {
|
| 133 |
+
try {
|
| 134 |
+
JSON.parse(line)
|
| 135 |
+
parsedCount++
|
| 136 |
+
} catch {
|
| 137 |
+
return [
|
| 138 |
+
{
|
| 139 |
+
ok: false,
|
| 140 |
+
durationMs: result.durationMs,
|
| 141 |
+
details: {
|
| 142 |
+
exitCode: result.code,
|
| 143 |
+
parsedCount,
|
| 144 |
+
line,
|
| 145 |
+
},
|
| 146 |
+
error: 'stream-json output contains invalid JSON line',
|
| 147 |
+
},
|
| 148 |
+
]
|
| 149 |
+
}
|
| 150 |
+
}
|
| 151 |
+
return [
|
| 152 |
+
{
|
| 153 |
+
ok: result.code === 0 && parsedCount > 0,
|
| 154 |
+
durationMs: result.durationMs,
|
| 155 |
+
details: {
|
| 156 |
+
exitCode: result.code,
|
| 157 |
+
eventCount: parsedCount,
|
| 158 |
+
},
|
| 159 |
+
error: result.code === 0 ? undefined : `online stream-json failed (${String(result.code)})`,
|
| 160 |
+
},
|
| 161 |
+
]
|
| 162 |
+
}
|
| 163 |
+
|
| 164 |
+
export function createHeadlessScenarios(config: BenchmarkConfig): Scenario[] {
|
| 165 |
+
return [
|
| 166 |
+
{
|
| 167 |
+
id: 'B05',
|
| 168 |
+
name: 'Headless Single Round',
|
| 169 |
+
category: 'headless',
|
| 170 |
+
description:
|
| 171 |
+
'Measure non-interactive single-round behavior with offline fixture and optional online sampling.',
|
| 172 |
+
tags: ['headless', 'print', 'quality'],
|
| 173 |
+
run: async context =>
|
| 174 |
+
context.mode === 'online'
|
| 175 |
+
? onlineSingleRound(context, config)
|
| 176 |
+
: context.mode === 'offline'
|
| 177 |
+
? offlineSingleRound(context)
|
| 178 |
+
: [
|
| 179 |
+
...(await offlineSingleRound(context)),
|
| 180 |
+
...(await onlineSingleRound(context, config)),
|
| 181 |
+
],
|
| 182 |
+
},
|
| 183 |
+
{
|
| 184 |
+
id: 'B06',
|
| 185 |
+
name: 'Headless Streaming',
|
| 186 |
+
category: 'headless',
|
| 187 |
+
description:
|
| 188 |
+
'Validate stream-json event format and first-event responsiveness with offline fixture and optional online sampling.',
|
| 189 |
+
tags: ['headless', 'stream-json', 'stability'],
|
| 190 |
+
run: async context =>
|
| 191 |
+
context.mode === 'online'
|
| 192 |
+
? onlineStreaming(context, config)
|
| 193 |
+
: context.mode === 'offline'
|
| 194 |
+
? offlineStreaming(context)
|
| 195 |
+
: [
|
| 196 |
+
...(await offlineStreaming(context)),
|
| 197 |
+
...(await onlineStreaming(context, config)),
|
| 198 |
+
],
|
| 199 |
+
},
|
| 200 |
+
]
|
| 201 |
+
}
|
scenarios/restoration.ts
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import type { BenchmarkConfig, RunContext, Scenario, SingleExecution } from '../types.js'
|
| 2 |
+
import { runCommand } from '../harness/process.js'
|
| 3 |
+
|
| 4 |
+
function extractMissingImports(output: string): number | null {
|
| 5 |
+
const match = /missing_relative_imports=(\d+)/u.exec(output)
|
| 6 |
+
if (!match) return null
|
| 7 |
+
return Number(match[1])
|
| 8 |
+
}
|
| 9 |
+
|
| 10 |
+
async function runRestorationGate(
|
| 11 |
+
context: RunContext,
|
| 12 |
+
config: BenchmarkConfig,
|
| 13 |
+
): Promise<SingleExecution[]> {
|
| 14 |
+
const result = await runCommand('bun', ['run', './src/dev-entry.ts', '--version'], {
|
| 15 |
+
cwd: context.rootDir,
|
| 16 |
+
timeoutMs: config.timeoutsMs.command,
|
| 17 |
+
})
|
| 18 |
+
const combined = `${result.stdout}\n${result.stderr}`
|
| 19 |
+
const missing = extractMissingImports(combined)
|
| 20 |
+
const ok =
|
| 21 |
+
result.code === 0 &&
|
| 22 |
+
(missing === null || missing <= config.thresholds.maximumMissingImports)
|
| 23 |
+
return [
|
| 24 |
+
{
|
| 25 |
+
ok,
|
| 26 |
+
durationMs: result.durationMs,
|
| 27 |
+
details: {
|
| 28 |
+
exitCode: result.code,
|
| 29 |
+
missingRelativeImports: missing,
|
| 30 |
+
},
|
| 31 |
+
error: ok
|
| 32 |
+
? undefined
|
| 33 |
+
: `restoration gate failed (missing_relative_imports=${String(missing)})`,
|
| 34 |
+
},
|
| 35 |
+
]
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
export function createRestorationScenario(config: BenchmarkConfig): Scenario {
|
| 39 |
+
return {
|
| 40 |
+
id: 'B09',
|
| 41 |
+
name: 'Restoration Health Gate',
|
| 42 |
+
category: 'restoration',
|
| 43 |
+
description:
|
| 44 |
+
'Run restored dev entry check and enforce missing-relative-imports gate.',
|
| 45 |
+
tags: ['restoration', 'health', 'gate'],
|
| 46 |
+
run: context => runRestorationGate(context, config),
|
| 47 |
+
}
|
| 48 |
+
}
|
scenarios/startupCommand.ts
ADDED
|
@@ -0,0 +1,199 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import { readdir, readFile } from 'node:fs/promises'
|
| 2 |
+
import { join } from 'node:path'
|
| 3 |
+
import { clearCommandsCache, getCommands } from '../../src/commands.js'
|
| 4 |
+
import { getClaudeConfigHomeDir } from '../../src/utils/envUtils.js'
|
| 5 |
+
import type { BenchmarkConfig, RunContext, Scenario, SingleExecution } from '../types.js'
|
| 6 |
+
import { runCommand } from '../harness/process.js'
|
| 7 |
+
|
| 8 |
+
function parseStartupReport(content: string): Record<string, number> {
|
| 9 |
+
const lines = content.split(/\r?\n/)
|
| 10 |
+
const checkpoints = new Map<string, number>()
|
| 11 |
+
const pattern = /^\[\+\s*([0-9.]+)ms\]\s+\(\+\s*([0-9.]+)ms\)\s+(.+)$/u
|
| 12 |
+
for (const line of lines) {
|
| 13 |
+
const match = pattern.exec(line.trim())
|
| 14 |
+
if (!match) continue
|
| 15 |
+
checkpoints.set(match[3]!, Number(match[1]))
|
| 16 |
+
}
|
| 17 |
+
const resolve = (start: string, end: string): number | undefined => {
|
| 18 |
+
const s = checkpoints.get(start)
|
| 19 |
+
const e = checkpoints.get(end)
|
| 20 |
+
if (s === undefined || e === undefined) return undefined
|
| 21 |
+
return Math.max(0, e - s)
|
| 22 |
+
}
|
| 23 |
+
return {
|
| 24 |
+
importTimeMs: resolve('cli_entry', 'main_tsx_imports_loaded') ?? -1,
|
| 25 |
+
initTimeMs: resolve('init_function_start', 'init_function_end') ?? -1,
|
| 26 |
+
settingsTimeMs: resolve('eagerLoadSettings_start', 'eagerLoadSettings_end') ?? -1,
|
| 27 |
+
totalTimeMs: resolve('cli_entry', 'main_after_run') ?? -1,
|
| 28 |
+
}
|
| 29 |
+
}
|
| 30 |
+
|
| 31 |
+
async function readLatestStartupReport(rootDir: string): Promise<string | null> {
|
| 32 |
+
const dir = join(getClaudeConfigHomeDir(), 'startup-perf')
|
| 33 |
+
const entries = await readdir(dir, { withFileTypes: true }).catch(() => [])
|
| 34 |
+
const txtFiles = entries.filter(entry => entry.isFile() && entry.name.endsWith('.txt'))
|
| 35 |
+
if (txtFiles.length === 0) return null
|
| 36 |
+
const sorted = txtFiles.sort((a, b) => b.name.localeCompare(a.name))
|
| 37 |
+
const first = sorted[0]
|
| 38 |
+
if (!first) return null
|
| 39 |
+
return readFile(join(dir, first.name), 'utf8').catch(() => null)
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
async function runVersionBenchmark(
|
| 43 |
+
context: RunContext,
|
| 44 |
+
config: BenchmarkConfig,
|
| 45 |
+
): Promise<SingleExecution[]> {
|
| 46 |
+
const runs: SingleExecution[] = []
|
| 47 |
+
for (let i = 0; i < config.iterations.startup; i++) {
|
| 48 |
+
const result = await runCommand(
|
| 49 |
+
'bun',
|
| 50 |
+
['run', './src/bootstrap-entry.ts', '--version'],
|
| 51 |
+
{
|
| 52 |
+
cwd: context.rootDir,
|
| 53 |
+
timeoutMs: config.timeoutsMs.command,
|
| 54 |
+
},
|
| 55 |
+
)
|
| 56 |
+
runs.push({
|
| 57 |
+
ok: result.code === 0 && !result.timedOut,
|
| 58 |
+
durationMs: result.durationMs,
|
| 59 |
+
details: {
|
| 60 |
+
exitCode: result.code,
|
| 61 |
+
hasVersion: /^\d+/u.test(result.stdout.trim()),
|
| 62 |
+
},
|
| 63 |
+
error:
|
| 64 |
+
result.code === 0
|
| 65 |
+
? undefined
|
| 66 |
+
: `version command failed (code=${String(result.code)}): ${result.stderr.trim()}`,
|
| 67 |
+
})
|
| 68 |
+
}
|
| 69 |
+
return runs
|
| 70 |
+
}
|
| 71 |
+
|
| 72 |
+
async function runStartupProfiling(
|
| 73 |
+
context: RunContext,
|
| 74 |
+
config: BenchmarkConfig,
|
| 75 |
+
): Promise<SingleExecution[]> {
|
| 76 |
+
const result = await runCommand('bun', ['run', './src/bootstrap-entry.ts', '--help'], {
|
| 77 |
+
cwd: context.rootDir,
|
| 78 |
+
timeoutMs: config.timeoutsMs.command,
|
| 79 |
+
env: {
|
| 80 |
+
CLAUDE_CODE_PROFILE_STARTUP: '1',
|
| 81 |
+
},
|
| 82 |
+
})
|
| 83 |
+
const report = await readLatestStartupReport(context.rootDir)
|
| 84 |
+
const parsed = report ? parseStartupReport(report) : {}
|
| 85 |
+
return [
|
| 86 |
+
{
|
| 87 |
+
ok: result.code === 0 && !result.timedOut && Boolean(report),
|
| 88 |
+
durationMs: result.durationMs,
|
| 89 |
+
details: {
|
| 90 |
+
exitCode: result.code,
|
| 91 |
+
...parsed,
|
| 92 |
+
},
|
| 93 |
+
error:
|
| 94 |
+
report || result.code === 0
|
| 95 |
+
? undefined
|
| 96 |
+
: `startup profile run failed (code=${String(result.code)})`,
|
| 97 |
+
},
|
| 98 |
+
]
|
| 99 |
+
}
|
| 100 |
+
|
| 101 |
+
async function runBareComparison(
|
| 102 |
+
context: RunContext,
|
| 103 |
+
config: BenchmarkConfig,
|
| 104 |
+
): Promise<SingleExecution[]> {
|
| 105 |
+
const normal = await runCommand('bun', ['run', './src/bootstrap-entry.ts', '--help'], {
|
| 106 |
+
cwd: context.rootDir,
|
| 107 |
+
timeoutMs: config.timeoutsMs.command,
|
| 108 |
+
})
|
| 109 |
+
const bare = await runCommand(
|
| 110 |
+
'bun',
|
| 111 |
+
['run', './src/bootstrap-entry.ts', '--help', '--bare'],
|
| 112 |
+
{
|
| 113 |
+
cwd: context.rootDir,
|
| 114 |
+
timeoutMs: config.timeoutsMs.command,
|
| 115 |
+
},
|
| 116 |
+
)
|
| 117 |
+
return [
|
| 118 |
+
{
|
| 119 |
+
ok: normal.code === 0 && bare.code === 0,
|
| 120 |
+
durationMs: normal.durationMs + bare.durationMs,
|
| 121 |
+
details: {
|
| 122 |
+
normalMs: normal.durationMs,
|
| 123 |
+
bareMs: bare.durationMs,
|
| 124 |
+
deltaMs: normal.durationMs - bare.durationMs,
|
| 125 |
+
},
|
| 126 |
+
error:
|
| 127 |
+
normal.code === 0 && bare.code === 0
|
| 128 |
+
? undefined
|
| 129 |
+
: `--bare comparison failed (normal=${String(normal.code)}, bare=${String(bare.code)})`,
|
| 130 |
+
},
|
| 131 |
+
]
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
async function runCommandsLoad(
|
| 135 |
+
context: RunContext,
|
| 136 |
+
config: BenchmarkConfig,
|
| 137 |
+
): Promise<SingleExecution[]> {
|
| 138 |
+
const runs: SingleExecution[] = []
|
| 139 |
+
for (let i = 0; i < config.iterations.commandLoad; i++) {
|
| 140 |
+
clearCommandsCache()
|
| 141 |
+
const startedAt = performance.now()
|
| 142 |
+
const cold = await getCommands(context.rootDir)
|
| 143 |
+
const coldMs = performance.now() - startedAt
|
| 144 |
+
const warmStart = performance.now()
|
| 145 |
+
const warm = await getCommands(context.rootDir)
|
| 146 |
+
const warmMs = performance.now() - warmStart
|
| 147 |
+
runs.push({
|
| 148 |
+
ok: cold.length > 0 && warm.length > 0,
|
| 149 |
+
durationMs: coldMs + warmMs,
|
| 150 |
+
details: {
|
| 151 |
+
coldMs,
|
| 152 |
+
warmMs,
|
| 153 |
+
coldCount: cold.length,
|
| 154 |
+
warmCount: warm.length,
|
| 155 |
+
},
|
| 156 |
+
error: cold.length > 0 ? undefined : 'commands list is empty',
|
| 157 |
+
})
|
| 158 |
+
}
|
| 159 |
+
return runs
|
| 160 |
+
}
|
| 161 |
+
|
| 162 |
+
export function createStartupAndCommandScenarios(
|
| 163 |
+
config: BenchmarkConfig,
|
| 164 |
+
): Scenario[] {
|
| 165 |
+
return [
|
| 166 |
+
{
|
| 167 |
+
id: 'B01',
|
| 168 |
+
name: 'Version Fast Path',
|
| 169 |
+
category: 'startup',
|
| 170 |
+
description: 'Measure --version cold-start latency and exit stability.',
|
| 171 |
+
tags: ['cli', 'startup', 'version'],
|
| 172 |
+
run: context => runVersionBenchmark(context, config),
|
| 173 |
+
},
|
| 174 |
+
{
|
| 175 |
+
id: 'B02',
|
| 176 |
+
name: 'Startup Phase Profiling',
|
| 177 |
+
category: 'startup',
|
| 178 |
+
description: 'Capture startup phase durations from startup profiler checkpoints.',
|
| 179 |
+
tags: ['cli', 'startup', 'profiling'],
|
| 180 |
+
run: context => runStartupProfiling(context, config),
|
| 181 |
+
},
|
| 182 |
+
{
|
| 183 |
+
id: 'B03',
|
| 184 |
+
name: 'Bare Mode Delta',
|
| 185 |
+
category: 'startup',
|
| 186 |
+
description: 'Compare --help startup latency between normal and --bare.',
|
| 187 |
+
tags: ['cli', 'startup', 'bare'],
|
| 188 |
+
run: context => runBareComparison(context, config),
|
| 189 |
+
},
|
| 190 |
+
{
|
| 191 |
+
id: 'B04',
|
| 192 |
+
name: 'Command Catalog Load',
|
| 193 |
+
category: 'commands',
|
| 194 |
+
description: 'Measure cold/warm command catalog loading via getCommands(cwd).',
|
| 195 |
+
tags: ['commands', 'cache'],
|
| 196 |
+
run: context => runCommandsLoad(context, config),
|
| 197 |
+
},
|
| 198 |
+
]
|
| 199 |
+
}
|
types.ts
ADDED
|
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/** How scenarios exercise the model: deterministic fixtures ('offline'), live API ('online'), or both ('hybrid'). */
export type BenchmarkMode = 'offline' | 'online' | 'hybrid'

/** Size of the run: a quick 'smoke' pass or the 'full' suite. */
export type BenchmarkRunKind = 'smoke' | 'full'

/** Grouping tag used for reporting and per-category latency thresholds. */
export type ScenarioCategory =
  | 'startup'
  | 'commands'
  | 'headless'
  | 'correctness'
  | 'tools'
  | 'restoration'

/** Shared context passed to every scenario's `run` function. */
export type RunContext = {
  // Repository root; used as cwd for spawned CLI commands and as the base
  // path for benchmark fixtures.
  rootDir: string
  mode: BenchmarkMode
  runKind: BenchmarkRunKind
  // Timestamp string captured at run start (ISO format, per the name).
  nowIso: string
  // Destination directory for generated reports and artifacts.
  outputDir: string
}

/** Outcome of a single scenario iteration. */
export type SingleExecution = {
  ok: boolean
  durationMs: number
  // Free-form per-run metrics (exit codes, counts, phase timings, ...).
  details?: Record<string, unknown>
  // Human-readable failure description, when available.
  error?: string
  // True when the run was intentionally skipped (e.g. missing credentials).
  skipped?: boolean
  skipReason?: string
}

/** A runnable benchmark scenario; `run` returns one entry per iteration. */
export type Scenario = {
  id: string
  name: string
  category: ScenarioCategory
  description: string
  tags: string[]
  run: (context: RunContext) => Promise<SingleExecution[]>
}

/** Summary statistics over a set of duration samples (milliseconds). */
export type Distribution = {
  min: number
  max: number
  mean: number
  p50: number
  p95: number
  p99: number
}

/** Aggregated results for one scenario across all of its runs. */
export type ScenarioSummary = {
  id: string
  name: string
  category: ScenarioCategory
  description: string
  tags: string[]
  totalRuns: number
  skippedRuns: number
  successRuns: number
  failedRuns: number
  successRate: number
  // null when no timing samples were collected (e.g. all runs skipped).
  durationMs: Distribution | null
  examples: Array<Record<string, unknown>>
  errors: string[]
}

/** Relative weights used to combine the score dimensions into a total. */
export type BenchmarkWeights = {
  latency: number
  stability: number
  quality: number
  cost: number
}

/** Pass/fail limits enforced by the quality gate. */
export type BenchmarkThresholds = {
  minimumSuccessRatePct: number
  // Optional per-category ceilings on p95 latency (ms).
  maximumP95MsByCategory: Partial<Record<ScenarioCategory, number>>
  // Ceiling for the restoration gate's missing-relative-imports count.
  maximumMissingImports: number
}

/** Full configuration for a benchmark run. */
export type BenchmarkConfig = {
  mode: BenchmarkMode
  runKind: BenchmarkRunKind
  // Per-scenario iteration counts.
  iterations: {
    startup: number
    commandLoad: number
    queueCorrectness: number
    toolPipeline: number
  }
  // Per-operation spawn timeouts (ms); online headless runs get their own.
  timeoutsMs: {
    command: number
    onlineHeadless: number
  }
  weights: BenchmarkWeights
  thresholds: BenchmarkThresholds
}

/** Result of evaluating the quality gate, with reasons for any failure. */
export type QualityGateResult = {
  passed: boolean
  reasons: string[]
}

/** Top-level JSON report emitted by a benchmark run (schema version 1). */
export type BenchmarkReport = {
  version: 1
  generatedAt: string
  rootDir: string
  mode: BenchmarkMode
  runKind: BenchmarkRunKind
  config: BenchmarkConfig
  scenarios: ScenarioSummary[]
  // Whole-run rollups and the weighted score breakdown.
  aggregate: {
    totalScenarios: number
    failedScenarios: number
    skippedScenarios: number
    totalRuns: number
    successRate: number
    score: {
      latency: number
      stability: number
      quality: number
      cost: number
      total: number
    }
  }
  // Optional environment snapshots captured alongside the results.
  observability: {
    bootstrapState?: Record<string, unknown>
    process?: Record<string, unknown>
  }
  qualityGate: QualityGateResult
}
|
utils/files.ts
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import { mkdir, readFile, writeFile } from 'node:fs/promises'
|
| 2 |
+
import { dirname } from 'node:path'
|
| 3 |
+
|
| 4 |
+
export async function readJsonFile<T>(path: string): Promise<T> {
|
| 5 |
+
const raw = await readFile(path, 'utf8')
|
| 6 |
+
return JSON.parse(raw) as T
|
| 7 |
+
}
|
| 8 |
+
|
| 9 |
+
export async function writeJsonFile(path: string, value: unknown): Promise<void> {
|
| 10 |
+
await mkdir(dirname(path), { recursive: true })
|
| 11 |
+
await writeFile(path, JSON.stringify(value, null, 2), 'utf8')
|
| 12 |
+
}
|
| 13 |
+
|
| 14 |
+
export async function writeTextFile(path: string, value: string): Promise<void> {
|
| 15 |
+
await mkdir(dirname(path), { recursive: true })
|
| 16 |
+
await writeFile(path, value, 'utf8')
|
| 17 |
+
}
|
utils/stats.ts
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import type { Distribution } from '../types.js'
|
| 2 |
+
|
| 3 |
+
function quantile(sortedValues: number[], q: number): number {
|
| 4 |
+
if (sortedValues.length === 0) return 0
|
| 5 |
+
if (sortedValues.length === 1) return sortedValues[0]!
|
| 6 |
+
const pos = (sortedValues.length - 1) * q
|
| 7 |
+
const lower = Math.floor(pos)
|
| 8 |
+
const upper = Math.ceil(pos)
|
| 9 |
+
if (lower === upper) return sortedValues[lower]!
|
| 10 |
+
const weight = pos - lower
|
| 11 |
+
return sortedValues[lower]! * (1 - weight) + sortedValues[upper]! * weight
|
| 12 |
+
}
|
| 13 |
+
|
| 14 |
+
export function buildDistribution(values: number[]): Distribution | null {
|
| 15 |
+
if (values.length === 0) return null
|
| 16 |
+
const sorted = [...values].sort((a, b) => a - b)
|
| 17 |
+
const total = sorted.reduce((sum, value) => sum + value, 0)
|
| 18 |
+
return {
|
| 19 |
+
min: sorted[0]!,
|
| 20 |
+
max: sorted[sorted.length - 1]!,
|
| 21 |
+
mean: total / sorted.length,
|
| 22 |
+
p50: quantile(sorted, 0.5),
|
| 23 |
+
p95: quantile(sorted, 0.95),
|
| 24 |
+
p99: quantile(sorted, 0.99),
|
| 25 |
+
}
|
| 26 |
+
}
|
| 27 |
+
|
| 28 |
+
export function clamp(value: number, min: number, max: number): number {
|
| 29 |
+
return Math.min(max, Math.max(min, value))
|
| 30 |
+
}
|