#!/bin/bash
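#
# Capture Experiment 1 Cell A (direct tools) and Cell B (MCP baseline)
# back to back, wrapping each run in the profiling capture script. Must
# run inside a Slurm job (see the ${SLURM_JOB_ID:?...} guard below).
#
# Minimal submission sketch. The script path and resource flags here are
# illustrative assumptions, not values pinned by this repo:
#
#   sbatch --job-name=exp1_ab --gres=gpu:1 --time=04:00:00 \
#       scripts/capture_cells_ab.sh
#
# Fail fast: -e exits on any command failure, -u rejects unset variables,
# and pipefail makes a pipeline fail if any stage fails.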
set -euo pipefail

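# Resolve the repo root: Slurm exports SLURM_SUBMIT_DIR (the directory
# sbatch was invoked from); outside Slurm, fall back to the parent of
# this script's directory. umask 0002 keeps new outputs group-writable.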
REPO_ROOT="${SLURM_SUBMIT_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)}"
cd "$REPO_ROOT"
umask 0002

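# Mode 2775 sets the setgid bit so files created under logs/ inherit its
# group; chmod can fail on some filesystems, hence the || true.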
mkdir -p logs
chmod 2775 logs 2>/dev/null || true

| echo "=== Experiment 1 Cell A + B Capture ===" |
| echo "Node: $(hostname)" |
| echo "Slurm job: ${SLURM_JOB_ID:-N/A}" |
| echo "Repo root: $REPO_ROOT" |
| echo "Started: $(date -u +%Y-%m-%dT%H:%M:%SZ)" |
| echo "" |
|
|
| |
| |
| |
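# --- Run identifiers and output layout ---
# ${VAR:?message} aborts with the message when VAR is unset or empty,
# so this fails loudly if invoked outside sbatch/srun.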
| JOB="${SLURM_JOB_ID:?expected to be running inside a Slurm job}" |
|
|
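# One run ID per cell, namespaced by the Slurm job ID so repeated
# submissions never collide.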
CELL_A_RUN_ID="${JOB}_aat_direct"
CELL_B_RUN_ID="${JOB}_aat_mcp_baseline"

CELL_A_BENCH="benchmarks/cell_A_direct/raw/${CELL_A_RUN_ID}"
CELL_B_BENCH="benchmarks/cell_B_mcp_baseline/raw/${CELL_B_RUN_ID}"

CELL_A_PROF="profiling/traces/${JOB}_cell_a"
CELL_B_PROF="profiling/traces/${JOB}_cell_b"

mkdir -p "$CELL_A_PROF" "$CELL_B_PROF"

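# Helper: print a one-line pass/latency digest from a cell's
# summary.json. The key names are exactly those the original inline
# parsers read; the heredoc form just avoids duplicating the same
# fragile quote-escaping for both cells below.
print_summary() {
    local f="$1"
    [ -f "$f" ] || return 0
    python3 - "$f" <<'PY' || true
import json, sys
s = json.load(open(sys.argv[1]))
print(f" summary: {s['scenarios_completed']}/{s['scenarios_attempted']}"
      f" passed mean={s['latency_seconds_mean']:.1f}s")
PY
}
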
echo "--- Cell A: direct tools (configs/aat_direct.env) ---"
echo " Benchmark run dir: $CELL_A_BENCH"
echo " Profiling dir: $CELL_A_PROF"
echo ""

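# The VAR=value cmd form exports BENCHMARK_RUN_DIR to this one command
# only. capture_around.sh takes the profiling output dir as $1 and runs
# everything after "--" as the wrapped workload.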
BENCHMARK_RUN_DIR="$CELL_A_BENCH" \
  bash profiling/scripts/capture_around.sh "$CELL_A_PROF" \
  -- bash scripts/run_experiment.sh configs/aat_direct.env

| echo "" |
| echo "Cell A complete." |
| echo " nvidia-smi: $CELL_A_PROF/nvidia_smi.csv" |
| echo " capture meta: $CELL_A_PROF/capture_meta.json" |
| [ -f "$CELL_A_BENCH/summary.json" ] && \ |
| python3 -c "import json; s=json.load(open('$CELL_A_BENCH/summary.json')); print(f' summary: {s[\"scenarios_completed\"]}/{s[\"scenarios_attempted\"]} passed mean={s[\"latency_seconds_mean\"]:.1f}s')" || true |
| echo "" |
|
echo "--- Cell B: MCP baseline (configs/aat_mcp_baseline.env) ---"
echo " Benchmark run dir: $CELL_B_BENCH"
echo " Profiling dir: $CELL_B_PROF"
echo ""

BENCHMARK_RUN_DIR="$CELL_B_BENCH" \
  bash profiling/scripts/capture_around.sh "$CELL_B_PROF" \
  -- bash scripts/run_experiment.sh configs/aat_mcp_baseline.env

| echo "" |
| echo "Cell B complete." |
| echo " nvidia-smi: $CELL_B_PROF/nvidia_smi.csv" |
| echo " capture meta: $CELL_B_PROF/capture_meta.json" |
| [ -f "$CELL_B_BENCH/summary.json" ] && \ |
| python3 -c "import json; s=json.load(open('$CELL_B_BENCH/summary.json')); print(f' summary: {s[\"scenarios_completed\"]}/{s[\"scenarios_attempted\"]} passed mean={s[\"latency_seconds_mean\"]:.1f}s')" || true |
| echo "" |
|
echo "=== Capture complete ==="
echo "Cell A bench: $CELL_A_BENCH"
echo "Cell B bench: $CELL_B_BENCH"
echo "Cell A prof: $CELL_A_PROF"
echo "Cell B prof: $CELL_B_PROF"
echo ""
echo "Next steps:"
echo " 1. Check both summary.json files above for pass/fail counts."
echo " 2. Verify the W&B runs landed in your configured W&B project."
echo " 3. Add a docs/validation_log.md entry referencing these run dirs and the W&B run URLs."
echo " 4. Run Notebook 02 parser checks against the latencies.jsonl files:"
echo "      $CELL_A_BENCH/latencies.jsonl"
echo "      $CELL_B_BENCH/latencies.jsonl"
echo " 5. Check torch profiler traces (if TORCH_PROFILE=1 was set in the configs):"
echo "      profiling/traces/${JOB}_aat_direct_torch/"
echo "      profiling/traces/${JOB}_aat_mcp_baseline_torch/"
echo "    vLLM 0.19 emits *.pt.trace.json.gz; open it at https://ui.perfetto.dev (it reads .gz directly),"
echo "    or run 'gunzip -k <file>.pt.trace.json.gz' first and load the JSON in chrome://tracing."
echo "Finished: $(date -u +%Y-%m-%dT%H:%M:%SZ)"
|
|