# NOTE(review): the three lines below are web-scrape/upload residue (uploader,
# commit message, commit hash). They are not valid shell and would break
# sourcing this file; preserved here as comments rather than deleted.
# garn-garn's picture
# Add anonymized executable code package
# 881f9f2 verified
# Experiment 1 Cell A — Agent-as-Tool + Direct Python tool calls (no MCP).
#
# This cell measures the baseline cost of the tool set without the MCP
# JSON-RPC layer, so (Cell B latency) - (Cell A latency) isolates MCP
# transport overhead. See docs/experiment1_capture_plan.md for the full
# design and docs/execution_plan.md for the 5-cell experimental grid.
#
# STATUS (2026-04-24): runner wired via scripts/aat_runner.py (#104).
# Cell A smoke evidence is the follow-up in Task 8 of the plan; the
# canonical Insomnia Llama-3.1-8B capture for #25 is also a follow-up.
#
# Usage once unblocked:
# sbatch --mail-type=BEGIN,END,FAIL --mail-user=$MAIL_USER \
# scripts/run_experiment.sh configs/aat_direct.env
# --- Experiment identity ----------------------------------------------------
EXPERIMENT_NAME="aat_direct"
EXPERIMENT_CELL="A"                # Cell A = direct Python tool calls, no MCP layer (see header)
EXPERIMENT_FAMILY="exp1_mcp_overhead"
# --- Scenario selection -----------------------------------------------------
SCENARIO_SET_NAME="smartgrid_multi_domain"
SCENARIOS_GLOB="data/scenarios/multi_*.json"   # multi-domain scenario files, relative to repo root
# --- Model / orchestration --------------------------------------------------
MODEL_ID="openai/Llama-3.1-8B-Instruct"        # model name as exposed by the serving endpoint
ORCHESTRATION="agent_as_tool"
MCP_MODE="direct"                 # "direct" = bypass the MCP JSON-RPC transport (vs. Cell B)
TRIALS=3                          # presumably repetitions per scenario — confirm in runner
ENABLE_SMARTGRID_SERVERS=0 # Direct path bypasses MCP servers entirely
# --- Provenance / bookkeeping -----------------------------------------------
CONTRIBUTING_EXPERIMENTS="exp1_mcp_overhead"
SCENARIO_DOMAIN_SCOPE="multi_domain"
# --- Serving stack metadata -------------------------------------------------
MODEL_PROVIDER="vllm"
SERVING_STACK="insomnia_vllm"     # Insomnia-hosted vLLM (see header re: canonical capture for #25)
QUANTIZATION_MODE="fp16"
# --- Context window ---------------------------------------------------------
# 32768 matches the repo's canonical benchmark-context lane (configs/example_*.env,
# configs/experiment2/*.env). Smoke configs (configs/aat_*_smoke.env) stay at
# 8192. See #135 for context: Cell A run 8979314 hit a replay context-window
# exceeded path at 8192 → 8193 input tokens; 32768 is the proven replay headroom.
MAX_MODEL_LEN=32768
# --- Decoding ---------------------------------------------------------------
TEMPERATURE=0.0      # greedy decoding for run-to-run reproducibility
MAX_TOKENS=0         # presumably 0 = no explicit completion cap — confirm in runner
# --- vLLM serving -----------------------------------------------------------
LAUNCH_VLLM=1        # presumably the run script launches its own vLLM instance when 1
VLLM_MODEL_PATH="models/Llama-3.1-8B-Instruct"
VLLM_PORT=8000
VLLM_ENABLE_AUTO_TOOL_CHOICE=1        # presumably forwarded as vLLM --enable-auto-tool-choice
VLLM_TOOL_CALL_PARSER="llama3_json"   # vLLM parser for Llama-3.x JSON-format tool calls
# --- Weights & Biases logging ------------------------------------------------
ENABLE_WANDB=1
WANDB_ENTITY="assetopsbench-smartgrid"
WANDB_PROJECT="assetopsbench-smartgrid"
WANDB_MODE="online"  # sync to the W&B server during the run (vs. "offline")
# --- Agent-as-Tool runner ----------------------------------------------------
# Agent-as-Tool dispatch now defaults to scripts/aat_runner.py. Set
# AAT_RUNNER_TEMPLATE only for custom parity/variant smoke commands.
AAT_PARALLEL_TOOL_CALLS=false   # presumably forces sequential tool calls — confirm in aat_runner.py
# --- Torch profiler -----------------------------------------------------------
# Torch profiler: captures one replay pass per run while vLLM is still live.
# Trace lands in profiling/traces/<RUN_ID>_torch/. Override: TORCH_PROFILE=0 sbatch ...
TORCH_PROFILE="${TORCH_PROFILE:-1}"   # defaults to on; ${VAR:-1} lets the caller's environment override