# Experiment 1 Cell C — Agent-as-Tool + MCP optimized.
#
# Same tools as Cell B (MCP baseline), with two layers of optimization:
# - MCP transport: batch runner + connection reuse (Anonymous reviewer's #31)
# - vLLM serving: prefix caching only (this config, Lane 2 / #30)
# The (Cell B - Cell C) latency delta quantifies how much of the transport
# overhead is recoverable when both optimizations are stacked. See
# docs/experiment1_capture_plan.md for the full design and
# docs/lane2_int8_kv_status.md for the Lane 2 rationale.
#
# STATUS (2026-04-30): Both optimization layers now wired and captured:
# - KV-cache: prefix caching only (fp8 deferred — see status doc §"#30 KV-Cache")
# - MCP transport: batch runner + connection reuse (#31)
# - parallel tool calls disabled: vLLM 0.19.0 + Llama-3.1-8B-Instruct
#   rejects parallel tool-call requests ("This model only supports single
#   tool-calls at once!"). Successful Cell C proof: Slurm job 9071639.
# - INT8 deferred (see status doc).
EXPERIMENT_NAME="aat_mcp_optimized"
EXPERIMENT_CELL="C"
EXPERIMENT_FAMILY="exp1_mcp_overhead"
SCENARIO_SET_NAME="smartgrid_multi_domain"
SCENARIOS_GLOB="data/scenarios/multi_*.json"
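# Example match (assuming the usual scenario-name-to-file mapping; the
# scenario itself is named in the Lane 2 smoke notes below):
#   data/scenarios/multi_01_end_to_end_fault_response.json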
MODEL_ID="openai/Llama-3.1-8B-Instruct"
ORCHESTRATION="agent_as_tool"
MCP_MODE="optimized"
TRIALS=3
ENABLE_SMARTGRID_SERVERS=1
CONTRIBUTING_EXPERIMENTS="exp1_mcp_overhead"
SCENARIO_DOMAIN_SCOPE="multi_domain"
MODEL_PROVIDER="vllm"
SERVING_STACK="insomnia_vllm"
QUANTIZATION_MODE="fp16"
# 32768 matches the repo's canonical benchmark-context lane. See #135.
MAX_MODEL_LEN=32768
TEMPERATURE=0.0
MAX_TOKENS=0
LAUNCH_VLLM=1
VLLM_MODEL_PATH="models/Llama-3.1-8B-Instruct"
VLLM_PORT=8000
VLLM_ENABLE_AUTO_TOOL_CHOICE=1
VLLM_TOOL_CALL_PARSER="llama3_json"
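# For reference, these knobs imply a serve command roughly like the sketch
# below (the actual command is assembled by the launcher when LAUNCH_VLLM=1;
# the flags shown are standard vLLM CLI options):
#   vllm serve models/Llama-3.1-8B-Instruct --port 8000 \
#     --max-model-len 32768 --enable-auto-tool-choice \
#     --tool-call-parser llama3_json \
#     --enable-prefix-caching          # from EXTRA_VLLM_ARGS below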
# Lane 2 optimization knobs (#30):
# --enable-prefix-caching: AOB system prompt + tool catalog are identical
#   across every turn and every scenario. Prefix caching skips re-prefill
#   of those tokens after the first turn. Direct win for ReAct workloads.
#   Lane 2 smoke (job 8979532) measured 7.77s → 5.64s wall-clock on
#   multi_01_end_to_end_fault_response (-27%). See status doc.
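#   (Arithmetic check: (7.77 - 5.64) / 7.77 ≈ 27.4%, matching the -27%.)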
# `--kv-cache-dtype fp8` was the original Lane 2 second pick but failed in
# the smoke: the vLLM 0.19.0 FlashAttention-3 kernel rejects fp8 KV with
# FP16 weights ("For FP8 input, output must have dtype BF16"). Switching the
# model to BF16 to enable fp8 KV would change inference precision and
# confound (B-A) and (B-C). Dropped from this config; documented in the
# status doc.
# INT8 weight quantization is intentionally NOT enabled — see
# docs/lane2_int8_kv_status.md §"Why we're deferring INT8".
EXTRA_VLLM_ARGS="--enable-prefix-caching"
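# For reference, the rejected fp8 KV variant would have looked like the line
# below (do NOT enable; see the FlashAttention-3 constraint above):
#   EXTRA_VLLM_ARGS="--enable-prefix-caching --kv-cache-dtype fp8"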
ENABLE_WANDB=1
WANDB_ENTITY="assetopsbench-smartgrid"
WANDB_PROJECT="assetopsbench-smartgrid"
WANDB_MODE="online"
# Cell C optimization knobs — wired into scripts/aat_runner.py --mcp-mode optimized.
# Keep parallel tool calls disabled for the canonical Insomnia/Llama-vLLM path;
# Cell C's measured optimization is connection reuse plus prefix caching.
AAT_MCP_SERVER_LAUNCH_MODE="python"
AAT_MCP_CLIENT_TIMEOUT_SECONDS=120
AAT_PARALLEL_TOOL_CALLS=false
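# Invocation sketch (only --mcp-mode is confirmed above; the other flag
# names are hypothetical, check scripts/aat_runner.py for the real CLI):
#   python scripts/aat_runner.py --mcp-mode optimized \
#     --trials "$TRIALS" --scenarios "$SCENARIOS_GLOB"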
# Torch profiler: captures one replay pass per run while vLLM is still live.
TORCH_PROFILE="${TORCH_PROFILE:-1}"
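# ${TORCH_PROFILE:-1} defaults to 1 only when TORCH_PROFILE is unset or
# empty, so export TORCH_PROFILE=0 before sourcing this file to skip the
# profiler pass.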