# Experiment 1, exploratory Cell D: Agent-as-Tool + optimized MCP +
# model-side serving optimizations.
#
# Cell C keeps model precision fixed and measures recoverable MCP transport
# overhead. Cell D intentionally changes model-side serving too:
#   - optimized AaT MCP batch runner + connection reuse
#   - vLLM prefix caching
#   - compressed-tensors W8A8 INT8 checkpoint
#   - BF16 model dtype, which unlocks fp8 KV cache on vLLM 0.19.0
#
# This is a follow-on, "best-effort optimized serving" condition; it is not
# part of the clean transport-only A/B/C headline comparison.

EXPERIMENT_NAME="aat_mcp_model_optimized"
EXPERIMENT_CELL="D"
EXPERIMENT_FAMILY="exp1_model_optimization"
SCENARIO_SET_NAME="smartgrid_multi_domain"
SCENARIOS_GLOB="data/scenarios/multi_*.json"

MODEL_ID="openai/Llama-3.1-8B-Instruct-int8"
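# Note: the "openai/" provider prefix is assumed to route requests through an
# OpenAI-compatible client to the local vLLM server configured below.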

ORCHESTRATION="agent_as_tool"
MCP_MODE="optimized"
TRIALS=3
ENABLE_SMARTGRID_SERVERS=1
CONTRIBUTING_EXPERIMENTS="exp1_model_optimization"
SCENARIO_DOMAIN_SCOPE="multi_domain"

MODEL_PROVIDER="vllm"
SERVING_STACK="insomnia_vllm"
QUANTIZATION_MODE="compressed-tensors-int8-bf16-fp8kv"
# Keep the first exploratory D run aligned with the proven INT8 smoke path.
# If this hits the context-length limit under full AaT prompts, rerun D at
# MAX_MODEL_LEN=16384 or 32768.
MAX_MODEL_LEN=8192
TEMPERATURE=0.0
MAX_TOKENS=0  # 0 presumably means "no explicit completion cap" in this harness

LAUNCH_VLLM=1
VLLM_MODEL_PATH="models/Llama-3.1-8B-Instruct-int8"
VLLM_SERVED_MODEL_NAME="Llama-3.1-8B-Instruct-int8"
VLLM_DTYPE="bfloat16"
VLLM_PORT=8000
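# vLLM needs both of the next two settings for Llama 3.1 tool calling:
# --enable-auto-tool-choice turns on server-side tool selection, and
# llama3_json is vLLM's parser for Llama 3.x JSON-formatted tool calls.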
VLLM_ENABLE_AUTO_TOOL_CHOICE=1
VLLM_TOOL_CALL_PARSER="llama3_json"
VLLM_STARTUP_TIMEOUT=1200

# Model-side optimization knobs (#29 + #30 follow-on):
#   --quantization compressed-tensors: RedHatAI W8A8 checkpoint, smoke-proven
#       in Slurm job 8979660.
#   --kv-cache-dtype fp8: compatible with the BF16 dtype used by the INT8 path.
#   --enable-prefix-caching: same prompt-prefix reuse used by Cell C.
EXTRA_VLLM_ARGS="--quantization compressed-tensors --kv-cache-dtype fp8 --enable-prefix-caching"
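
# For reference, a sketch of the serve invocation these knobs compose into
# (assumption: the launcher forwards EXTRA_VLLM_ARGS verbatim; the exact
# assembly lives in the launch script, not here):
#
#   vllm serve "$VLLM_MODEL_PATH" \
#     --served-model-name "$VLLM_SERVED_MODEL_NAME" \
#     --dtype "$VLLM_DTYPE" \
#     --port "$VLLM_PORT" \
#     --max-model-len "$MAX_MODEL_LEN" \
#     --enable-auto-tool-choice \
#     --tool-call-parser "$VLLM_TOOL_CALL_PARSER" \
#     $EXTRA_VLLM_ARGS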

ENABLE_WANDB=1
WANDB_ENTITY="assetopsbench-smartgrid"
WANDB_PROJECT="assetopsbench-smartgrid"
WANDB_MODE="online"

AAT_MCP_SERVER_LAUNCH_MODE="python"
AAT_MCP_CLIENT_TIMEOUT_SECONDS=120
AAT_PARALLEL_TOOL_CALLS=false

# Torch profiler: captures one replay pass per run while vLLM is still live.
TORCH_PROFILE="${TORCH_PROFILE:-1}"
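
# A minimal sketch of one way to honor the flag (assumption: the harness uses
# vLLM's built-in torch profiler, which is enabled by exporting
# VLLM_TORCH_PROFILER_DIR before the server starts and is driven through the
# server's /start_profile and /stop_profile endpoints):
if [ "${TORCH_PROFILE}" = "1" ]; then
  export VLLM_TORCH_PROFILER_DIR="${VLLM_TORCH_PROFILER_DIR:-profiles/${EXPERIMENT_NAME}}"
fi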