File size: 2,436 Bytes
881f9f2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
# Example benchmark config for scripts/run_experiment.sh
# Sourced as bash; quote any values that contain whitespace.
#
# Usage:
#   DRY_RUN=1 bash scripts/run_experiment.sh configs/example_baseline.env
#   sbatch scripts/run_experiment.sh configs/example_baseline.env

# --- Required benchmark identity ---

EXPERIMENT_NAME="pe_mcp_baseline_smoke"
EXPERIMENT_CELL="Y"
EXPERIMENT_FAMILY="exp2_orchestration"
SCENARIO_SET_NAME="smartgrid_multi_domain_smoke"
SCENARIOS_GLOB="data/scenarios/multi_*.json"

# Model string passed to AssetOpsBench.
#
# WatsonX path (fully documented in docs/eval_harness_readme.md):
# MODEL_ID="watsonx/meta-llama/llama-3-3-70b-instruct"
#
# Local vLLM / OpenAI-compatible path:
# - keep LAUNCH_VLLM=1
# - use the model string your local LiteLLM client accepts for the endpoint
#   behind LITELLM_BASE_URL
# - current shared-team smoke testing uses this openai-style model string
MODEL_ID="openai/Llama-3.1-8B-Instruct"

# --- Experiment condition ---

ORCHESTRATION="plan_execute"
MCP_MODE="baseline"
TRIALS=1
ENABLE_SMARTGRID_SERVERS=1

# Mark shared Cell B style dual-use runs explicitly when relevant, e.g.:
# CONTRIBUTING_EXPERIMENTS="exp1_mcp_overhead,exp2_orchestration"
CONTRIBUTING_EXPERIMENTS=""
SCENARIO_DOMAIN_SCOPE="multi_domain"

# --- Model / serving metadata ---

MODEL_PROVIDER="vllm"
SERVING_STACK="insomnia_vllm"
QUANTIZATION_MODE="fp16"
# 32768 matches the repo's canonical benchmark-context lane. See #135.
MAX_MODEL_LEN=32768
TEMPERATURE=0.0
MAX_TOKENS=0
JUDGE_MODEL=""

# --- Local vLLM path ---

LAUNCH_VLLM=1
VLLM_MODEL_PATH="models/Llama-3.1-8B-Instruct"
VLLM_PORT=8000

# --- Benchmark harness checkout ---

# Defaults to ../AssetOpsBench relative to the shared project root, which works
# from both the main checkout and worktrees. Override if your checkout lives
# elsewhere.
# AOB_PATH="/abs/path/to/AssetOpsBench"

# --- WandB ---

ENABLE_WANDB=1
WANDB_ENTITY="assetopsbench-smartgrid"
WANDB_PROJECT="assetopsbench-smartgrid"

# Use "offline" for a dry local smoke if WANDB_API_KEY is not available.
WANDB_MODE="online"

# --- Optional adapter surfaces for non-PE orchestration modes ---

# Required if ORCHESTRATION="agent_as_tool"
# AAT_RUNNER_TEMPLATE='cd "$AOB_PATH" && uv run python path/to/aat_runner.py "$PROMPT" >"$OUTPUT_PATH"'

# Required if ORCHESTRATION="hybrid"
# HYBRID_RUNNER_TEMPLATE='cd "$AOB_PATH" && uv run python path/to/hybrid_runner.py "$PROMPT" >"$OUTPUT_PATH"'