# Example benchmark config for scripts/run_experiment.sh
# Plan-Execute baseline with the repo-local Self-Ask clarification hook enabled.
#
# NOTE(review): this file was previously collapsed onto one line behind the
# leading '#', which turned every assignment into comment text — sourcing it
# set nothing. Restored to one assignment per line.

# --- Experiment identity ---
EXPERIMENT_NAME="pe_self_ask_mcp_baseline_smoke"
EXPERIMENT_CELL="Y"
EXPERIMENT_FAMILY="exp2_orchestration"

# --- Scenario selection ---
SCENARIO_SET_NAME="smartgrid_multi_domain_smoke"
SCENARIOS_GLOB="data/scenarios/multi_*.json"

# --- Model & orchestration ---
# MODEL_ID uses the "openai/<name>" form — presumably the OpenAI-compatible
# endpoint naming used by the vLLM server below; confirm against run_experiment.sh.
MODEL_ID="openai/Llama-3.1-8B-Instruct"
ORCHESTRATION="plan_execute"
MCP_MODE="baseline"
TRIALS=1

# --- Feature toggles (1 = enabled) ---
ENABLE_SMARTGRID_SERVERS=1
ENABLE_SELF_ASK=1

# --- Experiment bookkeeping ---
CONTRIBUTING_EXPERIMENTS="exp2_orchestration"
SCENARIO_DOMAIN_SCOPE="multi_domain"

# --- Serving stack ---
MODEL_PROVIDER="vllm"
SERVING_STACK="insomnia_vllm"
QUANTIZATION_MODE="fp16"
MAX_MODEL_LEN=32768

# --- Generation parameters ---
TEMPERATURE=0.0
# MAX_TOKENS=0 — presumably "use the serving stack's default / no explicit cap";
# verify how run_experiment.sh interprets 0 before relying on it.
MAX_TOKENS=0
# Empty JUDGE_MODEL — presumably disables LLM-as-judge evaluation; confirm.
JUDGE_MODEL=""

# --- Local vLLM launch ---
LAUNCH_VLLM=1
VLLM_MODEL_PATH="models/Llama-3.1-8B-Instruct"
VLLM_SERVED_MODEL_NAME="Llama-3.1-8B-Instruct"
VLLM_PORT=8000

# --- Weights & Biases logging ---
ENABLE_WANDB=1
WANDB_ENTITY="assetopsbench-smartgrid"
WANDB_PROJECT="assetopsbench-smartgrid"
WANDB_MODE="online"