# --- Provenance (review-artifact page header) ---
# Kept as comments: these lines were captured from the artifact listing and
# are not shell. Left bare they break sourcing — the apostrophe in
# "garn-garn's" opens an unterminated single quote, which is a bash parse
# error for the whole file.
# smartgridbench-review-artifact / code /configs /example_baseline.env
# garn-garn's picture
# Add anonymized executable code package
# 881f9f2 verified
# Example benchmark config consumed by scripts/run_experiment.sh.
# This file is sourced by bash, so wrap any value containing whitespace
# in quotes.
#
# Typical invocations:
#   DRY_RUN=1 bash scripts/run_experiment.sh configs/example_baseline.env
#   sbatch scripts/run_experiment.sh configs/example_baseline.env

# --- Required benchmark identity ---
EXPERIMENT_NAME="pe_mcp_baseline_smoke"
EXPERIMENT_CELL="Y"
EXPERIMENT_FAMILY="exp2_orchestration"
SCENARIO_SET_NAME="smartgrid_multi_domain_smoke"
# Glob is expanded by the harness, not here — keep it quoted.
SCENARIOS_GLOB="data/scenarios/multi_*.json"
# Model identifier forwarded to AssetOpsBench.
#
# WatsonX route (fully documented in docs/eval_harness_readme.md):
#   MODEL_ID="watsonx/meta-llama/llama-3-3-70b-instruct"
#
# Local vLLM / OpenAI-compatible route:
#   * leave LAUNCH_VLLM=1
#   * supply whichever model string your local LiteLLM client accepts for
#     the endpoint behind LITELLM_BASE_URL
#   * shared-team smoke testing currently uses the openai-style string below
MODEL_ID="openai/Llama-3.1-8B-Instruct"
# --- Experiment condition ---
ORCHESTRATION="plan_execute"
MCP_MODE="baseline"
TRIALS="1"
ENABLE_SMARTGRID_SERVERS="1"
# When a run doubles as shared Cell-B style dual-use data, list every
# experiment it contributes to, e.g.:
#   CONTRIBUTING_EXPERIMENTS="exp1_mcp_overhead,exp2_orchestration"
CONTRIBUTING_EXPERIMENTS=""
SCENARIO_DOMAIN_SCOPE="multi_domain"
# --- Model / serving metadata ---
MODEL_PROVIDER="vllm"
SERVING_STACK="insomnia_vllm"
QUANTIZATION_MODE="fp16"
# 32768 is the repo's canonical benchmark-context lane. See #135.
MAX_MODEL_LEN="32768"
TEMPERATURE="0.0"
# NOTE(review): 0 presumably means "no explicit cap / harness default" —
# confirm against scripts/run_experiment.sh before relying on it.
MAX_TOKENS="0"
# Empty string disables judge-model scoring for this smoke config.
JUDGE_MODEL=""
# --- Local vLLM serving path ---
LAUNCH_VLLM="1"
VLLM_MODEL_PATH="models/Llama-3.1-8B-Instruct"
VLLM_PORT="8000"

# --- Benchmark harness checkout ---
# AOB_PATH defaults to ../AssetOpsBench relative to the shared project
# root, which resolves correctly from both the main checkout and any
# worktrees. Uncomment and set only if your checkout lives elsewhere.
# AOB_PATH="/abs/path/to/AssetOpsBench"
# --- Weights & Biases logging ---
ENABLE_WANDB="1"
WANDB_ENTITY="assetopsbench-smartgrid"
WANDB_PROJECT="assetopsbench-smartgrid"
# Switch to "offline" for a dry local smoke when WANDB_API_KEY is absent.
WANDB_MODE="online"
# --- Optional adapter surfaces for non-PE orchestration modes ---
# Required if ORCHESTRATION="agent_as_tool"
# AAT_RUNNER_TEMPLATE='cd "$AOB_PATH" && uv run python path/to/aat_runner.py "$PROMPT" >"$OUTPUT_PATH"'
# Required if ORCHESTRATION="hybrid"
# HYBRID_RUNNER_TEMPLATE='cd "$AOB_PATH" && uv run python path/to/hybrid_runner.py "$PROMPT" >"$OUTPUT_PATH"'