# Mitigation rerun - Plan-Execute + Self-Ask with missing-evidence guard.
#
# Rerun this against the same frozen scenario set as the existing PE + Self-Ask
# baseline to populate results/metrics/mitigation_before_after.csv for #66.
#
# NOTE(review): this file previously had every line wrapped in markdown-table
# pipes (`| ... | |`), which is not valid shell; the assignments below are the
# original values with that wrapping removed.

# --- Experiment identity ------------------------------------------------------
EXPERIMENT_NAME="mitigation_missing_evidence_guard_pe_self_ask"
EXPERIMENT_CELL="Y"
EXPERIMENT_FAMILY="failure_mitigation"

# --- Scenario selection -------------------------------------------------------
SCENARIO_SET_NAME="smartgrid_multi_domain"
SCENARIOS_GLOB="data/scenarios/multi_*.json"
SCENARIO_DOMAIN_SCOPE="multi_domain"

# --- Model / orchestration ----------------------------------------------------
MODEL_ID="openai/Llama-3.1-8B-Instruct"
ORCHESTRATION="plan_execute"
MCP_MODE="baseline"
TRIALS=3

# --- Mitigation toggles (1 = enabled) -----------------------------------------
ENABLE_SELF_ASK=1
ENABLE_MISSING_EVIDENCE_GUARD=1
ENABLE_SMARTGRID_SERVERS=1

# Experiments whose results feed the before/after comparison for #66.
CONTRIBUTING_EXPERIMENTS="failure_mitigation,exp2_orchestration"

# --- Serving stack ------------------------------------------------------------
MODEL_PROVIDER="vllm"
SERVING_STACK="insomnia_vllm"
QUANTIZATION_MODE="fp16"
MAX_MODEL_LEN=32768

# --- Sampling -----------------------------------------------------------------
TEMPERATURE=0.0
# 0 presumably means "harness default / unlimited" — confirm against the runner.
MAX_TOKENS=0
# Empty string: no judge model configured for this run.
JUDGE_MODEL=""

# --- vLLM launch --------------------------------------------------------------
LAUNCH_VLLM=1
VLLM_MODEL_PATH="models/Llama-3.1-8B-Instruct"
VLLM_PORT=8000

# --- Weights & Biases logging -------------------------------------------------
ENABLE_WANDB=1
WANDB_ENTITY="assetopsbench-smartgrid"
WANDB_PROJECT="assetopsbench-smartgrid"
WANDB_MODE="online"

# Honor a pre-set TORCH_PROFILE from the environment; default to enabled (1).
TORCH_PROFILE="${TORCH_PROFILE:-1}"