# Experiment configuration (shell env file, intended to be sourced).
# NOTE(review): the original file was wrapped in markdown-table pipes
# ("| … |"), which is invalid shell; this version restores plain
# VAR=value assignments with all names and values unchanged.

# --- Experiment identity -------------------------------------------------
EXPERIMENT_NAME="aat_mcp_model_optimized"
EXPERIMENT_CELL="D"
EXPERIMENT_FAMILY="exp1_model_optimization"
SCENARIO_SET_NAME="smartgrid_multi_domain"
SCENARIOS_GLOB="data/scenarios/multi_*.json"

# --- Model under test ----------------------------------------------------
MODEL_ID="openai/Llama-3.1-8B-Instruct-int8"

# --- Run configuration ---------------------------------------------------
ORCHESTRATION="agent_as_tool"
MCP_MODE="optimized"
TRIALS=3
ENABLE_SMARTGRID_SERVERS=1
CONTRIBUTING_EXPERIMENTS="exp1_model_optimization"
SCENARIO_DOMAIN_SCOPE="multi_domain"

# --- Serving stack -------------------------------------------------------
MODEL_PROVIDER="vllm"
SERVING_STACK="insomnia_vllm"
QUANTIZATION_MODE="compressed-tensors-int8-bf16-fp8kv"

# --- Generation limits ---------------------------------------------------
MAX_MODEL_LEN=8192
TEMPERATURE=0.0
# NOTE(review): MAX_TOKENS=0 presumably means "no explicit cap / use server
# default" — confirm against the consumer of this config.
MAX_TOKENS=0

# --- vLLM server launch --------------------------------------------------
LAUNCH_VLLM=1
VLLM_MODEL_PATH="models/Llama-3.1-8B-Instruct-int8"
VLLM_SERVED_MODEL_NAME="Llama-3.1-8B-Instruct-int8"
VLLM_DTYPE="bfloat16"
VLLM_PORT=8000
VLLM_ENABLE_AUTO_TOOL_CHOICE=1
VLLM_TOOL_CALL_PARSER="llama3_json"
VLLM_STARTUP_TIMEOUT=1200

# Extra flags appended to the vLLM launch command.
EXTRA_VLLM_ARGS="--quantization compressed-tensors --kv-cache-dtype fp8 --enable-prefix-caching"

# --- Weights & Biases logging --------------------------------------------
ENABLE_WANDB=1
WANDB_ENTITY="assetopsbench-smartgrid"
WANDB_PROJECT="assetopsbench-smartgrid"
WANDB_MODE="online"

# --- Agent-as-tool / MCP client settings ---------------------------------
AAT_MCP_SERVER_LAUNCH_MODE="python"
AAT_MCP_CLIENT_TIMEOUT_SECONDS=120
AAT_PARALLEL_TOOL_CALLS=false

# --- Profiling -----------------------------------------------------------
# Defaults to 1 (enabled) unless TORCH_PROFILE is already set in the
# environment when this file is sourced.
TORCH_PROFILE="${TORCH_PROFILE:-1}"