# Standard OpenEnv / inference variables
API_BASE_URL=https://integrate.api.nvidia.com/v1
MODEL_NAME=meta/llama-3.3-70b-instruct
HF_TOKEN=
OPENAI_API_KEY=
API_KEY=
LOCAL_IMAGE_NAME=gov-workflow-openenv:latest
MAX_STEPS=80
SUCCESS_SCORE_THRESHOLD=0.50
# Provider-specific API base URLs (used by frontend simulation bridge)
OPENAI_API_BASE_URL=https://api.openai.com/v1
NVIDIA_API_BASE_URL=https://integrate.api.nvidia.com/v1
# Optional fallback model lists (comma-separated)
MODEL_FALLBACKS=
NVIDIA_MODEL_FALLBACKS=
# NVIDIA Build API (fallback / internal)
# Copy this file to .env and fill in your values
# Get your key at: https://build.nvidia.com/explore/discover
NVIDIA_API_KEY=nvapi-your-key-here
NVIDIA_API_KEY_2=
# LLM Model Selection
NVIDIA_MODEL=meta/llama-3.3-70b-instruct
# Server Settings
SERVER_HOST=0.0.0.0
SERVER_PORT=7860
SERVER_LOG_LEVEL=info
SERVER_WORKERS=1
# Environment Settings
ENV_DEFAULT_TASK_ID=district_backlog_easy
ENV_DEFAULT_SEED=11
ENV_MAX_SESSIONS=100
ENV_MAX_STEPS_PER_EPISODE=500
# API Throttling
LLM_CALL_DELAY=12.0
# Persistence (SQLite + filesystem)
# For Hugging Face persistent storage, set OPENENV_DATA_DIR=/data/openenv_rl
STORAGE_ENABLED=true
OPENENV_DATA_DIR=
# Optional CSV list of extra model directories to scan for RL checkpoints.
# Example:
# OPENENV_MODEL_SEARCH_DIRS=/data/openenv_rl/results/best_model/phase1,/data/openenv_rl/results/best_model/phase2
OPENENV_MODEL_SEARCH_DIRS=