# -----------------------------------------------------------------------------
# Gov Workflow OpenEnv — environment template
# Detailed environment template for local run, E2E validation, Docker preflight,
# and release deployment.
#
# Usage:
#   1) Copy this file to .env
#   2) Fill only the auth/provider values you use
#   3) Keep defaults unless you intentionally need different behavior
# -----------------------------------------------------------------------------
| # 1) LLM Provider Endpoints and Auth | |
| # ----------------------------------------------------------------------------- | |
| # Primary OpenAI-compatible endpoint used by inference/simulation runtime. | |
| API_BASE_URL=https://integrate.api.nvidia.com/v1 | |
| # OpenAI-compatible model used for LLM inference mode. | |
| MODEL_NAME=meta/llama-3.3-70b-instruct | |
| # Auth precedence in runtime: | |
| # HF_TOKEN -> OPENAI_API_KEY -> API_KEY | |
| HF_TOKEN= | |
| OPENAI_API_KEY= | |
| API_KEY= | |
| # Optional image tag used by inference / utility flows. | |
| LOCAL_IMAGE_NAME=gov-workflow-openenv:latest | |
| # Inference acceptance criteria (inference.py). | |
| MAX_STEPS=80 | |
| SUCCESS_SCORE_THRESHOLD=0.50 | |
# -----------------------------------------------------------------------------
# 2) Provider-Specific API Base URLs
# -----------------------------------------------------------------------------
# OpenAI-compatible provider URL (fallback path in engine/simulator).
OPENAI_API_BASE_URL=https://api.openai.com/v1
# NVIDIA provider URL for NIM calls.
NVIDIA_API_BASE_URL=https://integrate.api.nvidia.com/v1
# -----------------------------------------------------------------------------
# 3) Model Routing and Fallback Pools
# -----------------------------------------------------------------------------
# Optional CSV list of fallback models for the OpenAI-compatible runtime.
# Example:
#   MODEL_FALLBACKS=meta/llama-3.1-8b-instruct,microsoft/phi-4-mini-instruct
MODEL_FALLBACKS=
# Optional CSV list of fallback models for the NVIDIA runtime.
NVIDIA_MODEL_FALLBACKS=
# Primary NVIDIA model for the NVIDIA-key runtime path.
NVIDIA_MODEL=meta/llama-3.3-70b-instruct
# NVIDIA keys for baseline and simulation fallback behavior.
# Get keys at: https://build.nvidia.com/explore/discover
NVIDIA_API_KEY=
NVIDIA_API_KEY_2=
# -----------------------------------------------------------------------------
# 4) Environment Transport (Direct vs HTTP)
# -----------------------------------------------------------------------------
# Used by inference / gateway code.
# Allowed values: auto, http, direct
OPENENV_ENV_TRANSPORT=auto
# Base URL for the HTTP transport path.
OPENENV_ENV_BASE_URL=http://127.0.0.1:7860
# Optional explicit API prefix for /reset, /step, and /grade calls.
# Typical values: (empty), /api, /api/v1
OPENENV_ENV_API_PREFIX=
# Optional candidate prefixes (CSV) tried before the built-in fallback sequence.
# Example: /api/v1,/api
OPENENV_ENV_API_PREFIX_CANDIDATES=
# Force the HTTP/FastAPI gateway even when direct transport is available.
# Allowed truthy values: 1, true, yes, on
FORCE_FASTAPI_GATEWAY=0
# -----------------------------------------------------------------------------
# 5) Structured API Alias Controls (app.main)
# -----------------------------------------------------------------------------
# Enables automatic aliasing from the source prefix to the versioned prefix.
ENABLE_STRUCTURED_V1_API=1
OPENENV_API_SOURCE_PREFIX=/api
OPENENV_API_V1_PREFIX=/api/v1
# -----------------------------------------------------------------------------
# 6) FastAPI Server Settings (SERVER_* in app/config.py)
# -----------------------------------------------------------------------------
SERVER_HOST=0.0.0.0
SERVER_PORT=7860
SERVER_LOG_LEVEL=info
# Keep at 1 for the in-memory session store unless external shared state is added.
SERVER_WORKERS=1
# JSON list string expected by Pydantic settings.
SERVER_CORS_ORIGINS=["*"]
# -----------------------------------------------------------------------------
# 7) Environment Defaults (ENV_* in app/config.py)
# -----------------------------------------------------------------------------
ENV_DEFAULT_TASK_ID=district_backlog_easy
ENV_DEFAULT_SEED=11
ENV_MAX_SESSIONS=100
ENV_MAX_STEPS_PER_EPISODE=500
# -----------------------------------------------------------------------------
# 8) Runtime Throttling
# -----------------------------------------------------------------------------
# Delay between LLM calls used by baseline_openai.py (presumably seconds — verify in code).
LLM_CALL_DELAY=12.0
# -----------------------------------------------------------------------------
# 9) Persistence and Storage
# -----------------------------------------------------------------------------
# Enables SQLite/filesystem persistence.
STORAGE_ENABLED=true
# Preferred persistence root (used by app/persistence.py).
# Local example: C:/Users/your-user/OPENENV_RL/outputs/persist
# HF Spaces example: /data/openenv_rl
OPENENV_DATA_DIR=
# Legacy fallback path key still supported by the code.
STORAGE_DATA_DIR=
# -----------------------------------------------------------------------------
# 10) Frontend Dev Proxy (Vite)
# -----------------------------------------------------------------------------
# Used by frontend/react/vite.config.js as the local /api proxy target.
VITE_DEV_API_TARGET=http://127.0.0.1:7860