# Gov Workflow OpenEnv
# Detailed environment template for local runs, E2E validation, Docker preflight,
# and release deployment.
#
# Usage:
# 1) Copy this file to .env
# 2) Fill only the auth/provider values you use
# 3) Keep defaults unless you intentionally need different behavior
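#
# Example setup (POSIX shell; the template filename is illustrative):
#   cp .env.example .env
#   $EDITOR .env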

# -----------------------------------------------------------------------------
# 1) LLM Provider Endpoints and Auth
# -----------------------------------------------------------------------------
# Primary OpenAI-compatible endpoint used by inference/simulation runtime.
API_BASE_URL=https://integrate.api.nvidia.com/v1

# OpenAI-compatible model used for LLM inference mode.
MODEL_NAME=meta/llama-3.3-70b-instruct

# Auth precedence in runtime:
#   HF_TOKEN -> OPENAI_API_KEY -> API_KEY
HF_TOKEN=
OPENAI_API_KEY=
API_KEY=
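# Example (illustrative; assuming the precedence above means the first
# non-empty value in that order is used):
# HF_TOKEN=hf_xxxxxxxxxxxxxxxxxxxxxxxx
# OPENAI_API_KEY=
# API_KEY=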

# Optional local Docker image name (with tag) used by inference / utility flows.
LOCAL_IMAGE_NAME=gov-workflow-openenv:latest

# Inference acceptance criteria (inference.py).
MAX_STEPS=80
SUCCESS_SCORE_THRESHOLD=0.50
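# Example (stricter acceptance, illustrative values; assumed semantics: a run
# passes when its score reaches the threshold within MAX_STEPS steps):
# MAX_STEPS=40
# SUCCESS_SCORE_THRESHOLD=0.75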


# -----------------------------------------------------------------------------
# 2) Provider-Specific API Base URLs
# -----------------------------------------------------------------------------
# OpenAI-compatible provider URL (fallback path in engine/simulator).
OPENAI_API_BASE_URL=https://api.openai.com/v1

# NVIDIA provider URL for NIM calls.
NVIDIA_API_BASE_URL=https://integrate.api.nvidia.com/v1


# -----------------------------------------------------------------------------
# 3) Model Routing and Fallback Pools
# -----------------------------------------------------------------------------
# Optional comma-separated list of fallback models for the OpenAI-compatible runtime.
# Example:
# MODEL_FALLBACKS=meta/llama-3.1-8b-instruct,microsoft/phi-4-mini-instruct
MODEL_FALLBACKS=

# Optional comma-separated list of fallback models for the NVIDIA runtime.
NVIDIA_MODEL_FALLBACKS=
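# Example (illustrative, mirroring the MODEL_FALLBACKS example above):
# NVIDIA_MODEL_FALLBACKS=meta/llama-3.1-8b-instruct,microsoft/phi-4-mini-instruct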

# Primary NVIDIA model for NVIDIA-key runtime path.
NVIDIA_MODEL=meta/llama-3.3-70b-instruct

# NVIDIA keys for baseline and simulation fallback behavior.
# Get keys at: https://build.nvidia.com/explore/discover
NVIDIA_API_KEY=
NVIDIA_API_KEY_2=


# -----------------------------------------------------------------------------
# 4) Environment Transport (Direct vs HTTP)
# -----------------------------------------------------------------------------
# Used by inference / gateway code.
# Allowed: auto, http, direct
OPENENV_ENV_TRANSPORT=auto

# Base URL for HTTP transport path.
OPENENV_ENV_BASE_URL=http://127.0.0.1:7860
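# Example (illustrative): force the HTTP path against a locally running server.
# OPENENV_ENV_TRANSPORT=http
# OPENENV_ENV_BASE_URL=http://127.0.0.1:7860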

# Optional explicit API prefix for /reset /step /grade calls.
# Typical values: (empty), /api, /api/v1
OPENENV_ENV_API_PREFIX=

# Optional candidate prefixes (CSV) tried before built-in fallback sequence.
# Example: /api/v1,/api
OPENENV_ENV_API_PREFIX_CANDIDATES=

# Force HTTP/FastAPI gateway even when direct transport is available.
# Allowed truthy values: 1, true, yes, on
FORCE_FASTAPI_GATEWAY=0


# -----------------------------------------------------------------------------
# 5) Structured API Alias Controls (app.main)
# -----------------------------------------------------------------------------
# Enables automatic aliasing from source prefix to versioned prefix.
ENABLE_STRUCTURED_V1_API=1
OPENENV_API_SOURCE_PREFIX=/api
OPENENV_API_V1_PREFIX=/api/v1
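# Example (illustrative; assumes aliasing mirrors routes across the two
# prefixes): with the defaults above, a route served at /api/reset is also
# expected to answer at /api/v1/reset.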


# -----------------------------------------------------------------------------
# 6) FastAPI Server Settings (SERVER_* in app/config.py)
# -----------------------------------------------------------------------------
SERVER_HOST=0.0.0.0
SERVER_PORT=7860
SERVER_LOG_LEVEL=info

# Keep this at 1 while the session store is in-memory; raise it only after an
# external shared state store is added.
SERVER_WORKERS=1

# JSON-encoded list string, as expected by Pydantic settings.
SERVER_CORS_ORIGINS=["*"]
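# Example (restrict CORS to a local Vite dev server; the origin is illustrative):
# SERVER_CORS_ORIGINS=["http://localhost:5173"]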


# -----------------------------------------------------------------------------
# 7) Environment Defaults (ENV_* in app/config.py)
# -----------------------------------------------------------------------------
ENV_DEFAULT_TASK_ID=district_backlog_easy
ENV_DEFAULT_SEED=11
ENV_MAX_SESSIONS=100
ENV_MAX_STEPS_PER_EPISODE=500


# -----------------------------------------------------------------------------
# 8) Runtime Throttling
# -----------------------------------------------------------------------------
# Delay between LLM calls used by baseline_openai.py.
LLM_CALL_DELAY=12.0


# -----------------------------------------------------------------------------
# 9) Persistence and Storage
# -----------------------------------------------------------------------------
# Enables SQLite/filesystem persistence.
STORAGE_ENABLED=true

# Preferred persistence root (used by app/persistence.py).
# Local example: C:/Users/your-user/OPENENV_RL/outputs/persist
# HF Spaces example: /data/openenv_rl
OPENENV_DATA_DIR=

# Legacy fallback path key still supported by code.
STORAGE_DATA_DIR=


# -----------------------------------------------------------------------------
# 10) Frontend Dev Proxy (Vite)
# -----------------------------------------------------------------------------
# Used by frontend/react/vite.config.js as the local /api proxy target.
VITE_DEV_API_TARGET=http://127.0.0.1:7860
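# Example (illustrative): point the dev proxy at a backend on another port.
# VITE_DEV_API_TARGET=http://127.0.0.1:8000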