chore: update environment variable defaults in .env.example and inference.py for clarity and consistency
Browse files- .env.example +4 -14
- python/inference.py +2 -2
.env.example
CHANGED
|
@@ -4,18 +4,8 @@
|
|
| 4 |
# Mandatory hackathon secret (set this in HF Space secrets too)
|
| 5 |
HF_TOKEN=your_provider_api_key_here
|
| 6 |
|
| 7 |
-
# OpenAI-compatible endpoint
|
| 8 |
-
API_BASE_URL=
|
| 9 |
|
| 10 |
-
# Model to use
|
| 11 |
-
MODEL_NAME=
|
| 12 |
-
|
| 13 |
-
# Optional: provider-specific API key fallback for development
|
| 14 |
-
OPENAI_API_KEY=your_api_key_here
|
| 15 |
-
|
| 16 |
-
# Environment server URL (local Docker)
|
| 17 |
-
ENV_URL=http://localhost:7860
|
| 18 |
-
|
| 19 |
-
# Inference script flags
|
| 20 |
-
# --fast-mode : run heuristic (no LLM calls) for deterministic, instant runs
|
| 21 |
-
# --episodes N : number of episodes per task
|
|
|
|
| 4 |
# Mandatory hackathon secret (set this in HF Space secrets too)
|
| 5 |
HF_TOKEN=your_provider_api_key_here
|
| 6 |
|
| 7 |
+
# OpenAI-compatible endpoint (required - fill with your LLM provider URL)
|
| 8 |
+
API_BASE_URL=<your-active-endpoint>
|
| 9 |
|
| 10 |
+
# Model to use (required - fill with your model identifier)
|
| 11 |
+
MODEL_NAME=<your-active-model>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
python/inference.py
CHANGED
|
@@ -45,8 +45,8 @@ except ImportError:
|
|
| 45 |
# ── Constants ──────────────────────────────────────────────────────────────
|
| 46 |
|
| 47 |
ENV_URL = os.getenv("ENV_URL", "http://localhost:7860")
|
| 48 |
-
MODEL_NAME = os.getenv("MODEL_NAME", "")
|
| 49 |
-
API_BASE_URL = os.getenv("API_BASE_URL", "")
|
| 50 |
|
| 51 |
# ── Environment Variable Handling ─────────────────────────────────────────
|
| 52 |
# The LLM API credential is read from HF_TOKEN or OPENAI_API_KEY environment variables
|
|
|
|
| 45 |
# ── Constants ──────────────────────────────────────────────────────────────
|
| 46 |
|
| 47 |
ENV_URL = os.getenv("ENV_URL", "http://localhost:7860")
|
| 48 |
+
MODEL_NAME = os.getenv("MODEL_NAME", "<your-active-model>")
|
| 49 |
+
API_BASE_URL = os.getenv("API_BASE_URL", "<your-active-endpoint>")
|
| 50 |
|
| 51 |
# ── Environment Variable Handling ─────────────────────────────────────────
|
| 52 |
# The LLM API credential is read from HF_TOKEN or OPENAI_API_KEY environment variables
|