adityss committed on
Commit
eb251de
·
1 Parent(s): 4c68447

chore: update environment variable defaults in .env.example and inference.py for clarity and consistency

Browse files
Files changed (2) hide show
  1. .env.example +4 -14
  2. python/inference.py +2 -2
.env.example CHANGED
@@ -4,18 +4,8 @@
4
  # Mandatory hackathon secret (set this in HF Space secrets too)
5
  HF_TOKEN=your_provider_api_key_here
6
 
7
- # OpenAI-compatible endpoint (default: OpenRouter free-tier)
8
- API_BASE_URL=https://openrouter.ai/api/v1
9
 
10
- # Model to use (change to smaller model if you need lower latency/cost)
11
- MODEL_NAME=your_chosen_model_name_here
12
-
13
- # Optional: provider-specific API key fallback for development
14
- OPENAI_API_KEY=your_api_key_here
15
-
16
- # Environment server URL (local Docker)
17
- ENV_URL=http://localhost:7860
18
-
19
- # Inference script flags
20
- # --fast-mode : run heuristic (no LLM calls) for deterministic, instant runs
21
- # --episodes N : number of episodes per task
 
4
  # Mandatory hackathon secret (set this in HF Space secrets too)
5
  HF_TOKEN=your_provider_api_key_here
6
 
7
+ # OpenAI-compatible endpoint (required - fill with your LLM provider URL)
8
+ API_BASE_URL=<your-active-endpoint>
9
 
10
+ # Model to use (required - fill with your model identifier)
11
+ MODEL_NAME=<your-active-model>
 
 
 
 
 
 
 
 
 
 
python/inference.py CHANGED
@@ -45,8 +45,8 @@ except ImportError:
45
  # ── Constants ──────────────────────────────────────────────────────────────
46
 
47
  ENV_URL = os.getenv("ENV_URL", "http://localhost:7860")
48
- MODEL_NAME = os.getenv("MODEL_NAME", "meta-llama/llama-3.3-70b-instruct:free")
49
- API_BASE_URL = os.getenv("API_BASE_URL", "https://openrouter.ai/api/v1")
50
 
51
  # ── Environment Variable Handling ─────────────────────────────────────────
52
  # The LLM API credential is read from HF_TOKEN or OPENAI_API_KEY environment variables
 
45
  # ── Constants ──────────────────────────────────────────────────────────────
46
 
47
  ENV_URL = os.getenv("ENV_URL", "http://localhost:7860")
48
+ MODEL_NAME = os.getenv("MODEL_NAME", "<your-active-model>")
49
+ API_BASE_URL = os.getenv("API_BASE_URL", "<your-active-endpoint>")
50
 
51
  # ── Environment Variable Handling ─────────────────────────────────────────
52
  # The LLM API credential is read from HF_TOKEN or OPENAI_API_KEY environment variables