# Copy to .env and fill in values. Do not commit .env.

# --- Baseline inference (`python inference.py`) ---

# OpenAI-compatible API (e.g. OpenAI, Hugging Face Inference Providers router).
API_BASE_URL=https://router.huggingface.co/v1

# Model id as accepted by that API.
MODEL_NAME=Qwen/Qwen2.5-7B-Instruct

# API key for the LLM. Hugging Face token for HF router; OpenAI key for api.openai.com.
# At least one of HF_TOKEN or OPENAI_API_KEY should be set for inference.
HF_TOKEN=
OPENAI_API_KEY=

# HTTP base URL of this environment (local server or deployed Space).
ENV_BASE_URL=http://localhost:7860