Add .env.example with all configurable environment variables

.env.example (ADDED, +65 -0):

# ───────────────────────────────────────────────────────────────
# Alpha Factory — Environment Configuration
# ───────────────────────────────────────────────────────────────
# Copy this file to .env and fill in your values:
#     cp .env.example .env
#
# The pipeline reads these automatically via os.getenv()
# ───────────────────────────────────────────────────────────────

# ─── HuggingFace (cloud models) ───────────────────────────────
# Get your token at: https://huggingface.co/settings/tokens
# Required for cloud models (Qwen-72B, DeepSeek-V3, Llama-4, etc.)
# Not needed if using only local Ollama models
HF_TOKEN=hf_xxxxxxxxxxxxxxxxxxxxxxxxxxxx

# ─── Ollama (local models) ────────────────────────────────────
# Default: http://localhost:11434
# Change if running Ollama on a different machine/port
OLLAMA_URL=http://localhost:11434

# ─── WorldQuant BRAIN API ─────────────────────────────────────
# Your BRAIN session credentials
# Get these from browser devtools after logging into brain.worldquant.com
# Network tab → any API request → copy cookie/authorization header
BRAIN_API_URL=https://api.worldquantbrain.com
BRAIN_SESSION_TOKEN=your_session_token_here

# ─── Pipeline Settings ────────────────────────────────────────
# Root directory for all pipeline data (factor store, logs, prompts)
AF_ROOT=.

# Batch size: how many alpha candidates to generate per run
AF_BATCH_SIZE=10

# Max BRAIN submissions per day (kill switch)
AF_DAILY_SUBMISSIONS_MAX=200

# ─── Model Defaults (used when not in interactive mode) ───────
# These are Ollama model names for local inference
# Override by running with --interactive flag
AF_MICROFISH_MODEL=qwen2.5:1.5b
AF_TINYFISH_MODEL=qwen2.5:3b
AF_MEDIUMFISH_MODEL=qwen2.5:7b
AF_BIGFISH_MODEL=qwen2.5:14b

# ─── LLM Server (if using vLLM instead of Ollama) ─────────────
# Uncomment if you're running vLLM or another OpenAI-compatible server
# LLM_BASE_URL=http://localhost:8000/v1
# LLM_API_KEY=dummy

# ─── RAG / Paper Index ────────────────────────────────────────
# ChromaDB persistence directory for arXiv paper embeddings
CHROMA_PERSIST_DIR=./data/chroma

# ─── Logging ──────────────────────────────────────────────────
# Langfuse (optional — self-hosted trace logging)
# LANGFUSE_HOST=http://localhost:3000
# LANGFUSE_PUBLIC_KEY=pk-xxx
# LANGFUSE_SECRET_KEY=sk-xxx

# ─── Advanced ─────────────────────────────────────────────────
# Fitness function thresholds (tune after 20+ alphas)
AF_MIN_SHARPE_LOCAL=1.0
AF_CORRELATION_THRESHOLD=0.65
AF_MAX_ITERATIONS_PER_FAMILY=3