# ───────────────────────────────────────────────────────────────
# Alpha Factory - Environment Configuration
# ───────────────────────────────────────────────────────────────
# Copy this file to .env and fill in your values:
#   cp .env.example .env
#
# The pipeline reads these automatically via os.getenv()
# ───────────────────────────────────────────────────────────────
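#
# Minimal sketch of how a value is read (illustrative only; the variable
# name below is just one example from this file):
#   import os
#   hf_token = os.getenv("HF_TOKEN")   # returns None if the variable is unset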
# ─── HuggingFace (cloud models) ────────────────────────────────
# Get your token at: https://huggingface.co/settings/tokens
# Required for cloud models (Qwen-72B, DeepSeek-V3, Llama-4, etc.)
# Not needed if using only local Ollama models
HF_TOKEN=hf_xxxxxxxxxxxxxxxxxxxxxxxxxxxx
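# Illustrative use only: a hedged sketch assuming the pipeline calls the
# Hugging Face Inference API through the `huggingface_hub` client (client and
# model id are assumptions, not confirmed from this repo):
#   import os
#   from huggingface_hub import InferenceClient
#   client = InferenceClient(model="Qwen/Qwen2.5-72B-Instruct", token=os.getenv("HF_TOKEN"))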
# ─── Ollama (local models) ─────────────────────────────────────
# Default: http://localhost:11434
# Change if running Ollama on a different machine/port
OLLAMA_URL=http://localhost:11434
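# Quick reachability check; GET /api/tags is part of Ollama's HTTP API and
# lists the models pulled locally:
#   import os, requests
#   requests.get(f"{os.getenv('OLLAMA_URL', 'http://localhost:11434')}/api/tags").raise_for_status()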
# ─── WorldQuant BRAIN API ──────────────────────────────────────
# Your BRAIN session credentials
# Get these from browser devtools after logging into brain.worldquant.com
# Network tab → any API request → copy cookie/authorization header
BRAIN_API_URL=https://api.worldquantbrain.com
BRAIN_SESSION_TOKEN=your_session_token_here
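# How the credentials are attached depends on what you copied from devtools;
# a hedged sketch using an Authorization header (header name and token scheme
# may differ for your session):
#   import os, requests
#   s = requests.Session()
#   s.headers["Authorization"] = os.getenv("BRAIN_SESSION_TOKEN", "")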
# ─── Pipeline Settings ─────────────────────────────────────────
# Root directory for all pipeline data (factor store, logs, prompts)
AF_ROOT=.
# Batch size: how many alpha candidates to generate per run
AF_BATCH_SIZE=10
# Max BRAIN submissions per day (kill switch)
AF_DAILY_SUBMISSIONS_MAX=200
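# Sketch of how these settings would be parsed (conversion logic is
# illustrative, not taken from the pipeline code):
#   import os
#   batch_size = int(os.getenv("AF_BATCH_SIZE", "10"))
#   daily_cap  = int(os.getenv("AF_DAILY_SUBMISSIONS_MAX", "200"))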
# ─── Model Defaults (used when not in interactive mode) ────────
# These are Ollama model names for local inference
# Override by running with the --interactive flag
AF_MICROFISH_MODEL=qwen2.5:1.5b
AF_TINYFISH_MODEL=qwen2.5:3b
AF_MEDIUMFISH_MODEL=qwen2.5:7b
AF_BIGFISH_MODEL=qwen2.5:14b
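# Each name should correspond to a model already pulled locally, e.g.:
#   ollama pull qwen2.5:7b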
# ─── LLM Server (if using vLLM instead of Ollama) ──────────────
# Uncomment if you're running vLLM or another OpenAI-compatible server
# LLM_BASE_URL=http://localhost:8000/v1
# LLM_API_KEY=dummy
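# If enabled, an OpenAI-compatible client can point at this server; a minimal
# sketch using the `openai` Python package (package choice is an assumption,
# not confirmed from this repo):
#   import os
#   from openai import OpenAI
#   llm = OpenAI(base_url=os.getenv("LLM_BASE_URL"), api_key=os.getenv("LLM_API_KEY", "dummy"))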
# ─── RAG / Paper Index ─────────────────────────────────────────
# ChromaDB persistence directory for arXiv paper embeddings
CHROMA_PERSIST_DIR=./data/chroma
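# Sketch of opening the persistent store with the standard `chromadb` client
# (client choice is an assumption, not confirmed from this repo):
#   import os, chromadb
#   chroma = chromadb.PersistentClient(path=os.getenv("CHROMA_PERSIST_DIR", "./data/chroma"))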
# ─── Logging ───────────────────────────────────────────────────
# Langfuse (optional; self-hosted trace logging)
# LANGFUSE_HOST=http://localhost:3000
# LANGFUSE_PUBLIC_KEY=pk-xxx
# LANGFUSE_SECRET_KEY=sk-xxx
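# The Langfuse Python SDK also reads LANGFUSE_HOST / LANGFUSE_PUBLIC_KEY /
# LANGFUSE_SECRET_KEY from the environment; explicit form for reference
# (constructor details may vary by SDK version):
#   import os
#   from langfuse import Langfuse
#   lf = Langfuse(host=os.getenv("LANGFUSE_HOST"),
#                 public_key=os.getenv("LANGFUSE_PUBLIC_KEY"),
#                 secret_key=os.getenv("LANGFUSE_SECRET_KEY"))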
# ─── Advanced ──────────────────────────────────────────────────
# Fitness function thresholds (tune after 20+ alphas)
AF_MIN_SHARPE_LOCAL=1.0
AF_CORRELATION_THRESHOLD=0.65
AF_MAX_ITERATIONS_PER_FAMILY=3
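# Hedged sketch of how these gates might be applied to a candidate alpha
# (variable names and comparison logic are illustrative, not from this repo):
#   import os
#   passes = (sharpe >= float(os.getenv("AF_MIN_SHARPE_LOCAL", "1.0"))
#             and max_corr <= float(os.getenv("AF_CORRELATION_THRESHOLD", "0.65")))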