# Application
APP_NAME="Graph RAG Service"
DEBUG=false
ENVIRONMENT=development
# API Server
API_HOST=0.0.0.0
API_PORT=8000
# Security
# ⚠️ CRITICAL: Change SECRET_KEY before ANY deployment.
# Generate one with: python -c "import secrets; print(secrets.token_hex(32))"
SECRET_KEY=change-this-in-production-to-a-secure-random-key
ACCESS_TOKEN_EXPIRE_MINUTES=30
# CORS: comma-separated list of allowed origins.
# Default allows only the local Vite dev server.
# Example for production: CORS_ORIGINS=https://yourdomain.com
CORS_ORIGINS=http://localhost:3000,http://localhost:5173
# Neo4j
NEO4J_URI=bolt://localhost:7687
NEO4J_USER=neo4j
NEO4J_PASSWORD=password
NEO4J_DATABASE=neo4j
# Redis
REDIS_HOST=localhost
REDIS_PORT=6379
REDIS_DB=0
# Celery
CELERY_BROKER_URL=redis://localhost:6379/0
CELERY_RESULT_BACKEND=redis://localhost:6379/0
# LLMs
DEFAULT_LLM_PROVIDER=ollama
# OpenAI
OPENAI_API_KEY=
# Anthropic
ANTHROPIC_API_KEY=
# Google Gemini
GOOGLE_API_KEY=
# LlamaCloud (for LlamaParse)
LLAMA_CLOUD_API_KEY=
USE_LLAMA_PARSE=true
# Ollama
OLLAMA_BASE_URL=http://localhost:11434
OLLAMA_MODEL=deepseek-v3.1:671b-cloud
OLLAMA_EMBEDDING_MODEL=nomic-embed-text
# Embedding
EMBEDDING_PROVIDER=ollama
EMBEDDING_DIMENSION=768
# Agent Configuration
MAX_AGENT_ITERATIONS=5
AGENT_TIMEOUT_SECONDS=30
# Observability
ENABLE_TRACING=true
ENABLE_METRICS=true
LOG_LEVEL=INFO