# ────────────────────────────────────────────────────────────────
# 🦞 HuggingClaw — OpenClaw Gateway for HuggingFace Spaces
# ────────────────────────────────────────────────────────────────
# Copy this file to .env and fill in your values.
# For local development: cp .env.example .env && nano .env

# Extra apt packages baked into the OpenClaw Docker image
OPENCLAW_DOCKER_APT_PACKAGES="ffmpeg"
# ── REQUIRED: Core Configuration ──
# [REQUIRED] LLM provider API key
# - Anthropic: sk-ant-v0-...
# - OpenAI: sk-...
# - Google: AIzaSy...
# - OpenRouter: sk-or-v1-... (300+ models via single key)
LLM_API_KEY=your_api_key_here

# [REQUIRED] LLM model to use (format: provider/model-name)
# Auto-detects provider from prefix — any provider is supported!
# Provider IDs from OpenClaw docs: docs.openclaw.ai/concepts/model-providers
#
# Examples by provider:
#
# Anthropic:
# - anthropic/claude-opus-4-6
# - anthropic/claude-sonnet-4-6
# - anthropic/claude-sonnet-4-5
# - anthropic/claude-haiku-4-5
#
# OpenAI:
# - openai/gpt-5.4-pro
# - openai/gpt-5.4
# - openai/gpt-5.4-mini
# - openai/gpt-5.4-nano
# - openai/gpt-4.1
# - openai/gpt-4.1-mini
#
# Google:
# - google/gemini-3.1-pro-preview
# - google/gemini-3-flash-preview
# - google/gemini-2.5-pro
# - google/gemini-2.5-flash
#
# DeepSeek:
# - deepseek/deepseek-v3.2
# - deepseek/deepseek-r1-0528
# - deepseek/deepseek-r1
#
# OpenCode:
# - opencode/claude-opus-4-6
# - opencode/gpt-5.4
# Get key from: https://opencode.ai/auth
#
# OpenCode Go:
# - opencode-go/kimi-k2.5
#
# OpenRouter:
# - openrouter/anthropic/claude-sonnet-4-6
# - openrouter/openai/gpt-5.4
# - openrouter/deepseek/deepseek-v3.2
# - openrouter/meta-llama/llama-3.3-70b-instruct:free
# Get key from: https://openrouter.ai
#
# Kilo Code:
# - kilocode/anthropic/claude-opus-4.6
#
# Z.AI:
# - zai/glm-5
# - zai/glm-5-turbo
# - zai/glm-4.7
# - zai/glm-4.7-flash
#
# Moonshot:
# - moonshot/kimi-k2.5
# - moonshot/kimi-k2-thinking
#
# MiniMax:
# - minimax/minimax-m2.7
# - minimax/minimax-m2.5
#
# Xiaomi:
# - xiaomi/mimo-v2-pro
# - xiaomi/mimo-v2-omni
#
# Volcengine:
# - volcengine/doubao-seed-1-8-251228
# - volcengine/kimi-k2-5-260127
#
# BytePlus:
# - byteplus/seed-1-8-251228
#
# Mistral:
# - mistral/mistral-large-latest
# - mistral/mistral-small-2603
# - mistral/devstral-medium
#
# xAI:
# - xai/grok-4.20-beta
# - xai/grok-4
#
# NVIDIA:
# - nvidia/nemotron-3-super-120b-a12b
#
# Groq:
# - groq/mixtral-8x7b-32768
#
# Cohere:
# - cohere/command-a
#
# Together:
# - together/meta-llama/llama-3.3-70b-instruct
#
# Cerebras:
# - cerebras/zai-glm-4.7
#
# Hugging Face:
# - huggingface/deepseek-ai/DeepSeek-R1
LLM_MODEL=anthropic/claude-sonnet-4-5

# [REQUIRED] Gateway authentication token
# Generate: openssl rand -hex 32
GATEWAY_TOKEN=your_gateway_token_here

# (Optional) Password auth — simpler alternative to token for casual users
# If set, users can log in with this password instead of the token
# OPENCLAW_PASSWORD=your_password_here

# ── OPTIONAL: Chat Integrations ──
# Enable WhatsApp pairing flow
# Set to true only if you want WhatsApp enabled
WHATSAPP_ENABLED=false

# Get bot token from: https://t.me/BotFather
TELEGRAM_BOT_TOKEN=your_bot_token_here

# Single user ID (from https://t.me/userinfobot)
TELEGRAM_USER_ID=123456789

# Multiple user IDs (comma-separated for team access)
# TELEGRAM_USER_IDS=123456789,987654321,555555555

# ── OPTIONAL: Workspace Backup to HF Dataset ──
HF_USERNAME=your_hf_username
HF_TOKEN=hf_your_token_here

# Backup dataset name (auto-created if missing)
# Default: huggingclaw-backup
BACKUP_DATASET_NAME=huggingclaw-backup

# Git commit identity for workspace syncs
WORKSPACE_GIT_USER=openclaw@example.com
# Quoted because the value contains a space
WORKSPACE_GIT_NAME="OpenClaw Bot"

# ── OPTIONAL: Background Services ──
# Keep-alive ping interval (seconds). Default: 300. Set 0 to disable.
KEEP_ALIVE_INTERVAL=300

# Workspace auto-sync interval (seconds). Default: 180.
SYNC_INTERVAL=180

# Webhooks: Standard POST notifications for lifecycle events
# WEBHOOK_URL=https://your-webhook-endpoint.com/log

# Optional: external keep-alive via UptimeRobot
# Use the Main API key from UptimeRobot -> Integrations.
# Do not use the Read-only API key or a Monitor-specific API key.
# Run setup-uptimerobot.sh once from your own terminal to create the monitor.
# UPTIMEROBOT_API_KEY=ur_your_api_key_here

# Trusted proxies (comma-separated IPs)
# Fixes "Proxy headers detected from untrusted address" behind reverse proxies
# Only set if you see pairing/auth errors. Find IPs in Space logs (remote=x.x.x.x)
# TRUSTED_PROXIES=10.20.31.87,10.20.26.157

# Allowed origins for Control UI (comma-separated URLs)
# Locks down the web UI to only these origins
# NOTE: an origin is scheme://host[:port] with no trailing slash or path
ALLOWED_ORIGINS=https://your-username-huggingclaw.hf.space

# ────────────────────────────────────────────────────────────────
# QUICK START: Only 3 secrets required!
# 1. LLM_API_KEY — From your LLM provider
# 2. LLM_MODEL — Pick a model above
# 3. GATEWAY_TOKEN — Run: openssl rand -hex 32
# ────────────────────────────────────────────────────────────────
|
|