wd21 committed on
Commit
6b8e0fb
·
verified ·
1 Parent(s): b08f651

Upload Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +51 -67
Dockerfile CHANGED
@@ -1,7 +1,7 @@
1
  FROM node:22-slim
2
 
3
  RUN apt-get update && apt-get install -y --no-install-recommends \
4
- git ca-certificates build-essential python3 python3-pip curl jq \
5
  && rm -rf /var/lib/apt/lists/*
6
 
7
  RUN pip3 install --no-cache-dir huggingface_hub --break-system-packages
@@ -80,7 +80,7 @@ SYNC_EOF
80
 
81
  RUN chmod +x /usr/local/bin/sync.py
82
 
83
- # 启动脚本 start-openclaw(Hugging Face 使用新路由,contextWindow 提升到 32000)
84
  RUN cat > /usr/local/bin/start-openclaw << 'EOF'
85
  #!/bin/bash
86
  set -e
@@ -104,66 +104,60 @@ touch /root/.openclaw/workspace/memory/$DATE.md
104
 
105
  CLEAN_BASE=$(echo "$OPENAI_API_BASE" | sed "s|/chat/completions||g" | sed "s|/v1/|/v1|g" | sed "s|/v1$|/v1|g")
106
 
107
- # 构建 providers JSON 对象
108
- PROVIDERS_JSON="{}"
109
-
110
  if [ -n "$GEMINI_API_KEY" ]; then
111
- PROVIDERS_JSON=$(echo "$PROVIDERS_JSON" | jq \
112
- --arg baseUrl "https://generativelanguage.googleapis.com/v1beta" \
113
- --arg apiKey "$GEMINI_API_KEY" \
114
- --arg model "$MODEL" \
115
- '.google = {"baseUrl": $baseUrl, "apiKey": $apiKey, "api": "google-generative-ai", "models": [{"id": $model, "name": $model, "contextWindow": 128000}]}')
116
- elif [ -n "$OPENAI_API_KEY" ]; then
117
- PROVIDERS_JSON=$(echo "$PROVIDERS_JSON" | jq \
118
- --arg baseUrl "$CLEAN_BASE" \
119
- --arg apiKey "$OPENAI_API_KEY" \
120
- --arg model "$MODEL" \
121
- '.openai = {"baseUrl": $baseUrl, "apiKey": $apiKey, "api": "openai-completions", "models": [{"id": $model, "name": $model, "contextWindow": 128000}]}')
122
- fi
123
-
124
- if [ -n "$HF_API_TOKEN" ] && [ -n "$HF_MODEL" ]; then
125
- # 使用新的路由端点 https://router.huggingface.co,并将 contextWindow 提高到 32000 以容纳更长对话
126
- PROVIDERS_JSON=$(echo "$PROVIDERS_JSON" | jq \
127
- --arg baseUrl "https://router.huggingface.co" \
128
- --arg apiKey "$HF_API_TOKEN" \
129
- --arg model "$HF_MODEL" \
130
- '.huggingface = {"baseUrl": $baseUrl, "apiKey": $apiKey, "api": "openai-completions", "models": [{"id": $model, "name": $model, "contextWindow": 32000}]}')
131
  fi
132
 
133
- DEFAULT_PROVIDER=""
134
- DEFAULT_MODEL=""
135
- if [ -n "$HF_API_TOKEN" ] && [ -n "$HF_MODEL" ]; then
136
- DEFAULT_PROVIDER="huggingface"
137
- DEFAULT_MODEL="$HF_MODEL"
138
- elif [ -n "$GEMINI_API_KEY" ]; then
139
- DEFAULT_PROVIDER="google"
140
- DEFAULT_MODEL="$MODEL"
141
- elif [ -n "$OPENAI_API_KEY" ]; then
142
- DEFAULT_PROVIDER="openai"
143
- DEFAULT_MODEL="$MODEL"
144
- else
145
- echo "ERROR: No model provider configured."
146
- exit 1
147
  fi
148
 
149
- # 生成基础 openclaw.json
150
- jq -n \
151
- --argjson providers "$PROVIDERS_JSON" \
152
- --arg defaultProvider "$DEFAULT_PROVIDER" \
153
- --arg defaultModel "$DEFAULT_MODEL" \
154
- --arg port "$PORT" \
155
- --arg password "$OPENCLAW_GATEWAY_PASSWORD" \
156
- --arg telegramToken "$TELEGRAM_BOT_TOKEN" \
157
- '{
158
- "models": { "providers": $providers },
 
 
159
  "agents": {
160
  "defaults": {
161
- "model": { "primary": ($defaultProvider + "/" + $defaultModel) },
162
  "imageModel": { "primary": "openai/gpt-4o-mini" },
163
  "tools": {
164
  "elevated": {
165
  "enabled": true,
166
- "allowFrom": { "openclaw-weixin": true }
 
 
167
  }
168
  }
169
  }
@@ -171,9 +165,9 @@ jq -n \
171
  "gateway": {
172
  "mode": "local",
173
  "bind": "lan",
174
- "port": ($port | tonumber),
175
  "trustedProxies": ["0.0.0.0/0", "10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"],
176
- "auth": { "mode": "token", "token": $password },
177
  "controlUi": { "allowInsecureAuth": true, "allowedOrigins": ["*"] }
178
  },
179
  "plugins": { "allow": ["openclaw-weixin"] },
@@ -185,27 +179,17 @@ jq -n \
185
  },
186
  "telegram": {
187
  "enabled": true,
188
- "botToken": $telegramToken,
189
  "retry": {
190
  "attempts": 10,
191
  "minDelayMs": 500,
192
  "maxDelayMs": 30000,
193
  "jitter": 0.1
194
- }
195
  }
196
  }
197
- }' > /root/.openclaw/openclaw.json
198
-
199
- # 添加 Telegram 群组(如果设置了)
200
- if [ -n "$TELEGRAM_GROUP_IDS" ]; then
201
- GROUPS_JSON="{}"
202
- IFS=',' read -ra GROUP_ARRAY <<< "$TELEGRAM_GROUP_IDS"
203
- for GROUP_ID in "${GROUP_ARRAY[@]}"; do
204
- GROUP_ID=$(echo "$GROUP_ID" | xargs)
205
- GROUPS_JSON=$(echo "$GROUPS_JSON" | jq --arg gid "$GROUP_ID" '. + {($gid): {"requireMention": true}}')
206
- done
207
- jq --argjson groups "$GROUPS_JSON" '.channels.telegram.groups = $groups' /root/.openclaw/openclaw.json > /tmp/openclaw.json && mv /tmp/openclaw.json /root/.openclaw/openclaw.json
208
- fi
209
 
210
  echo "=== openclaw.json content ==="
211
  cat /root/.openclaw/openclaw.json
 
1
  FROM node:22-slim
2
 
3
  RUN apt-get update && apt-get install -y --no-install-recommends \
4
+ git ca-certificates build-essential python3 python3-pip curl \
5
  && rm -rf /var/lib/apt/lists/*
6
 
7
  RUN pip3 install --no-cache-dir huggingface_hub --break-system-packages
 
80
 
81
  RUN chmod +x /usr/local/bin/sync.py
82
 
83
+ # 启动脚本 start-openclaw
84
  RUN cat > /usr/local/bin/start-openclaw << 'EOF'
85
  #!/bin/bash
86
  set -e
 
104
 
105
  CLEAN_BASE=$(echo "$OPENAI_API_BASE" | sed "s|/chat/completions||g" | sed "s|/v1/|/v1|g" | sed "s|/v1$|/v1|g")
106
 
 
 
 
107
  if [ -n "$GEMINI_API_KEY" ]; then
108
+ PROVIDER="google"
109
+ API_KEY_VAR="$GEMINI_API_KEY"
110
+ BASE_URL_VAR="https://generativelanguage.googleapis.com/v1beta"
111
+ MODEL_VAR="$MODEL"
112
+ PRIMARY_MODEL="google/$MODEL_VAR"
113
+ API_TYPE="google-generative-ai"
114
+ else
115
+ PROVIDER="openai"
116
+ API_KEY_VAR="$OPENAI_API_KEY"
117
+ BASE_URL_VAR="$CLEAN_BASE"
118
+ MODEL_VAR="$MODEL"
119
+ PRIMARY_MODEL="openai/$MODEL_VAR"
120
+ API_TYPE="openai-completions"
 
 
 
 
 
 
 
121
  fi
122
 
123
+ # 构建 groups 配置字符串
124
+ GROUPS_CONFIG=""
125
+ if [ -n "$TELEGRAM_GROUP_IDS" ]; then
126
+ GROUPS_CONFIG=', "groups": {'
127
+ IFS=',' read -ra GROUP_ARRAY <<< "$TELEGRAM_GROUP_IDS"
128
+ for i in "${!GROUP_ARRAY[@]}"; do
129
+ GROUP_ID="${GROUP_ARRAY[$i]}"
130
+ GROUP_ID=$(echo "$GROUP_ID" | xargs)
131
+ GROUPS_CONFIG="$GROUPS_CONFIG\"$GROUP_ID\": {\"requireMention\": true}"
132
+ if [ $i -lt $((${#GROUP_ARRAY[@]}-1)) ]; then
133
+ GROUPS_CONFIG="$GROUPS_CONFIG, "
134
+ fi
135
+ done
136
+ GROUPS_CONFIG="$GROUPS_CONFIG }"
137
  fi
138
 
139
+ cat > /root/.openclaw/openclaw.json <<EOF2
140
+ {
141
+ "models": {
142
+ "providers": {
143
+ "$PROVIDER": {
144
+ "baseUrl": "$BASE_URL_VAR",
145
+ "apiKey": "$API_KEY_VAR",
146
+ "api": "$API_TYPE",
147
+ "models": [{ "id": "$MODEL_VAR", "name": "$MODEL_VAR", "contextWindow": 128000 }]
148
+ }
149
+ }
150
+ },
151
  "agents": {
152
  "defaults": {
153
+ "model": { "primary": "$PRIMARY_MODEL" },
154
  "imageModel": { "primary": "openai/gpt-4o-mini" },
155
  "tools": {
156
  "elevated": {
157
  "enabled": true,
158
+ "allowFrom": {
159
+ "openclaw-weixin": true
160
+ }
161
  }
162
  }
163
  }
 
165
  "gateway": {
166
  "mode": "local",
167
  "bind": "lan",
168
+ "port": $PORT,
169
  "trustedProxies": ["0.0.0.0/0", "10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"],
170
+ "auth": { "mode": "token", "token": "$OPENCLAW_GATEWAY_PASSWORD" },
171
  "controlUi": { "allowInsecureAuth": true, "allowedOrigins": ["*"] }
172
  },
173
  "plugins": { "allow": ["openclaw-weixin"] },
 
179
  },
180
  "telegram": {
181
  "enabled": true,
182
+ "botToken": "$TELEGRAM_BOT_TOKEN",
183
  "retry": {
184
  "attempts": 10,
185
  "minDelayMs": 500,
186
  "maxDelayMs": 30000,
187
  "jitter": 0.1
188
+ }$GROUPS_CONFIG
189
  }
190
  }
191
+ }
192
+ EOF2
 
 
 
 
 
 
 
 
 
 
193
 
194
  echo "=== openclaw.json content ==="
195
  cat /root/.openclaw/openclaw.json